* [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib
@ 2019-11-13  4:31 Vanshidhar Konda
  2019-11-13  4:35 ` Vanshidhar Konda
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Vanshidhar Konda @ 2019-11-13  4:31 UTC (permalink / raw)
  To: igt-dev

Switch the test from using libdrm methods to using methods provided by
the igt library. Like some of the other gem_tiled* tests, this test also
creates the batch buffer used to do the blitter copies. Also, the test
avoids calling GET/SET_TILING IOCTLs - something that will not be
supported on Gen12+.

Signed-off-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
---
 tests/i915/gem_tiled_blits.c | 176 +++++++++++++++++++++++++----------
 1 file changed, 126 insertions(+), 50 deletions(-)

diff --git a/tests/i915/gem_tiled_blits.c b/tests/i915/gem_tiled_blits.c
index df0699f3..b72ec600 100644
--- a/tests/i915/gem_tiled_blits.c
+++ b/tests/i915/gem_tiled_blits.c
@@ -57,52 +57,135 @@
 IGT_TEST_DESCRIPTION("Test doing many tiled blits, with a working set larger"
 		     " than the aperture size.");
 
-static drm_intel_bufmgr *bufmgr;
-struct intel_batchbuffer *batch;
 static int width = 512, height = 512;
+int device_gen;
 
-static drm_intel_bo *
-create_bo(uint32_t start_val)
+static void
+copy(int fd, uint32_t dst, uint32_t dst_tiling,
+     uint32_t src, uint32_t src_tiling)
 {
-	drm_intel_bo *bo, *linear_bo;
+	uint32_t batch[12];
+	struct drm_i915_gem_relocation_entry reloc[2];
+	struct drm_i915_gem_exec_object2 obj[3];
+	struct drm_i915_gem_execbuffer2 exec;
+	int src_pitch, dst_pitch;
+	int tile_height, tile_width;
+	int i = 0;
+
+	src_pitch = 4096;
+	dst_pitch = 4096;
+	tile_width = 1024;
+	tile_height = width*height*4/4096;
+
+	batch[i++] = XY_SRC_COPY_BLT_CMD |
+		  XY_SRC_COPY_BLT_WRITE_ALPHA |
+		  XY_SRC_COPY_BLT_WRITE_RGB;
+	if (device_gen >= 8)
+		batch[i - 1] |= 8;
+	else
+		batch[i - 1] |= 6;
+
+	if (device_gen >= 4 && src_tiling != I915_TILING_NONE) {
+		src_pitch /= 4;
+		batch[i - 1] |= XY_SRC_COPY_BLT_SRC_TILED;
+	}
+	if (device_gen >= 4 && dst_tiling != I915_TILING_NONE) {
+		dst_pitch /= 4;
+		batch[i - 1] |= XY_SRC_COPY_BLT_DST_TILED;
+	}
+
+	batch[i++] = (3 << 24) | /* 32 bits */
+		  (0xcc << 16) | /* copy ROP */
+		  dst_pitch;
+	batch[i++] = 0; /* dst x1,y1 */
+	batch[i++] = (tile_height << 16) | tile_width; /* dst x2,y2 */
+	batch[i++] = 0; /* dst reloc */
+	if (device_gen >= 8)
+		batch[i++] = 0;
+	batch[i++] = 0; /* src x1,y1 */
+	batch[i++] = src_pitch;
+	batch[i++] = 0; /* src reloc */
+	if (device_gen >= 8)
+		batch[i++] = 0;
+	batch[i++] = MI_BATCH_BUFFER_END;
+	batch[i++] = MI_NOOP;
+
+	memset(reloc, 0, sizeof(reloc));
+	reloc[0].target_handle = dst;
+	reloc[0].delta = 0;
+	reloc[0].offset = 4 * sizeof(batch[0]);
+	reloc[0].presumed_offset = 0;
+	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
+	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
+
+	reloc[1].target_handle = src;
+	reloc[1].delta = 0;
+	reloc[1].offset = 7 * sizeof(batch[0]);
+	if (device_gen >= 8)
+		reloc[1].offset += sizeof(batch[0]);
+	reloc[1].presumed_offset = 0;
+	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
+	reloc[1].write_domain = 0;
+
+	memset(obj, 0, sizeof(obj));
+	obj[0].handle = dst;
+	obj[1].handle = src;
+	obj[2].handle = gem_create(fd, 4096);
+	gem_write(fd, obj[2].handle, 0, batch, i * sizeof(batch[0]));
+	obj[2].relocation_count = 2;
+	obj[2].relocs_ptr = to_user_pointer(reloc);
+
+	memset(&exec, 0, sizeof(exec));
+	exec.buffers_ptr = to_user_pointer(obj);
+	exec.buffer_count = 3;
+	exec.batch_len = i * sizeof(batch[0]);
+	exec.flags = gem_has_blt(fd) ? I915_EXEC_BLT : 0;
+
+	gem_execbuf(fd, &exec);
+	if (dst_tiling == I915_TILING_NONE)
+		gem_sync(fd, obj[2].handle);
+	gem_close(fd, obj[2].handle);
+}
+
+static uint32_t
+create_bo(int fd, uint32_t start_val)
+{
+	uint32_t bo, linear_bo;
 	uint32_t *linear;
-	uint32_t tiling = I915_TILING_X;
 	int i;
+	const uint32_t buf_size = 1024 * 1024;
 
-	bo = drm_intel_bo_alloc(bufmgr, "tiled bo", 1024 * 1024, 4096);
-	do_or_die(drm_intel_bo_set_tiling(bo, &tiling, width * 4));
-	igt_assert(tiling == I915_TILING_X);
-
-	linear_bo = drm_intel_bo_alloc(bufmgr, "linear src", 1024 * 1024, 4096);
+	bo = gem_create(fd, buf_size);
+	linear_bo = gem_create(fd, buf_size);
 
+	linear = (uint32_t *)gem_mmap__wc(fd, linear_bo, 0, buf_size,
+					  PROT_WRITE);
 	/* Fill the BO with dwords starting at start_val */
-	do_or_die(drm_intel_bo_map(linear_bo, 1));
-	linear = linear_bo->virtual;
-	for (i = 0; i < 1024 * 1024 / 4; i++)
+	for (i = 0; i < buf_size / 4; i++)
 		linear[i] = start_val++;
-	drm_intel_bo_unmap(linear_bo);
-
-	intel_copy_bo (batch, bo, linear_bo, width*height*4);
 
-	drm_intel_bo_unreference(linear_bo);
+	munmap(linear, buf_size);
+	copy(fd, bo, I915_TILING_X, linear_bo, I915_TILING_NONE);
 
+	gem_close(fd, linear_bo);
 	return bo;
 }
 
 static void
-check_bo(drm_intel_bo *bo, uint32_t val)
+check_bo(int fd, uint32_t bo, uint32_t val)
 {
-	drm_intel_bo *linear_bo;
+	uint32_t linear_bo;
 	uint32_t *linear;
 	int num_errors;
 	int i;
+	const uint32_t buf_size = 1024 * 1024;
 
-	linear_bo = drm_intel_bo_alloc(bufmgr, "linear dst", 1024 * 1024, 4096);
+	linear_bo = gem_create(fd, buf_size);
 
-	intel_copy_bo(batch, linear_bo, bo, width*height*4);
+	copy(fd, linear_bo, I915_TILING_NONE, bo, I915_TILING_X);
 
-	do_or_die(drm_intel_bo_map(linear_bo, 0));
-	linear = linear_bo->virtual;
+	linear = (uint32_t *)gem_mmap__wc(fd, linear_bo, 0, buf_size,
+					  PROT_READ);
 
 	num_errors = 0;
 	for (i = 0; i < 1024 * 1024 / 4; i++) {
@@ -112,31 +195,31 @@ check_bo(drm_intel_bo *bo, uint32_t val)
 		val++;
 	}
 	igt_assert_eq(num_errors, 0);
-	drm_intel_bo_unmap(linear_bo);
 
-	drm_intel_bo_unreference(linear_bo);
+	munmap(linear, buf_size);
+	gem_close(fd, linear_bo);
 }
 
-static void run_test(int count)
+static void run_test(int fd, int count)
 {
-	drm_intel_bo **bo;
+	uint32_t *bo;
 	uint32_t *bo_start_val;
 	uint32_t start = 0;
 	int i;
 
 	igt_debug("Using %d 1MiB buffers\n", count);
 
-	bo = malloc(sizeof(drm_intel_bo *)*count);
+	bo = malloc(sizeof(uint32_t)*count);
 	bo_start_val = malloc(sizeof(uint32_t)*count);
 
 	for (i = 0; i < count; i++) {
-		bo[i] = create_bo(start);
+		bo[i] = create_bo(fd, start);
 		bo_start_val[i] = start;
 		start += 1024 * 1024 / 4;
 	}
 	igt_info("Verifying initialisation...\n");
 	for (i = 0; i < count; i++)
-		check_bo(bo[i], bo_start_val[i]);
+		check_bo(fd, bo[i], bo_start_val[i]);
 
 	igt_info("Cyclic blits, forward...\n");
 	for (i = 0; i < count * 4; i++) {
@@ -146,15 +229,15 @@ static void run_test(int count)
 		if (src == dst)
 			continue;
 
-		intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
+		copy(fd, bo[dst], I915_TILING_X, bo[src], I915_TILING_X);
 		bo_start_val[dst] = bo_start_val[src];
 	}
 	for (i = 0; i < count; i++)
-		check_bo(bo[i], bo_start_val[i]);
+		check_bo(fd, bo[i], bo_start_val[i]);
 
 	if (igt_run_in_simulation()) {
 		for (i = 0; i < count; i++)
-			drm_intel_bo_unreference(bo[i]);
+			gem_close(fd, bo[i]);
 		free(bo_start_val);
 		free(bo);
 		return;
@@ -168,11 +251,11 @@ static void run_test(int count)
 		if (src == dst)
 			continue;
 
-		intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
+		copy(fd, bo[dst], I915_TILING_X, bo[src], I915_TILING_X);
 		bo_start_val[dst] = bo_start_val[src];
 	}
 	for (i = 0; i < count; i++)
-		check_bo(bo[i], bo_start_val[i]);
+		check_bo(fd, bo[i], bo_start_val[i]);
 
 	igt_info("Random blits...\n");
 	for (i = 0; i < count * 4; i++) {
@@ -182,12 +265,12 @@ static void run_test(int count)
 		if (src == dst)
 			continue;
 
-		intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
+		copy(fd, bo[dst], I915_TILING_X, bo[src], I915_TILING_X);
 		bo_start_val[dst] = bo_start_val[src];
 	}
 	for (i = 0; i < count; i++) {
-		check_bo(bo[i], bo_start_val[i]);
-		drm_intel_bo_unreference(bo[i]);
+		check_bo(fd, bo[i], bo_start_val[i]);
+		gem_close(fd, bo[i]);
 	}
 
 	free(bo_start_val);
@@ -204,15 +287,11 @@ igt_main
 		fd = drm_open_driver(DRIVER_INTEL);
 		igt_require_gem(fd);
 		gem_require_blitter(fd);
-
-		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
-		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
-		drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 32);
-		batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
+		device_gen = intel_gen(intel_get_drm_devid(fd));
 	}
 
 	igt_subtest("basic")
-		run_test(2);
+		run_test(fd, 2);
 
 	igt_subtest("normal") {
 		uint64_t count;
@@ -224,7 +303,7 @@ igt_main
 		count += (count & 1) == 0;
 		intel_require_memory(count, 1024*1024, CHECK_RAM);
 
-		run_test(count);
+		run_test(fd, count);
 	}
 
 	igt_subtest("interruptible") {
@@ -238,14 +317,11 @@ igt_main
 		intel_require_memory(count, 1024*1024, CHECK_RAM);
 
 		igt_fork_signal_helper();
-		run_test(count);
+		run_test(fd, count);
 		igt_stop_signal_helper();
 	}
 
 	igt_fixture {
-		intel_batchbuffer_free(batch);
-		drm_intel_bufmgr_destroy(bufmgr);
-
 		close(fd);
 	}
 }
-- 
2.24.0



* Re: [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib
  2019-11-13  4:31 [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib Vanshidhar Konda
@ 2019-11-13  4:35 ` Vanshidhar Konda
  2019-11-13  5:12 ` [igt-dev] ✓ Fi.CI.BAT: success for " Patchwork
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 7+ messages in thread
From: Vanshidhar Konda @ 2019-11-13  4:35 UTC (permalink / raw)
  To: igt-dev

On Tue, Nov 12, 2019 at 08:31:50PM -0800, Vanshidhar Konda wrote:
>Switch the test from using libdrm methods to using methods provided by
>the igt library. Like some of the other gem_tiled* tests, this test also
>creates the batch buffer used to do the blitter copies. Also, the test
>avoids calling GET/SET_TILING IOCTLs - something that will not be
>supported on Gen12+.
>
>Signed-off-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
>---
> tests/i915/gem_tiled_blits.c | 176 +++++++++++++++++++++++++----------
> 1 file changed, 126 insertions(+), 50 deletions(-)
>
>diff --git a/tests/i915/gem_tiled_blits.c b/tests/i915/gem_tiled_blits.c
>index df0699f3..b72ec600 100644
>--- a/tests/i915/gem_tiled_blits.c
>+++ b/tests/i915/gem_tiled_blits.c
>@@ -57,52 +57,135 @@
> IGT_TEST_DESCRIPTION("Test doing many tiled blits, with a working set larger"
> 		     " than the aperture size.");
>
>-static drm_intel_bufmgr *bufmgr;
>-struct intel_batchbuffer *batch;
> static int width = 512, height = 512;
>+int device_gen;
>
>-static drm_intel_bo *
>-create_bo(uint32_t start_val)
>+static void
>+copy(int fd, uint32_t dst, uint32_t dst_tiling,
>+     uint32_t src, uint32_t src_tiling)
> {
>-	drm_intel_bo *bo, *linear_bo;
>+	uint32_t batch[12];
>+	struct drm_i915_gem_relocation_entry reloc[2];
>+	struct drm_i915_gem_exec_object2 obj[3];
>+	struct drm_i915_gem_execbuffer2 exec;
>+	int src_pitch, dst_pitch;
>+	int tile_height, tile_width;
>+	int i = 0;
>+
>+	src_pitch = 4096;
>+	dst_pitch = 4096;
>+	tile_width = 1024;
>+	tile_height = width*height*4/4096;
>+
>+	batch[i++] = XY_SRC_COPY_BLT_CMD |
>+		  XY_SRC_COPY_BLT_WRITE_ALPHA |
>+		  XY_SRC_COPY_BLT_WRITE_RGB;
>+	if (device_gen >= 8)
>+		batch[i - 1] |= 8;
>+	else
>+		batch[i - 1] |= 6;
>+
>+	if (device_gen >= 4 && src_tiling != I915_TILING_NONE) {
>+		src_pitch /= 4;
>+		batch[i - 1] |= XY_SRC_COPY_BLT_SRC_TILED;
>+	}
>+	if (device_gen >= 4 && dst_tiling != I915_TILING_NONE) {
>+		dst_pitch /= 4;
>+		batch[i - 1] |= XY_SRC_COPY_BLT_DST_TILED;
>+	}
>+
>+	batch[i++] = (3 << 24) | /* 32 bits */
>+		  (0xcc << 16) | /* copy ROP */
>+		  dst_pitch;
>+	batch[i++] = 0; /* dst x1,y1 */
>+	batch[i++] = (tile_height << 16) | tile_width; /* dst x2,y2 */
>+	batch[i++] = 0; /* dst reloc */
>+	if (device_gen >= 8)
>+		batch[i++] = 0;
>+	batch[i++] = 0; /* src x1,y1 */
>+	batch[i++] = src_pitch;
>+	batch[i++] = 0; /* src reloc */
>+	if (device_gen >= 8)
>+		batch[i++] = 0;
>+	batch[i++] = MI_BATCH_BUFFER_END;
>+	batch[i++] = MI_NOOP;
>+
>+	memset(reloc, 0, sizeof(reloc));
>+	reloc[0].target_handle = dst;
>+	reloc[0].delta = 0;
>+	reloc[0].offset = 4 * sizeof(batch[0]);
>+	reloc[0].presumed_offset = 0;
>+	reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
>+	reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
>+
>+	reloc[1].target_handle = src;
>+	reloc[1].delta = 0;
>+	reloc[1].offset = 7 * sizeof(batch[0]);
>+	if (device_gen >= 8)
>+		reloc[1].offset += sizeof(batch[0]);
>+	reloc[1].presumed_offset = 0;
>+	reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
>+	reloc[1].write_domain = 0;
>+
>+	memset(obj, 0, sizeof(obj));
>+	obj[0].handle = dst;
>+	obj[1].handle = src;
>+	obj[2].handle = gem_create(fd, 4096);
>+	gem_write(fd, obj[2].handle, 0, batch, i * sizeof(batch[0]));
>+	obj[2].relocation_count = 2;
>+	obj[2].relocs_ptr = to_user_pointer(reloc);
>+
>+	memset(&exec, 0, sizeof(exec));
>+	exec.buffers_ptr = to_user_pointer(obj);
>+	exec.buffer_count = 3;
>+	exec.batch_len = i * sizeof(batch[0]);
>+	exec.flags = gem_has_blt(fd) ? I915_EXEC_BLT : 0;
>+
>+	gem_execbuf(fd, &exec);
>+	if (dst_tiling == I915_TILING_NONE)
>+		gem_sync(fd, obj[2].handle);

I noticed that the test doesn't work without adding this gem_sync. Also,
the test's execution time has increased roughly 20x with these changes. Any
suggestions on how to improve this are greatly appreciated.

Vanshi

>+	gem_close(fd, obj[2].handle);
>+}
>+
>+static uint32_t
>+create_bo(int fd, uint32_t start_val)
>+{
>+	uint32_t bo, linear_bo;
> 	uint32_t *linear;
>-	uint32_t tiling = I915_TILING_X;
> 	int i;
>+	const uint32_t buf_size = 1024 * 1024;
>
>-	bo = drm_intel_bo_alloc(bufmgr, "tiled bo", 1024 * 1024, 4096);
>-	do_or_die(drm_intel_bo_set_tiling(bo, &tiling, width * 4));
>-	igt_assert(tiling == I915_TILING_X);
>-
>-	linear_bo = drm_intel_bo_alloc(bufmgr, "linear src", 1024 * 1024, 4096);
>+	bo = gem_create(fd, buf_size);
>+	linear_bo = gem_create(fd, buf_size);
>
>+	linear = (uint32_t *)gem_mmap__wc(fd, linear_bo, 0, buf_size,
>+					  PROT_WRITE);
> 	/* Fill the BO with dwords starting at start_val */
>-	do_or_die(drm_intel_bo_map(linear_bo, 1));
>-	linear = linear_bo->virtual;
>-	for (i = 0; i < 1024 * 1024 / 4; i++)
>+	for (i = 0; i < buf_size / 4; i++)
> 		linear[i] = start_val++;
>-	drm_intel_bo_unmap(linear_bo);
>-
>-	intel_copy_bo (batch, bo, linear_bo, width*height*4);
>
>-	drm_intel_bo_unreference(linear_bo);
>+	munmap(linear, buf_size);
>+	copy(fd, bo, I915_TILING_X, linear_bo, I915_TILING_NONE);
>
>+	gem_close(fd, linear_bo);
> 	return bo;
> }
>
> static void
>-check_bo(drm_intel_bo *bo, uint32_t val)
>+check_bo(int fd, uint32_t bo, uint32_t val)
> {
>-	drm_intel_bo *linear_bo;
>+	uint32_t linear_bo;
> 	uint32_t *linear;
> 	int num_errors;
> 	int i;
>+	const uint32_t buf_size = 1024 * 1024;
>
>-	linear_bo = drm_intel_bo_alloc(bufmgr, "linear dst", 1024 * 1024, 4096);
>+	linear_bo = gem_create(fd, buf_size);
>
>-	intel_copy_bo(batch, linear_bo, bo, width*height*4);
>+	copy(fd, linear_bo, I915_TILING_NONE, bo, I915_TILING_X);
>
>-	do_or_die(drm_intel_bo_map(linear_bo, 0));
>-	linear = linear_bo->virtual;
>+	linear = (uint32_t *)gem_mmap__wc(fd, linear_bo, 0, buf_size,
>+					  PROT_READ);
>
> 	num_errors = 0;
> 	for (i = 0; i < 1024 * 1024 / 4; i++) {
>@@ -112,31 +195,31 @@ check_bo(drm_intel_bo *bo, uint32_t val)
> 		val++;
> 	}
> 	igt_assert_eq(num_errors, 0);
>-	drm_intel_bo_unmap(linear_bo);
>
>-	drm_intel_bo_unreference(linear_bo);
>+	munmap(linear, buf_size);
>+	gem_close(fd, linear_bo);
> }
>
>-static void run_test(int count)
>+static void run_test(int fd, int count)
> {
>-	drm_intel_bo **bo;
>+	uint32_t *bo;
> 	uint32_t *bo_start_val;
> 	uint32_t start = 0;
> 	int i;
>
> 	igt_debug("Using %d 1MiB buffers\n", count);
>
>-	bo = malloc(sizeof(drm_intel_bo *)*count);
>+	bo = malloc(sizeof(uint32_t)*count);
> 	bo_start_val = malloc(sizeof(uint32_t)*count);
>
> 	for (i = 0; i < count; i++) {
>-		bo[i] = create_bo(start);
>+		bo[i] = create_bo(fd, start);
> 		bo_start_val[i] = start;
> 		start += 1024 * 1024 / 4;
> 	}
> 	igt_info("Verifying initialisation...\n");
> 	for (i = 0; i < count; i++)
>-		check_bo(bo[i], bo_start_val[i]);
>+		check_bo(fd, bo[i], bo_start_val[i]);
>
> 	igt_info("Cyclic blits, forward...\n");
> 	for (i = 0; i < count * 4; i++) {
>@@ -146,15 +229,15 @@ static void run_test(int count)
> 		if (src == dst)
> 			continue;
>
>-		intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
>+		copy(fd, bo[dst], I915_TILING_X, bo[src], I915_TILING_X);
> 		bo_start_val[dst] = bo_start_val[src];
> 	}
> 	for (i = 0; i < count; i++)
>-		check_bo(bo[i], bo_start_val[i]);
>+		check_bo(fd, bo[i], bo_start_val[i]);
>
> 	if (igt_run_in_simulation()) {
> 		for (i = 0; i < count; i++)
>-			drm_intel_bo_unreference(bo[i]);
>+			gem_close(fd, bo[i]);
> 		free(bo_start_val);
> 		free(bo);
> 		return;
>@@ -168,11 +251,11 @@ static void run_test(int count)
> 		if (src == dst)
> 			continue;
>
>-		intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
>+		copy(fd, bo[dst], I915_TILING_X, bo[src], I915_TILING_X);
> 		bo_start_val[dst] = bo_start_val[src];
> 	}
> 	for (i = 0; i < count; i++)
>-		check_bo(bo[i], bo_start_val[i]);
>+		check_bo(fd, bo[i], bo_start_val[i]);
>
> 	igt_info("Random blits...\n");
> 	for (i = 0; i < count * 4; i++) {
>@@ -182,12 +265,12 @@ static void run_test(int count)
> 		if (src == dst)
> 			continue;
>
>-		intel_copy_bo(batch, bo[dst], bo[src], width*height*4);
>+		copy(fd, bo[dst], I915_TILING_X, bo[src], I915_TILING_X);
> 		bo_start_val[dst] = bo_start_val[src];
> 	}
> 	for (i = 0; i < count; i++) {
>-		check_bo(bo[i], bo_start_val[i]);
>-		drm_intel_bo_unreference(bo[i]);
>+		check_bo(fd, bo[i], bo_start_val[i]);
>+		gem_close(fd, bo[i]);
> 	}
>
> 	free(bo_start_val);
>@@ -204,15 +287,11 @@ igt_main
> 		fd = drm_open_driver(DRIVER_INTEL);
> 		igt_require_gem(fd);
> 		gem_require_blitter(fd);
>-
>-		bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
>-		drm_intel_bufmgr_gem_enable_reuse(bufmgr);
>-		drm_intel_bufmgr_gem_set_vma_cache_size(bufmgr, 32);
>-		batch = intel_batchbuffer_alloc(bufmgr, intel_get_drm_devid(fd));
>+		device_gen = intel_gen(intel_get_drm_devid(fd));
> 	}
>
> 	igt_subtest("basic")
>-		run_test(2);
>+		run_test(fd, 2);
>
> 	igt_subtest("normal") {
> 		uint64_t count;
>@@ -224,7 +303,7 @@ igt_main
> 		count += (count & 1) == 0;
> 		intel_require_memory(count, 1024*1024, CHECK_RAM);
>
>-		run_test(count);
>+		run_test(fd, count);
> 	}
>
> 	igt_subtest("interruptible") {
>@@ -238,14 +317,11 @@ igt_main
> 		intel_require_memory(count, 1024*1024, CHECK_RAM);
>
> 		igt_fork_signal_helper();
>-		run_test(count);
>+		run_test(fd, count);
> 		igt_stop_signal_helper();
> 	}
>
> 	igt_fixture {
>-		intel_batchbuffer_free(batch);
>-		drm_intel_bufmgr_destroy(bufmgr);
>-
> 		close(fd);
> 	}
> }
>-- 
>2.24.0
>


* [igt-dev] ✓ Fi.CI.BAT: success for i915/gem_tiled_blits: Switch from libdrm methods to igt lib
  2019-11-13  4:31 [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib Vanshidhar Konda
  2019-11-13  4:35 ` Vanshidhar Konda
@ 2019-11-13  5:12 ` Patchwork
  2019-11-13  9:51 ` [igt-dev] [RFC PATCH] " Chris Wilson
  2019-11-13 16:13 ` [igt-dev] ✓ Fi.CI.IGT: success for " Patchwork
  3 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2019-11-13  5:12 UTC (permalink / raw)
  To: Vanshidhar Konda; +Cc: igt-dev

== Series Details ==

Series: i915/gem_tiled_blits: Switch from libdrm methods to igt lib
URL   : https://patchwork.freedesktop.org/series/69389/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7324 -> IGTPW_3689
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/index.html

Known issues
------------

  Here are the changes found in IGTPW_3689 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@live_gem_contexts:
    - fi-cfl-8700k:       [PASS][1] -> [INCOMPLETE][2] ([fdo#111700])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-hsw-peppy:       [PASS][3] -> [DMESG-WARN][4] ([fdo#102614])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/fi-hsw-peppy/igt@kms_frontbuffer_tracking@basic.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/fi-hsw-peppy/igt@kms_frontbuffer_tracking@basic.html

  
#### Possible fixes ####

  * igt@gem_exec_suspend@basic-s4-devices:
    - fi-kbl-7500u:       [DMESG-WARN][5] ([fdo#107139]) -> [PASS][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/fi-kbl-7500u/igt@gem_exec_suspend@basic-s4-devices.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/fi-kbl-7500u/igt@gem_exec_suspend@basic-s4-devices.html

  * igt@kms_busy@basic-flip-pipe-a:
    - fi-icl-u2:          [INCOMPLETE][7] ([fdo#107713]) -> [PASS][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/fi-icl-u2/igt@kms_busy@basic-flip-pipe-a.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/fi-icl-u2/igt@kms_busy@basic-flip-pipe-a.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-icl-guc:         [FAIL][9] ([fdo#103167]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/fi-icl-guc/igt@kms_frontbuffer_tracking@basic.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/fi-icl-guc/igt@kms_frontbuffer_tracking@basic.html

  
  [fdo#102614]: https://bugs.freedesktop.org/show_bug.cgi?id=102614
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#107139]: https://bugs.freedesktop.org/show_bug.cgi?id=107139
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#111700]: https://bugs.freedesktop.org/show_bug.cgi?id=111700


Participating hosts (51 -> 46)
------------------------------

  Additional (1): fi-skl-6770hq 
  Missing    (6): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * IGT: IGT_5274 -> IGTPW_3689

  CI-20190529: 20190529
  CI_DRM_7324: dd07789205270dd69eca30ef7d123b5d2322d7a8 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_3689: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/index.html
  IGT_5274: 1345346c97c630563aae08cc2f1276c70b90378d @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/index.html


* Re: [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib
  2019-11-13  4:31 [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib Vanshidhar Konda
  2019-11-13  4:35 ` Vanshidhar Konda
  2019-11-13  5:12 ` [igt-dev] ✓ Fi.CI.BAT: success for " Patchwork
@ 2019-11-13  9:51 ` Chris Wilson
  2019-11-13 18:15   ` Vanshidhar Konda
  2019-11-13 16:13 ` [igt-dev] ✓ Fi.CI.IGT: success for " Patchwork
  3 siblings, 1 reply; 7+ messages in thread
From: Chris Wilson @ 2019-11-13  9:51 UTC (permalink / raw)
  To: Vanshidhar Konda, igt-dev

Quoting Vanshidhar Konda (2019-11-13 04:31:50)
> Switch the test from using libdrm methods to using methods provided by
> the igt library. Like some of the other gem_tiled* tests, this test also
> creates the batch buffer used to do the blitter copies. Also, the test
> avoids calling GET/SET_TILING IOCTLs - something that will not be
> supported on Gen12+.
> 
> Signed-off-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
> ---
>  tests/i915/gem_tiled_blits.c | 176 +++++++++++++++++++++++++----------
>  1 file changed, 126 insertions(+), 50 deletions(-)
> 
> diff --git a/tests/i915/gem_tiled_blits.c b/tests/i915/gem_tiled_blits.c
> index df0699f3..b72ec600 100644
> --- a/tests/i915/gem_tiled_blits.c
> +++ b/tests/i915/gem_tiled_blits.c
> @@ -57,52 +57,135 @@
>  IGT_TEST_DESCRIPTION("Test doing many tiled blits, with a working set larger"
>                      " than the aperture size.");
>  
> -static drm_intel_bufmgr *bufmgr;
> -struct intel_batchbuffer *batch;
>  static int width = 512, height = 512;
> +int device_gen;
>  
> -static drm_intel_bo *
> -create_bo(uint32_t start_val)
> +static void
> +copy(int fd, uint32_t dst, uint32_t dst_tiling,
> +     uint32_t src, uint32_t src_tiling)
>  {
> -       drm_intel_bo *bo, *linear_bo;
> +       uint32_t batch[12];

Be considerate and think in cachelines.

> +       struct drm_i915_gem_relocation_entry reloc[2];
> +       struct drm_i915_gem_exec_object2 obj[3];
> +       struct drm_i915_gem_execbuffer2 exec;
> +       int src_pitch, dst_pitch;
> +       int tile_height, tile_width;
> +       int i = 0;
> +
> +       src_pitch = 4096;
> +       dst_pitch = 4096;
> +       tile_width = 1024;
> +       tile_height = width*height*4/4096;
> +
> +       batch[i++] = XY_SRC_COPY_BLT_CMD |
> +                 XY_SRC_COPY_BLT_WRITE_ALPHA |
> +                 XY_SRC_COPY_BLT_WRITE_RGB;
> +       if (device_gen >= 8)
> +               batch[i - 1] |= 8;
> +       else
> +               batch[i - 1] |= 6;
> +
> +       if (device_gen >= 4 && src_tiling != I915_TILING_NONE) {
> +               src_pitch /= 4;
> +               batch[i - 1] |= XY_SRC_COPY_BLT_SRC_TILED;
> +       }
> +       if (device_gen >= 4 && dst_tiling != I915_TILING_NONE) {
> +               dst_pitch /= 4;
> +               batch[i - 1] |= XY_SRC_COPY_BLT_DST_TILED;
> +       }
> +
> +       batch[i++] = (3 << 24) | /* 32 bits */
> +                 (0xcc << 16) | /* copy ROP */
> +                 dst_pitch;
> +       batch[i++] = 0; /* dst x1,y1 */
> +       batch[i++] = (tile_height << 16) | tile_width; /* dst x2,y2 */
> +       batch[i++] = 0; /* dst reloc */
> +       if (device_gen >= 8)
> +               batch[i++] = 0;
> +       batch[i++] = 0; /* src x1,y1 */
> +       batch[i++] = src_pitch;
> +       batch[i++] = 0; /* src reloc */
> +       if (device_gen >= 8)
> +               batch[i++] = 0;
> +       batch[i++] = MI_BATCH_BUFFER_END;
> +       batch[i++] = MI_NOOP;
> +
> +       memset(reloc, 0, sizeof(reloc));
> +       reloc[0].target_handle = dst;
> +       reloc[0].delta = 0;
> +       reloc[0].offset = 4 * sizeof(batch[0]);
> +       reloc[0].presumed_offset = 0;
> +       reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
> +       reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
> +
> +       reloc[1].target_handle = src;
> +       reloc[1].delta = 0;
> +       reloc[1].offset = 7 * sizeof(batch[0]);
> +       if (device_gen >= 8)
> +               reloc[1].offset += sizeof(batch[0]);
> +       reloc[1].presumed_offset = 0;
> +       reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
> +       reloc[1].write_domain = 0;

Hmm. Since the batch is fresh every time, relocations are not a gpu
stall. So horrible but not a test breaker. (Stalling for relocations
would very effectively nerf the test.)

> +       memset(obj, 0, sizeof(obj));
> +       obj[0].handle = dst;
> +       obj[1].handle = src;
> +       obj[2].handle = gem_create(fd, 4096);
> +       gem_write(fd, obj[2].handle, 0, batch, i * sizeof(batch[0]));
> +       obj[2].relocation_count = 2;
> +       obj[2].relocs_ptr = to_user_pointer(reloc);
> +
> +       memset(&exec, 0, sizeof(exec));
> +       exec.buffers_ptr = to_user_pointer(obj);
> +       exec.buffer_count = 3;
> +       exec.batch_len = i * sizeof(batch[0]);
> +       exec.flags = gem_has_blt(fd) ? I915_EXEC_BLT : 0;
> +
> +       gem_execbuf(fd, &exec);
> +       if (dst_tiling == I915_TILING_NONE)
> +               gem_sync(fd, obj[2].handle);

Why is there a sync here? What bug are you trying to hide?

> +       gem_close(fd, obj[2].handle);
> +}
> +
> +static uint32_t
> +create_bo(int fd, uint32_t start_val)
> +{
> +       uint32_t bo, linear_bo;
>         uint32_t *linear;
> -       uint32_t tiling = I915_TILING_X;
>         int i;
> +       const uint32_t buf_size = 1024 * 1024;
>  
> -       bo = drm_intel_bo_alloc(bufmgr, "tiled bo", 1024 * 1024, 4096);
> -       do_or_die(drm_intel_bo_set_tiling(bo, &tiling, width * 4));
> -       igt_assert(tiling == I915_TILING_X);
> -
> -       linear_bo = drm_intel_bo_alloc(bufmgr, "linear src", 1024 * 1024, 4096);
> +       bo = gem_create(fd, buf_size);
> +       linear_bo = gem_create(fd, buf_size);
>  
> +       linear = (uint32_t *)gem_mmap__wc(fd, linear_bo, 0, buf_size,
> +                                         PROT_WRITE);

gem_mmap__wc() is not universal. So you need a fallback to __gtt. Or a
fallback from __gtt.
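
A rough sketch of the first option, using the non-asserting __gem_mmap__wc()
so the failure can be detected (assuming the usual IGT mmap helpers):

        linear = __gem_mmap__wc(fd, linear_bo, 0, buf_size, PROT_WRITE);
        if (!linear) /* no WC mmap here, fall back to a GTT mapping */
                linear = gem_mmap__gtt(fd, linear_bo, buf_size, PROT_WRITE);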

>         /* Fill the BO with dwords starting at start_val */
> -       do_or_die(drm_intel_bo_map(linear_bo, 1));
> -       linear = linear_bo->virtual;
> -       for (i = 0; i < 1024 * 1024 / 4; i++)
> +       for (i = 0; i < buf_size / 4; i++)
>                 linear[i] = start_val++;
> -       drm_intel_bo_unmap(linear_bo);
> -
> -       intel_copy_bo (batch, bo, linear_bo, width*height*4);
>  
> -       drm_intel_bo_unreference(linear_bo);
> +       munmap(linear, buf_size);
> +       copy(fd, bo, I915_TILING_X, linear_bo, I915_TILING_NONE);
>  
> +       gem_close(fd, linear_bo);
>         return bo;
>  }
>  
>  static void
> -check_bo(drm_intel_bo *bo, uint32_t val)
> +check_bo(int fd, uint32_t bo, uint32_t val)
>  {
> -       drm_intel_bo *linear_bo;
> +       uint32_t linear_bo;
>         uint32_t *linear;
>         int num_errors;
>         int i;
> +       const uint32_t buf_size = 1024 * 1024;
>  
> -       linear_bo = drm_intel_bo_alloc(bufmgr, "linear dst", 1024 * 1024, 4096);
> +       linear_bo = gem_create(fd, buf_size);
>  
> -       intel_copy_bo(batch, linear_bo, bo, width*height*4);
> +       copy(fd, linear_bo, I915_TILING_NONE, bo, I915_TILING_X);
>  
> -       do_or_die(drm_intel_bo_map(linear_bo, 0));
> -       linear = linear_bo->virtual;
> +       linear = (uint32_t *)gem_mmap__wc(fd, linear_bo, 0, buf_size,
> +                                         PROT_READ);

And this is the error above you were trying to hide.

Please do not even think about reading a few megabytes from WC.
linear = mmap__wb(); with a fallback to copy to wb.
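
Untested sketch of that: read through a WB cpu mmap when available, and fall
back to copying into malloc'ed (WB) memory otherwise:

        linear = __gem_mmap__cpu(fd, linear_bo, 0, buf_size, PROT_READ);
        if (linear) {
                gem_set_domain(fd, linear_bo, I915_GEM_DOMAIN_CPU, 0);
        } else {
                linear = malloc(buf_size);
                gem_read(fd, linear_bo, 0, linear, buf_size);
        }
        /* ... compare ..., then munmap() or free() accordingly */
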
-Chris


* [igt-dev] ✓ Fi.CI.IGT: success for i915/gem_tiled_blits: Switch from libdrm methods to igt lib
  2019-11-13  4:31 [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib Vanshidhar Konda
                   ` (2 preceding siblings ...)
  2019-11-13  9:51 ` [igt-dev] [RFC PATCH] " Chris Wilson
@ 2019-11-13 16:13 ` Patchwork
  3 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2019-11-13 16:13 UTC (permalink / raw)
  To: Vanshidhar Konda; +Cc: igt-dev

== Series Details ==

Series: i915/gem_tiled_blits: Switch from libdrm methods to igt lib
URL   : https://patchwork.freedesktop.org/series/69389/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7324_full -> IGTPW_3689_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/index.html

Known issues
------------

  Here are the changes found in IGTPW_3689_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_isolation@bcs0-s3:
    - shard-apl:          [PASS][1] -> [DMESG-WARN][2] ([fdo#108566]) +3 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-apl6/igt@gem_ctx_isolation@bcs0-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-apl4/igt@gem_ctx_isolation@bcs0-s3.html

  * igt@gem_ctx_isolation@vcs0-s3:
    - shard-tglb:         [PASS][3] -> [INCOMPLETE][4] ([fdo#111832])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb9/igt@gem_ctx_isolation@vcs0-s3.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb1/igt@gem_ctx_isolation@vcs0-s3.html

  * igt@gem_ctx_isolation@vcs1-reset:
    - shard-iclb:         [PASS][5] -> [SKIP][6] ([fdo#109276] / [fdo#112080]) +1 similar issue
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb1/igt@gem_ctx_isolation@vcs1-reset.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb3/igt@gem_ctx_isolation@vcs1-reset.html

  * igt@gem_ctx_shared@q-smoketest-blt:
    - shard-tglb:         [PASS][7] -> [INCOMPLETE][8] ([fdo#111735])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb9/igt@gem_ctx_shared@q-smoketest-blt.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb6/igt@gem_ctx_shared@q-smoketest-blt.html

  * igt@gem_eio@reset-stress:
    - shard-snb:          [PASS][9] -> [FAIL][10] ([fdo#109661])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-snb4/igt@gem_eio@reset-stress.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-snb1/igt@gem_eio@reset-stress.html

  * igt@gem_eio@suspend:
    - shard-tglb:         [PASS][11] -> [INCOMPLETE][12] ([fdo#111850]) +1 similar issue
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb9/igt@gem_eio@suspend.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb1/igt@gem_eio@suspend.html

  * igt@gem_exec_nop@basic-parallel:
    - shard-tglb:         [PASS][13] -> [INCOMPLETE][14] ([fdo#111747])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb2/igt@gem_exec_nop@basic-parallel.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb3/igt@gem_exec_nop@basic-parallel.html

  * igt@gem_exec_parallel@vcs1-fds:
    - shard-iclb:         [PASS][15] -> [SKIP][16] ([fdo#112080]) +14 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb4/igt@gem_exec_parallel@vcs1-fds.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb8/igt@gem_exec_parallel@vcs1-fds.html

  * igt@gem_exec_schedule@independent-bsd2:
    - shard-iclb:         [PASS][17] -> [SKIP][18] ([fdo#109276]) +12 similar issues
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb4/igt@gem_exec_schedule@independent-bsd2.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb5/igt@gem_exec_schedule@independent-bsd2.html

  * igt@gem_exec_schedule@preempt-bsd:
    - shard-iclb:         [PASS][19] -> [SKIP][20] ([fdo#112146]) +4 similar issues
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb7/igt@gem_exec_schedule@preempt-bsd.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb2/igt@gem_exec_schedule@preempt-bsd.html

  * igt@gem_persistent_relocs@forked-thrashing:
    - shard-tglb:         [PASS][21] -> [TIMEOUT][22] ([fdo#112068 ] / [fdo#112126])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb2/igt@gem_persistent_relocs@forked-thrashing.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb2/igt@gem_persistent_relocs@forked-thrashing.html

  * igt@gem_userptr_blits@map-fixed-invalidate-busy:
    - shard-snb:          [PASS][23] -> [DMESG-WARN][24] ([fdo#111870]) +3 similar issues
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-snb6/igt@gem_userptr_blits@map-fixed-invalidate-busy.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-snb1/igt@gem_userptr_blits@map-fixed-invalidate-busy.html

  * igt@gem_userptr_blits@map-fixed-invalidate-overlap-busy:
    - shard-hsw:          [PASS][25] -> [DMESG-WARN][26] ([fdo#111870]) +2 similar issues
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-hsw4/igt@gem_userptr_blits@map-fixed-invalidate-overlap-busy.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-hsw5/igt@gem_userptr_blits@map-fixed-invalidate-overlap-busy.html

  * igt@i915_selftest@live_execlists:
    - shard-iclb:         [PASS][27] -> [INCOMPLETE][28] ([fdo#107713])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb8/igt@i915_selftest@live_execlists.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb8/igt@i915_selftest@live_execlists.html

  * igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack:
    - shard-tglb:         [PASS][29] -> [FAIL][30] ([fdo#103167]) +2 similar issues
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb4/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb9/igt@kms_frontbuffer_tracking@fbc-1p-indfb-fliptrack.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render:
    - shard-iclb:         [PASS][31] -> [FAIL][32] ([fdo#103167]) +4 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
    - shard-kbl:          [PASS][33] -> [DMESG-WARN][34] ([fdo#108566]) +2 similar issues
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-kbl3/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-kbl7/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html

  * igt@kms_plane_lowres@pipe-a-tiling-y:
    - shard-iclb:         [PASS][35] -> [FAIL][36] ([fdo#103166])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb3/igt@kms_plane_lowres@pipe-a-tiling-y.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb6/igt@kms_plane_lowres@pipe-a-tiling-y.html

  * igt@kms_psr@no_drrs:
    - shard-iclb:         [PASS][37] -> [FAIL][38] ([fdo#108341])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb2/igt@kms_psr@no_drrs.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb1/igt@kms_psr@no_drrs.html

  * igt@kms_psr@psr2_primary_mmap_cpu:
    - shard-iclb:         [PASS][39] -> [SKIP][40] ([fdo#109441]) +1 similar issue
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb2/igt@kms_psr@psr2_primary_mmap_cpu.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb5/igt@kms_psr@psr2_primary_mmap_cpu.html

  * igt@kms_psr@suspend:
    - shard-tglb:         [PASS][41] -> [INCOMPLETE][42] ([fdo#111832] / [fdo#111850]) +2 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb9/igt@kms_psr@suspend.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb7/igt@kms_psr@suspend.html

  
#### Possible fixes ####

  * igt@gem_ctx_persistence@vcs1-mixed-process:
    - shard-iclb:         [SKIP][43] ([fdo#109276] / [fdo#112080]) -> [PASS][44] +2 similar issues
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb5/igt@gem_ctx_persistence@vcs1-mixed-process.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb1/igt@gem_ctx_persistence@vcs1-mixed-process.html

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [SKIP][45] ([fdo#110841]) -> [PASS][46]
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb1/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb3/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_eio@reset-stress:
    - shard-tglb:         [INCOMPLETE][47] ([fdo#111867]) -> [PASS][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb9/igt@gem_eio@reset-stress.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb9/igt@gem_eio@reset-stress.html

  * igt@gem_exec_schedule@preempt-other-chain-bsd:
    - shard-iclb:         [SKIP][49] ([fdo#112146]) -> [PASS][50] +8 similar issues
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb4/igt@gem_exec_schedule@preempt-other-chain-bsd.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb7/igt@gem_exec_schedule@preempt-other-chain-bsd.html

  * igt@gem_exec_schedule@preempt-queue-contexts-bsd1:
    - shard-tglb:         [INCOMPLETE][51] ([fdo#111606] / [fdo#111677]) -> [PASS][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb6/igt@gem_exec_schedule@preempt-queue-contexts-bsd1.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb3/igt@gem_exec_schedule@preempt-queue-contexts-bsd1.html

  * igt@gem_exec_suspend@basic-s3:
    - shard-kbl:          [DMESG-WARN][53] ([fdo#108566]) -> [PASS][54] +3 similar issues
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-kbl7/igt@gem_exec_suspend@basic-s3.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-kbl6/igt@gem_exec_suspend@basic-s3.html
    - shard-tglb:         [INCOMPLETE][55] ([fdo#111736] / [fdo#111850]) -> [PASS][56]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb8/igt@gem_exec_suspend@basic-s3.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb7/igt@gem_exec_suspend@basic-s3.html

  * igt@gem_persistent_relocs@forked-interruptible-thrashing:
    - shard-iclb:         [FAIL][57] ([fdo#112037]) -> [PASS][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb3/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb4/igt@gem_persistent_relocs@forked-interruptible-thrashing.html

  * igt@gem_userptr_blits@sync-unmap-after-close:
    - shard-hsw:          [DMESG-WARN][59] ([fdo#111870]) -> [PASS][60] +2 similar issues
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-hsw5/igt@gem_userptr_blits@sync-unmap-after-close.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-hsw6/igt@gem_userptr_blits@sync-unmap-after-close.html

  * igt@gem_userptr_blits@sync-unmap-cycles:
    - shard-snb:          [DMESG-WARN][61] ([fdo#111870]) -> [PASS][62] +2 similar issues
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-snb4/igt@gem_userptr_blits@sync-unmap-cycles.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-snb7/igt@gem_userptr_blits@sync-unmap-cycles.html

  * igt@gem_workarounds@suspend-resume:
    - shard-tglb:         [INCOMPLETE][63] ([fdo#111832] / [fdo#111850]) -> [PASS][64] +4 similar issues
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb4/igt@gem_workarounds@suspend-resume.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb2/igt@gem_workarounds@suspend-resume.html

  * igt@i915_pm_dc@dc6-psr:
    - shard-iclb:         [FAIL][65] ([fdo#111830 ]) -> [PASS][66]
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb7/igt@i915_pm_dc@dc6-psr.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb5/igt@i915_pm_dc@dc6-psr.html

  * igt@kms_cursor_crc@pipe-a-cursor-128x128-sliding:
    - shard-apl:          [FAIL][67] ([fdo#103232]) -> [PASS][68]
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-apl3/igt@kms_cursor_crc@pipe-a-cursor-128x128-sliding.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-apl3/igt@kms_cursor_crc@pipe-a-cursor-128x128-sliding.html
    - shard-kbl:          [FAIL][69] ([fdo#103232]) -> [PASS][70]
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-kbl2/igt@kms_cursor_crc@pipe-a-cursor-128x128-sliding.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-kbl4/igt@kms_cursor_crc@pipe-a-cursor-128x128-sliding.html

  * igt@kms_fbcon_fbt@fbc-suspend:
    - shard-tglb:         [INCOMPLETE][71] ([fdo#111747] / [fdo#111832] / [fdo#111850]) -> [PASS][72]
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb3/igt@kms_fbcon_fbt@fbc-suspend.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb9/igt@kms_fbcon_fbt@fbc-suspend.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-pwrite:
    - shard-iclb:         [FAIL][73] ([fdo#103167]) -> [PASS][74] +6 similar issues
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb6/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-pwrite.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-pwrite.html

  * igt@kms_frontbuffer_tracking@fbc-suspend:
    - shard-tglb:         [INCOMPLETE][75] ([fdo#111832] / [fdo#111850] / [fdo#111884]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb8/igt@kms_frontbuffer_tracking@fbc-suspend.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb9/igt@kms_frontbuffer_tracking@fbc-suspend.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-indfb-plflip-blt:
    - shard-tglb:         [FAIL][77] ([fdo#103167]) -> [PASS][78] +3 similar issues
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb3/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-indfb-plflip-blt.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb2/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-indfb-plflip-blt.html

  * igt@kms_psr@psr2_primary_page_flip:
    - shard-iclb:         [SKIP][79] ([fdo#109441]) -> [PASS][80] +1 similar issue
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb4/igt@kms_psr@psr2_primary_page_flip.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb2/igt@kms_psr@psr2_primary_page_flip.html

  * igt@perf_pmu@busy-vcs1:
    - shard-iclb:         [SKIP][81] ([fdo#112080]) -> [PASS][82] +5 similar issues
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb7/igt@perf_pmu@busy-vcs1.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb4/igt@perf_pmu@busy-vcs1.html

  * igt@prime_vgem@fence-wait-bsd2:
    - shard-iclb:         [SKIP][83] ([fdo#109276]) -> [PASS][84] +21 similar issues
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb7/igt@prime_vgem@fence-wait-bsd2.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb2/igt@prime_vgem@fence-wait-bsd2.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv-switch:
    - shard-iclb:         [FAIL][85] ([fdo#111329]) -> [SKIP][86] ([fdo#109276] / [fdo#112080])
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb2/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb5/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html

  * igt@gem_ctx_isolation@vcs2-reset:
    - shard-tglb:         [SKIP][87] ([fdo#112080]) -> [SKIP][88] ([fdo#111912] / [fdo#112080])
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb9/igt@gem_ctx_isolation@vcs2-reset.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb3/igt@gem_ctx_isolation@vcs2-reset.html

  * igt@gem_exec_schedule@deep-bsd1:
    - shard-tglb:         [FAIL][89] ([fdo#111646]) -> [INCOMPLETE][90] ([fdo#111671])
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb3/igt@gem_exec_schedule@deep-bsd1.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb9/igt@gem_exec_schedule@deep-bsd1.html

  * igt@gem_mocs_settings@mocs-rc6-bsd2:
    - shard-iclb:         [SKIP][91] ([fdo#109276]) -> [FAIL][92] ([fdo#111330])
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb6/igt@gem_mocs_settings@mocs-rc6-bsd2.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb4/igt@gem_mocs_settings@mocs-rc6-bsd2.html

  * igt@kms_cursor_crc@pipe-b-cursor-suspend:
    - shard-tglb:         [FAIL][93] ([fdo#111703]) -> [INCOMPLETE][94] ([fdo#111832] / [fdo#111850])
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-tglb5/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-tglb2/igt@kms_cursor_crc@pipe-b-cursor-suspend.html

  * igt@kms_dp_dsc@basic-dsc-enable-edp:
    - shard-iclb:         [SKIP][95] ([fdo#109349]) -> [DMESG-WARN][96] ([fdo#107724])
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7324/shard-iclb4/igt@kms_dp_dsc@basic-dsc-enable-edp.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html

  
  [fdo#103166]: https://bugs.freedesktop.org/show_bug.cgi?id=103166
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108341]: https://bugs.freedesktop.org/show_bug.cgi?id=108341
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109661]: https://bugs.freedesktop.org/show_bug.cgi?id=109661
  [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
  [fdo#111329]: https://bugs.freedesktop.org/show_bug.cgi?id=111329
  [fdo#111330]: https://bugs.freedesktop.org/show_bug.cgi?id=111330
  [fdo#111606]: https://bugs.freedesktop.org/show_bug.cgi?id=111606
  [fdo#111646]: https://bugs.freedesktop.org/show_bug.cgi?id=111646
  [fdo#111671]: https://bugs.freedesktop.org/show_bug.cgi?id=111671
  [fdo#111677]: https://bugs.freedesktop.org/show_bug.cgi?id=111677
  [fdo#111703]: https://bugs.freedesktop.org/show_bug.cgi?id=111703
  [fdo#111735]: https://bugs.freedesktop.org/show_bug.cgi?id=111735
  [fdo#111736]: https://bugs.freedesktop.org/show_bug.cgi?id=111736
  [fdo#111747]: https://bugs.freedesktop.org/show_bug.cgi?id=111747
  [fdo#111830 ]: https://bugs.freedesktop.org/show_bug.cgi?id=111830 
  [fdo#111832]: https://bugs.freedesktop.org/show_bug.cgi?id=111832
  [fdo#111850]: https://bugs.freedesktop.org/show_bug.cgi?id=111850
  [fdo#111867]: https://bugs.freedesktop.org/show_bug.cgi?id=111867
  [fdo#111870]: https://bugs.freedesktop.org/show_bug.cgi?id=111870
  [fdo#111884]: https://bugs.freedesktop.org/show_bug.cgi?id=111884
  [fdo#111912]: https://bugs.freedesktop.org/show_bug.cgi?id=111912
  [fdo#112037]: https://bugs.freedesktop.org/show_bug.cgi?id=112037
  [fdo#112068 ]: https://bugs.freedesktop.org/show_bug.cgi?id=112068 
  [fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
  [fdo#112126]: https://bugs.freedesktop.org/show_bug.cgi?id=112126

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3689/index.html


* Re: [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib
  2019-11-13  9:51 ` [igt-dev] [RFC PATCH] " Chris Wilson
@ 2019-11-13 18:15   ` Vanshidhar Konda
  2019-11-13 19:58     ` Chris Wilson
  0 siblings, 1 reply; 7+ messages in thread
From: Vanshidhar Konda @ 2019-11-13 18:15 UTC (permalink / raw)
  To: Chris Wilson; +Cc: igt-dev

On Wed, Nov 13, 2019 at 09:51:01AM +0000, Chris Wilson wrote:
>Quoting Vanshidhar Konda (2019-11-13 04:31:50)
>> Switch the test from using libdrm methods to using methods provided by
>> the igt library. Like some of the other gem_tiled* tests, this test also
>> creates the batch buffer used to do the blitter copies. Also, the test
>> avoids calling GET/SET_TILING IOCTLs - something that will not be
>> supported on Gen12+.
>>
>> Signed-off-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
>> ---
>>  tests/i915/gem_tiled_blits.c | 176 +++++++++++++++++++++++++----------
>>  1 file changed, 126 insertions(+), 50 deletions(-)
>>
>> diff --git a/tests/i915/gem_tiled_blits.c b/tests/i915/gem_tiled_blits.c
>> index df0699f3..b72ec600 100644
>> --- a/tests/i915/gem_tiled_blits.c
>> +++ b/tests/i915/gem_tiled_blits.c
>> @@ -57,52 +57,135 @@
>>  IGT_TEST_DESCRIPTION("Test doing many tiled blits, with a working set larger"
>>                      " than the aperture size.");
>>
>> -static drm_intel_bufmgr *bufmgr;
>> -struct intel_batchbuffer *batch;
>>  static int width = 512, height = 512;
>> +int device_gen;
>>
>> -static drm_intel_bo *
>> -create_bo(uint32_t start_val)
>> +static void
>> +copy(int fd, uint32_t dst, uint32_t dst_tiling,
>> +     uint32_t src, uint32_t src_tiling)
>>  {
>> -       drm_intel_bo *bo, *linear_bo;
>> +       uint32_t batch[12];
>
>Be considerate and think in cachelines.

Thanks for pointing that out. I'll keep it in mind.
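For instance, something like

        uint32_t batch[16]; /* 16 dwords == one 64-byte cacheline */

if filling out a whole cacheline is the idea here.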

>
>> +       struct drm_i915_gem_relocation_entry reloc[2];
>> +       struct drm_i915_gem_exec_object2 obj[3];
>> +       struct drm_i915_gem_execbuffer2 exec;
>> +       int src_pitch, dst_pitch;
>> +       int tile_height, tile_width;
>> +       int i = 0;
>> +
>> +       src_pitch = 4096;
>> +       dst_pitch = 4096;
>> +       tile_width = 1024;
>> +       tile_height = width*height*4/4096;
>> +
>> +       batch[i++] = XY_SRC_COPY_BLT_CMD |
>> +                 XY_SRC_COPY_BLT_WRITE_ALPHA |
>> +                 XY_SRC_COPY_BLT_WRITE_RGB;
>> +       if (device_gen >= 8)
>> +               batch[i - 1] |= 8;
>> +       else
>> +               batch[i - 1] |= 6;
>> +
>> +       if (device_gen >= 4 && src_tiling != I915_TILING_NONE) {
>> +               src_pitch /= 4;
>> +               batch[i - 1] |= XY_SRC_COPY_BLT_SRC_TILED;
>> +       }
>> +       if (device_gen >= 4 && dst_tiling != I915_TILING_NONE) {
>> +               dst_pitch /= 4;
>> +               batch[i - 1] |= XY_SRC_COPY_BLT_DST_TILED;
>> +       }
>> +
>> +       batch[i++] = (3 << 24) | /* 32 bits */
>> +                 (0xcc << 16) | /* copy ROP */
>> +                 dst_pitch;
>> +       batch[i++] = 0; /* dst x1,y1 */
>> +       batch[i++] = (tile_height << 16) | tile_width; /* dst x2,y2 */
>> +       batch[i++] = 0; /* dst reloc */
>> +       if (device_gen >= 8)
>> +               batch[i++] = 0;
>> +       batch[i++] = 0; /* src x1,y1 */
>> +       batch[i++] = src_pitch;
>> +       batch[i++] = 0; /* src reloc */
>> +       if (device_gen >= 8)
>> +               batch[i++] = 0;
>> +       batch[i++] = MI_BATCH_BUFFER_END;
>> +       batch[i++] = MI_NOOP;
>> +
>> +       memset(reloc, 0, sizeof(reloc));
>> +       reloc[0].target_handle = dst;
>> +       reloc[0].delta = 0;
>> +       reloc[0].offset = 4 * sizeof(batch[0]);
>> +       reloc[0].presumed_offset = 0;
>> +       reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
>> +       reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
>> +
>> +       reloc[1].target_handle = src;
>> +       reloc[1].delta = 0;
>> +       reloc[1].offset = 7 * sizeof(batch[0]);
>> +       if (device_gen >= 8)
>> +               reloc[1].offset += sizeof(batch[0]);
>> +       reloc[1].presumed_offset = 0;
>> +       reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
>> +       reloc[1].write_domain = 0;
>
>Hmm. Since the batch is fresh every time, relocations are not a gpu
>stall. So horrible but not a test breaker. (Stalling for relocations
>would very effectively nerf the test.)

I'm not sure I follow the comment here. Are you saying it would be
better to keep track of the buffer offset after it has been "relocated"
and then reuse that? Also, would it be better to reuse the batch buffer?
I saw the same approach being taken in gem_linear_blits and thought it
would suffice for testing.
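
If caching the offsets is what you're suggesting, I'm picturing roughly
(sketch only):

        gem_execbuf(fd, &exec);
        /* remember where the kernel placed dst and src ... */
        reloc[0].presumed_offset = obj[0].offset;
        reloc[1].presumed_offset = obj[1].offset;
        /* ... and pre-write those addresses into the reloc dwords of the
         * next batch, so the kernel can skip patching when nothing moved */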

>
>> +       memset(obj, 0, sizeof(obj));
>> +       obj[0].handle = dst;
>> +       obj[1].handle = src;
>> +       obj[2].handle = gem_create(fd, 4096);
>> +       gem_write(fd, obj[2].handle, 0, batch, i * sizeof(batch[0]));
>> +       obj[2].relocation_count = 2;
>> +       obj[2].relocs_ptr = to_user_pointer(reloc);
>> +
>> +       memset(&exec, 0, sizeof(exec));
>> +       exec.buffers_ptr = to_user_pointer(obj);
>> +       exec.buffer_count = 3;
>> +       exec.batch_len = i * sizeof(batch[0]);
>> +       exec.flags = gem_has_blt(fd) ? I915_EXEC_BLT : 0;
>> +
>> +       gem_execbuf(fd, &exec);
>> +       if (dst_tiling == I915_TILING_NONE)
>> +               gem_sync(fd, obj[2].handle);
>
>Why is there a sync here? What bug are you trying to hide?

This is what I've been trying to figure out as well. I tried using
gem_mmap__cpu() in check_bo(), as you mentioned. Without this
gem_sync() the test fails regardless of whether check_bo() uses
gem_mmap__cpu() or gem_mmap__wc(): the data doesn't match the
expected values and the assert in check_bo() is triggered.

>
>> +       gem_close(fd, obj[2].handle);
>> +}
>> +
>> +static uint32_t
>> +create_bo(int fd, uint32_t start_val)
>> +{
>> +       uint32_t bo, linear_bo;
>>         uint32_t *linear;
>> -       uint32_t tiling = I915_TILING_X;
>>         int i;
>> +       const uint32_t buf_size = 1024 * 1024;
>>
>> -       bo = drm_intel_bo_alloc(bufmgr, "tiled bo", 1024 * 1024, 4096);
>> -       do_or_die(drm_intel_bo_set_tiling(bo, &tiling, width * 4));
>> -       igt_assert(tiling == I915_TILING_X);
>> -
>> -       linear_bo = drm_intel_bo_alloc(bufmgr, "linear src", 1024 * 1024, 4096);
>> +       bo = gem_create(fd, buf_size);
>> +       linear_bo = gem_create(fd, buf_size);
>>
>> +       linear = (uint32_t *)gem_mmap__wc(fd, linear_bo, 0, buf_size,
>> +                                         PROT_WRITE);
>
>gem_mmap__wc() is not universal. So you need a fallback to __gtt. Or a
>fallback from __gtt.
>
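
A minimal sketch of the kind of fallback being suggested here (not part
of the patch; it assumes IGT's gem_mmap__has_wc() and gem_mmap__gtt()
helpers, so the exact names are worth double-checking against the igt
lib):

static void *map_bo(int fd, uint32_t handle, uint64_t size, unsigned prot)
{
        /* Hypothetical helper, not in the patch: prefer a WC mapping,
         * but not every kernel/platform exposes one. */
        if (gem_mmap__has_wc(fd))
                return gem_mmap__wc(fd, handle, 0, size, prot);

        /* Fall back to a GTT mapping where WC mmaps are unsupported. */
        return gem_mmap__gtt(fd, handle, size, prot);
}

create_bo() and check_bo() could then call map_bo() instead of calling
gem_mmap__wc() directly.
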
>>         /* Fill the BO with dwords starting at start_val */
>> -       do_or_die(drm_intel_bo_map(linear_bo, 1));
>> -       linear = linear_bo->virtual;
>> -       for (i = 0; i < 1024 * 1024 / 4; i++)
>> +       for (i = 0; i < buf_size / 4; i++)
>>                 linear[i] = start_val++;
>> -       drm_intel_bo_unmap(linear_bo);
>> -
>> -       intel_copy_bo (batch, bo, linear_bo, width*height*4);
>>
>> -       drm_intel_bo_unreference(linear_bo);
>> +       munmap(linear, buf_size);
>> +       copy(fd, bo, I915_TILING_X, linear_bo, I915_TILING_NONE);
>>
>> +       gem_close(fd, linear_bo);
>>         return bo;
>>  }
>>
>>  static void
>> -check_bo(drm_intel_bo *bo, uint32_t val)
>> +check_bo(int fd, uint32_t bo, uint32_t val)
>>  {
>> -       drm_intel_bo *linear_bo;
>> +       uint32_t linear_bo;
>>         uint32_t *linear;
>>         int num_errors;
>>         int i;
>> +       const uint32_t buf_size = 1024 * 1024;
>>
>> -       linear_bo = drm_intel_bo_alloc(bufmgr, "linear dst", 1024 * 1024, 4096);
>> +       linear_bo = gem_create(fd, buf_size);
>>
>> -       intel_copy_bo(batch, linear_bo, bo, width*height*4);
>> +       copy(fd, linear_bo, I915_TILING_NONE, bo, I915_TILING_X);
>>
>> -       do_or_die(drm_intel_bo_map(linear_bo, 0));
>> -       linear = linear_bo->virtual;
>> +       linear = (uint32_t *)gem_mmap__wc(fd, linear_bo, 0, buf_size,
>> +                                         PROT_READ);
>
>And this is the error above you were trying to hide.
>
>Please do not even think about reading a few megabytes from WC.
>linear = mmap__wb(); with a fallback to copy to wb.

Thank you for pointing this out. I'll keep this in mind.

Thanks,
Vanshi
>-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

* Re: [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib
  2019-11-13 18:15   ` Vanshidhar Konda
@ 2019-11-13 19:58     ` Chris Wilson
  0 siblings, 0 replies; 7+ messages in thread
From: Chris Wilson @ 2019-11-13 19:58 UTC (permalink / raw)
  To: Vanshidhar Konda; +Cc: igt-dev

Quoting Vanshidhar Konda (2019-11-13 18:15:55)
> On Wed, Nov 13, 2019 at 09:51:01AM +0000, Chris Wilson wrote:
> >Quoting Vanshidhar Konda (2019-11-13 04:31:50)
> >> Switch the test from using libdrm methods to using methods provided by
> >> the igt library. Like some of the other gem_tiled* tests, this test also
> >> creates the batch buffer used to do the blitter copies. Also, the test
> >> avoids calling GET/SET_TILING IOCTLs - something that will not be
> >> supported on Gen12+.
> >>
> >> Signed-off-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
> >> ---
> >>  tests/i915/gem_tiled_blits.c | 176 +++++++++++++++++++++++++----------
> >>  1 file changed, 126 insertions(+), 50 deletions(-)
> >>
> >> diff --git a/tests/i915/gem_tiled_blits.c b/tests/i915/gem_tiled_blits.c
> >> index df0699f3..b72ec600 100644
> >> --- a/tests/i915/gem_tiled_blits.c
> >> +++ b/tests/i915/gem_tiled_blits.c
> >> @@ -57,52 +57,135 @@
> >>  IGT_TEST_DESCRIPTION("Test doing many tiled blits, with a working set larger"
> >>                      " than the aperture size.");
> >>
> >> -static drm_intel_bufmgr *bufmgr;
> >> -struct intel_batchbuffer *batch;
> >>  static int width = 512, height = 512;
> >> +int device_gen;
> >>
> >> -static drm_intel_bo *
> >> -create_bo(uint32_t start_val)
> >> +static void
> >> +copy(int fd, uint32_t dst, uint32_t dst_tiling,
> >> +     uint32_t src, uint32_t src_tiling)
> >>  {
> >> -       drm_intel_bo *bo, *linear_bo;
> >> +       uint32_t batch[12];
> >
> >Be considerate and think in cachelines.
> 
> Thanks for pointing that out. I'll keep it in mind.
> 
> >
> >> +       struct drm_i915_gem_relocation_entry reloc[2];
> >> +       struct drm_i915_gem_exec_object2 obj[3];
> >> +       struct drm_i915_gem_execbuffer2 exec;
> >> +       int src_pitch, dst_pitch;
> >> +       int tile_height, tile_width;
> >> +       int i = 0;
> >> +
> >> +       src_pitch = 4096;
> >> +       dst_pitch = 4096;
> >> +       tile_width = 1024;
> >> +       tile_height = width*height*4/4096;
> >> +
> >> +       batch[i++] = XY_SRC_COPY_BLT_CMD |
> >> +                 XY_SRC_COPY_BLT_WRITE_ALPHA |
> >> +                 XY_SRC_COPY_BLT_WRITE_RGB;
> >> +       if (device_gen >= 8)
> >> +               batch[i - 1] |= 8;
> >> +       else
> >> +               batch[i - 1] |= 6;
> >> +
> >> +       if (device_gen >= 4 && src_tiling != I915_TILING_NONE) {
> >> +               src_pitch /= 4;
> >> +               batch[i - 1] |= XY_SRC_COPY_BLT_SRC_TILED;
> >> +       }
> >> +       if (device_gen >= 4 && dst_tiling != I915_TILING_NONE) {
> >> +               dst_pitch /= 4;
> >> +               batch[i - 1] |= XY_SRC_COPY_BLT_DST_TILED;
> >> +       }
> >> +
> >> +       batch[i++] = (3 << 24) | /* 32 bits */
> >> +                 (0xcc << 16) | /* copy ROP */
> >> +                 dst_pitch;
> >> +       batch[i++] = 0; /* dst x1,y1 */
> >> +       batch[i++] = (tile_height << 16) | tile_width; /* dst x2,y2 */
> >> +       batch[i++] = 0; /* dst reloc */
> >> +       if (device_gen >= 8)
> >> +               batch[i++] = 0;
> >> +       batch[i++] = 0; /* src x1,y1 */
> >> +       batch[i++] = src_pitch;
> >> +       batch[i++] = 0; /* src reloc */
> >> +       if (device_gen >= 8)
> >> +               batch[i++] = 0;
> >> +       batch[i++] = MI_BATCH_BUFFER_END;
> >> +       batch[i++] = MI_NOOP;
> >> +
> >> +       memset(reloc, 0, sizeof(reloc));
> >> +       reloc[0].target_handle = dst;
> >> +       reloc[0].delta = 0;
> >> +       reloc[0].offset = 4 * sizeof(batch[0]);
> >> +       reloc[0].presumed_offset = 0;
> >> +       reloc[0].read_domains = I915_GEM_DOMAIN_RENDER;
> >> +       reloc[0].write_domain = I915_GEM_DOMAIN_RENDER;
> >> +
> >> +       reloc[1].target_handle = src;
> >> +       reloc[1].delta = 0;
> >> +       reloc[1].offset = 7 * sizeof(batch[0]);
> >> +       if (device_gen >= 8)
> >> +               reloc[1].offset += sizeof(batch[0]);
> >> +       reloc[1].presumed_offset = 0;
> >> +       reloc[1].read_domains = I915_GEM_DOMAIN_RENDER;
> >> +       reloc[1].write_domain = 0;
> >
> >Hmm. Since the batch is fresh every time, relocations are not a gpu
> >stall. So horrible but not a test breaker. (Stalling for relocations
> >would very effectively nerf the test.)
> 
> I'm not sure I follow the comment here. Are you saying it would be
> better to keep track of the buffer offset after it has been "relocated"
> and then reuse that? Also, would it be better to reuse the batch buffer?
> I saw the same approach being taken in gem_linear_blits and thought it
> would suffice for testing.

The choice is either to reuse batches, and so avoid the penalty of
allocating and flushing fresh pages every time, or to force the system to
find room for another page every time. Both have their merits :)

Now, the problem I was referring to above is that since we are not
reusing batches, we cannot avoid relocations (as we don't know where the
buffers are). Relocations are slow, so best avoided; worse, if you
relocate something busy, it may require a complete pipeline stall.
Nowadays (the last few years, say), we use gpu relocations so that we
could reuse one batch with the fixed copy and just modify the addresses
each time. Hmm, that might be interesting for a later variation.

So what I was worrying about was whether the relocations would stall
and, in doing so, make the test much more relaxed, as there would be no
in-flight requests making eviction hard.
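
For illustration only (this is the presumed_offset trick rather than
the gpu-relocation approach mentioned above, and it is not something
from the patch): if copy() remembered where the kernel placed each
object and pre-wrote those addresses into the batch, the kernel would
find nothing left to patch and so would never need to stall:

        /* Hypothetical cache of the last-known placements; a real test
         * would keep one per buffer rather than a single static pair. */
        static uint64_t dst_offset, src_offset;

        reloc[0].presumed_offset = dst_offset;
        reloc[1].presumed_offset = src_offset;

        /* Pre-write the guessed addresses into the batch; if the guess
         * matches the object's actual placement the kernel skips the
         * relocation entirely. */
        batch[4] = (uint32_t)dst_offset;
        if (device_gen >= 8) {
                batch[5] = (uint32_t)(dst_offset >> 32);
                batch[8] = (uint32_t)src_offset;
                batch[9] = (uint32_t)(src_offset >> 32);
        } else {
                batch[7] = (uint32_t)src_offset;
        }

        gem_execbuf(fd, &exec);

        /* Remember where the kernel actually put the objects. */
        dst_offset = obj[0].offset;
        src_offset = obj[1].offset;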

> >> +       memset(obj, 0, sizeof(obj));
> >> +       obj[0].handle = dst;
> >> +       obj[1].handle = src;
> >> +       obj[2].handle = gem_create(fd, 4096);
> >> +       gem_write(fd, obj[2].handle, 0, batch, i * sizeof(batch[0]));
> >> +       obj[2].relocation_count = 2;
> >> +       obj[2].relocs_ptr = to_user_pointer(reloc);
> >> +
> >> +       memset(&exec, 0, sizeof(exec));
> >> +       exec.buffers_ptr = to_user_pointer(obj);
> >> +       exec.buffer_count = 3;
> >> +       exec.batch_len = i * sizeof(batch[0]);
> >> +       exec.flags = gem_has_blt(fd) ? I915_EXEC_BLT : 0;
> >> +
> >> +       gem_execbuf(fd, &exec);
> >> +       if (dst_tiling == I915_TILING_NONE)
> >> +               gem_sync(fd, obj[2].handle);
> >
> >Why is there a sync here? What bug are you trying to hide?
> 
> This is what I've been trying to figure out as well. I tried using
> gem_mmap__cpu() in check_bo(), as you mentioned. Without this
> gem_sync() the test fails regardless of whether check_bo() uses
> gem_mmap__cpu() or gem_mmap__wc(): the data doesn't match the
> expected values and the assert in check_bo() is triggered.

The issue is that gem_mmap__wc() is asynchronous and does not prepare
the data to be coherent. (Though since it is WC, it should be!) The
trick is that you have to tell the kernel you are about to access the
pointer through the WC domain, or manually do the gem_sync() +
invalidation yourself.
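
As a rough sketch (not a tested change to the patch), the read-back in
check_bo() could then drop the explicit gem_sync() in copy() and
declare the domain right before reading:

        linear = gem_mmap__wc(fd, linear_bo, 0, buf_size, PROT_READ);

        /* Waits for the blit and makes the pages coherent for WC reads,
         * so the gem_sync() after the execbuf is no longer needed. */
        gem_set_domain(fd, linear_bo, I915_GEM_DOMAIN_WC, 0);

        /* ... compare linear[] against the expected values as before ... */

        munmap(linear, buf_size);
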
...

> >>  static void
> >> -check_bo(drm_intel_bo *bo, uint32_t val)
> >> +check_bo(int fd, uint32_t bo, uint32_t val)
> >>  {
> >> -       drm_intel_bo *linear_bo;
> >> +       uint32_t linear_bo;
> >>         uint32_t *linear;
> >>         int num_errors;
> >>         int i;
> >> +       const uint32_t buf_size = 1024 * 1024;
> >>
> >> -       linear_bo = drm_intel_bo_alloc(bufmgr, "linear dst", 1024 * 1024, 4096);
> >> +       linear_bo = gem_create(fd, buf_size);
> >>
> >> -       intel_copy_bo(batch, linear_bo, bo, width*height*4);
> >> +       copy(fd, linear_bo, I915_TILING_NONE, bo, I915_TILING_X);
> >>
> >> -       do_or_die(drm_intel_bo_map(linear_bo, 0));
> >> -       linear = linear_bo->virtual;
> >> +       linear = (uint32_t *)gem_mmap__wc(fd, linear_bo, 0, buf_size,
> >> +                                         PROT_READ);
> >
> >And this is the error above you were trying to hide.

So here, you need to have a
gem_set_domain(fd, linear_bo, I915_GEM_DOMAIN_WC, 0);
(replace DOMAIN_WC with the appropriate domain if you have to fall back).
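
A possible shape for that fallback, as a sketch only (it assumes IGT's
__gem_mmap__cpu(), which returns NULL on failure, and prefers a WB
mapping for the bulk read as suggested earlier in the thread):

        /* Prefer a WB (CPU) mapping for reading a megabyte back; fall
         * back to WC and match the set_domain call to whichever mapping
         * is actually in use. */
        linear = __gem_mmap__cpu(fd, linear_bo, 0, buf_size, PROT_READ);
        if (linear) {
                gem_set_domain(fd, linear_bo, I915_GEM_DOMAIN_CPU, 0);
        } else {
                linear = gem_mmap__wc(fd, linear_bo, 0, buf_size, PROT_READ);
                gem_set_domain(fd, linear_bo, I915_GEM_DOMAIN_WC, 0);
        }
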
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

Thread overview: 7+ messages
2019-11-13  4:31 [igt-dev] [RFC PATCH] i915/gem_tiled_blits: Switch from libdrm methods to igt lib Vanshidhar Konda
2019-11-13  4:35 ` Vanshidhar Konda
2019-11-13  5:12 ` [igt-dev] ✓ Fi.CI.BAT: success for " Patchwork
2019-11-13  9:51 ` [igt-dev] [RFC PATCH] " Chris Wilson
2019-11-13 18:15   ` Vanshidhar Konda
2019-11-13 19:58     ` Chris Wilson
2019-11-13 16:13 ` [igt-dev] ✓ Fi.CI.IGT: success for " Patchwork
