* [PATCH i-g-t] i915/gem_exec_schedule: Switch reorder-wide to sw_sync
@ 2019-08-28 16:18 ` Chris Wilson
  0 siblings, 0 replies; 4+ messages in thread
From: Chris Wilson @ 2019-08-28 16:18 UTC (permalink / raw)
  To: intel-gfx; +Cc: igt-dev

Switch to using sw_sync to avoid the built-in timeout on vgem's fences.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 tests/i915/gem_exec_schedule.c | 157 ++++++++++++++++++++-------------
 1 file changed, 95 insertions(+), 62 deletions(-)
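
For readers not familiar with the cork change: the plug is now a sync_file fd minted from a software sync timeline rather than a fence attached to a shared vgem buffer, so it never signals on its own (no built-in timeout); it only signals when the test advances the timeline. A minimal sketch of that lifecycle, assuming IGT's lib/sw_sync.h helpers, with hypothetical cork_plug()/cork_unplug() wrappers for illustration (this is not IGT's igt_cork implementation):

	#include <unistd.h>
	#include "sw_sync.h"

	/* Hypothetical standalone cork, for illustration only. */
	static int cork_timeline = -1;

	static int cork_plug(void)
	{
		cork_timeline = sw_sync_timeline_create();

		/* sync_file fd that signals once the timeline reaches seqno 1 */
		return sw_sync_timeline_create_fence(cork_timeline, 1);
	}

	static void cork_unplug(void)
	{
		/* Advance the timeline by one; every fence minted above signals. */
		sw_sync_timeline_inc(cork_timeline, 1);
		close(cork_timeline);
		cork_timeline = -1;
	}

IGT_CORK_FENCE() with igt_cork_plug()/igt_cork_unplug() wraps roughly this, which is why the callers in the diff below only ever see a plain fence fd that they close() when done.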

diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index 058102103..ddcb1f21a 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -34,6 +34,7 @@
 #include "igt_sysfs.h"
 #include "igt_vgem.h"
 #include "i915/gem_ring.h"
+#include "sw_sync.h"
 
 #define LO 0
 #define HI 1
@@ -85,7 +86,7 @@ void __sync_read_u32_count(int fd, uint32_t handle, uint32_t *dst, uint64_t size
 
 static uint32_t __store_dword(int fd, uint32_t ctx, unsigned ring,
 			      uint32_t target, uint32_t offset, uint32_t value,
-			      uint32_t cork, unsigned write_domain)
+			      uint32_t cork, int fence, unsigned write_domain)
 {
 	const int gen = intel_gen(intel_get_drm_devid(fd));
 	struct drm_i915_gem_exec_object2 obj[3];
@@ -102,6 +103,11 @@ static uint32_t __store_dword(int fd, uint32_t ctx, unsigned ring,
 		execbuf.flags |= I915_EXEC_SECURE;
 	execbuf.rsvd1 = ctx;
 
+	if (fence != -1) {
+		execbuf.flags |= I915_EXEC_FENCE_IN;
+		execbuf.rsvd2 = fence;
+	}
+
 	memset(obj, 0, sizeof(obj));
 	obj[0].handle = cork;
 	obj[1].handle = target;
@@ -140,11 +146,29 @@ static uint32_t __store_dword(int fd, uint32_t ctx, unsigned ring,
 
 static void store_dword(int fd, uint32_t ctx, unsigned ring,
 			uint32_t target, uint32_t offset, uint32_t value,
-			uint32_t cork, unsigned write_domain)
+			unsigned write_domain)
 {
 	gem_close(fd, __store_dword(fd, ctx, ring,
 				    target, offset, value,
-				    cork, write_domain));
+				    0, -1, write_domain));
+}
+
+static void store_dword_plug(int fd, uint32_t ctx, unsigned ring,
+			     uint32_t target, uint32_t offset, uint32_t value,
+			     uint32_t cork, unsigned write_domain)
+{
+	gem_close(fd, __store_dword(fd, ctx, ring,
+				    target, offset, value,
+				    cork, -1, write_domain));
+}
+
+static void store_dword_fenced(int fd, uint32_t ctx, unsigned ring,
+			       uint32_t target, uint32_t offset, uint32_t value,
+			       int fence, unsigned write_domain)
+{
+	gem_close(fd, __store_dword(fd, ctx, ring,
+				    target, offset, value,
+				    0, fence, write_domain));
 }
 
 static uint32_t create_highest_priority(int fd)
@@ -189,20 +213,21 @@ static void unplug_show_queue(int fd, struct igt_cork *c, unsigned int engine)
 
 static void fifo(int fd, unsigned ring)
 {
-	IGT_CORK_HANDLE(cork);
-	uint32_t scratch, plug;
+	IGT_CORK_FENCE(cork);
+	uint32_t scratch;
 	uint32_t result;
+	int fence;
 
 	scratch = gem_create(fd, 4096);
 
-	plug = igt_cork_plug(&cork, fd);
+	fence = igt_cork_plug(&cork, fd);
 
 	/* Same priority, same timeline, final result will be the second eb */
-	store_dword(fd, 0, ring, scratch, 0, 1, plug, 0);
-	store_dword(fd, 0, ring, scratch, 0, 2, plug, 0);
+	store_dword_fenced(fd, 0, ring, scratch, 0, 1, fence, 0);
+	store_dword_fenced(fd, 0, ring, scratch, 0, 2, fence, 0);
 
 	unplug_show_queue(fd, &cork, ring);
-	gem_close(fd, plug);
+	close(fence);
 
 	result =  __sync_read_u32(fd, scratch, 0);
 	gem_close(fd, scratch);
@@ -212,11 +237,12 @@ static void fifo(int fd, unsigned ring)
 
 static void independent(int fd, unsigned int engine)
 {
-	IGT_CORK_HANDLE(cork);
-	uint32_t scratch, plug, batch;
+	IGT_CORK_FENCE(cork);
 	igt_spin_t *spin = NULL;
+	uint32_t scratch, batch;
 	unsigned int other;
 	uint32_t *ptr;
+	int fence;
 
 	igt_require(engine != 0);
 
@@ -224,7 +250,7 @@ static void independent(int fd, unsigned int engine)
 	ptr = gem_mmap__gtt(fd, scratch, 4096, PROT_READ);
 	igt_assert_eq(ptr[0], 0);
 
-	plug = igt_cork_plug(&cork, fd);
+	fence = igt_cork_plug(&cork, fd);
 
 	/* Check that we can submit to engine while all others are blocked */
 	for_each_physical_engine(fd, other) {
@@ -245,15 +271,15 @@ static void independent(int fd, unsigned int engine)
 			gem_execbuf(fd, &eb);
 		}
 
-		store_dword(fd, 0, other, scratch, 0, other, plug, 0);
+		store_dword_fenced(fd, 0, other, scratch, 0, other, fence, 0);
 	}
 	igt_require(spin);
 
 	/* Same priority, but different timeline (as different engine) */
-	batch = __store_dword(fd, 0, engine, scratch, 0, engine, plug, 0);
+	batch = __store_dword(fd, 0, engine, scratch, 0, engine, 0, fence, 0);
 
 	unplug_show_queue(fd, &cork, engine);
-	gem_close(fd, plug);
+	close(fence);
 
 	gem_sync(fd, batch);
 	igt_assert(!gem_bo_busy(fd, batch));
@@ -312,11 +338,11 @@ static void smoketest(int fd, unsigned ring, unsigned timeout)
 			engine = engines[hars_petruska_f54_1_random_unsafe_max(nengine)];
 			store_dword(fd, ctx, engine, scratch,
 				    8*child + 0, ~child,
-				    0, 0);
+				    0);
 			for (unsigned int step = 0; step < 8; step++)
 				store_dword(fd, ctx, engine, scratch,
 					    8*child + 4, count++,
-					    0, 0);
+					    0);
 		}
 		gem_context_destroy(fd, ctx);
 	}
@@ -698,10 +724,11 @@ static void semaphore_noskip(int i915)
 static void reorder(int fd, unsigned ring, unsigned flags)
 #define EQUAL 1
 {
-	IGT_CORK_HANDLE(cork);
-	uint32_t scratch, plug;
+	IGT_CORK_FENCE(cork);
+	uint32_t scratch;
 	uint32_t result;
 	uint32_t ctx[2];
+	int fence;
 
 	ctx[LO] = gem_context_create(fd);
 	gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
@@ -710,16 +737,16 @@ static void reorder(int fd, unsigned ring, unsigned flags)
 	gem_context_set_priority(fd, ctx[HI], flags & EQUAL ? MIN_PRIO : 0);
 
 	scratch = gem_create(fd, 4096);
-	plug = igt_cork_plug(&cork, fd);
+	fence = igt_cork_plug(&cork, fd);
 
 	/* We expect the high priority context to be executed first, and
 	 * so the final result will be value from the low priority context.
 	 */
-	store_dword(fd, ctx[LO], ring, scratch, 0, ctx[LO], plug, 0);
-	store_dword(fd, ctx[HI], ring, scratch, 0, ctx[HI], plug, 0);
+	store_dword_fenced(fd, ctx[LO], ring, scratch, 0, ctx[LO], fence, 0);
+	store_dword_fenced(fd, ctx[HI], ring, scratch, 0, ctx[HI], fence, 0);
 
 	unplug_show_queue(fd, &cork, ring);
-	gem_close(fd, plug);
+	close(fence);
 
 	gem_context_destroy(fd, ctx[LO]);
 	gem_context_destroy(fd, ctx[HI]);
@@ -735,11 +762,11 @@ static void reorder(int fd, unsigned ring, unsigned flags)
 
 static void promotion(int fd, unsigned ring)
 {
-	IGT_CORK_HANDLE(cork);
+	IGT_CORK_FENCE(cork);
 	uint32_t result, dep;
 	uint32_t result_read, dep_read;
 	uint32_t ctx[3];
-	uint32_t plug;
+	int fence;
 
 	ctx[LO] = gem_context_create(fd);
 	gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
@@ -753,24 +780,24 @@ static void promotion(int fd, unsigned ring)
 	result = gem_create(fd, 4096);
 	dep = gem_create(fd, 4096);
 
-	plug = igt_cork_plug(&cork, fd);
+	fence = igt_cork_plug(&cork, fd);
 
 	/* Expect that HI promotes LO, so the order will be LO, HI, NOISE.
 	 *
 	 * fifo would be NOISE, LO, HI.
 	 * strict priority would be  HI, NOISE, LO
 	 */
-	store_dword(fd, ctx[NOISE], ring, result, 0, ctx[NOISE], plug, 0);
-	store_dword(fd, ctx[LO], ring, result, 0, ctx[LO], plug, 0);
+	store_dword_fenced(fd, ctx[NOISE], ring, result, 0, ctx[NOISE], fence, 0);
+	store_dword_fenced(fd, ctx[LO], ring, result, 0, ctx[LO], fence, 0);
 
 	/* link LO <-> HI via a dependency on another buffer */
-	store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
-	store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI], 0, 0);
+	store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO], I915_GEM_DOMAIN_INSTRUCTION);
+	store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI], 0);
 
-	store_dword(fd, ctx[HI], ring, result, 0, ctx[HI], 0, 0);
+	store_dword(fd, ctx[HI], ring, result, 0, ctx[HI], 0);
 
 	unplug_show_queue(fd, &cork, ring);
-	gem_close(fd, plug);
+	close(fence);
 
 	gem_context_destroy(fd, ctx[NOISE]);
 	gem_context_destroy(fd, ctx[LO]);
@@ -816,7 +843,7 @@ static void preempt(int fd, unsigned ring, unsigned flags)
 					 .engine = ring);
 		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
 
-		store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
+		store_dword(fd, ctx[HI], ring, result, 0, n + 1, I915_GEM_DOMAIN_RENDER);
 
 		result_read = __sync_read_u32(fd, result, 0);
 		igt_assert_eq_u32(result_read, n + 1);
@@ -875,21 +902,21 @@ static void __preempt_other(int fd,
 	n = 0;
 	store_dword(fd, ctx[LO], primary,
 		    result, (n + 1)*sizeof(uint32_t), n + 1,
-		    0, I915_GEM_DOMAIN_RENDER);
+		    I915_GEM_DOMAIN_RENDER);
 	n++;
 
 	if (flags & CHAIN) {
 		for_each_physical_engine(fd, other) {
 			store_dword(fd, ctx[LO], other,
 				    result, (n + 1)*sizeof(uint32_t), n + 1,
-				    0, I915_GEM_DOMAIN_RENDER);
+				    I915_GEM_DOMAIN_RENDER);
 			n++;
 		}
 	}
 
 	store_dword(fd, ctx[HI], target,
 		    result, (n + 1)*sizeof(uint32_t), n + 1,
-		    0, I915_GEM_DOMAIN_RENDER);
+		    I915_GEM_DOMAIN_RENDER);
 
 	igt_debugfs_dump(fd, "i915_engine_info");
 	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
@@ -980,21 +1007,21 @@ static void __preempt_queue(int fd,
 	n = 0;
 	store_dword(fd, ctx[LO], primary,
 		    result, (n + 1)*sizeof(uint32_t), n + 1,
-		    0, I915_GEM_DOMAIN_RENDER);
+		    I915_GEM_DOMAIN_RENDER);
 	n++;
 
 	if (flags & CHAIN) {
 		for_each_physical_engine(fd, other) {
 			store_dword(fd, ctx[LO], other,
 				    result, (n + 1)*sizeof(uint32_t), n + 1,
-				    0, I915_GEM_DOMAIN_RENDER);
+				    I915_GEM_DOMAIN_RENDER);
 			n++;
 		}
 	}
 
 	store_dword(fd, ctx[HI], target,
 		    result, (n + 1)*sizeof(uint32_t), n + 1,
-		    0, I915_GEM_DOMAIN_RENDER);
+		    I915_GEM_DOMAIN_RENDER);
 
 	igt_debugfs_dump(fd, "i915_engine_info");
 
@@ -1062,13 +1089,13 @@ static void preempt_self(int fd, unsigned ring)
 					 .engine = other);
 		store_dword(fd, ctx[HI], other,
 			    result, (n + 1)*sizeof(uint32_t), n + 1,
-			    0, I915_GEM_DOMAIN_RENDER);
+			    I915_GEM_DOMAIN_RENDER);
 		n++;
 	}
 	gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
 	store_dword(fd, ctx[HI], ring,
 		    result, (n + 1)*sizeof(uint32_t), n + 1,
-		    0, I915_GEM_DOMAIN_RENDER);
+		    I915_GEM_DOMAIN_RENDER);
 
 	gem_set_domain(fd, result, I915_GEM_DOMAIN_GTT, 0);
 
@@ -1242,8 +1269,8 @@ static void deep(int fd, unsigned ring)
 		gem_context_set_priority(fd, context, MAX_PRIO - nreq + n);
 
 		for (int m = 0; m < XS; m++) {
-			store_dword(fd, context, ring, result, 4*n, context, dep[m], 0);
-			store_dword(fd, context, ring, result, 4*m, context, 0, I915_GEM_DOMAIN_INSTRUCTION);
+			store_dword_plug(fd, context, ring, result, 4*n, context, dep[m], 0);
+			store_dword(fd, context, ring, result, 4*m, context, I915_GEM_DOMAIN_INSTRUCTION);
 		}
 		expected = context;
 	}
@@ -1293,12 +1320,12 @@ static void wide(int fd, unsigned ring)
 	struct timespec tv = {};
 	unsigned int ring_size = gem_measure_ring_inflight(fd, ring, MEASURE_RING_NEW_CTX);
 
-	IGT_CORK_HANDLE(cork);
-	uint32_t plug;
+	IGT_CORK_FENCE(cork);
 	uint32_t result;
 	uint32_t result_read[MAX_CONTEXTS];
 	uint32_t *ctx;
 	unsigned int count;
+	int fence;
 
 	ctx = malloc(sizeof(*ctx)*MAX_CONTEXTS);
 	for (int n = 0; n < MAX_CONTEXTS; n++)
@@ -1306,21 +1333,22 @@ static void wide(int fd, unsigned ring)
 
 	result = gem_create(fd, 4*MAX_CONTEXTS);
 
-	plug = igt_cork_plug(&cork, fd);
+	fence = igt_cork_plug(&cork, fd);
 
 	/* Lots of in-order requests, plugged and submitted simultaneously */
 	for (count = 0;
 	     igt_seconds_elapsed(&tv) < 5 && count < ring_size;
 	     count++) {
 		for (int n = 0; n < MAX_CONTEXTS; n++) {
-			store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], plug, I915_GEM_DOMAIN_INSTRUCTION);
+			store_dword_fenced(fd, ctx[n], ring, result, 4*n, ctx[n],
+					   fence, I915_GEM_DOMAIN_INSTRUCTION);
 		}
 	}
 	igt_info("Submitted %d requests over %d contexts in %.1fms\n",
 		 count, MAX_CONTEXTS, igt_nsec_elapsed(&tv) * 1e-6);
 
 	unplug_show_queue(fd, &cork, ring);
-	gem_close(fd, plug);
+	close(fence);
 
 	for (int n = 0; n < MAX_CONTEXTS; n++)
 		gem_context_destroy(fd, ctx[n]);
@@ -1337,27 +1365,27 @@ static void reorder_wide(int fd, unsigned ring)
 {
 	const int gen = intel_gen(intel_get_drm_devid(fd));
 	struct drm_i915_gem_relocation_entry reloc;
-	struct drm_i915_gem_exec_object2 obj[3];
+	struct drm_i915_gem_exec_object2 obj[2];
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct timespec tv = {};
 	unsigned int ring_size = gem_measure_ring_inflight(fd, ring, MEASURE_RING_NEW_CTX);
-	IGT_CORK_HANDLE(cork);
-	uint32_t result, target, plug;
+	IGT_CORK_FENCE(cork);
+	uint32_t result, target;
 	uint32_t result_read[1024];
 	uint32_t *expected;
+	int fence;
 
 	result = gem_create(fd, 4096);
 	target = gem_create(fd, 4096);
-	plug = igt_cork_plug(&cork, fd);
+	fence = igt_cork_plug(&cork, fd);
 
 	expected = gem_mmap__cpu(fd, target, 0, 4096, PROT_WRITE);
 	gem_set_domain(fd, target, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
 
 	memset(obj, 0, sizeof(obj));
-	obj[0].handle = plug;
-	obj[1].handle = result;
-	obj[2].relocs_ptr = to_user_pointer(&reloc);
-	obj[2].relocation_count = 1;
+	obj[0].handle = result;
+	obj[1].relocs_ptr = to_user_pointer(&reloc);
+	obj[1].relocation_count = 1;
 
 	memset(&reloc, 0, sizeof(reloc));
 	reloc.target_handle = result;
@@ -1366,11 +1394,14 @@ static void reorder_wide(int fd, unsigned ring)
 
 	memset(&execbuf, 0, sizeof(execbuf));
 	execbuf.buffers_ptr = to_user_pointer(obj);
-	execbuf.buffer_count = 3;
+	execbuf.buffer_count = ARRAY_SIZE(obj);
 	execbuf.flags = ring;
 	if (gen < 6)
 		execbuf.flags |= I915_EXEC_SECURE;
 
+	execbuf.flags |= I915_EXEC_FENCE_IN;
+	execbuf.rsvd2 = fence;
+
 	for (int n = MIN_PRIO, x = 1;
 	     igt_seconds_elapsed(&tv) < 5 && n <= MAX_PRIO;
 	     n++, x++) {
@@ -1380,9 +1411,9 @@ static void reorder_wide(int fd, unsigned ring)
 		execbuf.rsvd1 = gem_context_create(fd);
 		gem_context_set_priority(fd, execbuf.rsvd1, n);
 
-		obj[2].handle = gem_create(fd, sz);
-		batch = gem_mmap__gtt(fd, obj[2].handle, sz, PROT_WRITE);
-		gem_set_domain(fd, obj[2].handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+		obj[1].handle = gem_create(fd, sz);
+		batch = gem_mmap__gtt(fd, obj[1].handle, sz, PROT_WRITE);
+		gem_set_domain(fd, obj[1].handle, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
 
 		for (int m = 0; m < ring_size; m++) {
 			uint64_t addr;
@@ -1417,12 +1448,12 @@ static void reorder_wide(int fd, unsigned ring)
 		}
 
 		munmap(batch, sz);
-		gem_close(fd, obj[2].handle);
+		gem_close(fd, obj[1].handle);
 		gem_context_destroy(fd, execbuf.rsvd1);
 	}
 
 	unplug_show_queue(fd, &cork, ring);
-	gem_close(fd, plug);
+	close(fence);
 
 	__sync_read_u32_count(fd, result, result_read, sizeof(result_read));
 	for (int n = 0; n < 1024; n++)
@@ -1641,6 +1672,8 @@ igt_main
 	igt_skip_on_simulation();
 
 	igt_fixture {
+		igt_require_sw_sync();
+
 		fd = drm_open_driver_master(DRIVER_INTEL);
 		gem_submission_print_method(fd);
 		gem_scheduler_print_capability(fd);
-- 
2.23.0
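
For completeness, the way such a fence then gates an execbuf (mirroring what __store_dword() gains above) reduces to roughly the sketch below. It is an untested illustration: it reuses the hypothetical cork_plug()/cork_unplug() helpers from the note under the diffstat and assumes the usual IGT gem_* wrappers and includes.

	static void fenced_nop(int i915, unsigned int ring)
	{
		const uint32_t bbe = MI_BATCH_BUFFER_END;
		struct drm_i915_gem_exec_object2 obj = { };
		struct drm_i915_gem_execbuffer2 execbuf = { };
		int fence = cork_plug();

		obj.handle = gem_create(i915, 4096);
		gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));

		execbuf.buffers_ptr = to_user_pointer(&obj);
		execbuf.buffer_count = 1;
		execbuf.flags = ring | I915_EXEC_FENCE_IN;
		execbuf.rsvd2 = fence; /* input fence fd in the low 32 bits */
		gem_execbuf(i915, &execbuf);

		/* The request is queued but held back by the unsignaled cork. */
		igt_assert(gem_bo_busy(i915, obj.handle));

		cork_unplug(); /* signal the sw_sync fence ... */
		close(fence);
		gem_sync(i915, obj.handle); /* ... and the nop batch completes */

		gem_close(i915, obj.handle);
	}

Because the cork only signals when the test says so, reorder-wide can queue its full ring of requests behind it without vgem's fence timeout firing underneath, which is the point of the patch.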


* [igt-dev] ✓ Fi.CI.BAT: success for i915/gem_exec_schedule: Switch reorder-wide to sw_sync
  2019-08-28 16:18 ` [igt-dev] " Chris Wilson
@ 2019-08-29 10:10 ` Patchwork
  -1 siblings, 0 replies; 4+ messages in thread
From: Patchwork @ 2019-08-29 10:10 UTC (permalink / raw)
  To: Chris Wilson; +Cc: igt-dev

== Series Details ==

Series: i915/gem_exec_schedule: Switch reorder-wide to sw_sync
URL   : https://patchwork.freedesktop.org/series/65942/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6798 -> IGTPW_3390
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/65942/revisions/1/mbox/

Known issues
------------

  Here are the changes found in IGTPW_3390 that come from known issues:

### IGT changes ###

#### Possible fixes ####

  * igt@gem_exec_suspend@basic-s3:
    - fi-blb-e6850:       [INCOMPLETE][1] ([fdo#107718]) -> [PASS][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/fi-blb-e6850/igt@gem_exec_suspend@basic-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/fi-blb-e6850/igt@gem_exec_suspend@basic-s3.html

  * igt@i915_selftest@live_gem_contexts:
    - fi-cfl-guc:         [INCOMPLETE][3] ([fdo#111514]) -> [PASS][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/fi-cfl-guc/igt@i915_selftest@live_gem_contexts.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/fi-cfl-guc/igt@i915_selftest@live_gem_contexts.html
    - fi-skl-iommu:       [INCOMPLETE][5] -> [PASS][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/fi-skl-iommu/igt@i915_selftest@live_gem_contexts.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/fi-skl-iommu/igt@i915_selftest@live_gem_contexts.html

  
#### Warnings ####

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [FAIL][7] ([fdo#111096]) -> [FAIL][8] ([fdo#111407])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#107718]: https://bugs.freedesktop.org/show_bug.cgi?id=107718
  [fdo#111096]: https://bugs.freedesktop.org/show_bug.cgi?id=111096
  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
  [fdo#111514]: https://bugs.freedesktop.org/show_bug.cgi?id=111514


Participating hosts (52 -> 45)
------------------------------

  Missing    (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * IGT: IGT_5152 -> IGTPW_3390

  CI-20190529: 20190529
  CI_DRM_6798: 9c51d473851d23f32a0667e3f2b8ed5bda27bf42 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_3390: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/
  IGT_5152: f9d17c54c6946eb6391fce88687f9b071be9446b @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/

* [igt-dev] ✓ Fi.CI.IGT: success for i915/gem_exec_schedule: Switch reorder-wide to sw_sync
  2019-08-28 16:18 ` [igt-dev] " Chris Wilson
@ 2019-08-29 16:56 ` Patchwork
  -1 siblings, 0 replies; 4+ messages in thread
From: Patchwork @ 2019-08-29 16:56 UTC (permalink / raw)
  To: Chris Wilson; +Cc: igt-dev

== Series Details ==

Series: i915/gem_exec_schedule: Switch reorder-wide to sw_sync
URL   : https://patchwork.freedesktop.org/series/65942/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6798_full -> IGTPW_3390_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/65942/revisions/1/mbox/

Known issues
------------

  Here are the changes found in IGTPW_3390_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_isolation@bcs0-s3:
    - shard-kbl:          ([PASS][1], [PASS][2]) -> [DMESG-WARN][3] ([fdo#108566]) +2 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl1/igt@gem_ctx_isolation@bcs0-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl3/igt@gem_ctx_isolation@bcs0-s3.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-kbl6/igt@gem_ctx_isolation@bcs0-s3.html

  * igt@gem_exec_schedule@deep-bsd:
    - shard-iclb:         ([PASS][4], [PASS][5]) -> [SKIP][6] ([fdo#111325]) +4 similar issues
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb6/igt@gem_exec_schedule@deep-bsd.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb5/igt@gem_exec_schedule@deep-bsd.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb4/igt@gem_exec_schedule@deep-bsd.html

  * igt@gem_exec_schedule@preempt-bsd1:
    - shard-iclb:         [PASS][7] -> [SKIP][8] ([fdo#109276]) +4 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb2/igt@gem_exec_schedule@preempt-bsd1.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb3/igt@gem_exec_schedule@preempt-bsd1.html

  * igt@gem_workarounds@suspend-resume:
    - shard-apl:          [PASS][9] -> [DMESG-WARN][10] ([fdo#108566]) +2 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl8/igt@gem_workarounds@suspend-resume.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-apl1/igt@gem_workarounds@suspend-resume.html

  * igt@kms_cursor_legacy@cursor-vs-flip-legacy:
    - shard-hsw:          ([PASS][11], [PASS][12]) -> [FAIL][13] ([fdo#103355])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-hsw8/igt@kms_cursor_legacy@cursor-vs-flip-legacy.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-hsw2/igt@kms_cursor_legacy@cursor-vs-flip-legacy.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-hsw6/igt@kms_cursor_legacy@cursor-vs-flip-legacy.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-msflip-blt:
    - shard-iclb:         ([PASS][14], [PASS][15]) -> [FAIL][16] ([fdo#103167]) +3 similar issues
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-msflip-blt.html
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb7/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-msflip-blt.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb6/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-msflip-blt.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
    - shard-apl:          ([PASS][17], [PASS][18]) -> [DMESG-WARN][19] ([fdo#108566]) +1 similar issue
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl7/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl4/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-apl1/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html

  * igt@kms_setmode@basic:
    - shard-apl:          ([PASS][20], [PASS][21]) -> [FAIL][22] ([fdo#99912])
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl4/igt@kms_setmode@basic.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl5/igt@kms_setmode@basic.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-apl2/igt@kms_setmode@basic.html

  * igt@kms_vblank@pipe-a-wait-forked-hang:
    - shard-iclb:         ([PASS][23], [PASS][24]) -> [INCOMPLETE][25] ([fdo#107713])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb2/igt@kms_vblank@pipe-a-wait-forked-hang.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb5/igt@kms_vblank@pipe-a-wait-forked-hang.html
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb7/igt@kms_vblank@pipe-a-wait-forked-hang.html

  * igt@prime_busy@after-bsd2:
    - shard-iclb:         ([PASS][26], [PASS][27]) -> [SKIP][28] ([fdo#109276]) +1 similar issue
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb1/igt@prime_busy@after-bsd2.html
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb4/igt@prime_busy@after-bsd2.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb3/igt@prime_busy@after-bsd2.html

  
#### Possible fixes ####

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         ([SKIP][29], [PASS][30]) ([fdo#110841]) -> [PASS][31]
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb4/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb3/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb7/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_eio@in-flight-suspend:
    - shard-apl:          ([DMESG-WARN][32], [DMESG-WARN][33]) ([fdo#108566]) -> [PASS][34] +1 similar issue
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl2/igt@gem_eio@in-flight-suspend.html
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl7/igt@gem_eio@in-flight-suspend.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-apl4/igt@gem_eio@in-flight-suspend.html

  * igt@gem_exec_schedule@independent-bsd:
    - shard-iclb:         ([PASS][35], [SKIP][36]) ([fdo#111325]) -> [PASS][37] +4 similar issues
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb8/igt@gem_exec_schedule@independent-bsd.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb4/igt@gem_exec_schedule@independent-bsd.html
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb3/igt@gem_exec_schedule@independent-bsd.html

  * igt@gem_exec_schedule@preempt-contexts-bsd2:
    - shard-iclb:         [SKIP][38] ([fdo#109276]) -> [PASS][39] +3 similar issues
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb3/igt@gem_exec_schedule@preempt-contexts-bsd2.html
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb1/igt@gem_exec_schedule@preempt-contexts-bsd2.html

  * igt@gem_exec_schedule@preempt-other-bsd1:
    - shard-iclb:         ([SKIP][40], [PASS][41]) ([fdo#109276]) -> [PASS][42] +12 similar issues
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb5/igt@gem_exec_schedule@preempt-other-bsd1.html
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb2/igt@gem_exec_schedule@preempt-other-bsd1.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb2/igt@gem_exec_schedule@preempt-other-bsd1.html

  * igt@gem_exec_schedule@preempt-other-chain-bsd:
    - shard-iclb:         ([SKIP][43], [SKIP][44]) ([fdo#111325]) -> [PASS][45]
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb4/igt@gem_exec_schedule@preempt-other-chain-bsd.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb1/igt@gem_exec_schedule@preempt-other-chain-bsd.html
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb8/igt@gem_exec_schedule@preempt-other-chain-bsd.html

  * igt@gem_exec_schedule@reorder-wide-bsd1:
    - shard-iclb:         ([SKIP][46], [SKIP][47]) ([fdo#109276]) -> [PASS][48] +6 similar issues
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb7/igt@gem_exec_schedule@reorder-wide-bsd1.html
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb3/igt@gem_exec_schedule@reorder-wide-bsd1.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb2/igt@gem_exec_schedule@reorder-wide-bsd1.html

  * igt@gem_softpin@noreloc-s3:
    - shard-kbl:          ([PASS][49], [DMESG-WARN][50]) ([fdo#108566]) -> [PASS][51] +1 similar issue
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl3/igt@gem_softpin@noreloc-s3.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl6/igt@gem_softpin@noreloc-s3.html
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-kbl4/igt@gem_softpin@noreloc-s3.html

  * igt@gem_tiled_swapping@non-threaded:
    - shard-kbl:          ([DMESG-WARN][52], [PASS][53]) ([fdo#108686]) -> [PASS][54]
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl1/igt@gem_tiled_swapping@non-threaded.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl3/igt@gem_tiled_swapping@non-threaded.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-kbl7/igt@gem_tiled_swapping@non-threaded.html

  * igt@i915_pm_rc6_residency@rc6-accuracy:
    - shard-kbl:          ([SKIP][55], [PASS][56]) ([fdo#109271]) -> [PASS][57]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl1/igt@i915_pm_rc6_residency@rc6-accuracy.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl3/igt@i915_pm_rc6_residency@rc6-accuracy.html
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-kbl1/igt@i915_pm_rc6_residency@rc6-accuracy.html

  * igt@kms_atomic_transition@plane-all-transition-fencing:
    - shard-iclb:         ([INCOMPLETE][58], [PASS][59]) ([fdo#107713]) -> [PASS][60]
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb7/igt@kms_atomic_transition@plane-all-transition-fencing.html
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb3/igt@kms_atomic_transition@plane-all-transition-fencing.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb6/igt@kms_atomic_transition@plane-all-transition-fencing.html

  * igt@kms_cursor_crc@pipe-c-cursor-suspend:
    - shard-apl:          [DMESG-WARN][61] ([fdo#108566]) -> [PASS][62]
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl7/igt@kms_cursor_crc@pipe-c-cursor-suspend.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-apl8/igt@kms_cursor_crc@pipe-c-cursor-suspend.html

  * igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic:
    - shard-hsw:          ([PASS][63], [FAIL][64]) ([fdo#105767]) -> [PASS][65]
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-hsw5/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-hsw8/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic.html
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-hsw5/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic.html

  * igt@kms_cursor_legacy@cursor-vs-flip-toggle:
    - shard-hsw:          ([FAIL][66], [PASS][67]) ([fdo#103355]) -> [PASS][68]
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-hsw6/igt@kms_cursor_legacy@cursor-vs-flip-toggle.html
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-hsw5/igt@kms_cursor_legacy@cursor-vs-flip-toggle.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-hsw1/igt@kms_cursor_legacy@cursor-vs-flip-toggle.html

  * igt@kms_flip@2x-flip-vs-expired-vblank-interruptible:
    - shard-glk:          ([PASS][69], [FAIL][70]) ([fdo#105363]) -> [PASS][71]
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-glk1/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-glk4/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible.html
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-glk4/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-suspend:
    - shard-apl:          ([PASS][72], [DMESG-WARN][73]) ([fdo#108566]) -> [PASS][74] +2 similar issues
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl5/igt@kms_frontbuffer_tracking@fbc-suspend.html
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl1/igt@kms_frontbuffer_tracking@fbc-suspend.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-apl7/igt@kms_frontbuffer_tracking@fbc-suspend.html

  * igt@kms_frontbuffer_tracking@fbc-tilingchange:
    - shard-apl:          ([INCOMPLETE][75], [PASS][76]) ([fdo#103927]) -> [PASS][77] +2 similar issues
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl7/igt@kms_frontbuffer_tracking@fbc-tilingchange.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl5/igt@kms_frontbuffer_tracking@fbc-tilingchange.html
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-apl8/igt@kms_frontbuffer_tracking@fbc-tilingchange.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render:
    - shard-iclb:         ([FAIL][78], [PASS][79]) ([fdo#103167]) -> [PASS][80] +10 similar issues
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb2/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render.html
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb1/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb7/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-render.html

  * igt@kms_psr@no_drrs:
    - shard-iclb:         ([PASS][81], [FAIL][82]) ([fdo#108341]) -> [PASS][83]
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb6/igt@kms_psr@no_drrs.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb1/igt@kms_psr@no_drrs.html
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb4/igt@kms_psr@no_drrs.html

  * igt@kms_psr@psr2_primary_mmap_gtt:
    - shard-iclb:         ([SKIP][84], [SKIP][85]) ([fdo#109441]) -> [PASS][86] +1 similar issue
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb7/igt@kms_psr@psr2_primary_mmap_gtt.html
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb3/igt@kms_psr@psr2_primary_mmap_gtt.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb2/igt@kms_psr@psr2_primary_mmap_gtt.html

  * igt@kms_setmode@basic:
    - shard-kbl:          [FAIL][87] ([fdo#99912]) -> [PASS][88]
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl2/igt@kms_setmode@basic.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-kbl7/igt@kms_setmode@basic.html

  
#### Warnings ####

  * igt@gem_bad_reloc@negative-reloc-bsd2:
    - shard-iclb:         ([PASS][89], [SKIP][90]) ([fdo#109276]) -> [SKIP][91] ([fdo#109276]) +15 similar issues
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb4/igt@gem_bad_reloc@negative-reloc-bsd2.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb3/igt@gem_bad_reloc@negative-reloc-bsd2.html
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb3/igt@gem_bad_reloc@negative-reloc-bsd2.html

  * igt@gem_exec_async@concurrent-writes-bsd:
    - shard-iclb:         ([PASS][92], [SKIP][93]) ([fdo#111325]) -> [SKIP][94] ([fdo#111325]) +5 similar issues
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb7/igt@gem_exec_async@concurrent-writes-bsd.html
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb1/igt@gem_exec_async@concurrent-writes-bsd.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb4/igt@gem_exec_async@concurrent-writes-bsd.html

  * igt@gem_mocs_settings@mocs-rc6-bsd2:
    - shard-iclb:         ([FAIL][95], [FAIL][96]) ([fdo#111330]) -> [SKIP][97] ([fdo#109276])
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb2/igt@gem_mocs_settings@mocs-rc6-bsd2.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb1/igt@gem_mocs_settings@mocs-rc6-bsd2.html
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb8/igt@gem_mocs_settings@mocs-rc6-bsd2.html

  * igt@gem_mocs_settings@mocs-reset-bsd2:
    - shard-iclb:         ([FAIL][98], [SKIP][99]) ([fdo#109276] / [fdo#111330]) -> [FAIL][100] ([fdo#111330])
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb2/igt@gem_mocs_settings@mocs-reset-bsd2.html
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb5/igt@gem_mocs_settings@mocs-reset-bsd2.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb1/igt@gem_mocs_settings@mocs-reset-bsd2.html

  * igt@gem_mocs_settings@mocs-settings-bsd2:
    - shard-iclb:         ([FAIL][101], [SKIP][102]) ([fdo#109276] / [fdo#111330]) -> [SKIP][103] ([fdo#109276])
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb4/igt@gem_mocs_settings@mocs-settings-bsd2.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb3/igt@gem_mocs_settings@mocs-settings-bsd2.html
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb3/igt@gem_mocs_settings@mocs-settings-bsd2.html

  * igt@i915_pm_rpm@i2c:
    - shard-hsw:          ([PASS][104], [FAIL][105]) ([fdo#104097]) -> [FAIL][106] ([fdo#104097])
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-hsw2/igt@i915_pm_rpm@i2c.html
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-hsw1/igt@i915_pm_rpm@i2c.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-hsw6/igt@i915_pm_rpm@i2c.html

  * igt@kms_flip@flip-vs-suspend-interruptible:
    - shard-apl:          ([PASS][107], [DMESG-WARN][108]) ([fdo#108566]) -> [DMESG-WARN][109] ([fdo#108566]) +1 similar issue
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl2/igt@kms_flip@flip-vs-suspend-interruptible.html
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-apl8/igt@kms_flip@flip-vs-suspend-interruptible.html
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-apl8/igt@kms_flip@flip-vs-suspend-interruptible.html

  * igt@kms_psr@psr2_cursor_render:
    - shard-iclb:         ([SKIP][110], [PASS][111]) ([fdo#109441]) -> [SKIP][112] ([fdo#109441]) +3 similar issues
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb5/igt@kms_psr@psr2_cursor_render.html
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-iclb2/igt@kms_psr@psr2_cursor_render.html
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-iclb5/igt@kms_psr@psr2_cursor_render.html

  * igt@perf_pmu@rc6:
    - shard-kbl:          ([SKIP][113], [PASS][114]) ([fdo#109271]) -> [SKIP][115] ([fdo#109271])
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl7/igt@perf_pmu@rc6.html
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6798/shard-kbl3/igt@perf_pmu@rc6.html
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/shard-kbl4/igt@perf_pmu@rc6.html

  
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103355]: https://bugs.freedesktop.org/show_bug.cgi?id=103355
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#104097]: https://bugs.freedesktop.org/show_bug.cgi?id=104097
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#105767]: https://bugs.freedesktop.org/show_bug.cgi?id=105767
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#108341]: ht

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3390/
