All of lore.kernel.org
 help / color / mirror / Atom feed
* CI urgent
@ 2017-09-11  8:55 Chris Wilson
  2017-09-11  8:55 ` [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging Chris Wilson
                   ` (6 more replies)
  0 siblings, 7 replies; 16+ messages in thread
From: Chris Wilson @ 2017-09-11  8:55 UTC (permalink / raw)
  To: intel-gfx

Just a few fixes for unhappy tests being run by CI.
-Chris

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging
  2017-09-11  8:55 CI urgent Chris Wilson
@ 2017-09-11  8:55 ` Chris Wilson
  2017-09-11 10:57   ` Petri Latvala
  2017-09-11 12:03   ` Michał Winiarski
  2017-09-11  8:55 ` [PATCH igt 2/5] igt/gem_eio: Exercise wedged with native in-flight requests Chris Wilson
                   ` (5 subsequent siblings)
  6 siblings, 2 replies; 16+ messages in thread
From: Chris Wilson @ 2017-09-11  8:55 UTC (permalink / raw)
  To: intel-gfx

As our hangcheck may exceed 10s to declare the device wedged, we need to
hold the plugging fence indefinitely. This makes using vgem as our input
fence unusable, so resort to using sw_sync. At the same time, we can
then check that the async result is also -EIO.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102616
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/gem_eio.c | 79 ++++++++++++++++++++++-----------------------------------
 1 file changed, 31 insertions(+), 48 deletions(-)

diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 15120842..249510e7 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -39,7 +39,7 @@
 
 #include <drm.h>
 
-#include "igt_vgem.h"
+#include "sw_sync.h"
 
 IGT_TEST_DESCRIPTION("Test that specific ioctls report a wedged GPU (EIO).");
 
@@ -158,66 +158,49 @@ static void test_wait(int fd)
 	trigger_reset(fd);
 }
 
-struct cork {
-	int device;
-	uint32_t handle;
-	uint32_t fence;
-};
-
-static void plug(int fd, struct cork *c)
-{
-	struct vgem_bo bo;
-	int dmabuf;
-
-	c->device = __drm_open_driver(DRIVER_VGEM);
-	igt_require(c->device != -1);
-
-	bo.width = bo.height = 1;
-	bo.bpp = 4;
-	vgem_create(c->device, &bo);
-	c->fence = vgem_fence_attach(c->device, &bo, VGEM_FENCE_WRITE);
-
-	dmabuf = prime_handle_to_fd(c->device, bo.handle);
-	c->handle = prime_fd_to_handle(fd, dmabuf);
-	close(dmabuf);
-}
-
-static void unplug(struct cork *c)
-{
-	vgem_fence_signal(c->device, c->fence);
-	close(c->device);
-}
-
-static void test_inflight(int fd)
+static void test_inflight_external(int fd)
 {
 	struct drm_i915_gem_execbuffer2 execbuf;
-	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_exec_object2 obj;
 	uint32_t bbe = MI_BATCH_BUFFER_END;
 	igt_hang_t hang;
-	struct cork cork;
+	int timeline, fence;
+
+	igt_require_sw_sync();
+	igt_require(gem_has_exec_fence(fd));
+
+	timeline = sw_sync_timeline_create();
+	fence = sw_sync_timeline_create_fence(timeline, 1);
 
 	igt_require(i915_reset_control(false));
 	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
 
-	plug(fd, &cork);
-
-	memset(obj, 0, sizeof(obj));
-	obj[0].handle = cork.handle;
-	obj[1].handle = gem_create(fd, 4096);
-	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
+	memset(&obj, 0, sizeof(obj));
+	obj.handle = gem_create(fd, 4096);
+	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
 
 	memset(&execbuf, 0, sizeof(execbuf));
-	execbuf.buffers_ptr = to_user_pointer(obj);
-	execbuf.buffer_count = 2;
+	execbuf.buffers_ptr = to_user_pointer(&obj);
+	execbuf.buffer_count = 1;
+	execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
+	execbuf.rsvd2 = (uint32_t)fence;
 
-	gem_execbuf(fd, &execbuf);
+	gem_execbuf_wr(fd, &execbuf);
+	close(fence);
 
-	igt_post_hang_ring(fd, hang);
-	unplug(&cork); /* only now submit our batches */
-	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	fence = execbuf.rsvd2 >> 32;
+	igt_assert(fence != -1);
+
+	igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
+	sw_sync_timeline_inc(timeline, 1); /* only now submit our batches */
+
+	igt_assert_eq(__gem_wait(fd, obj.handle, -1), 0);
+	igt_assert_eq(sync_fence_status(fence), -EIO);
+	close(fence);
 
 	igt_require(i915_reset_control(true));
 	trigger_reset(fd);
+	close(timeline);
 }
 
 igt_main
@@ -241,8 +224,8 @@ igt_main
 	igt_subtest("wait")
 		test_wait(fd);
 
-	igt_subtest("in-flight")
-		test_inflight(fd);
+	igt_subtest("in-flight-external")
+		test_inflight_external(fd);
 
 	igt_fixture
 		close(fd);
-- 
2.14.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH igt 2/5] igt/gem_eio: Exercise wedged with native in-flight requests
  2017-09-11  8:55 CI urgent Chris Wilson
  2017-09-11  8:55 ` [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging Chris Wilson
@ 2017-09-11  8:55 ` Chris Wilson
  2017-09-11  8:56 ` [PATCH igt 3/5] igt/gem_eio: Install an exithandler to unwedge the device after failure Chris Wilson
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 16+ messages in thread
From: Chris Wilson @ 2017-09-11  8:55 UTC (permalink / raw)
  To: intel-gfx

If we wedged one engine with unready requests to a second engine
(blocked by waiting on requests from the first, using a dma-fence),
check that we propagate the -EIO to those in-flight requests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/gem_eio.c | 89 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 88 insertions(+), 1 deletion(-)

diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 249510e7..4e3b64b3 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -27,7 +27,6 @@
  *
  */
 
-#include "igt.h"
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
@@ -39,6 +38,8 @@
 
 #include <drm.h>
 
+#include "igt.h"
+#include "igt_sysfs.h"
 #include "sw_sync.h"
 
 IGT_TEST_DESCRIPTION("Test that specific ioctls report a wedged GPU (EIO).");
@@ -203,14 +204,95 @@ static void test_inflight_external(int fd)
 	close(timeline);
 }
 
+static void test_inflight_internal(int fd)
+{
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 obj[2];
+	uint32_t bbe = MI_BATCH_BUFFER_END;
+	igt_hang_t hang;
+	int fence;
+
+	igt_require(gem_has_exec_fence(fd));
+
+	gem_require_ring(fd, I915_EXEC_BLT);
+
+	igt_require(i915_reset_control(false));
+	hang = igt_hang_ring(fd, I915_EXEC_RENDER);
+
+	memset(obj, 0, sizeof(obj));
+	obj[0].handle = hang.handle;
+	obj[0].flags = EXEC_OBJECT_WRITE;
+	obj[1].handle = gem_create(fd, 4096);
+	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = to_user_pointer(obj);
+	execbuf.buffer_count = 2;
+	execbuf.flags = I915_EXEC_BLT | I915_EXEC_FENCE_OUT;
+
+	gem_execbuf_wr(fd, &execbuf);
+	fence = execbuf.rsvd2 >> 32;
+	igt_assert(fence != -1);
+
+	igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
+
+	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	igt_assert_eq(sync_fence_status(fence), -EIO);
+	close(fence);
+
+	igt_require(i915_reset_control(true));
+	trigger_reset(fd);
+}
+
+#define HAVE_EXECLISTS 0x1
+#define HAVE_GUC 0x2
+#define HAVE_SEMAPHORES 0x4
+
+static unsigned print_welcome(int fd)
+{
+	unsigned flags = 0;
+	bool active;
+	int dir;
+
+	dir = igt_sysfs_open_parameters(fd);
+	if (dir < 0)
+		return 0;
+
+	active = igt_sysfs_get_boolean(dir, "enable_guc_submission");
+	if (active) {
+		igt_info("Using GuC submission\n");
+		flags |= HAVE_GUC | HAVE_EXECLISTS;
+		goto out;
+	}
+
+	active = igt_sysfs_get_boolean(dir, "enable_execlists");
+	if (active) {
+		igt_info("Using Execlists submission\n");
+		flags |= HAVE_EXECLISTS;
+		goto out;
+	}
+
+	active = igt_sysfs_get_boolean(dir, "semaphores");
+	if (active)
+		flags |= HAVE_SEMAPHORES;
+	igt_info("Using Legacy submission%s\n",
+		 active ? ", with semaphores" : "");
+
+out:
+	close(dir);
+	return flags;
+}
+
 igt_main
 {
+	unsigned int caps = 0;
 	int fd = -1;
 
 	igt_skip_on_simulation();
 
 	igt_fixture {
 		fd = drm_open_driver(DRIVER_INTEL);
+		caps = print_welcome(fd);
 		igt_require_gem(fd);
 		igt_require_hang_ring(fd, I915_EXEC_DEFAULT);
 	}
@@ -227,6 +309,11 @@ igt_main
 	igt_subtest("in-flight-external")
 		test_inflight_external(fd);
 
+	igt_subtest("in-flight-internal") {
+		igt_require(!(caps & HAVE_SEMAPHORES));
+		test_inflight_internal(fd);
+	}
+
 	igt_fixture
 		close(fd);
 }
-- 
2.14.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH igt 3/5] igt/gem_eio: Install an exithandler to unwedge the device after failure
  2017-09-11  8:55 CI urgent Chris Wilson
  2017-09-11  8:55 ` [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging Chris Wilson
  2017-09-11  8:55 ` [PATCH igt 2/5] igt/gem_eio: Exercise wedged with native in-flight requests Chris Wilson
@ 2017-09-11  8:56 ` Chris Wilson
  2017-09-11 10:45   ` Arkadiusz Hiler
  2017-09-11  8:56 ` [PATCH igt 4/5] igt/gem_fence_thresh: Use streaming reads for verify Chris Wilson
                   ` (3 subsequent siblings)
  6 siblings, 1 reply; 16+ messages in thread
From: Chris Wilson @ 2017-09-11  8:56 UTC (permalink / raw)
  To: intel-gfx

Under normal conditions, we try to repair the damage we inflict to the
GPU, but if we fail we don't. Make sure that if the test does die, we do
try to restore normal operation by using an atexit handler.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/gem_eio.c | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 4e3b64b3..1079fea7 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -283,15 +283,28 @@ out:
 	return flags;
 }
 
+static int fd = -1;
+
+static void
+exit_handler(int sig)
+{
+	i915_reset_control(true);
+	igt_force_gpu_reset(fd);
+}
+
 igt_main
 {
 	unsigned int caps = 0;
-	int fd = -1;
 
 	igt_skip_on_simulation();
 
 	igt_fixture {
 		fd = drm_open_driver(DRIVER_INTEL);
+
+		igt_require(i915_reset_control(true));
+		igt_force_gpu_reset(fd);
+		igt_install_exit_handler(exit_handler);
+
 		caps = print_welcome(fd);
 		igt_require_gem(fd);
 		igt_require_hang_ring(fd, I915_EXEC_DEFAULT);
@@ -313,7 +326,4 @@ igt_main
 		igt_require(!(caps & HAVE_SEMAPHORES));
 		test_inflight_internal(fd);
 	}
-
-	igt_fixture
-		close(fd);
 }
-- 
2.14.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH igt 4/5] igt/gem_fence_thresh: Use streaming reads for verify
  2017-09-11  8:55 CI urgent Chris Wilson
                   ` (2 preceding siblings ...)
  2017-09-11  8:56 ` [PATCH igt 3/5] igt/gem_eio: Install an exithandler to unwedge the device after failure Chris Wilson
@ 2017-09-11  8:56 ` Chris Wilson
  2017-09-11  8:56 ` [PATCH igt 5/5] igt/gem_flink_race: Limit name subtest to 5s Chris Wilson
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 16+ messages in thread
From: Chris Wilson @ 2017-09-11  8:56 UTC (permalink / raw)
  To: intel-gfx

At the moment, the verify tests use an extremely brutal write-read of
every dword, degrading performance to UC. If we break those up into
cachelines, we can do a wcb write/read at a time instead, roughly 8x
faster. We lose the accuracy of the forced wcb flushes around every dword,
but we are retaining the overall behaviour of checking reads following
writes instead. To compensate, we do check that a single dword write/read
works before using wcb aligned accesses.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/gem_fence_thrash.c | 116 +++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 101 insertions(+), 15 deletions(-)

diff --git a/tests/gem_fence_thrash.c b/tests/gem_fence_thrash.c
index 14d026a9..cf1bbb0b 100644
--- a/tests/gem_fence_thrash.c
+++ b/tests/gem_fence_thrash.c
@@ -28,7 +28,6 @@
 
 #include "config.h"
 
-#include "igt.h"
 #include <unistd.h>
 #include <stdlib.h>
 #include <stdio.h>
@@ -41,6 +40,12 @@
 #include <pthread.h>
 #include "drm.h"
 
+#include "igt.h"
+#include "igt_x86.h"
+
+#define PAGE_SIZE 4096
+#define CACHELINE 64
+
 #define OBJECT_SIZE (128*1024) /* restricted to 1MiB alignment on i915 fences */
 
 /* Before introduction of the LRU list for fences, allocation of a fence for a page
@@ -102,15 +107,78 @@ bo_copy (void *_arg)
 	return NULL;
 }
 
+#if defined(__x86_64__) && !defined(__clang__)
+#define MOVNT 512
+
+#pragma GCC push_options
+#pragma GCC target("sse4.1")
+
+#include <smmintrin.h>
+__attribute__((noinline))
+static void copy_wc_page(void *dst, void *src)
+{
+	if (igt_x86_features() & SSE4_1) {
+		__m128i *S = (__m128i *)src;
+		__m128i *D = (__m128i *)dst;
+
+		for (int i = 0; i < PAGE_SIZE/CACHELINE; i++) {
+			__m128i tmp[4];
+
+			tmp[0] = _mm_stream_load_si128(S++);
+			tmp[1] = _mm_stream_load_si128(S++);
+			tmp[2] = _mm_stream_load_si128(S++);
+			tmp[3] = _mm_stream_load_si128(S++);
+
+			_mm_store_si128(D++, tmp[0]);
+			_mm_store_si128(D++, tmp[1]);
+			_mm_store_si128(D++, tmp[2]);
+			_mm_store_si128(D++, tmp[3]);
+		}
+	} else
+		memcpy(dst, src, PAGE_SIZE);
+}
+static void copy_wc_cacheline(void *dst, void *src)
+{
+	if (igt_x86_features() & SSE4_1) {
+		__m128i *S = (__m128i *)src;
+		__m128i *D = (__m128i *)dst;
+		__m128i tmp[4];
+
+		tmp[0] = _mm_stream_load_si128(S++);
+		tmp[1] = _mm_stream_load_si128(S++);
+		tmp[2] = _mm_stream_load_si128(S++);
+		tmp[3] = _mm_stream_load_si128(S++);
+
+		_mm_store_si128(D++, tmp[0]);
+		_mm_store_si128(D++, tmp[1]);
+		_mm_store_si128(D++, tmp[2]);
+		_mm_store_si128(D++, tmp[3]);
+	} else
+		memcpy(dst, src, CACHELINE);
+}
+
+#pragma GCC pop_options
+
+#else
+static void copy_wc_page(void *dst, const void *src)
+{
+	memcpy(dst, src, PAGE_SIZE);
+}
+static void copy_wc_cacheline(void *dst, const void *src)
+{
+	memcpy(dst, src, CACHELINE);
+}
+#endif
+
 static void
 _bo_write_verify(struct test *t)
 {
 	int fd = t->fd;
 	int i, k;
 	uint32_t **s;
-	uint32_t v;
 	unsigned int dwords = OBJECT_SIZE >> 2;
 	const char *tile_str[] = { "none", "x", "y" };
+	uint32_t tmp[PAGE_SIZE/sizeof(uint32_t)];
 
 	igt_assert(t->tiling >= 0 && t->tiling <= I915_TILING_Y);
 	igt_assert_lt(0, t->num_surfaces);
@@ -122,21 +190,39 @@ _bo_write_verify(struct test *t)
 		s[k] = bo_create(fd, t->tiling);
 
 	for (k = 0; k < t->num_surfaces; k++) {
-		volatile uint32_t *a = s[k];
-
-		for (i = 0; i < dwords; i++) {
-			a[i] = i;
-			v = a[i];
-			igt_assert_f(v == i,
-				     "tiling %s: write failed at %d (%x)\n",
-				     tile_str[t->tiling], i, v);
+		uint32_t *a = s[k];
+
+		a[0] = 0xdeadbeef;
+		igt_assert_f(a[0] == 0xdeadbeef,
+			     "tiling %s: write failed at start (%x)\n",
+			     tile_str[t->tiling], a[0]);
+
+		a[dwords - 1] = 0xc0ffee;
+		igt_assert_f(a[dwords - 1] == 0xc0ffee,
+			     "tiling %s: write failed at end (%x)\n",
+			     tile_str[t->tiling], a[dwords - 1]);
+
+		for (i = 0; i < dwords; i += CACHELINE/sizeof(uint32_t)) {
+			for (int j = 0; j < CACHELINE/sizeof(uint32_t); j++)
+				a[i + j] = ~(i + j);
+
+			copy_wc_cacheline(tmp, a + i);
+			for (int j = 0; j < CACHELINE/sizeof(uint32_t); j++)
+				igt_assert_f(tmp[j] == ~(i+ j),
+					     "tiling %s: write failed at %d (%x)\n",
+					     tile_str[t->tiling], i + j, tmp[j]);
+
+			for (int j = 0; j < CACHELINE/sizeof(uint32_t); j++)
+				a[i + j] = i + j;
 		}
 
-		for (i = 0; i < dwords; i++) {
-			v = a[i];
-			igt_assert_f(v == i,
-				     "tiling %s: verify failed at %d (%x)\n",
-				     tile_str[t->tiling], i, v);
+		for (i = 0; i < dwords; i += PAGE_SIZE/sizeof(uint32_t)) {
+			copy_wc_page(tmp, a + i);
+			for (int j = 0; j < PAGE_SIZE/sizeof(uint32_t); j++) {
+				igt_assert_f(tmp[j] == i + j,
+					     "tiling %s: verify failed at %d (%x)\n",
+					     tile_str[t->tiling], i + j, tmp[j]);
+			}
 		}
 	}
 
-- 
2.14.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH igt 5/5] igt/gem_flink_race: Limit name subtest to 5s
  2017-09-11  8:55 CI urgent Chris Wilson
                   ` (3 preceding siblings ...)
  2017-09-11  8:56 ` [PATCH igt 4/5] igt/gem_fence_thresh: Use streaming reads for verify Chris Wilson
@ 2017-09-11  8:56 ` Chris Wilson
  2017-09-11 10:09   ` Arkadiusz Hiler
  2017-09-11 10:10   ` Szwichtenberg, Radoslaw
  2017-09-11  9:50 ` ✓ Fi.CI.BAT: success for series starting with [1/5] igt/gem_eio: inflight wedged requires long plugging Patchwork
  2017-09-11 11:51 ` ✓ Fi.CI.IGT: " Patchwork
  6 siblings, 2 replies; 16+ messages in thread
From: Chris Wilson @ 2017-09-11  8:56 UTC (permalink / raw)
  To: intel-gfx

At present, we try to do 1,000,000 cycles, which may be a reasonable
estimate for detecting the race, takes 6 minutes in practice on bxt on a
good day (as it spends more time doing rpm suspend/resume than actual work,
and that accounts for more of the relative difference in performance
between bxt and big core than the difference in clocks+ipc).

An ideal solution would be to have a data-race detector in the kernel
combined with a short test to exercise the different paths. Lacking the
DRD, use a shorter test anyway. 5s is chosen simply on the basis that
the other race subtest is also run over a 5s interval.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/gem_flink_race.c | 35 +++++++++++++++++++++++++----------
 1 file changed, 25 insertions(+), 10 deletions(-)

diff --git a/tests/gem_flink_race.c b/tests/gem_flink_race.c
index 9e3d3a62..322befe9 100644
--- a/tests/gem_flink_race.c
+++ b/tests/gem_flink_race.c
@@ -48,8 +48,14 @@ IGT_TEST_DESCRIPTION("Check for flink/open vs. gem close races.");
 volatile int pls_die = 0;
 int fd;
 
+struct flink_name {
+	pthread_t thread;
+	unsigned long count;
+};
+
 static void *thread_fn_flink_name(void *p)
 {
+	struct flink_name *t = p;
 	struct drm_gem_open open_struct;
 	int ret;
 
@@ -64,6 +70,7 @@ static void *thread_fn_flink_name(void *p)
 			igt_assert(name == 1);
 
 			gem_close(fd, open_struct.handle);
+			t->count++;
 		} else
 			igt_assert(errno == ENOENT);
 	}
@@ -71,42 +78,50 @@ static void *thread_fn_flink_name(void *p)
 	return (void *)0;
 }
 
-static void test_flink_name(void)
+static void test_flink_name(int timeout)
 {
-	pthread_t *threads;
+	struct flink_name *threads;
 	int r, i, num_threads;
+	unsigned long count;
+	char buf[256];
 	void *status;
+	int len;
 
 	num_threads = sysconf(_SC_NPROCESSORS_ONLN) - 1;
 	if (!num_threads)
 		num_threads = 1;
 
-	threads = calloc(num_threads, sizeof(pthread_t));
+	threads = calloc(num_threads, sizeof(*threads));
 
 	fd = drm_open_driver(DRIVER_INTEL);
 
 	for (i = 0; i < num_threads; i++) {
-		r = pthread_create(&threads[i], NULL,
-				   thread_fn_flink_name, NULL);
+		r = pthread_create(&threads[i].thread, NULL,
+				   thread_fn_flink_name, &threads[i]);
 		igt_assert_eq(r, 0);
 	}
 
-	for (i = 0; i < 1000000; i++) {
+	count = 0;
+	igt_until_timeout(timeout) {
 		uint32_t handle;
 
 		handle = gem_create(fd, 4096);
-
 		gem_flink(fd, handle);
-
 		gem_close(fd, handle);
+
+		count++;
 	}
 
 	pls_die = 1;
 
+	len = snprintf(buf, sizeof(buf), "Completed %lu cycles with [", count);
 	for (i = 0;  i < num_threads; i++) {
-		pthread_join(threads[i], &status);
+		pthread_join(threads[i].thread, &status);
 		igt_assert(status == 0);
+		len += snprintf(buf + len, sizeof(buf) - len, "%lu, ", threads[i].count);
 	}
+	snprintf(buf + len - 2, sizeof(buf) - len + 2, "] races");
+	igt_info("%s\n", buf);
 
 	close(fd);
 }
@@ -185,7 +200,7 @@ igt_main
 	igt_skip_on_simulation();
 
 	igt_subtest("flink_name")
-		test_flink_name();
+		test_flink_name(5);
 
 	igt_subtest("flink_close")
 		test_flink_close();
-- 
2.14.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* ✓ Fi.CI.BAT: success for series starting with [1/5] igt/gem_eio: inflight wedged requires long plugging
  2017-09-11  8:55 CI urgent Chris Wilson
                   ` (4 preceding siblings ...)
  2017-09-11  8:56 ` [PATCH igt 5/5] igt/gem_flink_race: Limit name subtest to 5s Chris Wilson
@ 2017-09-11  9:50 ` Patchwork
  2017-09-11 11:51 ` ✓ Fi.CI.IGT: " Patchwork
  6 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2017-09-11  9:50 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/5] igt/gem_eio: inflight wedged requires long plugging
URL   : https://patchwork.freedesktop.org/series/30109/
State : success

== Summary ==

IGT patchset tested on top of latest successful build
866970a57a9def534f4b4671b59670fb70f54141 meson: Bump required version to 0.40

with latest DRM-Tip kernel build CI_DRM_3068
9ef5732ddf69 drm-tip: 2017y-09m-10d-21h-59m-53s UTC integration manifest

Test kms_cursor_legacy:
        Subgroup basic-busy-flip-before-cursor-atomic:
                pass       -> FAIL       (fi-snb-2600) fdo#100215
Test kms_flip:
        Subgroup basic-flip-vs-modeset:
                pass       -> SKIP       (fi-skl-x1585l) fdo#101781

fdo#100215 https://bugs.freedesktop.org/show_bug.cgi?id=100215
fdo#101781 https://bugs.freedesktop.org/show_bug.cgi?id=101781

fi-bdw-5557u     total:289  pass:268  dwarn:0   dfail:0   fail:0   skip:21  time:448s
fi-bdw-gvtdvm    total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:458s
fi-blb-e6850     total:289  pass:224  dwarn:1   dfail:0   fail:0   skip:64  time:379s
fi-bsw-n3050     total:289  pass:243  dwarn:0   dfail:0   fail:0   skip:46  time:526s
fi-bwr-2160      total:289  pass:184  dwarn:0   dfail:0   fail:0   skip:105 time:268s
fi-bxt-j4205     total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:512s
fi-byt-j1900     total:289  pass:254  dwarn:1   dfail:0   fail:0   skip:34  time:508s
fi-byt-n2820     total:289  pass:250  dwarn:1   dfail:0   fail:0   skip:38  time:501s
fi-cfl-s         total:289  pass:250  dwarn:4   dfail:0   fail:0   skip:35  time:452s
fi-elk-e7500     total:289  pass:230  dwarn:0   dfail:0   fail:0   skip:59  time:455s
fi-glk-2a        total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:598s
fi-hsw-4770      total:289  pass:263  dwarn:0   dfail:0   fail:0   skip:26  time:431s
fi-hsw-4770r     total:289  pass:263  dwarn:0   dfail:0   fail:0   skip:26  time:408s
fi-ilk-650       total:289  pass:229  dwarn:0   dfail:0   fail:0   skip:60  time:441s
fi-ivb-3520m     total:289  pass:261  dwarn:0   dfail:0   fail:0   skip:28  time:492s
fi-ivb-3770      total:289  pass:261  dwarn:0   dfail:0   fail:0   skip:28  time:467s
fi-kbl-7500u     total:289  pass:264  dwarn:1   dfail:0   fail:0   skip:24  time:495s
fi-kbl-7560u     total:289  pass:270  dwarn:0   dfail:0   fail:0   skip:19  time:584s
fi-kbl-r         total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:588s
fi-pnv-d510      total:289  pass:223  dwarn:1   dfail:0   fail:0   skip:65  time:552s
fi-skl-6260u     total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:469s
fi-skl-6700k     total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:533s
fi-skl-6770hq    total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:506s
fi-skl-gvtdvm    total:289  pass:266  dwarn:0   dfail:0   fail:0   skip:23  time:459s
fi-skl-x1585l    total:289  pass:268  dwarn:0   dfail:0   fail:0   skip:21  time:489s
fi-snb-2520m     total:289  pass:251  dwarn:0   dfail:0   fail:0   skip:38  time:573s
fi-snb-2600      total:289  pass:249  dwarn:0   dfail:0   fail:1   skip:39  time:427s

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_169/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH igt 5/5] igt/gem_flink_race: Limit name subtest to 5s
  2017-09-11  8:56 ` [PATCH igt 5/5] igt/gem_flink_race: Limit name subtest to 5s Chris Wilson
@ 2017-09-11 10:09   ` Arkadiusz Hiler
  2017-09-11 10:10   ` Szwichtenberg, Radoslaw
  1 sibling, 0 replies; 16+ messages in thread
From: Arkadiusz Hiler @ 2017-09-11 10:09 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On Mon, Sep 11, 2017 at 09:56:02AM +0100, Chris Wilson wrote:
> At present, we try to do 1,000,000 cycles, which may be a reasonable
> estimate for detecting the race, takes 6 minutes in practice on bxt on a
> good day (as it spends more time doing rpm suspend/resume than actual work,
> and that accounts for more of the relative difference in performance
> between bxt and big core than the difference in clocks+ipc).
> 
> An ideal solution would be to have a data-race detector in the kernel
> combined with a short test to exercise the different paths. Lacking the
> DRD, use a shorter test anyway. 5s is chosen simply on the basis that
> the other race subtest is also run over a 5s interval.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH igt 5/5] igt/gem_flink_race: Limit name subtest to 5s
  2017-09-11  8:56 ` [PATCH igt 5/5] igt/gem_flink_race: Limit name subtest to 5s Chris Wilson
  2017-09-11 10:09   ` Arkadiusz Hiler
@ 2017-09-11 10:10   ` Szwichtenberg, Radoslaw
  1 sibling, 0 replies; 16+ messages in thread
From: Szwichtenberg, Radoslaw @ 2017-09-11 10:10 UTC (permalink / raw)
  To: intel-gfx, chris

On Mon, 2017-09-11 at 09:56 +0100, Chris Wilson wrote:
> At present, we try to do 1,000,000 cycles, which may be a reasonable
> estimate for detecting the race, takes 6 minutes in practice on bxt on a
> good day (as it spends more time doing rpm suspend/resume than actual work,
> and that accounts for more of the relative difference in performance
> between bxt and big core than the difference in clocks+ipc).
> 
> An ideal solution would be to have a data-race detector in the kernel
> combined with a short test to exercise the different paths. Lacking the
> DRD, use a shorter test anyway. 5s is chosen simply on the basis that
> the other race subtest is also run over a 5s interval.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Radoslaw Szwichtenberg <radoslaw.szwichtenberg@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH igt 3/5] igt/gem_eio: Install an exithandler to unwedge the device after failure
  2017-09-11  8:56 ` [PATCH igt 3/5] igt/gem_eio: Install an exithandler to unwedge the device after failure Chris Wilson
@ 2017-09-11 10:45   ` Arkadiusz Hiler
  0 siblings, 0 replies; 16+ messages in thread
From: Arkadiusz Hiler @ 2017-09-11 10:45 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On Mon, Sep 11, 2017 at 09:56:00AM +0100, Chris Wilson wrote:
> Under normal conditions, we try to repair the damage we inflict to the
> GPU, but if we fail we don't. Make sure that if the test does die, we do
> try to restore normal operation by using an atexit handler.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Arkadiusz Hiler <arkadiusz.hiler@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging
  2017-09-11  8:55 ` [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging Chris Wilson
@ 2017-09-11 10:57   ` Petri Latvala
  2017-09-11 11:06     ` Martin Peres
  2017-09-11 12:03   ` Michał Winiarski
  1 sibling, 1 reply; 16+ messages in thread
From: Petri Latvala @ 2017-09-11 10:57 UTC (permalink / raw)
  To: Martin Peres; +Cc: intel-gfx

On Mon, Sep 11, 2017 at 09:55:58AM +0100, Chris Wilson wrote:
> As our hangcheck may exceed 10s to declare the device wedged, we need to
> hold the plugging fence indefinitely. This makes using vgem as our input
> fence unusable, so resort to using sw_sync. At the same time, we can
> then check that the async result is also -EIO.
> 
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102616
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  tests/gem_eio.c | 79 ++++++++++++++++++++++-----------------------------------
>  1 file changed, 31 insertions(+), 48 deletions(-)
> 
> diff --git a/tests/gem_eio.c b/tests/gem_eio.c
> index 15120842..249510e7 100644
> --- a/tests/gem_eio.c
> +++ b/tests/gem_eio.c
> @@ -39,7 +39,7 @@
>  
>  #include <drm.h>
>  
> -#include "igt_vgem.h"
> +#include "sw_sync.h"
>  
>  IGT_TEST_DESCRIPTION("Test that specific ioctls report a wedged GPU (EIO).");
>  
> @@ -158,66 +158,49 @@ static void test_wait(int fd)
>  	trigger_reset(fd);
>  }
>  
> -struct cork {
> -	int device;
> -	uint32_t handle;
> -	uint32_t fence;
> -};
> -
> -static void plug(int fd, struct cork *c)
> -{
> -	struct vgem_bo bo;
> -	int dmabuf;
> -
> -	c->device = __drm_open_driver(DRIVER_VGEM);
> -	igt_require(c->device != -1);
> -
> -	bo.width = bo.height = 1;
> -	bo.bpp = 4;
> -	vgem_create(c->device, &bo);
> -	c->fence = vgem_fence_attach(c->device, &bo, VGEM_FENCE_WRITE);
> -
> -	dmabuf = prime_handle_to_fd(c->device, bo.handle);
> -	c->handle = prime_fd_to_handle(fd, dmabuf);
> -	close(dmabuf);
> -}
> -
> -static void unplug(struct cork *c)
> -{
> -	vgem_fence_signal(c->device, c->fence);
> -	close(c->device);
> -}
> -
> -static void test_inflight(int fd)
> +static void test_inflight_external(int fd)
>  {
>  	struct drm_i915_gem_execbuffer2 execbuf;
> -	struct drm_i915_gem_exec_object2 obj[2];
> +	struct drm_i915_gem_exec_object2 obj;
>  	uint32_t bbe = MI_BATCH_BUFFER_END;
>  	igt_hang_t hang;
> -	struct cork cork;
> +	int timeline, fence;
> +
> +	igt_require_sw_sync();
> +	igt_require(gem_has_exec_fence(fd));
> +
> +	timeline = sw_sync_timeline_create();
> +	fence = sw_sync_timeline_create_fence(timeline, 1);
>  
>  	igt_require(i915_reset_control(false));
>  	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
>  
> -	plug(fd, &cork);
> -
> -	memset(obj, 0, sizeof(obj));
> -	obj[0].handle = cork.handle;
> -	obj[1].handle = gem_create(fd, 4096);
> -	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
> +	memset(&obj, 0, sizeof(obj));
> +	obj.handle = gem_create(fd, 4096);
> +	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
>  
>  	memset(&execbuf, 0, sizeof(execbuf));
> -	execbuf.buffers_ptr = to_user_pointer(obj);
> -	execbuf.buffer_count = 2;
> +	execbuf.buffers_ptr = to_user_pointer(&obj);
> +	execbuf.buffer_count = 1;
> +	execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
> +	execbuf.rsvd2 = (uint32_t)fence;
>  
> -	gem_execbuf(fd, &execbuf);
> +	gem_execbuf_wr(fd, &execbuf);
> +	close(fence);
>  
> -	igt_post_hang_ring(fd, hang);
> -	unplug(&cork); /* only now submit our batches */
> -	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
> +	fence = execbuf.rsvd2 >> 32;
> +	igt_assert(fence != -1);
> +
> +	igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
> +	sw_sync_timeline_inc(timeline, 1); /* only now submit our batches */
> +
> +	igt_assert_eq(__gem_wait(fd, obj.handle, -1), 0);
> +	igt_assert_eq(sync_fence_status(fence), -EIO);
> +	close(fence);
>  
>  	igt_require(i915_reset_control(true));
>  	trigger_reset(fd);
> +	close(timeline);
>  }
>  
>  igt_main
> @@ -241,8 +224,8 @@ igt_main
>  	igt_subtest("wait")
>  		test_wait(fd);
>  
> -	igt_subtest("in-flight")
> -		test_inflight(fd);
> +	igt_subtest("in-flight-external")
> +		test_inflight_external(fd);
>


gem_eio@in-flight is currently filtered in cibuglog. Martin, are you
prepared for this subtest rename?


-- 
Petri Latvala
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging
  2017-09-11 10:57   ` Petri Latvala
@ 2017-09-11 11:06     ` Martin Peres
  2017-09-11 11:11       ` Chris Wilson
  0 siblings, 1 reply; 16+ messages in thread
From: Martin Peres @ 2017-09-11 11:06 UTC (permalink / raw)
  To: Petri Latvala; +Cc: intel-gfx

On 11/09/17 13:57, Petri Latvala wrote:
> gem_eio@in-flight is currently filtered in cibuglog. Martin, are you
> prepared for this subtest rename?
> 

Yes, thanks for the heads up!
---------------------------------------------------------------------
Intel Finland Oy
Registered Address: PL 281, 00181 Helsinki 
Business Identity Code: 0357606 - 4 
Domiciled in Helsinki 

This e-mail and any attachments may contain confidential material for
the sole use of the intended recipient(s). Any review or distribution
by others is strictly prohibited. If you are not the intended
recipient, please contact the sender and delete all copies.
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging
  2017-09-11 11:06     ` Martin Peres
@ 2017-09-11 11:11       ` Chris Wilson
  0 siblings, 0 replies; 16+ messages in thread
From: Chris Wilson @ 2017-09-11 11:11 UTC (permalink / raw)
  To: Martin Peres, Petri Latvala; +Cc: intel-gfx

Quoting Martin Peres (2017-09-11 12:06:55)
> On 11/09/17 13:57, Petri Latvala wrote:
> > gem_eio@in-flight is currently filtered in cibuglog. Martin, are you
> > prepared for this subtest rename?
> > 
> 
> Yes, thanks for the heads up!

We wanted to run the others but not inflight. Once the new test stops
killing the kernel, we should be ok to run all of gem_eio (or right now,
if killing the kernel with an oops and timeout is ok).
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* ✓ Fi.CI.IGT: success for series starting with [1/5] igt/gem_eio: inflight wedged requires long plugging
  2017-09-11  8:55 CI urgent Chris Wilson
                   ` (5 preceding siblings ...)
  2017-09-11  9:50 ` ✓ Fi.CI.BAT: success for series starting with [1/5] igt/gem_eio: inflight wedged requires long plugging Patchwork
@ 2017-09-11 11:51 ` Patchwork
  6 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2017-09-11 11:51 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/5] igt/gem_eio: inflight wedged requires long plugging
URL   : https://patchwork.freedesktop.org/series/30109/
State : success

== Summary ==

Test perf:
        Subgroup blocking:
                pass       -> FAIL       (shard-hsw) fdo#102252 +1

fdo#102252 https://bugs.freedesktop.org/show_bug.cgi?id=102252

shard-hsw        total:2303 pass:1237 dwarn:0   dfail:0   fail:13  skip:1053 time:9266s

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_169/shards.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging
  2017-09-11  8:55 ` [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging Chris Wilson
  2017-09-11 10:57   ` Petri Latvala
@ 2017-09-11 12:03   ` Michał Winiarski
  2017-09-11 12:14     ` Chris Wilson
  1 sibling, 1 reply; 16+ messages in thread
From: Michał Winiarski @ 2017-09-11 12:03 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On Mon, Sep 11, 2017 at 09:55:58AM +0100, Chris Wilson wrote:
> As our hangcheck may exceed 10s to declare the device wedged, we need to
> hold the plugging fence indefinitely. This makes using vgem as our input
> fence unusable, so resort to using sw_sync. At the same time, we can
> then check that the async result is also -EIO.
> 
> Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=102616
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  tests/gem_eio.c | 79 ++++++++++++++++++++++-----------------------------------
>  1 file changed, 31 insertions(+), 48 deletions(-)
> 
> diff --git a/tests/gem_eio.c b/tests/gem_eio.c
> index 15120842..249510e7 100644
> --- a/tests/gem_eio.c
> +++ b/tests/gem_eio.c
> @@ -39,7 +39,7 @@
>  
>  #include <drm.h>
>  
> -#include "igt_vgem.h"
> +#include "sw_sync.h"
>  
>  IGT_TEST_DESCRIPTION("Test that specific ioctls report a wedged GPU (EIO).");
>  
> @@ -158,66 +158,49 @@ static void test_wait(int fd)
>  	trigger_reset(fd);
>  }
>  
> -struct cork {
> -	int device;
> -	uint32_t handle;
> -	uint32_t fence;
> -};
> -
> -static void plug(int fd, struct cork *c)
> -{
> -	struct vgem_bo bo;
> -	int dmabuf;
> -
> -	c->device = __drm_open_driver(DRIVER_VGEM);
> -	igt_require(c->device != -1);
> -
> -	bo.width = bo.height = 1;
> -	bo.bpp = 4;
> -	vgem_create(c->device, &bo);
> -	c->fence = vgem_fence_attach(c->device, &bo, VGEM_FENCE_WRITE);
> -
> -	dmabuf = prime_handle_to_fd(c->device, bo.handle);
> -	c->handle = prime_fd_to_handle(fd, dmabuf);
> -	close(dmabuf);
> -}
> -
> -static void unplug(struct cork *c)
> -{
> -	vgem_fence_signal(c->device, c->fence);
> -	close(c->device);
> -}
> -
> -static void test_inflight(int fd)
> +static void test_inflight_external(int fd)
>  {
>  	struct drm_i915_gem_execbuffer2 execbuf;
> -	struct drm_i915_gem_exec_object2 obj[2];
> +	struct drm_i915_gem_exec_object2 obj;
>  	uint32_t bbe = MI_BATCH_BUFFER_END;
>  	igt_hang_t hang;
> -	struct cork cork;
> +	int timeline, fence;
> +
> +	igt_require_sw_sync();
> +	igt_require(gem_has_exec_fence(fd));
> +
> +	timeline = sw_sync_timeline_create();
> +	fence = sw_sync_timeline_create_fence(timeline, 1);
>  
>  	igt_require(i915_reset_control(false));
>  	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
>  
> -	plug(fd, &cork);
> -
> -	memset(obj, 0, sizeof(obj));
> -	obj[0].handle = cork.handle;
> -	obj[1].handle = gem_create(fd, 4096);
> -	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
> +	memset(&obj, 0, sizeof(obj));
> +	obj.handle = gem_create(fd, 4096);
> +	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
>  
>  	memset(&execbuf, 0, sizeof(execbuf));
> -	execbuf.buffers_ptr = to_user_pointer(obj);
> -	execbuf.buffer_count = 2;
> +	execbuf.buffers_ptr = to_user_pointer(&obj);
> +	execbuf.buffer_count = 1;
> +	execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
> +	execbuf.rsvd2 = (uint32_t)fence;
>  
> -	gem_execbuf(fd, &execbuf);
> +	gem_execbuf_wr(fd, &execbuf);
> +	close(fence);
>  
> -	igt_post_hang_ring(fd, hang);
> -	unplug(&cork); /* only now submit our batches */
> -	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
> +	fence = execbuf.rsvd2 >> 32;
> +	igt_assert(fence != -1);
> +
> +	igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
> +	sw_sync_timeline_inc(timeline, 1); /* only now submit our batches */
> +
> +	igt_assert_eq(__gem_wait(fd, obj.handle, -1), 0);

Is there any reason for rolling our own __gem_wait here? :>
(not a part of this patch... just curious)

> +	igt_assert_eq(sync_fence_status(fence), -EIO);
> +	close(fence);

I'd leave the cork & plug interface intact to make sure the intention is clear
(replacing just the inner workings), but that's just me.
You could add a comment stating that we're using the fence to plug the GPU, but
I guess the intent is kind of obvious here... So:

Reviewed-by: Michał Winiarski <michal.winiarski@intel.com>

-Michał

>  
>  	igt_require(i915_reset_control(true));
>  	trigger_reset(fd);
> +	close(timeline);
>  }
>  
>  igt_main
> @@ -241,8 +224,8 @@ igt_main
>  	igt_subtest("wait")
>  		test_wait(fd);
>  
> -	igt_subtest("in-flight")
> -		test_inflight(fd);
> +	igt_subtest("in-flight-external")
> +		test_inflight_external(fd);
>  
>  	igt_fixture
>  		close(fd);
> -- 
> 2.14.1
> 
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging
  2017-09-11 12:03   ` Michał Winiarski
@ 2017-09-11 12:14     ` Chris Wilson
  0 siblings, 0 replies; 16+ messages in thread
From: Chris Wilson @ 2017-09-11 12:14 UTC (permalink / raw)
  To: Michał Winiarski; +Cc: intel-gfx

Quoting Michał Winiarski (2017-09-11 13:03:28)
> On Mon, Sep 11, 2017 at 09:55:58AM +0100, Chris Wilson wrote:
> > -     igt_post_hang_ring(fd, hang);
> > -     unplug(&cork); /* only now submit our batches */
> > -     igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
> > +     fence = execbuf.rsvd2 >> 32;
> > +     igt_assert(fence != -1);
> > +
> > +     igt_post_hang_ring(fd, hang); /* wedged, with an unready batch */
> > +     sw_sync_timeline_inc(timeline, 1); /* only now submit our batches */
> > +
> > +     igt_assert_eq(__gem_wait(fd, obj.handle, -1), 0);
> 
> Is there any reason for rolling our own __gem_wait here? :>
> (not a part of this patch... just curious)

A modern invention.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2017-09-11 12:14 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-11  8:55 CI urgent Chris Wilson
2017-09-11  8:55 ` [PATCH igt 1/5] igt/gem_eio: inflight wedged requires long plugging Chris Wilson
2017-09-11 10:57   ` Petri Latvala
2017-09-11 11:06     ` Martin Peres
2017-09-11 11:11       ` Chris Wilson
2017-09-11 12:03   ` Michał Winiarski
2017-09-11 12:14     ` Chris Wilson
2017-09-11  8:55 ` [PATCH igt 2/5] igt/gem_eio: Exercise wedged with native in-flight requests Chris Wilson
2017-09-11  8:56 ` [PATCH igt 3/5] igt/gem_eio: Install an exithandler to unwedge the device after failure Chris Wilson
2017-09-11 10:45   ` Arkadiusz Hiler
2017-09-11  8:56 ` [PATCH igt 4/5] igt/gem_fence_thresh: Use streaming reads for verify Chris Wilson
2017-09-11  8:56 ` [PATCH igt 5/5] igt/gem_flink_race: Limit name subtest to 5s Chris Wilson
2017-09-11 10:09   ` Arkadiusz Hiler
2017-09-11 10:10   ` Szwichtenberg, Radoslaw
2017-09-11  9:50 ` ✓ Fi.CI.BAT: success for series starting with [1/5] igt/gem_eio: inflight wedged requires long plugging Patchwork
2017-09-11 11:51 ` ✓ Fi.CI.IGT: " Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.