* [PATCH i-g-t 1/3] lib/dummyload: Add pollable spin batch
From: Tvrtko Ursulin @ 2018-03-22 17:24 UTC (permalink / raw)
  To: igt-dev; +Cc: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

A pollable spin batch exports a spin->running pointer which can be
dereferenced to check whether the spinner is actually executing on the
GPU.

This is useful for tests which want to make sure they do not proceed to
their next step whilst the spinner is potentially still only being
processed by the driver and not yet actually executing.

A pollable spinner can be created with igt_spin_batch_new_poll or
__igt_spin_batch_new_poll, after which igt_spin_busywait_until_running
can be used to busy-wait until it is executing.
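
As an illustration only (a minimal sketch; the subtest body, the default
context and the render engine choice are assumptions, not part of this
patch), a test would typically do something like:

#include "igt.h"

static void example_subtest(int fd)
{
	igt_spin_t *spin;

	/* Submit a spinner and busy-wait until the GPU is really running it. */
	spin = igt_spin_batch_new_poll(fd, 0 /* default context */, I915_EXEC_RENDER);
	igt_spin_busywait_until_running(spin);

	/*
	 * The batch is now known to be executing on the GPU rather than
	 * merely queued in the driver, so the test can proceed.
	 */

	igt_spin_batch_end(spin);
	igt_spin_batch_free(fd, spin);
}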

v2:
 * Move READ_ONCE to igt_core.
 * Add igt_spin_busywait_until_running. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 benchmarks/gem_wsim.c |   2 -
 lib/igt_core.h        |   2 +
 lib/igt_dummyload.c   | 192 ++++++++++++++++++++++++++++++++++++++++----------
 lib/igt_dummyload.h   |  17 +++++
 lib/igt_gt.c          |   2 +-
 lib/ioctl_wrappers.c  |   2 +-
 lib/ioctl_wrappers.h  |   1 +
 7 files changed, 175 insertions(+), 43 deletions(-)

diff --git a/benchmarks/gem_wsim.c b/benchmarks/gem_wsim.c
index c15dc365ea95..57dec7b5de34 100644
--- a/benchmarks/gem_wsim.c
+++ b/benchmarks/gem_wsim.c
@@ -980,8 +980,6 @@ current_seqno(struct workload *wrk, enum intel_engine_id engine)
 		return wrk->seqno[engine];
 }
 
-#define READ_ONCE(x) (*(volatile typeof(x) *)(&(x)))
-
 static uint32_t
 read_status_page(struct workload *wrk, unsigned int idx)
 {
diff --git a/lib/igt_core.h b/lib/igt_core.h
index 66523a208c31..57e97f2bb5f4 100644
--- a/lib/igt_core.h
+++ b/lib/igt_core.h
@@ -950,4 +950,6 @@ void igt_kmsg(const char *format, ...);
 #define KMSG_INFO	"<6>[IGT] "
 #define KMSG_DEBUG	"<7>[IGT] "
 
+#define READ_ONCE(x) (*(volatile typeof(x) *)(&(x)))
+
 #endif /* IGT_CORE_H */
diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index dbc92e8f2951..98ab7ac2c6e9 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -74,35 +74,48 @@ fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
 	reloc->write_domain = write_domains;
 }
 
-static int emit_recursive_batch(igt_spin_t *spin,
-				int fd, uint32_t ctx, unsigned engine,
-				uint32_t dep, bool out_fence)
+#define OUT_FENCE	(1 << 0)
+#define POLL_RUN	(1 << 1)
+
+static int
+emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
+		     uint32_t dep, unsigned int flags)
 {
 #define SCRATCH 0
 #define BATCH 1
 	const int gen = intel_gen(intel_get_drm_devid(fd));
-	struct drm_i915_gem_exec_object2 obj[2];
-	struct drm_i915_gem_relocation_entry relocs[2];
-	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_relocation_entry relocs[2], *r;
+	struct drm_i915_gem_execbuffer2 *execbuf;
+	struct drm_i915_gem_exec_object2 *obj;
 	unsigned int engines[16];
 	unsigned int nengine;
 	int fence_fd = -1;
-	uint32_t *batch;
+	uint32_t *batch, *batch_start;
 	int i;
 
 	nengine = 0;
 	if (engine == ALL_ENGINES) {
-		for_each_engine(fd, engine)
-			if (engine)
+		for_each_engine(fd, engine) {
+			if (engine) {
+				if (flags & POLL_RUN)
+					igt_require(!(flags & POLL_RUN) ||
+						    gem_can_store_dword(fd, engine));
+
 				engines[nengine++] = engine;
+			}
+		}
 	} else {
 		gem_require_ring(fd, engine);
+		igt_require(!(flags & POLL_RUN) ||
+			    gem_can_store_dword(fd, engine));
 		engines[nengine++] = engine;
 	}
 	igt_require(nengine);
 
-	memset(&execbuf, 0, sizeof(execbuf));
-	memset(obj, 0, sizeof(obj));
+	memset(&spin->execbuf, 0, sizeof(spin->execbuf));
+	execbuf = &spin->execbuf;
+	memset(spin->obj, 0, sizeof(spin->obj));
+	obj = spin->obj;
 	memset(relocs, 0, sizeof(relocs));
 
 	obj[BATCH].handle = gem_create(fd, BATCH_SIZE);
@@ -113,19 +126,66 @@ static int emit_recursive_batch(igt_spin_t *spin,
 				       	BATCH_SIZE, PROT_WRITE);
 	gem_set_domain(fd, obj[BATCH].handle,
 			I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
-	execbuf.buffer_count++;
+	execbuf->buffer_count++;
+	batch_start = batch;
 
 	if (dep) {
+		igt_assert(!(flags & POLL_RUN));
+
 		/* dummy write to dependency */
 		obj[SCRATCH].handle = dep;
 		fill_reloc(&relocs[obj[BATCH].relocation_count++],
 			   dep, 1020,
 			   I915_GEM_DOMAIN_RENDER,
 			   I915_GEM_DOMAIN_RENDER);
-		execbuf.buffer_count++;
+		execbuf->buffer_count++;
+	} else if (flags & POLL_RUN) {
+		unsigned int offset;
+
+		igt_assert(!dep);
+
+		if (gen == 4 || gen == 5)
+			execbuf->flags |= I915_EXEC_SECURE;
+
+		spin->poll_handle = gem_create(fd, 4096);
+
+		if (__gem_set_caching(fd, spin->poll_handle,
+				      I915_CACHING_CACHED) == 0)
+			spin->running = __gem_mmap__cpu(fd, spin->poll_handle,
+							0, 4096,
+							PROT_READ | PROT_WRITE);
+		else
+			spin->running = __gem_mmap__wc(fd, spin->poll_handle,
+						       0, 4096,
+						       PROT_READ | PROT_WRITE);
+		igt_assert(spin->running);
+		igt_assert_eq(*spin->running, 0);
+
+		*batch++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+
+		if (gen >= 8) {
+			offset = 1;
+			*batch++ = 0;
+			*batch++ = 0;
+		} else if (gen >= 4) {
+			offset = 2;
+			*batch++ = 0;
+			*batch++ = 0;
+		} else {
+			offset = 1;
+			batch[-1]--;
+			*batch++ = 0;
+		}
+
+		*batch++ = 1;
+
+		obj[SCRATCH].handle = spin->poll_handle;
+		fill_reloc(&relocs[obj[BATCH].relocation_count++],
+			   spin->poll_handle, offset, 0, 0);
+		execbuf->buffer_count++;
 	}
 
-	spin->batch = batch;
+	spin->batch = batch = batch_start + 64 / sizeof(*batch);
 	spin->handle = obj[BATCH].handle;
 
 	/* Allow ourselves to be preempted */
@@ -145,40 +205,42 @@ static int emit_recursive_batch(igt_spin_t *spin,
 	batch += 1000;
 
 	/* recurse */
-	fill_reloc(&relocs[obj[BATCH].relocation_count],
-		   obj[BATCH].handle, (batch - spin->batch) + 1,
-		   I915_GEM_DOMAIN_COMMAND, 0);
+	r = &relocs[obj[BATCH].relocation_count++];
+	r->target_handle = obj[BATCH].handle;
+	r->offset = (batch + 1 - batch_start) * sizeof(*batch);
+	r->read_domains = I915_GEM_DOMAIN_COMMAND;
+	r->delta = 64;
 	if (gen >= 8) {
 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-		*batch++ = 0;
+		*batch++ = r->delta;
 		*batch++ = 0;
 	} else if (gen >= 6) {
 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
-		*batch++ = 0;
+		*batch++ = r->delta;
 	} else {
 		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
-		*batch = 0;
-		if (gen < 4) {
-			*batch |= 1;
-			relocs[obj[BATCH].relocation_count].delta = 1;
-		}
+		if (gen < 4)
+			r->delta |= 1;
+		*batch = r->delta;
 		batch++;
 	}
-	obj[BATCH].relocation_count++;
 	obj[BATCH].relocs_ptr = to_user_pointer(relocs);
 
-	execbuf.buffers_ptr = to_user_pointer(obj + (2 - execbuf.buffer_count));
-	execbuf.rsvd1 = ctx;
+	execbuf->buffers_ptr = to_user_pointer(obj +
+					       (2 - execbuf->buffer_count));
+	execbuf->rsvd1 = ctx;
 
-	if (out_fence)
-		execbuf.flags |= I915_EXEC_FENCE_OUT;
+	if (flags & OUT_FENCE)
+		execbuf->flags |= I915_EXEC_FENCE_OUT;
 
 	for (i = 0; i < nengine; i++) {
-		execbuf.flags &= ~ENGINE_MASK;
-		execbuf.flags |= engines[i];
-		gem_execbuf_wr(fd, &execbuf);
-		if (out_fence) {
-			int _fd = execbuf.rsvd2 >> 32;
+		execbuf->flags &= ~ENGINE_MASK;
+		execbuf->flags |= engines[i];
+
+		gem_execbuf_wr(fd, execbuf);
+
+		if (flags & OUT_FENCE) {
+			int _fd = execbuf->rsvd2 >> 32;
 
 			igt_assert(_fd >= 0);
 			if (fence_fd == -1) {
@@ -194,12 +256,20 @@ static int emit_recursive_batch(igt_spin_t *spin,
 		}
 	}
 
+	/* Make it easier for callers to resubmit. */
+
+	obj[BATCH].relocation_count = 0;
+	obj[BATCH].relocs_ptr = 0;
+
+	obj[SCRATCH].flags = EXEC_OBJECT_PINNED;
+	obj[BATCH].flags = EXEC_OBJECT_PINNED;
+
 	return fence_fd;
 }
 
 static igt_spin_t *
 ___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
-		      int out_fence)
+		      unsigned int flags)
 {
 	igt_spin_t *spin;
 
@@ -207,7 +277,7 @@ ___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
 	igt_assert(spin);
 
 	spin->out_fence = emit_recursive_batch(spin, fd, ctx, engine, dep,
-					       out_fence);
+					       flags);
 
 	pthread_mutex_lock(&list_lock);
 	igt_list_add(&spin->link, &spin_list);
@@ -219,7 +289,7 @@ ___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
 igt_spin_t *
 __igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
 {
-	return ___igt_spin_batch_new(fd, ctx, engine, dep, false);
+	return ___igt_spin_batch_new(fd, ctx, engine, dep, 0);
 }
 
 /**
@@ -253,7 +323,7 @@ igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
 igt_spin_t *
 __igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine)
 {
-	return ___igt_spin_batch_new(fd, ctx, engine, 0, true);
+	return ___igt_spin_batch_new(fd, ctx, engine, 0, OUT_FENCE);
 }
 
 /**
@@ -286,6 +356,42 @@ igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine)
 	return spin;
 }
 
+igt_spin_t *
+__igt_spin_batch_new_poll(int fd, uint32_t ctx, unsigned engine)
+{
+	return ___igt_spin_batch_new(fd, ctx, engine, 0, POLL_RUN);
+}
+
+/**
+ * igt_spin_batch_new_poll:
+ * @fd: open i915 drm file descriptor
+ * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
+ *          than 0, execute on all available rings.
+ *
+ * Start a recursive batch on a ring. Immediately returns a #igt_spin_t that
+ * contains the batch's handle that can be waited upon. The returned structure
+ * must be passed to igt_spin_batch_free() for post-processing.
+ *
+ * igt_spin_t->running will contain a pointer whose target will change from
+ * zero to one once the spinner actually starts executing on the GPU.
+ *
+ * Returns:
+ * Structure with helper internal state for igt_spin_batch_free().
+ */
+igt_spin_t *
+igt_spin_batch_new_poll(int fd, uint32_t ctx, unsigned engine)
+{
+	igt_spin_t *spin;
+
+	igt_require_gem(fd);
+	igt_require(gem_mmap__has_wc(fd));
+
+	spin = __igt_spin_batch_new_poll(fd, ctx, engine);
+	igt_assert(gem_bo_busy(fd, spin->handle));
+
+	return spin;
+}
+
 static void notify(union sigval arg)
 {
 	igt_spin_t *spin = arg.sival_ptr;
@@ -340,6 +446,8 @@ void igt_spin_batch_end(igt_spin_t *spin)
 	if (!spin)
 		return;
 
+	igt_assert(*spin->batch == MI_ARB_CHK ||
+		   *spin->batch == MI_BATCH_BUFFER_END);
 	*spin->batch = MI_BATCH_BUFFER_END;
 	__sync_synchronize();
 }
@@ -365,7 +473,13 @@ void igt_spin_batch_free(int fd, igt_spin_t *spin)
 		timer_delete(spin->timer);
 
 	igt_spin_batch_end(spin);
-	gem_munmap(spin->batch, BATCH_SIZE);
+	gem_munmap((void *)((unsigned long)spin->batch & (~4095UL)),
+		   BATCH_SIZE);
+
+	if (spin->running) {
+		gem_munmap(spin->running, 4096);
+		gem_close(fd, spin->poll_handle);
+	}
 
 	gem_close(fd, spin->handle);
 
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index 4103e4ab9e36..a8ec213fe8f3 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -36,6 +36,10 @@ typedef struct igt_spin {
 	struct igt_list link;
 	uint32_t *batch;
 	int out_fence;
+	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_execbuffer2 execbuf;
+	uint32_t poll_handle;
+	bool *running;
 } igt_spin_t;
 
 igt_spin_t *__igt_spin_batch_new(int fd,
@@ -55,10 +59,23 @@ igt_spin_t *igt_spin_batch_new_fence(int fd,
 				     uint32_t ctx,
 				     unsigned engine);
 
+igt_spin_t *__igt_spin_batch_new_poll(int fd,
+				       uint32_t ctx,
+				       unsigned engine);
+igt_spin_t *igt_spin_batch_new_poll(int fd,
+				    uint32_t ctx,
+				    unsigned engine);
+
 void igt_spin_batch_set_timeout(igt_spin_t *spin, int64_t ns);
 void igt_spin_batch_end(igt_spin_t *spin);
 void igt_spin_batch_free(int fd, igt_spin_t *spin);
 
+static inline void igt_spin_busywait_until_running(igt_spin_t *spin)
+{
+	while (!READ_ONCE(*spin->running))
+		;
+}
+
 void igt_terminate_spin_batches(void);
 
 enum igt_cork_type {
diff --git a/lib/igt_gt.c b/lib/igt_gt.c
index 01aebc670862..4569fd36bd85 100644
--- a/lib/igt_gt.c
+++ b/lib/igt_gt.c
@@ -609,7 +609,7 @@ bool gem_can_store_dword(int fd, unsigned int engine)
 	if (gen == 3 && (info->is_grantsdale || info->is_alviso))
 		return false; /* only supports physical addresses */
 
-	if (gen == 6 && (engine & ~(3<<13)) == I915_EXEC_BSD)
+	if (gen == 6 && ((engine & 0x3f) == I915_EXEC_BSD))
 		return false; /* kills the machine! */
 
 	if (info->is_broadwater)
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 8748cfcfc04f..4e1a08bf06b4 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -198,7 +198,7 @@ void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
 	igt_assert(__gem_set_tiling(fd, handle, tiling, stride) == 0);
 }
 
-static int __gem_set_caching(int fd, uint32_t handle, uint32_t caching)
+int __gem_set_caching(int fd, uint32_t handle, uint32_t caching)
 {
 	struct drm_i915_gem_caching arg;
 	int err;
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index 13fbe3c103c0..b966f72c90a8 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -61,6 +61,7 @@ bool gem_get_tiling(int fd, uint32_t handle, uint32_t *tiling, uint32_t *swizzle
 void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride);
 int __gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride);
 
+int __gem_set_caching(int fd, uint32_t handle, uint32_t caching);
 void gem_set_caching(int fd, uint32_t handle, uint32_t caching);
 uint32_t gem_get_caching(int fd, uint32_t handle);
 uint32_t gem_flink(int fd, uint32_t handle);
-- 
2.14.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

* [PATCH i-g-t 2/3] tests/gem_eio: Speed up test execution
From: Tvrtko Ursulin @ 2018-03-22 17:24 UTC (permalink / raw)
  To: igt-dev; +Cc: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

If, instead of relying on regular GPU hang detection, we trigger hangs
manually as soon as we know our batch of interest is actually executing
on the GPU, we can dramatically speed up various subtests.

This is enabled by the pollable spin batch added in the previous patch.
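
For illustration only, a sketch of the resulting pattern (not code from
this patch; igt_force_gpu_reset() here stands in for whichever wedge or
reset helper the individual subtest actually uses, and the engine choice
is an assumption):

#include "igt.h"

static void fast_hang_subtest(int fd)
{
	igt_spin_t *spin = igt_spin_batch_new_poll(fd, 0, I915_EXEC_RENDER);

	/* Poll until the spinner is actually executing on the GPU... */
	igt_spin_busywait_until_running(spin);

	/*
	 * ...then trigger the hang/reset immediately, instead of waiting
	 * for the driver's hang detection to time out on its own.
	 */
	igt_force_gpu_reset(fd);

	igt_spin_batch_free(fd, spin);
}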

v2:
 * Test gem_wait after reset/wedge, and with reset/wedge triggered at a few
   predefined intervals after the gem_wait invocation. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
---
 lib.tar         | Bin 0 -> 102400 bytes
 tests/gem_eio.c | 214 ++++++++++++++++++++++++++++++++++++++++++--------------
 2 files changed, 160 insertions(+), 54 deletions(-)
 create mode 100644 lib.tar

diff --git a/lib.tar b/lib.tar
new file mode 100644
index 0000000000000000000000000000000000000000..ea04fad219a87f2e975852989526f8da4c9b7d6d
GIT binary patch
literal 102400
[base85-encoded binary payload of lib.tar (102400 bytes) omitted; not reviewable as text]
zpubERb%I>s=gB7TEoV#77E&l=$Me&1?h9{{L4;&!+P6f<Y)Y_^hS6`FEf;;AKu+t-
z`bfcs@6L_nWt>!flF*P;Ya#?+STM;`1qj%q#FkhV2dM!T{*=xS;lD2Zqyki(u3!>~
zXHnS!6(u|fYpF_f5n@Ta37;IHH9I9A2TKsEzc_WluG<iuIob(KC2;%q`2f?OxCAuD
zbu~h6Jf4lFJzQQ>#f`6TLj=Q@sI=-AyOJsp?j<lrYq)i-MWXMmVmTD<Hdisy3FgXf
z5Q^MtlTQLqD9zh$4KXWjfl8?~Uu%>2WLOHS$)-jW7O}aSPXL-4B+@1K{yNw%Oi&Vf
zv^s!WB+xIkZ<|q07L(^w2pTDOa0bY6R!oqGtq<8{GDY@2@rfWv96{1ZLyNqNIO%LT
z-ib@=puX>s!U^jPTwuYhR<&kWGP&se;~#kb^ji(FB3uv`bNw#odQ_<%{c#^PP~nz3
z%=PGE@;DAvXs^pD?1KWUnP20Vbl?Xepa}abI>DHkCS6FQ9~SLYzOuw*%`K0Z7A{3Y
zhvuqz3{3Yy%z%pqS9r;*U#eqKG9rE}eSr*u8hzx=K~!0QG&k-0MxE0Fe1mykI^z@^
zM9U6hO?wjCa0L!=3IR%~MKxIeZFI(|5w9*<J(nRN7}B9NYRN8vn9mbPxbcAAVp+3L
zFHF3PV1;^~acG!f3@(W1O}m+@-Vtk!${uy?z7~Km#Z6<Wof$(WL?2Aph_kU#u4t0(
ziN)Zuz|pktJAj2tave-D9c|&vJd7+{>5JYN4H?_7>nA@QKX0(m8N_`#i`;YOHWUKv
zgIAUd=JF+-DDtclg7lbRv!|EfACV&56L%`xYcEQCX<xVat^v+>kG?ulBCmhw#!*4A
z5dST}9Y;rg9gf}k0wOXR<Q4`w%>+%CB*4F2mx`KmpQ5c#f7!bCCs@XNT?=ErrKeDh
z<}<zBDzJ4YmGl+dw+c?uxS+`u&rga}C(12ryTGz6%St6;o|zIFOY+Hk6`YI<ISTjA
zEYq&Klih6v=OL!$6Z-A24buT6NOjIaJs(4D*Rd(V*{w#(%Ps-i#uwgTaA_zOFOpW}
z=szidQaCn1NKE1-yqs-dvz2Bdw@QP#gapzz@=GB^)~OreL}dGK^6npjbDbbi1o+dk
zp^;?3UgxdV2zLMG9lN7t0*q-zow4h9&*%g%Gq^3_H9~9@HiD+6OE*J1yXr<s(S8^h
z1X%e!$S@>^1KtyPRWKZHLEwcH&zk+Q>R1s;0k)GVk5A7_EGLEIDE58<HDvq`cd4&P
zN#g3$vsA4k3acm^N%0tqQjs74gHJ}RAoE&LP7eFbkZjLNS;jDE80)iT<gr#Lx{7%(
z9Mpq}*+SAMACUa@*)ybiy`=7&8a>ju?^3J&2TVU~@NgT^-#N&`R~bl7Hh;fO%Kzk@
z0NEfp75Eor!RtQ55gyUM(Ex$$cV#yH@CdysH@h(2g^AAvIsp-cH*x|olP7eL$1wK=
zf%yh*V7@=w+2$lo5|;ZS>7W+1VxT{_NE~qdgY@TfMf)#)C~OJB^qB=aZI=BwpA%WP
zR65S4VV&=OVLsPjO_!x0O^5O<EtgzVC18=qI3s23WQIN|P`WTrfoV;5i~_e8)Jq4>
zBobIu8^&d(;>qNq$j$Mx``fvyej(YhiF<j3p`QUKKsqnKjYKDk&kDJ;0*yr)`Q<J-
z^ZA4x?kX|5Yn?G&Vsb&s{m&9*L_vTU&o2l}gH=h0fY5YeN!A>YpW`K04FW#o`nt4s
zW_7t>bdQH~(_2)LppYmXE-J&_UE~i#dorBe7&aJajuB^KmKcYkCst{jH*@CLcBZxj
zpGfu6&egRW@3dzm+RDZxNwk!V{Btf^NxsM^`Qsxq4~ewWd2DeWJD1i*SdL9Z*LZnt
zL}pa?;{Aj2tK;~veEhwLW`zrQhbZ5Ig)xk85zJqfVW0oz<eLRsk_Kx<lnHX9va!Y|
zg~xk{04CFDJeLYD8hvg=HK%2P0G!rDIfU`berm2D())%^M<Vjh4{&uDc3=w7=GTe7
z{q|asmPVV}Z5rbyi+13lwL)kzV;KsXKm)wI0clVoW)Nv|;zJ<hk&!G#=HpUK-cm72
z;~7#x$6{-IJc*Zpa&aWS+=Y?&0^R4y507}Q)|a|Sj%=xm&hVvjFu8CJ>7syP05c>`
zWB+9u%e6$A4I-OI7o!B!g<tNl++@`l&bGaJBd*q(l>?;d+uBa+YlLQZI2*UyjN`%e
zx=vF1h;10cnvXobs!NsdKQ4+BUAb(GKm94v#z^_{EPDK81=LBcs)Vkh`w+;twz`@B
z9Z&_x%BV#7VD|7KR+ji^LAI9Ma15&bEpAp1Dtk-CE0VRZMF7?1y}Z9pFLSIbKbQC2
z>vV&RIM<p?yt{jR`LZ5e*^^D^>aHFaZilkOAWUe<DNLoUPcQ$nW|}j6K9>?<Vc)w2
zF^a&_?X80Q>_tI}B7lqKBZ#MSp8j1i#?n#O#RE4;6VPSyjWCFuoHS^-h(`xZ`wtX|
zm#!WDIn^SESWq%{>pyILK)?krlEX598|NeajH~>3!AN{*PaduFsw#30Op|kcMFN%E
zEe3Pqh=HYOjBCm?^yuS6h%e>Ebu|p&tqF0@ZNOSUkqBm>HhhY3-{8iQ@e2fGWTsXM
z9VVhEgEd=VnJ^iYfy;<GoQBHJJ@4yQxQm%`fAys$%{(slCpS0xI2qM$OD~$LNPw$z
zLwJ@KA=#<~!@6l|gOy;qs39@EYQv0hO648ugwP*6B<gN@cM!OPz#RncAaDnPI|$rC
z;0^+J5V(WD9R%(ma0h`q2;4#74gz-&xP!nQ1nwYk2Z1{X+(F<D0(TI&gTNgG?jUdn
hfjbD?LEsJocM!OPz#RncAaDnPI|$rC;13Of{|8wta-0AF

literal 0
HcmV?d00001

diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 4bcc5937db39..2f9e954085ec 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -35,6 +35,7 @@
 #include <inttypes.h>
 #include <errno.h>
 #include <sys/ioctl.h>
+#include <signal.h>
 
 #include <drm.h>
 
@@ -71,26 +72,23 @@ static void trigger_reset(int fd)
 	gem_quiescent_gpu(fd);
 }
 
-static void wedge_gpu(int fd)
+static void manual_hang(int drm_fd)
 {
-	/* First idle the GPU then disable GPU resets before injecting a hang */
-	gem_quiescent_gpu(fd);
-
-	igt_require(i915_reset_control(false));
+	int dir = igt_debugfs_dir(drm_fd);
 
-	igt_debug("Wedging GPU by injecting hang\n");
-	igt_post_hang_ring(fd, igt_hang_ring(fd, I915_EXEC_DEFAULT));
+	igt_sysfs_set(dir, "i915_wedged", "-1");
 
-	igt_assert(i915_reset_control(true));
+	close(dir);
 }
 
-static void wedgeme(int drm_fd)
+static void wedge_gpu(int fd)
 {
-	int dir = igt_debugfs_dir(drm_fd);
-
-	igt_sysfs_set(dir, "i915_wedged", "-1");
+	/* First idle the GPU then disable GPU resets before injecting a hang */
+	gem_quiescent_gpu(fd);
 
-	close(dir);
+	igt_require(i915_reset_control(false));
+	manual_hang(fd);
+	igt_assert(i915_reset_control(true));
 }
 
 static int __gem_throttle(int fd)
@@ -149,26 +147,111 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
 	return err;
 }
 
-static void test_wait(int fd)
+static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
+{
+	if (gem_can_store_dword(fd, flags))
+		return __igt_spin_batch_new_poll(fd, ctx, flags);
+	else
+		return __igt_spin_batch_new(fd, ctx, flags, 0);
+}
+
+static void __spin_wait(int fd, igt_spin_t *spin)
+{
+	if (spin->running) {
+		igt_spin_busywait_until_running(spin);
+	} else {
+		igt_debug("__spin_wait - usleep mode\n");
+		usleep(500e3); /* Better than nothing! */
+	}
+}
+
+static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
+{
+	igt_spin_t *spin = __spin_poll(fd, ctx, flags);
+
+	__spin_wait(fd, spin);
+
+	return spin;
+}
+
+static int debugfs_dir = -1;
+
+static void hang_handler(int sig)
+{
+	igt_sysfs_set(debugfs_dir, "i915_wedged", "-1");
+}
+
+static void hang_after(int fd, unsigned int us)
+{
+	struct sigaction sa = { .sa_handler = hang_handler };
+	struct itimerval itv = { };
+
+	debugfs_dir = igt_debugfs_dir(fd);
+	igt_assert_fd(debugfs_dir);
+
+	igt_assert_eq(sigaction(SIGALRM, &sa, NULL), 0);
+
+	itv.it_value.tv_sec = us / 1000000;
+	itv.it_value.tv_usec = us % 1000000;
+	setitimer(ITIMER_REAL, &itv, NULL);
+}
+
+static void cleanup_hang(void)
+{
+	struct itimerval itv = { };
+
+	igt_assert_eq(setitimer(ITIMER_REAL, &itv, NULL), 0);
+
+	igt_assert_fd(debugfs_dir);
+	close(debugfs_dir);
+	debugfs_dir = -1;
+}
+
+static int __check_wait(int fd, uint32_t bo, unsigned int wait)
+{
+	unsigned long wait_timeout = 250e6; /* Some margin for actual reset. */
+	int ret;
+
+	if (wait) {
+		wait_timeout += wait * 2000; /* x2 for safety. */
+		wait_timeout += 250e6; /* Margin for signal delay. */
+		hang_after(fd, wait);
+	} else {
+		manual_hang(fd);
+	}
+
+	ret = __gem_wait(fd, bo, wait_timeout);
+
+	if (wait)
+		cleanup_hang();
+
+	return ret;
+}
+
+#define TEST_WEDGE (1)
+
+static void test_wait(int fd, unsigned int flags, unsigned int wait)
 {
-	igt_hang_t hang;
+	igt_spin_t *hang;
 
 	igt_require_gem(fd);
 
-	/* If the request we wait on completes due to a hang (even for
+	/*
+	 * If the request we wait on completes due to a hang (even for
 	 * that request), the user expects the return value to 0 (success).
 	 */
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
-	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
-	igt_post_hang_ring(fd, hang);
 
-	/* If the GPU is wedged during the wait, again we expect the return
-	 * value to be 0 (success).
-	 */
-	igt_require(i915_reset_control(false));
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
-	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
-	igt_post_hang_ring(fd, hang);
+	if (flags & TEST_WEDGE)
+		igt_require(i915_reset_control(false));
+	else
+		igt_require(i915_reset_control(true));
+
+	hang = spin_sync(fd, 0, I915_EXEC_DEFAULT);
+
+	igt_assert_eq(__check_wait(fd, hang->handle, wait), 0);
+
+	igt_spin_batch_free(fd, hang);
+
 	igt_require(i915_reset_control(true));
 
 	trigger_reset(fd);
@@ -181,7 +264,7 @@ static void test_suspend(int fd, int state)
 
 	/* Check we can suspend when the driver is already wedged */
 	igt_require(i915_reset_control(false));
-	wedgeme(fd);
+	manual_hang(fd);
 
 	igt_system_suspend_autoresume(state, SUSPEND_TEST_DEVICES);
 
@@ -189,7 +272,7 @@ static void test_suspend(int fd, int state)
 	trigger_reset(fd);
 }
 
-static void test_inflight(int fd)
+static void test_inflight(int fd, unsigned int wait)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_exec_object2 obj[2];
@@ -209,11 +292,10 @@ static void test_inflight(int fd)
 		int fence[64]; /* conservative estimate of ring size */
 
 		gem_quiescent_gpu(fd);
-
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		hang = spin_sync(fd, 0, engine);
 		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
@@ -227,7 +309,8 @@ static void test_inflight(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+		igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
@@ -256,7 +339,7 @@ static void test_inflight_suspend(int fd)
 	obj[1].handle = gem_create(fd, 4096);
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = spin_sync(fd, 0, 0);
 	obj[0].handle = hang->handle;
 
 	memset(&execbuf, 0, sizeof(execbuf));
@@ -273,7 +356,8 @@ static void test_inflight_suspend(int fd)
 	igt_set_autoresume_delay(30);
 	igt_system_suspend_autoresume(SUSPEND_STATE_MEM, SUSPEND_TEST_NONE);
 
-	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	igt_assert_eq(__check_wait(fd, obj[1].handle, 10), 0);
+
 	for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 		igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 		close(fence[n]);
@@ -301,7 +385,7 @@ static uint32_t context_create_safe(int i915)
 	return param.ctx_id;
 }
 
-static void test_inflight_contexts(int fd)
+static void test_inflight_contexts(int fd, unsigned int wait)
 {
 	struct drm_i915_gem_exec_object2 obj[2];
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -330,7 +414,7 @@ static void test_inflight_contexts(int fd)
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		hang = spin_sync(fd, 0, engine);
 		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
@@ -345,7 +429,8 @@ static void test_inflight_contexts(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+		igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
@@ -375,7 +460,7 @@ static void test_inflight_external(int fd)
 	fence = igt_cork_plug(&cork, fd);
 
 	igt_require(i915_reset_control(false));
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = __spin_poll(fd, 0, 0);
 
 	memset(&obj, 0, sizeof(obj));
 	obj.handle = gem_create(fd, 4096);
@@ -393,6 +478,9 @@ static void test_inflight_external(int fd)
 	fence = execbuf.rsvd2 >> 32;
 	igt_assert(fence != -1);
 
+	__spin_wait(fd, hang);
+	manual_hang(fd);
+
 	gem_sync(fd, hang->handle); /* wedged, with an unready batch */
 	igt_assert(!gem_bo_busy(fd, hang->handle));
 	igt_assert(gem_bo_busy(fd, obj.handle));
@@ -407,7 +495,7 @@ static void test_inflight_external(int fd)
 	trigger_reset(fd);
 }
 
-static void test_inflight_internal(int fd)
+static void test_inflight_internal(int fd, unsigned int wait)
 {
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 obj[2];
@@ -420,7 +508,7 @@ static void test_inflight_internal(int fd)
 	igt_require(gem_has_exec_fence(fd));
 
 	igt_require(i915_reset_control(false));
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = spin_sync(fd, 0, 0);
 
 	memset(obj, 0, sizeof(obj));
 	obj[0].handle = hang->handle;
@@ -441,7 +529,8 @@ static void test_inflight_internal(int fd)
 		nfence++;
 	}
 
-	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 	while (nfence--) {
 		igt_assert_eq(sync_fence_status(fences[nfence]), -EIO);
 		close(fences[nfence]);
@@ -484,29 +573,46 @@ igt_main
 	igt_subtest("execbuf")
 		test_execbuf(fd);
 
-	igt_subtest("wait")
-		test_wait(fd);
-
 	igt_subtest("suspend")
 		test_suspend(fd, SUSPEND_STATE_MEM);
 
 	igt_subtest("hibernate")
 		test_suspend(fd, SUSPEND_STATE_DISK);
 
-	igt_subtest("in-flight")
-		test_inflight(fd);
-
-	igt_subtest("in-flight-contexts")
-		test_inflight_contexts(fd);
-
 	igt_subtest("in-flight-external")
 		test_inflight_external(fd);
 
-	igt_subtest("in-flight-internal") {
-		igt_skip_on(gem_has_semaphores(fd));
-		test_inflight_internal(fd);
-	}
-
 	igt_subtest("in-flight-suspend")
 		test_inflight_suspend(fd);
+
+	igt_subtest_group {
+		const struct {
+			unsigned int wait;
+			const char *name;
+		} waits[] = {
+			{ .wait = 0, .name = "immediate" },
+			{ .wait = 10, .name = "10us" },
+			{ .wait = 10000, .name = "10ms" },
+		};
+		unsigned int i;
+
+		for (i = 0; i < sizeof(waits) / sizeof(waits[0]); i++) {
+			igt_subtest_f("wait-%s", waits[i].name)
+				test_wait(fd, 0, waits[i].wait);
+
+			igt_subtest_f("wait-wedge-%s", waits[i].name)
+				test_wait(fd, TEST_WEDGE, waits[i].wait);
+
+			igt_subtest_f("in-flight-%s", waits[i].name)
+				test_inflight(fd, waits[i].wait);
+
+			igt_subtest_f("in-flight-contexts-%s", waits[i].name)
+				test_inflight_contexts(fd, waits[i].wait);
+
+			igt_subtest_f("in-flight-internal-%s", waits[i].name) {
+				igt_skip_on(gem_has_semaphores(fd));
+				test_inflight_internal(fd, waits[i].wait);
+			}
+		}
+	}
 }
-- 
2.14.1

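The delayed-wedge variants above rely only on standard POSIX timer and
signal plumbing: hang_after() arms a one-shot ITIMER_REAL before the wait
starts, so that SIGALRM fires the requested number of microseconds later,
by which point the test is blocked inside gem_wait(), and the handler then
pokes i915_wedged. The following standalone program (a minimal sketch of
that mechanism with a stub handler, not IGT code) shows the same arming
sequence:

#include <signal.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static void alarm_handler(int sig)
{
	/* In gem_eio.c this is where "i915_wedged" gets written. */
	(void)sig;
}

int main(void)
{
	struct sigaction sa = { .sa_handler = alarm_handler };
	struct itimerval itv = { };
	unsigned int us = 10000; /* e.g. the "10ms" subtest interval */

	sigaction(SIGALRM, &sa, NULL);

	itv.it_value.tv_sec = us / 1000000;
	itv.it_value.tv_usec = us % 1000000;
	setitimer(ITIMER_REAL, &itv, NULL);

	/* Stand-in for the blocking gem_wait(); interrupted by SIGALRM. */
	sleep(1);
	printf("timer fired, handler ran\n");

	return 0;
}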

* [igt-dev] [PATCH i-g-t 2/3] tests/gem_eio: Speed up test execution
@ 2018-03-22 17:24   ` Tvrtko Ursulin
  0 siblings, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2018-03-22 17:24 UTC (permalink / raw)
  To: igt-dev; +Cc: Intel-gfx, Tvrtko Ursulin

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

If we stop relying on the regular hang detection to notice GPU hangs,
and instead trigger them manually as soon as we know our batch of
interest is actually executing on the GPU, we can dramatically speed up
various subtests.

This is enabled by the pollable spin batch added in the previous patch.
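
Concretely, with the pollable spinner available, each affected subtest now
boils down to roughly the following flow (a condensed sketch of what
spin_sync() plus the manual wedge in the patch below do, not the literal
test code; wedge_while_spinning() is just an illustrative name and the
gem_can_store_dword() fallback is omitted):

static void wedge_while_spinning(int fd)
{
	igt_spin_t *spin = __igt_spin_batch_new_poll(fd, 0, I915_EXEC_DEFAULT);
	int dir = igt_debugfs_dir(fd);

	/* Busy-wait on spin->running until the spinner really runs on the GPU. */
	igt_spin_busywait_until_running(spin);

	/* Wedge right away instead of waiting seconds for hang detection. */
	igt_sysfs_set(dir, "i915_wedged", "-1");

	/* A wait on the now-wedged spinner must still report success. */
	igt_assert_eq(__gem_wait(fd, spin->handle, 250000000LL /* 250ms */), 0);

	igt_spin_batch_free(fd, spin);
	close(dir);
}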

v2:
 * Test gem_wait both after the reset/wedge has already happened and
   with the reset/wedge triggered at a few predefined intervals after
   the gem_wait invocation. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
---
 lib.tar         | Bin 0 -> 102400 bytes
 tests/gem_eio.c | 214 ++++++++++++++++++++++++++++++++++++++++++--------------
 2 files changed, 160 insertions(+), 54 deletions(-)
 create mode 100644 lib.tar
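
One unit-conversion detail in __check_wait() below is easy to miss: the
wait argument is in microseconds, while __gem_wait() takes its timeout in
nanoseconds. The timeout budget is therefore assembled roughly as follows
(a sketch of the arithmetic only; the constants mirror the ones in the
patch and check_wait_timeout_ns() is just an illustrative name):

#include <stdint.h>

/* 250ms of margin for the reset itself, plus the requested delay
 * converted from microseconds to nanoseconds and doubled for safety,
 * plus another 250ms of margin for signal delivery latency. */
static int64_t check_wait_timeout_ns(unsigned int wait_us)
{
	int64_t timeout_ns = 250000000;

	if (wait_us) {
		timeout_ns += (int64_t)wait_us * 1000 * 2;
		timeout_ns += 250000000;
	}

	return timeout_ns;
}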

diff --git a/lib.tar b/lib.tar
new file mode 100644
index 0000000000000000000000000000000000000000..ea04fad219a87f2e975852989526f8da4c9b7d6d
GIT binary patch
literal 102400
zcmeHw>vkJQlBWMsPf=!{kwJ=gUAkMAJc3A2!kPrQASAWM<5LF&3M57#fW}3X+V;H9
zzQ#V;eqTgnR#u_Fi%h%Sv+Oft5m|YSjEIa|M)rFro4wO%+?k!9f9?-kosITaxBb5@
z{O`$=M_=Ke?LR->3jamXqphu-hhJ?!+<E-y(c?!ue@4CS$Ef#Jv~}APDe7-FnYPAJ
z^wo4Wp3M5aK~Wu+UG;rYyFYmO=IXs@HQF7HK975+XVd8a{-0=PYx_x59ZZux%EzPO
zxHat!2dKuHN6GlSH<_Ry>P@1vWSpFQj!wrdRPU@s-Eopc!*0|*YmHBnwP-qwT7%Eg
zC>c-CV0bcZ^#;AcY1Cp@Z4AoF(=+rm8Fr@^t#N|-ov1aL4BNdHx{Nx*_Ut?vOl1yH
zx7SZ5QE7UXM9at4VtHkay<w<M(&|UO0Xj?~_cglcP0xn2X*5nI({ZoOrL3V+yFcr2
zChl{;ciz)Mq%-s&@R+dQ*#t}BY}camVW-#Szlp4GG&||{CTD9=r^nt;W>eIhu&|vB
z(29LwL7T&IG)ek>cGANFWYOsy*JRNc2yntI3|unw#o6$j>tRQJNdQ-OHXdML0Ep`z
z0(>&=e<kfHO9+APu-_kEaE<NZpwr{RCr{B2yK3Tl>ty&bk!9)54~A3BTBpJtk;&*z
z+ow_rHV!aIK26!#s5Nc@14d^n(d4X!Rh}dUU`!9&!6LVCz+rX*iW^LO*el?cOv?Zg
zE^@=o^mQdVuJ1SBl^d0)dK?`!>Tj!imAz=We2m}AYtg%E^L71AGeV6<xz>Ci)%T-v
z?S1rpwYIkwRsMF=s2m?h^#*&d9v&T3D=4nkb`Rd{Rco)JmuOn6H=~2<VYP{VoAs!P
zF|D_11x@#(!%AcKH99Z9tR7UG?`2>2tIZmF-mf>Ja&%N~G^@LB4$6(_=uP9Oeq6yM
z_Rw>!TH9}6pvqyT*4&6NG76*0Tl|WSUzZOKI4_P^euH&1IMZmie)PUkef7E-y{;eZ
zRZ#e{f(e#i9#nKVEN}OqTs>Ti_R5FlSDa)cs-qWKC2Q(Dqj#?>Eaj+W{NHY~TCZ_I
zyY*VLfuCzwZ=>m3zpEZs)}nHwdW;!!V*8Cc1|fLS2xFihG_6&%BZ6M$?so`PxRy6q
zzME0BS1BK$yJOjaTBg43^5(sJe^T5^qG!|Id9rc#T~IRVoeo<4Y}sgfHcoI_Wh;K3
zY>t5TEDhTC{<{4*?u^f~#mhYQ+QTshTTUx4quvy(B3qa}Av5*)=`>%R%rkDE^+uDV
z@Y6Us%?&VYPy6u&sEE8W`)zU&Pd*RY*+LHc@if7jmhau$TotN5)@^y3nJuCIVsZvb
zi#y4rJ?@Q2a!;d!-pRN%{tPxDGzq>lC9imO^d`C(jz3Tom`K}cZ`x0uMte>`kV4z7
zrxC{5I71Oj%Ta$gB`u>cA`siH3Ao)L0UwwQ&y!5&WK-v@4+$6oxqdWmf%$-4b%qx>
zS=il4?=(sV!`bQCM)YQYc}`~ooC*EU(OK)GoFX_6KB4c{`EWLnf!)lbK5&B!l>JOb
zvvF@Y!%s{p>b81)j0h$?j=%uN=&nD+!pG<Wn1}L#PPW^cOgATBq0y+{nqv6#3DFh2
z6U?!1&;x7bnMQ^@kh7)L?@d2*ZQyg{<r#Q_FSn*(lEYcQ!>-VJg3~QZKDDs@p8<u*
zC~5b)y>_rBp$F6^k#v%7Z;(U>_1*G8TqRmp{#MzIUmowp$FHmV%}D-N+J3llY2Cx}
z@%LIaDm{#zJzL!2N;+tz_NrQ|#M-u$4o>E#r9T(;!v;Y&ht;^;cp2}${yr*geR|B%
zcQD<1I23UH2uP+lHKx(aa&z}}d|dsHO7uK>wDs2~-?}>0SIzjKdfY5cM!i9d19-aP
zzZqYM&v6ugil^A9$u#Z{+aJ)$QS)`9Qr?RX-!v<Ki>tM2vkK1iAC<<pe)x~WUgsX9
zjD9>$(5i$}eTE|yOd%F70*ccwB9FunOh7`bQg`j%5@0&R)F19(VAedt+24mm;0uS{
zE;vH2Ciz(08J-i5g1Q&u-ZXLLEBAi7w<N=?e+PN>G?`lGSXMgHx4w=Jgpc+IsG#?c
zWY{gasQ_ZGzi@oC2!E!P!r7ow_;d2@y<bu^!C4XSS8BVJrP4MxcPmB3qx!)?+;~&7
zFWc#^18DaWkPBejh8*7en8YWoY5S~1RwlbVjaK<DMK{neL`}Dls6G9JUjrTz;04Lx
z6chpob2EbN=p>_{mUPl@olXc>M^(qW4WeAM<x2zwqwRZ3Bvi%9(`10<qdgcr{)R}T
ze}TIKM>@&JUZ>RUtPs4{!X4Su$qzd}t^th}G)g`t?UPxzn~Zm&Rr_&C)p&S9IS_U6
z2kLp)aMcQ*$q(C4e$0Ow=r<G#c#=UoiMt)Fc72;wQ7DMLDm#Pe%D*wyr>V8+fsmH0
z8?@VEaW^VyztQvO5yYKX`N;7~^b<A-Y&A|=?K4mk_YI(1vnlHLpnXu#<<>>_BZp8c
z=xJ$*xH3-uKI@H>($}TAI?>NROEp0Z(oDNG!0aIa#2uU)+%`WpwomFhXxW+`JopjI
z@?ct8`USP{|Gz{@f0D?8ltIWO;xP`XuvdWNnv*2zUOmO7rOW4snT0U1MabIDlXEbl
z(tXm@`gev?Yta_82;~!*4}#8C2>{5p!m6w{Z3Cp6?+As`3I0U~zGB)|=^$*VC2VzX
z3C(^GV*GJKc`7;)_<&hW6>HMt0xgyVH&H1Dk)Frr=dDp3U$kX2=K3PDP;L<s0BJ{S
z)Ha&&y9N{kc1m3ST6!kJ6%BDZonA7A8)Rwm5S%SG#@PNPeaZO?r6S&|AC{}N_*E0K
zd_f`SZ9BLAoiY&!y^wVtJaAxA5(cnjMFck&3Wli^M-+lWo{XnOR3+Q9w7D9I0ZGpV
zia%(c3_9Q;I370vEkfNGY_Nx@g;^{G2kw1ih`}yNiJrGM^B>)o30L3V+Szgic?dKr
zHRw;5e<g@6fsqk6VKUECenQg>f7SC=*hdIBLQ@OTjhrEnX~2<y#W+u%M~{Vt`Hj+n
zetxbyDl#5~`!|h>^r7TKMJFo+v{P!Jyp#^8R6x9HgV#E)G}n!Q$&ge4xm(_SP3eyR
zR`ymnQ5@%>7HtZVS*$r(GHZ`!g0~C0M{5un=cmKrw@?`+v*_m>#VwJqUNxZ6^}_MU
zO!2?ms9LDw6^|Y4UuM06FX_iP`FlxZllAWmAmCyOdp#l_JOFJ$>^+96U5WSJ)f;<p
z_3#jj6-;;*J&FE35?N_yCwl7I65og%-#w51Vn?4B>UbV6Ed{;vq@6DaFxPr|l<#S$
zu;E<S!2!3Fn}slqAJ(^jTwgEj3!-6WW|%t)irK;37P7}LW)5?`t(i0|rwq5~QnJT1
zwFk%OGyfXXd=Nc(6m7agQ+I?NYfFDt1>8)_2m~<AGEH>R5F&5V{|Ma^j1$JQ5v-`^
zBU(SnI)iOabth|o)ar-@6PjZ;xrheC5iO~zW?m#T48n8=o$7qlPgHXro{DjB@_90a
z@c=5zD*rz^f)Hm`xTrOqLK&x(fF-kus@h&M*JFa#U(sfv<3e3$AF%m$s6Ep*sQp9q
zgXlUF!vaf}otC^DwP_VZA24{p>N|k;D}6w4no=Le;Xy56Wg~h$yhxy?!<LK=s7;@@
zU;waXpdq9^HyVUV=|q8X-Wxy%ghmB$eq==``TQfQ9acA@|1ASuoIzKoK9Bzc-0)wh
z@vt3?!JDum*mKz$I(q8_>N%E*vj_r*Y!v5nbO<%F**JtcP18ebjFVMh0D60}iU0gr
zKpa)O5w_zLD+;00icY|>KSZ#gn?=K|QNc#Dh}%}3PcpqoumT50fg)BY8$r`iYdnE?
zN|Omu=|hr?qT||2AQ(4Sx|m{$C;en2I)<hGY&soHo^EcQ%uXj8wB5r(^kF(2Z4Ae!
zn=n&c#Q3t&KJEQ`uk#$o?9P)Nb^@eST6pk0+J*<mkWn!tk?&)$yz3q1AeQmGPFp}Q
ztmb)7$tD24xvmJQ1i6A;GYK{&Ml=kST1+i3ChXP^4`Hs=xpb2Lw1rAf9^v;q8_v?B
zigGXC?C)0^;P9{lgRcuS{|o;>4<P|#CenMZnCyF=SwB%`eeLmbod#nC?E4zM?NDup
zwFSzGok7o{M{?}DE+a@L!O{RL_l2!wYec++0upSbN_O#RO5~UhW4Kku@n}d5dR)?v
za%_4jNX6C3f)?mGdVt~ZKRe4yJ3xk9YZZ1w1qR44`PkXU9AG&h>T;D%vh&spuGe=}
zAdZQNBSv-h0LBxQdgyWM+o%^kgT+&{S^RkLKsPbh)&1wue+Tw7LM>MV5O&I451KkE
zP&KehFcgc3Wj|HeC|-srrJ~TQF7JmS5O$*PzKb401|S$yiKfJ=kUR8<RQPyn0L=j=
zMJ#oFX@n+Rg+t??__qu@?00lzS6N33+tRun>u2$+m9)(oJC##<X{p^GP7*866!|Jt
z#ltnY;nIarQO8Pg>>kqY3~rvZCcR`VCUBldu)LFPlHt>KyFpGikHLW^B0D}`!(ps2
zMiPKmd=!zM!z%@=ZT;o{g26sMs=}W_))SD*W%X#cgEJbZfI(l(Z)QH_CQ|l*0dn<T
z9LN5*5M?|_E@-j3h5;?LuTjSWY?a0`x{l0JER%j1Gt+U8ISmpS01HFvc4}Erld5YX
zq{$zqW3rhbw2DwTx)e3gRiTBD)!=xB?M4iNN;GiD>a2ki#vF9AoLJUGOxtkjzBdyS
zxi}jsXmvWyM1shG4{K`qa2DrWN3+3vSAMTJOW7y4Y}+9J^H=SUS%(EHiZ{IwK0@Fi
z33DIK&xP*d=tZ~lG#Wx6i+XT4&=f9ictBXaNf5eNO9B5hYEaGs6_L?E{xWH?-uT8O
zRbxPr?8gQrqmQlr4CDx>N-`10kL_4Qxn@AU&GiN0l44`)BYZfJ$mpXVKys1svEff4
zM$zET;UQaz#S}pY;uObcvq#97r=lv4{Rr4;4NCB1^N-Z>0A@@}KROn1(fVfafW;fg
z{&6Op%rR}Em_lA==KN*On8W#b(qV`L<VvMy$imT|{2su8244|7^DZU|hce7J2)wX`
z(w_u7Cbfo(7DVSxG#d>E8{**CZc(I$XgIyt-8lkYM9Cn%N%(*tQ*n@TU};5|;%GRT
zu8+pUHZ}&3cFSuB|B+w<ER_xty6^}L1cdbAz!dxV7(QDJjU`H7g`=>?&3pb6nU??Y
z>nXx_6>ue%nUR>LK^&u=*?_|oGRz{{acshv7SHCBA*AKW=fL)%o1_*@<Js5e1IqQT
z$F0AB{y|l#tRtLhAs5h@{Oj<7{|_iI!0Zb+D;<wO@D6po5!5{q4lIz0G-%vcxffk)
z|0H6*F<J*p6)PK0@IoREOJO;q_jMuyvkj?UIWLzKk-D#$5hiX(>!A=z8*S;n^CI&X
zX7N)_vVofcj?rrEm$e8|(w3Q7xz3w24H@uS65f~b4DWHff@N?XcN~KzvOma~osy9^
zQ{%)=SqH@x)5W8UvtAnpBndOHN;t|+agL9N=j5*cNX8I^hA{Ra+7oVKSthZ1fU_T_
z!x^Ns&pi9bQ`n=t3D9hVFyj5Qn9mK#BL$j9A6%dJsK=2UNW2p&0}w(Bgbdh0z$tQ)
z#tj+IJijodkU=dp1S4{_G#k*ai(m;z-4S-UP<_KB@o57V*jAs`7q@OMKm#-2ukT3s
z4d%E|gP_BS(Mvdzlozo5Q}O<qT43TK(26l*s=20e-wJW<1)^4S(NGXCqd{u`ivVO}
zhIu(6lXDPeUp3I0*1%5We<um-G(cj6AW!H7b4YkBF;|SfvdL0>hB|^JYTtP=9zvfB
zPZpu~g=}oA3p*AE<8-Vi$m!^(-$EQ<M`c~vi`-fRI28xHJb46laUj7eOYC`!pYku1
zRf{sgj*68VamF9hQbhftHiODw?m(Q59-St%B5LhBweiq&^febjtCt=+xw`TdW?FWK
zE%z?}`xq{C+T*7v^!OL(gDJ&|cfhFQ>Z{7zSVOQlLtjJmzZBU}l~c)7(N1D-_kM2Z
zj7o&1El5FQUeH|x$$cLm))Cu_7~Eaz?wsm<U4ptY3z}yZ2bhJ6!%wwVOeY(NRnmD+
zKf+<(#-stx&8P-9ZG1#iaAlXh=V~G*+^YWDT-E$KfEAo;NhU1QxG(dmy*W4tWCX&>
zHU^WB%9oHNaQcf$k+@xfdxQ9WJ(4(Y4df_TjQV6Fo*GO|Q<iikN}A7A*af#Ub&hGI
z4`{xiO2%G~-~jU2Y&CY2uZ2pn1mVhm&c?$5qGU>!@jhzNHxYoijW=YC+z(+?o-8^;
zaz1R&Afd|6@ZOvX9|@PzqnU=9z(&FD6qv%){H_H2c=0}p(ZI=_zUehK^!mm)I8H)i
z95*i8xYz(k-WJdZIh+s)V)f_~&3toAkM-xzIkCW>&jtu4DwTwfudbAEpjzZ1qli~@
zBn9LBsPx~6b9nsbU<FrH#1N@?#h6y%GwIwcazG91R9<Wx?a#**SXhTT1Jou?nL%q&
zgs^}R0*AJ4_LQ$?RtZ_FhfQi`6}vxXtIf1$|5au!d(98{g+cs@hT{)DSLm9-Ii5@e
zQbN*pQaS91R$;Fa(IUHz^Hx;h@nVp5;esVL4b8D@2=pC%;O4sSgZ*P=LL@s*5xQVG
zCNueP$5DTFDv|aETBpo$9NZrG5UujRE-p1(D8ut%6^c%V7@M@wfg5?&#Sh=P?gh7k
zoveQ+5&aAzg-rciqK;{JMQABJ$K%poqxu#{%UgtfaXFlUi+7Z<C~k`0KPk<1rxNJN
zaO0xaft?P)Uc-&EM6P_PFAu*@Mk9Qn*HgN)gLJMd!2M*%2?Sr%-0(b-hU%C=7y*Kf
z=4T?!*CMV$1BHEn-MuA+5GFAi<LEPp$M6+)J4GFF4Ppr8Z1gYRw%cJlEfm38`<8i&
zGs*w<a<kiw6bl5mXM^kEfb4w$0P=-azi8fhz@?K4z9<^tQ#u`wH!>l0n?Vdu9V%?D
z2&soCT|M*?ImYHJpLV5$Cz#ttcguv#5wt}7vm%k<Sri*aaJp{87t}FJiY_PLHS8QR
zu$eb(hUqdxs9(Lvxf$SYiUGZNU}J5?b5nQ+woq1!3Gdu#zQ_-9p5VSAJSO@twPeJ<
z^dT;&n-RU4>JhzQ*EsXN1zo~8L@9?kpJ7j_)ebQ(VoA4~gKS9(=9yg^PH{a)413fz
zFyJ}udm;z09*=pQ46Dxz4Y2PcXdQw>3)OIO6WS539O44Yl)e^mg~}~J_$(L=W0j{D
za<}G$+&Ue>UoaHvep#0ZR?m6QkNuN4FzqyCiM9jf6XuG5(^W(oR5G^)q6By$RJ}8)
z7YFgCbp8bMOXqAB2k5!H)6*A7D84exN2pTOF#_ZGU3~n$wkx~4D_$Ya)KdZ23)|Ci
zqg{v*16~(bLwzm!EtoR9=B(j08>IGFfRz&0-=U#SHKNIWsF?-ZE>Opcb2Ddm1(R<=
z*5ma{>@S_yM&9DpnGIxbWbr9LWI2E>Vw?uE5wv`c2(6eGH8}=_N895Lv}s_OM^A@6
zjllc!THR2Fl|9*L^!2h1Rp|Hvaan`5fo*?}?Kkgv7Us@Rp{-&bmN`^e-}F4rYyCT3
zO^{#&@~{bHR^YtR{q*xF5mAa4&~Ra~@xAPEyubGp5`wXv3=d_O$_#xAL1A3_+&ie`
zTM3ib(?Uk1|5;k&epYMNhqXiH5S?$_Ey3HeTpd7Rgds+t${BwBpMzzz@)MFMpqw>@
zm18}Cl>gkAy~uFNJZQcXJB#$n^_OU3;l5|}2*VxKB~s);oSIS(36GefSRWEFO4*&q
z1kf>2?rO!vVHec(3b`4A(-gOKw}4a_J%cl(2iOIG%P<6C2;~RD2S;FpiQ=1=P6J35
zW1F6s1z7g=7h#!64etEn@Uqzw@+ZgcB1|igH|AS^5vJjYHKEPj>xnj3qecQ#gQ6t}
z^p|^o@Qej_)4NRw;38`-|Hs*Fb}xM)MD&vUA6t)~JlS&jKX!H=K4JckovkN#`9E%b
zBMLga%m1P2LGJQ@(8f-2Tm)X6t#|o9knrO!|Hto^|D%iRf?Y&l@pk{-o5RER2X)-l
ze;vpEI(E^wG;_zZ$rKrVkc}ct9YQUwAniwM_DK?FPz>j&feD3%O$JSSyDlwvDvLr^
zXMY9WHwvaSP>K~2)x4IY;r1wDFW!ub-!@_<u>d7)e|ekHz&pI7y{M+fAnv$;_i)s}
z5{%-L;joX(<$75U=A_isihJ8>yd8}7MKLih0u6c!OxS*547Rl*X8^a*+gwFP;zIBx
zga}&3z52gX*8;%<OE%l8vvz(YS6(oj`-EQPin@)N8MJ6^W~B-SUpTt|kR6^{pI=Vl
z(iV+T07)*xqn$!C-+$pM!l(v?z5-;y5JiJrv4+ACin}jZfY-qU0d&)^?eqRNbua|Y
z3;l-0RWEl<VgLN}U_)8xz(BoZFbmXYvJUcz(7FJd={9g!iURY&y<gBeU@@YKiY*N7
ziF|764JlYOYtU*zLo;e}Y_@fFsKUyuR4{GM9X*4`F&G4Oq$l3sz@LQ^<!mv(xsPr#
z_Ajbn%AB=JszGi}c$5Y(QX#C4_6!b>Twyu0H7fvsKSdh?THG9Pb=RT|(^r3)>o-;l
zjdZ^f<wnbIM4F1kRa6heC9P>2gxv_KN<6xgR%C8sLCG&8>kbM~Qktl4zrX-NumQ2*
z6@*q^aM;m)Rv^Ovsv@WVA&pb}S9Nqr<IDBGM>~)I{O6qh_wdQXtvmhi5|G~7r(*r@
z(Ov$>yZn!7rcYB1Aqu**nKaPe<$t`(|9A`eA2Z1x#SDY|hoKIK6BZ7KqOyF&f;^GQ
zcsxLqK&C%riKK{;Z;fL5N#-l}n4B?N+-)Oe0gF=ob8+TK(>8sI%LSPyDOb#8lBC-7
zdD6|Lgk;rDg3I}HA1=uw326~Y%jQ#0N^y{^(%lCP(@CDnRkWf!lCPTZ!H($Rbx9HF
z0D56rBVRS;S7EwH@j1Cfir%sOv8Blx0W`SH8Nz0F%3PG2xVX(ZgbWMGxYZYz6R(kL
z+KP!fn8~9syM7OseGxd+PA+)uS6Tv}=v?H@K>D&!hJ0d7go)$_P*P9Xp(<c<yAUW!
z%s_fV(WMz@b2^#<l+a>KKeoUhsYJ0{!s0D;8pFKgH?!f1xNSmXp#4w9jT@bgC}pz)
zI13>S%N61!m#yn`6o#XojwBj&>2$QAjh{ykAAd{P@pbt)MwESw%(s;$rN$*zl6Y10
zc^`j>q|bQ?ZcBX3UIWQ2>bp%#EcvR^L{8f>vcKNvK<Hx0)}(j@&~xdM1}~rdj(4sf
zb0IE$kG15Q5OPc{!3W*&6I2EdVbR8r)hh<&A~~FRE(OL=l6+80rze49_y_9|-IX*F
zGgu0Vs}SZZL9!T!OJps{L+CuMTs)vuMrLZsW5~z>B8!X&9N?8j$pV7BAkgUnzH|ug
zI68zsS85~w@Ul2@HCzr~ps`~yK>&%1xgnBCO1NAg$)&ey$@!i-6OUXSm5Z~$Pon7V
zaDap+Jx2IAD#23PX9?0>UYsEih1W4UA6xj|5hQmw53P^U1WLE!TV;2I!G~l#Ncu>)
z%FL~@><_&W0o0mJk)jebpm#f@6!n=?brV4%l3NwfrvH_F^(KRFc!w2`G+!)B8LNUq
zgijzH3Zf-YWddI@E{psZucb;-81dkGSG9*4Dsq|3BP(;oxS(?+lIG-KIUBco@b!Z%
z(RBy81W$3vm04AW<Bez^q^*})Ff5T4VIf1rW+CYeCWYMHa+#QIg`^!|)+7PU1R*4S
zKaw&HL~(Fph`dA=74Q~Jpv4?JerCx1f@}@F;YJ&hG*a!f5T%K+nb?LIx(NZorrR51
z*}cKXVO!D)FiQ)82lgJIO-Im%mv#j#h@(M}PQTxRhK#j*!qpwv>=7p;U1ph+T%qVN
zIs}#tY%~T%;s8vTu|0htI`8TN;{n=<h2G%H@B)c&)ccPi9Ex#fi1g@40vW|Jf*FX$
zAeRg;#bVETeJpJ>ldBdoGfhME3HfmaOiFXa8;%5kFc<|vxxEwcaZMRC{>%ZeznUk9
z5IY%u+CX#mAm@iW6^PHRK63aFCSydx3RcUw$>3uT5!=in56F#s+-x7HL_+{f2(O|N
zSv#U)R**vkc-%#2%;iozC#-^b+C}gTCU8#Co<)B|uxt<&SmyeVEls)c<lJa%fGqHm
z4T`HIa%r+6At}KtrA!T_VBCW{b(s?h;rPsD{8@vM&LznkZ)9WQycrIH05~M{8#(qh
zuXBk=19%ypNn8>@z`Nm^WQZ#U_*aw)zz8LXL+2=B8qrDXaG~5KW}HIgAaWo9uVf8|
z7r2t10gyl!C7Y@7Ex>nWlXkFb5D@vGa(^)NuSp-m<q%#gn}+L1A8{XJTv~SU24f~r
z<=}E`thAh+lcRizEnzkqa`Jl;H0@@?apiQB3E)5>T^reX5<O`R21F`UL<e%_TxW_|
zl)!`qDt^W`g;JKF^%iS-TzYe>n-o(6V?rK!X@%)06H?|=8oS0SM>uKXC!F9LF!aZW
zT9Rxzw9k84Lo)={YsjUHBqkIUaBZSiW0HE*qYY3Xp}7a$0Gyq4FF?I7f68T+^-8t&
zwv3A{9F!Yov0hTci7qZBjTXv(F|p!_@D@(B&b%s_Ju935Dls(11%@%8PKTTmOx!wX
zzFq|7AkQ$W{EjeRa&W>ujzPKyrR)}P>H@ki+ybtNQ2iB~P&ftI1Z)F0pL7aOk>CND
zq7)RObO7u$9nHLWof55Ljl-Rvu#h~819emHA66@voMNPK6A?H4GI+kCBY0f|rpqEl
zP&E9v4*rV_ambJUHwxx_3`Io@3lCo1)WCeosn_8dU(D7rMBum^7WX?dF69Y6-4;*Y
z7)hb)?~r5ksEGupcC$^`z?B$QtH+f_quy|hQ?zutj$|nV;Ol)JNqU4%B!wW4P{@Y~
zUgOAi!UM;oMT)N6*Hy}HBB4StQJI3uiKz;xF1M`Ya4GRvJ%b`5;vO=%NOPX)`pOE<
z#}(uR)GT_qR2H5m9-l(W6tZB#JxQjd0v}o_`>_kd?$jxQD|oWO6r<+Zm%nhV2}_Lu
zhVBV2YUL*;;gR%8m<G-%Nxw};PvAVG6g^L>0WrJF#zk}BZDZ-5e_#wbhe;D97U+2Q
zq1Il8EjJ_jfa=}gfX}6;+Z>`u1BYpnf|#u1dYg)Rc_mk+hhGBqkw-tzUj_!;j@F1v
z2h&easW^(_phBC7QVCzAppYhFb<XCZ(moWzE<$bxkkTd0+3O$aB>@$~!Lg+~SV1UP
zLWZLF$Icr;@OTsQ8G978al>!USy~(b=Gi|x2LFdbCtiP4TzJ?Lg7X0rMJSUA!r%|H
z7EB;ae~$35j>heqG!A8T@Bvr6DST7(#d}TQDw7B=a;Q3DKah-{W&~n1XN}zT|6O|c
z>=_>F5aJXO#y#a>G7#!oRehrab))<Dqp!&^h!`i6&JGxbnr4i0MS?Vr;fzLHLufYQ
zZ!sK&6#$!j9TZ75*2mu)3O<+HE}y^gzI2PBv(+{IB7`(DV)seu8A&4niJ&->mJtRT
z$yZv%V`7G^Su7*}Hq7CUK>ZVS#{i+yr1Ve+^BrHnI8aorR_8lRMANq97N>$c7&5m!
z6NgFrlY|>4gHMr1hh^AG8%G(qElk<A{V=Ik2FRO)M7-hZN!>RgY$0AACWHwTe|s)Q
z^A5aVkHy9!Byl6p#$_W!dSi*Y1E7BP18rF>Ot_~c&%8x1G|lQ$umaxn#<O^Quq#4x
zd$i>PChUT<;8F!nmv9c%0C}f|t0pM*b}Z!kY!U^m4JR+=B<m1;5f_dW&TKBZ(FAf>
zH*`D8IwrYF=8~F`*YND&!K~6Gfm717X6$6{VsuGwMsry-1hI~E0!HbwA(NI*n17dQ
zZ+e5M5%9F*bYfShL(5N&Yu0RNwoHF|0yd8Q@z4ob9(6J?7;}O0!)tu9%UT4$N)m}x
z-N1!>GR=5rZ`()#h;$LFRIU(4p)IQEBio2I3wktCSN%k9Hle0p?n(aH5{L@??qag~
z34da(Osnt2b|>r#Oa%)l_3#goG9VnP!Bo$-b$_-29kU$AxQ%jh;FeR+iMY&+&vgD|
z%=j%Qwy*=)70OO3=c`D|8cwIigtd@qi@_+x;x7k<66sZ9&z+FmI;Z?g1^#ytlHIW>
zlo1}9QZRS2hBb)!a}_{A`8p_4bYX*qFP^w$08%L+Ts_FrBX>i-Ej+k&+N2g+*$Jle
zsr?wnPkjpJMCStHQuKr>$-h8mSHFQoL}vB$6@!x>(2+i*lTWl!oIX2fK+n-q?)SpH
z*+IR_2U^_ePubxXb(1E{mNEH8kj&IAp6Rniq1Czn=+kFIiopHZ4M_=(Mxv}TkFh{V
za5`LVet2XrK^;FNKG*8?BNS0W6vviupa6#i&#=AOjnzr`)av%pF<sJ@*3g7tJWZKn
z73p|W<n(o}n`b>027fcLl72OpFq=<j$-6r4fWa$WoW9T=nX^I7%Cdr-93;SFcXT47
zy9buQK!35CNrjTqM$d5Q&H<bjY0-hZ&>vGbkqdds6ia@TVG1?6MRHjng7Lz)m%rH2
zjYP18@)vvJDz77g377fj%U>ckP_Cfpz<))2&^P%c>vBd?V<i|&^ytSQ=8VP(73_0X
z_`Atw!(Jhoj-ix@t9ss7y5L&x)wgghJy6Swz*sCDd#cU*yg(jDOR!~89`Yg7*;P52
zRZv0cA<a1lilXHO#llW#+Xw4Yd?PK9i}CiX5Mx@?k!qUOCi9IKT*bB_<PL$rHwT%+
z#dc?33hYb7T8WHF!&!)kBv2`)+Le06^(7c8NWxmpt7u_JN~m2t;tC(6xB?|{gTBCO
z?n!lDPkP7?NmUaXJKql%O_Io=VtA+5k;nm<OHUcTps!~;wM>HxCev9PDR&a6Sg3N6
zv|G;mP0bAET}?Zd`uF@IB+4gfz@<u?GdMV5Vom3P;HoTY=Kv+R%>W*lV8{r_6(lf3
z;Ub3yz$@S>EjM*JVAJx3jraic`ahE1_$U^GWsH)Nb?y3*ATZ*zMV|y)0H46}`nE)&
za6_#oqAR+^b;T)hU#AIb0>x-P9En|7l7KmtV6Z6kXO6h9eFNcRvJ9j%g4c9H4nS6f
z_$X8UgdIvM@ECzz%ta=Ujn%B^A8Qr15yyORNZwA5<5FpWY!qavuxkm&&?c^%(x`{L
zJ`w(_eAs}`XoDmw*~1i+yFS2N$I9TuFtI_&J3@E?wum}>r^mPI;rkY|!38ju79Bpp
z(VLdk5-uyxF*C4r)}ot7)(4bLpLk3yczc!o@|%OE3e8KCJ|5x0#2{dVmR)uLg!-Vi
z30<Qjy?vB}@DP#L?=TsMfKQ1}4q$DkiPm8*A(VuMoaU;5Ys73b(=b8+ehWnY&H61%
z&JMG+gtoDo$OkZ>VKck4ekPtq-aodhV=xwV0OZLGi8h$p1Q$dl@jISJZYPYckYE#y
zv*DZ15~zomfbkG$0_r;D1Go4$r2rltegZy{V?NSIIE9*BunKuxljC6y*i*z1!I`G-
zRIxLHmSG!nXHy>DX=XBavYXXFE}M}43X*TZnO8Xws=0|4WK)WV1AFrAv2>AQf$V1K
zet_{j41=GZ0O0yTh+?ONxD0U^IP#SrSrLQLj711`jB<<a$>7*z)Vg4hkU!_=ASTB>
zfn3$ypwSiC#9${O6J?)3BfpWB4o99=!K+xa=>&O(h?MpQH#<Vyle|c%$R#Oj__{U%
zH5J!;p9II?Y?I$MQcmi~FErR$uz~=J8!pctfGc<+5GnwE-eN150B~p!sp&wDV1<6%
zs&T>~3Q@u9&PLx*U(UdpM($(pl4(^0s#1<4NG3Cwb#V;{UK6vxLVb}KKgKv^!tgp{
z#H(`07=tAz^ThI@IMQka#>pm*ZlslBl$CqB!?B2ThJY63F#vpho8B1-&9Xz8Z=7)-
z75t<c0<jP_Ci>~oh)n?6B1T8!bKpqZuQzrp@5<j-mc@UeEMFfQw!hi4I}bU6$Z^YZ
z?D@&c6H++->PR71uN}MxQGo*fgjs_ZGTsfL51;<>B!=lTUZHs|E$%%;dA#DzEpI~P
zqT2m#A8~6jHDR2OZk;%MXCtIjl#eNj?N<*f(JJ8z9*_K4<7n0Lr%~=l8N0!m#ed8)
zApC}*e++LO{DY5;QTJ!D7!Y*`e+UD7;gnJ_nXT^efkbxP6(}ul^2FH0Bb>cKcSzAd
zqMF3@q!pj5J^~6bNQ=eMPEc$UC(j3Q@QOBa=3>%dG#Fte(mXgCpo1yls2{Y?M<uPf
zYzNhFj4}TjdhuoGuO-samdgdejssi42dtVeJZ3;4wiE0lNOXP}F^Xb{$8$qqKKMU3
z9Rrx67da{HvL38vXel1SRpZhz(DqJdB;DZl5ZOKfa}3x%b7Ow<^cx=YI&G<;3k~-U
zZCcY|4_Tww!2?Nzr%y;@ANlYKp~+slrr!|q++^FUa2;?4hFuHp=Y%OW*_4*?)XNS)
zv63okuH_igLn6o=t>O_J1U3^3xppC4T`t1TSHY1u5B!5Ql(c+>EwBn!$DElx(tSZ<
zRCI*g;qg>!(c>AEx$)T)5trL4F2#64rPoK;VccuMr+Y5fV40;*pIG}g8X=OKGr=Et
zd@$^Uu+BBN&n4bJU-XUZ;qVeSf8IK!44WWE6u0>`A{WdCsi&q7L`KXFkj@VmQzh1&
zz>|Zem{)ABI{5`bD4IR%fm;RQEQdHtkB=)m*aKh|JY1qBQ8-OW6@=lnZMXg_HnEFr
znuBQ<6pJ7jhOTpl*|saZc<#c%RWad`co{XdT8P1y!NhH71%nQ{z=M<Vd5;eX#l7+0
zuW7`9%bp3lAi6@5!%3tXAXGFQemwtj$7v78k)4s0W3gX2=muzuZNdi^=T23SfwK~Q
zE?zq1GnA>RYC4{cC<61M3I<0qF&>wiSal;RGk3d$#v<Zh#1bw|N1f}0EJ%Wc@Mt{L
zPprd5kh4q45Wz0si|?Pb5Q$F=;uqeR6J=A6&+y2BA$=iS>44<qC*hIm8|&<V-^4uV
zjigapP7JgITq%<Gn1zWHYnmoT+B(&CO$;%`PtaN8-SRpmEnLC=^R)T5Px$YB?MoP^
zs6J%mRz{|a&I|R~g8sHNCO1@3!Gw6J3(STmg;9os1IPmgAgCEiK(hCJF$0Tx8(vNL
zp!E{KeDUN5+U|e4xAarAY}yo7!(&_wdRx&i&;aob(inE0%N@5TTlhEC{_MeVcmu62
z!ZcX%W4<CHM9^taYkP6EZAOYfiA{$=>$riZxwlt0gD!Tg3wD*MyB6&%>|!VAqCaI1
zCv$T>z~l0Mt{)Qw;mb%#zT(@;%lhBasv+bgD}Y>|B6<RoAPhj&k2?@5kpmvsOS7rR
zYZh1v3|d!sm~20R6vb2tJc01L=;{#=qG0<qzd}rj2-!#Y-%JORZ&|+b@=#BLxV0C~
zLiIevJz$dE;re&@EyP5e=C^0hBZvqgXCIw?p75Rw?%s4@>ETTpF)$daT96J%)_Q>R
z_#s1kY=9namw}^CC!H1qJ4mfME{a?IkG;t-l$f<Ijy@dl$rWYcNZwR9h0KsoWXO^c
z#C`bu9(ILdGni967_BSD9rdok*Ic1??`L~59=19cyko+%IkR|9xP}0p2_KI8oJ==1
z?6FO&g~LxTg%xZj-6<h0^Ry%`<jW^J0X1jnwDzi6tHiqp<>O;q$EfX98Y#+Vkw2`t
zTR(aq)XNZbSnq9huTsxeOzE+p;<h8;j~Q~Bs~J~n2!PzJq+P?H1a=>i%srv4J~%hf
zS+Dgl3wCl6i`*<^#wupY(ljxkGbA<%?jqvml5j~J@aPHh3H~dlFRLGp2;l2DUenV?
zP!<%A=GH-E2#*(Q?_cDwJA*}4n46wh>&7JQX)0--|CMtUXRD-sou7j4caYFE#diHf
z4#Lcw^G%5Wg$-XG;1%FStppt;AuOofPn_{0<`Y&f5XAK91_|MLvPoyj$iknq++9xs
zOd-8+>Bue9j<P}KfnKAwOr^rkci1^R(+eOYYjpTo2qHNc76VD{epH#A*N$BQSSo|i
zC@Yy|?%nf82vs4)(;Qk8ORtO4th*SU4L;lujz;kWEI&g&{3#e;OmJ2^pdoCMSdbmr
zU}cb5{HjC*0;_inJX~VtfMi8abNoR9N_Wy(p|Nin&I=R(?<EpJ2!}ao0QdBCjO5l3
zYAMRL&yY>m-vNUfgeQ{G_c=_zdWsQUU0~`HgrsIhMZb#GvL4)a?DyJuDF-qTPF$HJ
zPPvDCUmVe_#3`vq*fN?GbsVT&a4YNB{4>4%yBJEO(bMQ_*8!0P3$zCQ5Xl0Um2U=J
z3AtUgG29f{i@8HCorzl&>xmsp8gS!pEsx8Wb|m?KvPlElU)tBAc9-V=+1^36*T=d1
zKaU?GB~Y}rsKeVVhid{RyXY&JnGxvP8!YPks_%>1{lUu%^Z#u984APJHeRE|e;>+!
z?=k|}4IYTSc+!E<<n7_;^BOrL-qpju$@mn7quB{eM`w6|yax?byqa(|vak)WK(a>5
z0BjhWun9~OT+c-(v|^a2?s(Xp!VH0a#LfX90WWUD-w7`(9p)GsHw^GRgQi(`)p_fp
z3!f<lR>MjM&o>NZTzUK&&1it=;#&0Zg6te(bq8<5q07{Vfz+=jxI`?=(z{9svA@d*
zyB9U<QS)^rdRJ~V?lQtg`;FftBWzTjAusb--x!CCRvE)ETOOPZ`_t$d41etxE%~uA
zNT%PBrtrT#WJO5&(ObNm2eaI>j{MyCC_i6xyBGcbMjIJ>#K<T~hq@3pajEQWB|fOW
zd{wE%uhVQk&u}n8yO*p#Y6kV`dnD0pE~k*s2Ncx0Nj@R+in$y|=m77C>KrVCPV`J)
zV8~`88nt`4zXD4`wiwQw;V7TaXxhQbvh|P#5~ks78SHbLd{KVBbW!T0&l5U&=j+4m
z%hZn957?pe-Y_>`G3&8jx(y4GDlxPZ3M+H?dVnvh86Zexl>dZu9N7j?FwDg`|3M6t
z*<x_bZVwS_%i?47$b)O^uV9PYOQ-S-(G{oI(9Wj&BfEN6x`~KuiF(pSqet2%-@Yn+
zT8TcHN5Li1)KW0^pMJp^K$jmvRlJElSZY?7<76%B(C2f?)FAN`<#m$8K**2Nf`b{B
zaGpm91BNxH;~o5BYIm8Qvh}6c#aW;1dLb}z%nVNe?3BSFa3nE{N;-fXc$k75U*pyV
ztrVL*?7m&)<uEd^Brk!9eK<ac-3_<#CgM67@M0eBk-&<<tJgp<oVyHTk)xDnq68;M
zm>Cas=`U3~S5zZnIvpDEU2}JKiX8%}T{OWAGt<~)B21NCdFW|S3^q@u0?(i8O5R<9
z4XpLC)$22dgt+{nfpM|EIqxa6zZSBBGHjsR^GQ(lF)v?FhtW7XPPmIJrnIn~;Neh$
z5PvczL-M2`Fb8iqc~Hq^FOFd#<Yh)n&7<lRASl_dkRpXpNny7Uy~KO-5`n<58>Y|V
zSN6rA5HT4bSf>Z<!3!=j4=7BhXNUw62Nx?Nfz0$9Uak_>tpZjM$iu$iyMW(`U1+3(
zEqWt5W;{|FeIa+c-QAqnY9qo<(#vW%B1JRIat3jUgP(7pe;f!j_Lf#@cM_CZ5wk)@
z=jIcS;=seY)*^Akts<fnN-z<#O1grL#nGE;eT=4{4Gy?Y6Whs9y<~_yKR+EmWzIAd
zBhgS7K^}};Y%%K5*@?LD7hSw2M&G`KU2>p&5Qo&_`kuOeWU&C0_AcKwuH&g(ZM4yF
z!;#sD^b0Dno;cr(JRMN&IUiyG(Gu%&<5;i=Ag36SSq;0h1rfsh*|2C7iEHtX1Z7&e
zCcIaO+3DU8a?0NEpwOyVId$j^WF=<OstI_bRRyz)6{O4#y9<mQjM5;Gg6qTDp_o+6
zcf?V-iCw6L-~jwDEDS^wkOcn9&Wg`G`a3lDr8Z2p{<4L2PpgM~^MTI>f0UzFIwpul
zVkJm3vdLSC2i4m5@U2N}b`w}gr6o;@D3((c{YY2`z@_g<I_PP_0eR;vX24NQD3lzS
z6eK!7{+$~}o_+N86)qU&7U<86{KNqF^foNS4m=qMXq|cr3IIy(Et5#AA@aaIL{6{I
zSe%&nM;9S(5>J&mz=*+%&_TMoxLE~=UP+L*SWmT!bpc2oFn|~oCLIimup^SYozCl;
z{%j)l3*FvJ$%{!536-Z&2{BYSTjZCR)|~#Al9qG|tM)(s(Fa*;1F-Sv30`HCLTP7Y
zB}7YxsN6M$g(6dq`a+jEc`%-6PW?r0QVh%EDTyP}JI(c*QY39^LWe21{BFCeLx$M_
zjjd`IVTmm#Pj^LVk`5J8T3Q;CTUyG<61c=2PR`R^NJ(+>cT1O(wh)xNpZ-SVr3MMx
zYJjHc#|>R!OoJ)LU3I&asbhs!$rDu23k{TUSdDhp;CuK<sE}Mq$q!*_dGcGlA#?jd
zMWmy#KSe~Fb`A$S*5{d_n3bErmLN?TLvue2h8Hv}Ktup8F-KjB^>|!e6XH#fgr$Zc
zI!lpYRh(s6$vS@Dr7v{Bw+XGU?wgvytr>d`(S;{>%9STmK@W=6=wJeM2ZFAI)9D{r
zB1^Sr*)Qz2{<2RBFkc$|J0lMm=lWDR@FiT`GcRQ6^vvr%ri8A&kZ-IV!I<<A3oj!s
zAi+ViL4c$s<yYluO_oC&J=?(v7hI}|94E}25#ceCU#VLvCu9s0r%IMxrWo;?u}j-m
zl9Y(D${PjB@_edA6_Zz)WFyk3hRpLVPbLFo>$M0GR+<b#WB(xpZx!><bW2hmYWJLY
zNTjR22G`U12TU&@vfvP(BVWp%iiSvCA!Gw-Lg|wzKqiE}$Wne={PF3@yO{K?=0e76
z-+5&U+s{#eY9S@zLr6+I!-**Pw}6UR>Uu=rp3aiJq0^h3^W2x+^6qOn=<myzAW`k4
z7{3>v^xQ<7$*tqA>(?z+k9jI$i}B6YWe67}OPyxI>ai6_(h6G(6leTb)2%{larp>V
zR2d5(1+|O3*ejVpD~Tu129r~JlM|oxMb%AoQD~b8(}B)-(fW);H*^*<gDE~o{UO68
zRp}IOqRbQ18$z9fP5WZNp5{aL9RV4Pu0cUoQ~jO6Qd5#~eT6jRS_Q<Clk=>`E$=>8
zT_BQWx7X015o$#3*%+Z^%!8|UB0R@anak6%6I0!yNt|4`ub^Dx2BZxl`C`&WTTpP3
z+Tss5N~Bq;B}+*SEz(7i{(=t_(*y32KYyqh-(>%&);v{Z&Yc!4vhA4Lw!^r7RH^Ce
zB&#iq4KQK=-6eumbyygetVed06Sq|-U<my>;&4AQxwM^9CTp1QM^yY!>GiwG?UvyF
zxqTKLjq!0f77#8hK|HRu*bES4#}CH9bGytwtwPAigJNOm!S4+Yh|T6d=yUPTQzawd
zpsxnac|0uG^`JOt;0_WIWYE}z9hIyMH|DG7QAG6Fo8X9U46r2JG{Dtu+(N@isDmBG
z`_$MMq7Fn~Ce6nU+kD65AHqc`(P-@A`e}c70zFNZBH+hmE#9>~MgnTL<Cflt0HG01
zFY2)~!#CMH=DLD0jpin@8tPd^Hl;ZvZelKNu^YeuEC!LF*8=_|br!MqJZItxv{U(S
zF}lj95K*N-P*nbM$nyL*wWt`@>4-`PS%FwWmA?er=J3GQv6waO9@LL3Sp>t`3qxzd
z2sQ-u6yq;ZLQ!1zO$?db0*$hs<1)pkiEn`5Cd3TEvdI%9rQ>)2U0R!B(?I&Eam+Oc
zH`A5Le!3q8TiO|_c{XPVBEu1IfaAE<dHGm#qAXaRo}Q8<OY?a+W1*UPf$TvIBxpIE
zp5?LF+bl-<tN<aTe;PvP1nBg82#B;)K#~3|07mf9rzCONNaPxT4!<F{Y@SCtZA>O;
z*J({#G4AP5gB{9+IwVa4jFK^}g0gK{5gk@fv2^D)z@PO;?;6!+1>2AgQe=%YIuej%
z2}po<%LJ6>v@<yI-l};LjE~td3~^Pi(~wAB;L}s2DG_iuuLl9fzjcrCNlo9{i?mWO
zLp9~yrulAJCnskp%(9eBR|z6bMtww4gO<T^aU~^kqqitm`-x(Zf^#HC-M<O?2lVbu
zXF5<Qi93n{kk$+f&5DMtPYuXs!D14jMJGo<gs#&sAy5t>=O(;qEnI9X(r*QS`FV~y
zS(p+he=VX-`DQ5h9Js*jiH`o{TOno2m)j@IMy0&x=#oQRO_qVW#0GzSqU0<^cKT96
zB#esaumf%O)+mv)ygdXjB0(A9+7!qmF;%5Ic;}@TJRpFtl3&-I<z6r0Ac4{;WWVrC
zq_IVNW)f2SvzYMG?;+)-r9yh?&jQ*DK5~*a+5~4De`3_2f=t^@?YXk`FEmW;)em8U
z1&O8I7tolur52ohX~SEmHLhHBQQHQI3SgK3zc2wV0*Hic1S0vW5&<D!(y9d~8LdtV
zhB+#7Iy0j^m~)oeO?$|wghBf=cq4mb8ThW#SWMCKPm-_&lCv@xh1qD&5u5@|2f-i6
zREG0X0vj+44o-H)>``ZU`Gv=@<f&i^3S78nY6UEsclh`h!~jHk@f|UJ>VzPXtOw|6
zo#6}Y3t=ggsU^Aqhw*TT6fL|PW;v?DJXlibQT%c`7s4X^3+bqcvQrEZ0n8Dl_Ms#Q
zs!`F`*te24qq%M!sd(EJP>m5t8vW24{0KWW3<Q*pFxp!51HSzTP?Mn$)bbS&fd)H(
z&Hzm4z;hEs9=MPL-0MXLh-$@N6z>|w&<BO4bmEV6$B91alz<kJ%{eV0k|0q5?)WhS
z5NmT7mLgb-(N{fKXMiBwak)^ZQY!2{BkIjAZU>X{88-qv!d0mu<Cmsl0zxKVuvH=h
z$5@O8sHLfS(`hbbqB2|l#~3H37wT2~60%JG7G}vmHcp0?w*+iZ(FVQ9mvm%m0e(y2
z%Yuv3Z7>{K>r9*=E#x{GDoc<7W}$yx&sG1fTy26!>f~U&*XF75p5Ci;SUUE?S%5=x
zF=+(?K(`5q25@AR3G#qj#(E-$$nRf-tT~}vGTaeS7TIqJ4Ua_Oi7r_M37s6j3F_dZ
zd$-=JXUd%8SIuU4rLW*KSV(AZ16b2rN@@s-<yI7~@j~H=?dqxFxnZ!23<18uot>!&
zj1hDSESIJ~hfg6AGKiM6$`I25je@@X!-_6B@6|6Q{zWoA>_XmaR^qpn#&NY?6AyIZ
zQ;C~+-3v_34gfL^ctUuBs1{-H5F0p%hS|g}v4@8bD+0)poMa9-Ts7`H^5z{t?=XCR
z?Sk+IL?n}K^<x7g%sDs|rb+y9ht6$EZNg`$aiCK##d%iXOGXrtT->Q(hJoqmpuAf-
zM8^F1xLH4dS-gas>GUxBrotC6B@m_S;Gn)+#+#Di%aSnF2;3R9jsIz!)4N8Af9%!|
z56iVZyd?$G)&u><kRdVfNGoxI(%QXfxhv^g?RxYXAQZ+KPV6BPvfM@en*9^NBU%cx
z32a&d?2!BFsKS5s&s0!l;sPr(1<-cI3LSgB4EX3mIVYur6x%KVH<+%?i9C~}#11k~
zV=6<3V4BabkXb>V$?8WV@SAX3uKFoV+>_oIlq8@qdx<xvcy628)(6Q&l%hjc&V)mh
zZz&>lsfC0aVX&f{=^F8fF<!`E6eNB6Itqe{BDl<RK15tHGUg$Sm>ipp8=<Q%V_H=b
za;6@I26BvOOsC4Q=l~(6o=(WKGf=w)to-NVV#Y3eO(YDg#ia~_iP;gzN#9@QgJ2%1
zFC^?@*aZ^xs%}t7;}W~+LJ@Tc8wqF4PhiT<W=cH4Ny(ReIIg#aDgk9_FAdlf!cB2B
zr9AaTqko$dM$0YGJ({Z{rD9P5QKG1r6n}dfOxJc@`m=yuQ}l%XZjZ7G2&)(?1P6R=
zclKd3y)!xu0o08VHA}D{Vtz(Mhxq_N!%){tDtRh)G9v|;z)d#8HGz2H9xPImub%J-
z<~L@PGa&QrpqFp<_bUyg!(afh^Hsx2j0=Pkyw8wM<4LOG>Psh<qIWi6xCb>M>e2Fo
z6ZJTzYD8bm@~EiDa}Q*x&!4D{lUc0?`@CR>yb^v@co4~c$c|)4>w0QLuw=#&3b+ca
zEujqSq^JVAu@3%a_DaqZ@3^zse<AD(P~UY(Wx~O>*sNe*@!-Jk3_Ij}{3oM2*8GJK
z4-b+XV>=d}ZVDB^>`8Gmg#Q=Yw?ef4@_ks0`Rwy*<j3{Q5xC#<_6=)1aIzjmh>Rvu
z0tnFyn6ai$ouJAexGV!<RquKsJ+oL_k>-w5EPf9FHa593FdNY(h9}~ARSYa2p?$cC
z#>-L2pr?Yc8Cqd68xLova%+l{fDvKt2ruXTrFx1Y05{W>rd8w1YnBX{9a(_wsvamF
zxSq&z=eA0CUWLO?_Yf4}u~6ERe#Lyu-9u;@L_75%!VMmp)1)H4hf19*6SdC$NEJIL
z3S5t<q3IrpB?cMuY%X$wnOvdyW06-=yz{K_<Wb3D*O6%eYg~)i!}05Kqq4_*&`3jI
z{cNbOkwzRUdeX&nsl8uD!rDFZ4eeJ)38YS^`rTW?ol|$Y6))I+Ux^P9Wr55xNT0RG
zF7<K~LE#M)D9wwcGNpeZA~|68={yRq;23NtNFfOu5U!IV^?43=Ne>ODAzqUR0+gph
z=mGUB5Z)s)6pQ>C0?VN2+KNMhBAyX)TUlZ#=(dycnM`pu0D@V*Siu0&q$fcy%b__Y
zaIUqb9vPY-inBXN1Us;>5Xoxw^6qZs_;>-9p?kqHpV>!2N9)){+n{IkpD{sRv@t81
z9hEAyK2@@HdpclVXbr8Aq5;ZKcEi|ugSC(FnNH(uIacR}u*)FVwog9V68og%5Q{C8
z!Km1_>zf@>_u7+r7))lNsWgPkO=q$>XI`c4MtT%&Mime<UBBCPCRQ-Ea8%k$<(f<Y
z6S1LMrnULaRhrW)4gd&KW9<#)+8eYWjl4vh%}4?@FWT!IY(lSyJQv}>mpdWwQx1ov
z>`jQ&;v;f}d%R6+`3Mmj2DOOv0`y`%UzRZo3TnLDm5(0FuCzz66|51Qq?jV4xy(%-
z0U?XPH#^YC2$=RQBU0w9(IBbsh}?PQS2hOJcoVHaPtPkZQ7Wu(C0d^%wID7m!5iH4
zaZ*kIYe@zzvSzV2WCf~R1)it>EP;wM)C=zF-xgmyywRVTFF+ip@Hp@h8yYO>0cYet
z@YMMmaTSDL=>KL{*>;f9;0ni!rRR8qgE7CQ-UcFv;lzNv2Rz4l?=OPD`^PJ)fj6E|
zvOE1=_CsQU0n#?pJc@3|6aOs4O%K3pOM#xN2Ub1BmOuzpj0g(UYIA+5&@loYQIhmc
zk=TwE<VjXW;w*8MTK)L_F=@n3OB1C?Ay+Vml*T~gUT9>gX9fBPdS|I|1O8(r3wQyi
zeiJ4@IWRN(E2KWTGY;Ki?4v|EJtn$k5GgEUZni2FI#7|X*)Ui15ONZcIDmig|9Cz!
zCvyVfe?O7)cByNT2#Qfc3Di-m5~*lxK$6rByFe1n#Iz`Vf+60WGS^VsuYJm-$I5T6
z-ed2sq3t+a$tW8)(f7Sxd|UdxU|uo5_Mfs$`zD;(bx~vuDAbpnQa;aIT~{HG_GO&m
z4H&g==lOjxxBeebkaO&sX#D%}>suz8MGTw6+#bi4!@Si&2K(gJxizm41c3zSIt;w|
zC<fOf<I6eB4$FIQtNF`Q5QfEU3Z24o62TlQDarWU9DBiYTe!3*3G;CEIpg7Xw{|z8
zx<nZ;rcgo*MOLRK!oX)5Soi9+X01}$V~1pZ?l!m8w+tG*X#D|E5wPWsX2>nd>|o|Y
zg}@!XW7aIa;-}hx1}$oU0~UhL_z0gqyNCPOKH&i;A@7o4Chw{T2b>8qZ^0$V*TL|H
z5@9gM_jNer=W^Oe2F6^K?`ujgq6&MN=UKR14$K}9XEBB{f>=HzeS^-?PcNtQ=`my;
z<qXmgN*itQQ9%QYZ1-ZbuOS7y%A5lDow4xZw<z$eSYX+eV*Uq`yNn2_Mar;bgt-Fb
z`kGfJAfg~Lz%UTXI9rQp%p~MYZ^B`@I<@9e^oF4A0pV#keYi>gVn8t78|))Btv8#C
z&Jg^*DlWnXBSLV{5NcKw0?vvAcJ}uM|6OpqjOqeFm#LXOG2JJp>d3#85uxMw<xzpj
zR{YuWpuah;G>)1LJn>N8i}^g9Gy7esA6ZLmjrAO!!)ZiD8p7cw;59kh$&6iu4A3gx
zgaHChPu&iLz7^yyp>;r?W;XN2rIA$83lB6HLPkj&d5e86D4Q7V0lYs9EM{d1f+pR*
z5ZJ@$LaTwFjTFjfXSvaJqqP=D{5GQ{eGn9DH`Gk-yUEo>Xh|mdo1l0AyxYh9ROv{A
zKMWvp0AdT&)qzy+DOt(tU51wQI$LmOYH|@XrhIaCll^v}#ENqVjD7(G3B=racKj}e
zTc&#aI_nd6CmQ&(JX^?MdOkEcnCjWZo?s%gn-CpleGGhjp-l<~4Ti*-H3(NOj?XOF
zBOxWX(b<{2F&=||$2gV1*5xp0@p-Lq*kDXIoH*oiW1zz@=Z=G>M*z>*6fz50zuyNO
zaNLfNk!eZD$FxM)nEp9`2q5=g@ik;0_vQBkOvF0O%gTty$HTave|y|``R4e27A^2z
z4&Lv<9R?w1B=!tL70CnSOy+(vN9RXo?!+$pXVYkjgnZF*p&`g6FeflHLG@goo{=s#
zGBc1K@!p4H@VN2r$|(F#xUKvhZYqDvmhz7;*-!!zdnx^JAmi{~e=QyIKeE5bB<Rzw
zN|GGSr73S`WU>sxS;S1r*WZ2a#=a$Vm7dRA?J@2i^UV%ssJ%GDg(P-KSd*HtjVB3(
zt#Ip9xRg=1@bICfl|XfNzl{?A<i}`ar8_9ayU<7y$P(P&?IEHJ$bgX_?l32zJ#&RK
z1t}@m&%<1cCy$D%v8Ejb$UYlPJe_q`2*l_DGsBPt<Q_WgC^zZ#0i49CQMT}Q!0BjX
z)Ec+WDZc6DfY;^YxO`A8A0ydRd~`%RIz2zAh(2fZ&fLcz>{xXBg1nu^4uXJxJx7M!
z(&>oraeCvc%&)*&Dg&gx6*~s;f(iHHHjzhJ0ncjFKLW)B2m~dBBVyCJZ4COFF_Dd?
ziQLHNssifCT|7R)qF1FZl5e12XiByendZ0#)C^%s*0E?!c(d6bN-~KY-{9;N9wA~F
zeG*_EZ(zaCJF7fEIjMgFR5>gmiX_`e52SCsE1bU+$m}hEGiXn@?#8grZqsIZ`$qtt
z!s~iahtoZa#yCWfvW(XmZ_Wq*Hy-~?WmoBC-5#<l?LZV$wze)Ga7kr_I;GZfPjxbR
zIsipP^{KZ}z%tL1Ug5n_?Lye9uZT*iW}00_2#a}lbDlM2+k&5HXoB+{UB#w^yBw7p
z<-?e)3e$zIgA2G&kwPAo9R(jR&)^X-t57M;_p)d+;ctbZ;s&V#2=dF%wJTW@#J^KH
z1m*s+_(*YA)3y#AjT<R9I9U;XeT$X5nr(4ziZ>3s$RPX7UVf4AzhviwQy6CKyHsAe
z9IYIWV^{hBEy5dcyz0EDIh_J4c{3#$!zu%>t<E9G_R<Y8kcYSza=#3nL7yq)fjWO9
z$UoX1;&N^ecXpYmSm$XMa&y~>-QtFM`n@@zalu#Lhn0QI0lWxAH-Y&a2lNaj7Js@v
zc5<zB?~7@M&m!yF#&(RmP=CDfcmgGinPMEnQ#ZcQ%xog&MG@-d>{US>woE=N%8XJU
z@gk|H^T?R8jxd5pbLUAD_`G-&1ttYD9=U!Fw)7B=G^%Mradu}FJaeR|6%_lMpO<o5
z$XS;%Rq{(G`~_UK_6AAZDtL1fa^PEpO@eo3IBPl&3V}=MPt2Ac?`uY5Wn@A&3*|rt
z6Bsq8{mWVE6(`0g^!t~^z2R#HfYw6p{#oW@pFgmtx1xfx3N!i==<!TCIFJCP@Hn!u
z1!8U<zX`bfKmeiPlz@!ove|}oYA=-%Q7M9}VG0C1JA69Az7Tn*=X~Y<TH1tH63_;%
z0~C|7yxxG=%k=u@oS`^8jstzcKO!9Bis1?8n2X6G&2im$G4VE(2IVr&e!zo_FG^}Q
zjRJ|PhUj#N@X$~3h6f{%Uef$11|lY$g%HLr-tHc27i3v)k)mN3?GE8`#7TI}TTF<A
zD!7apGWo(uWc&#kngK8t@Erq{(Tu2fuE?&-x}S{Rap*)}S-?Gr|9lV=dv6<Kjq;MP
z6!&jMo15PlMDoj{D!+1bUWS|V()zsN_AH#rC0o>YnKMk=_eX;PZElPBK$vDdPitJU
zM+eRP9`)z)NU$?``24paAL;s66Hq1GB&DFr076nbq5slR6NQIs64ZieE#5C_a0yXq
zhnD(*N4bY<xNEM~<@+XY3g>oCS_*x+^aJw*xf4Cy4oz`_CwPALx7k^6fSq5xvGe<S
zL(<E0M)4&Fe%3r++__{A<<;HX7D`!2clw+!L90h?F>l<-Z6CXl-7{$}mx+N7V6>nW
z;R0$xu_caYPaZ`qiRn26qYX+@5i|46BJ_P-h8IqUQ{>IaRI_{>;TK#1cSt3tw987t
z<ksb*N~8IvQCZshwDZtcE4&6DR=3m_jVLOy%gb_Kgv}r@YQE=7;Ik5<xGsaf8;bd|
z(%Zv!p4~ZRL&g(dk*F1f?7X@_pr>^*Mr663q8ft_`JxC@u#POEiWGYZR0LUwK9s(k
zQ0q`C^cT1?@fl>CJy&ijyac{Ahq1%om2KoPE^A3KjI&%h0Nn4eQI_@Ml5NW5#aOs8
zMl|f_UvFEAC_eKrRLW4?R{r*c<j2(Lf5-a>VIJd))OLL1=Z3qFM%6s%3U>T!k(+Pg
zxvs3ZU~dDRNng-%Ao?<bbFDY{hl~k1Z|Cuo)$OgFNAe%=?+jJW=}r11nB&V=(WH!%
zE|>7lOX+S-zx}2w_k?}V1JCorxm~ro*WOXXpn`oA$=Y+<%xay+OMxx-0lKhWikP>*
zAj1vf43H;FMSuuOa9~Loh1?dtFl^{jYJ}PP^pFJ{P~)ClR}=wKiQ@i9Z!y^guiV@s
zbJVe?U!tUs{Ea{1&+xGXGj?2pv#IUF^&U>#mFfJK+|SGKmNWQr)aC84a1XIH9!t_u
zWyP7qVVOpG)b@fwrWOw#;#;wCexae!bvDnjcy9O{Ba|9ih^7hn67_yMS#d6BLQQNQ
z8E%%2a$VGSEpoh!(M6#r8E+}$QQ(h!_<Ys=28`(p?aR}E>Gu+GmNuc2mW0L;SzWzB
z$?C0tr=7(Z1uD)%ns88GDau{C)&fYrURdz0H3ik=2UPh{ZUN=@PF-EuMg68Cg9{lb
zVoz7?ZXhZmdH{H={tet7LhjiJ5wC*@o*3rSz4PKC@WpMRgg0F41S^8h|J38F=+L49
zHW_xOxWRXOND<ootaHik>hfVCgk;L6eo5|jdsfp0vA|QYGu&AZu4cw}h6aJ(Iao|~
z4>Lv~*PrvIIxB-K|4lsxwD~bHQG8tAZyr@^WW;&nw@pM&!(d6q5^mXN5Dbv#9Q?#U
z4)w6yK6>;9n-Xm(cSYSTYIv9|V-VjvRvhti*BsG19MSuxzK_7fJdYvik}g4#v=YFc
zO$ZQt`h8GEJWNH1lFr2AN?P>Wp-VbO9%Jl<xtl+lN5a2MjBp$!6CUU;RN$7gCJfJG
zOH`gsk*glgLPVM((9qtm(`(9%=}lk60~7Ff3Vb|qZaE>k<RVK6f7ns=Wl$O-g4A&C
zCO?ZA&Eg(q3vbRaAGhS`m8WTfOEoCZXW2DZmCRDy&?b(A)1%aZ`{Hbje}D>HZ6`UI
zopvWNO6cXMa1y|P^^hMyAL0>FWpTXPVcdM(s5hGj6}$1wj(UUcu(Z5CoWUP+k#tTG
z0jQ!45^0Y=%e4@`J;duJ5`lqD>kVefe-4&86`2cep<{liQNnE2qGgf3wXbFTT;G<e
zNI1ap3M&t&PE3-&4~A?Y^^r+cQWv003<P6}KrUE1Uz-krC$E2xTGO*1pi%xvFaery
zqY;{6Gm+&Kl6>9v-V%`b9il7g5x<}}93l)tp~QE^DCX%DPrPZm3QDrfy0>1b)&UdV
z83KT$2Rh*M@B?a!Yh52>mhO9U(FW;}g6!wgo=A%fO_Q&qV?=&JfM&*WNp(scoM}mY
z(7lPV;&_9Pt&J4+NM?#<Ac%1?z+-fSX}3f`cn36pAbe_hlc8vvlce9>>~=Q)tpi{y
zMs<0+g%<>@T|I8>y=ypJ)5JJ&f0{5&k+krTYY1jg05kzoc9BW+Sq7wMMxAbyAhCge
zd=isR1`}B8=pbxOW&Nxz;ja=iL&!0U6;eO-DMBI|74NF`d+pwo93jCTAib3i#d^$S
zAl+bhusW9|u|=H0snTx4?Kps45{(&LLaYXF!bK~)`Gn5;L+b*)&Um)r#zfagLK=88
zglm8RiW$?s0r`P)v|W1b#PJY7J&Um2l2iqeV+-F5Fb8C<pCEi&UU}Q(y$VGMrAd%V
z#_UfIq0vcjwU(u!ew4BF@TgFpz2lB`&{L5qKn@kX1)^0s`Ax`&h<y4Sm0wmh9lzO;
z;S(Nwz~;kP|Cx6ZF3yJZ1&Nng0!!@&vPc4#3C@!EH}6OPU3&QJ+4e*5EoAi(-;ao6
z^uvns&$?E%^e}q%EJ9trtSspsC=t;GL87Ok=1jNdY?nmb&S=a9g_Z#R7Artl0kFx}
zL6KB5&}}IA;3g@2g6qC?3z%lB!?H311Ja9T?vva{K^d2a7G`!<^NH1wlSlgGG$ip6
zl2{<2_o9=YeO+HN#0?|XS9ceu!)$cCzz8<UXL%&_$4$k(`*0n(0IyxieI<ki$>!z5
zHdXJ`EGBr9HV_A}-{ct9D)S01(xgx>VQtJ>j!-W!P!bKUo;XQ?-CRN<!RaA6`-xA|
zgt7qt;Lc~w{j736Y@?n_rgD97UstLk0PHLv-5l2GPISX@r2rqFW=}pLoeTD85dHOm
z0wBX5veOJtR6{j6Fmc+-+d*b7A-mw%9ib^oAb-t+zyUYg{~rkqa}r~Asv#(fP#{;E
z%*9}UBz`@-mjYCC-a09;cRJwiin}+6*NI-JjQXn{E<I{g4=ac#uI(LE;$}VGpMMTZ
zs2Rdf!3(BDCgt)pnIW-|JMA^+&<KG*5DSGqad~AjSy#MVi8|-4b?_v8vJ%%A+v6VI
zbQ!ZhT@(?Lzr6hbIBW6`1X0)8T|89u#S|368Gxn)jPJqAR{2vd+zO_(nefPR+?~He
z*3RF;*ZIf%{URpunQS-?Exz7^I!`7rYW+mPN)^!s#^L#mp;Lpya$ZJ))>lYk;W8AX
zVu+`bZ1DOt_GPGb%-4d!aptEGwN8eROHObbQF2QYB9tV-jp3q34ytJ{0etms@pfy#
zoyUb&U>@!rh=9wdj)ZNy2N;>0;u|U@4j1_NX^KfV!yiVttZzqldYDj;RM=vd)ZqX5
zMR~aXp)f4^S)vRG-2!BzJVc&V6oI%H<05&G84{AAog(g4IN0qF|BOd*N3Rv@9KFDf
z;-+MmyfEEeN9o2Ea@S#X>ic`-V9Lv~7a$=(I4<U%@OE8uV)D8}iaVZ{<*(#kid0ZL
za~CU9PPUmo1)+Ep!p5Ty`B%F5TjWoo#-%-bynx8YNAeKfs$5AYtR)K_uJf>7#F(_Z
zdFDi?9<A5PkJ5Qvd%NM<OFke03$sWK+jv{fZEotN>J2`&#yzU-97*EfxziYo=gvmN
zD{M@BZ=o!>0&9668K3Gn5w&Q&Y{`W9;aR^zqX#>j;F%!a=79D!Lo6aA>cvV0u~hX|
znyILU7UE97*G>4e)%yH&MT#@trbjG$*u00_NC;3?EOLMp3`*fBn4|STqjp?iR~2k&
zJvq>hE0yo#N=@Qcxk6Ry<*69jkD{M{rm=)cf}cl~<D=?P1qV=QIPq0kZdEm6+8Bh)
zAr*sl>U;<bFUY_QKq$UQCsm9D(F=Q}kQeed^<6^nZ2npo3ypuzklh{5AlTykpsbH0
z!m^#nXgou9aX*O1hrMh_!w%j^H_Q+qiv*aTXb#HA*Ej^~=^K3ae)4*-h9kKJah>uq
zp)NL*DN+!Gl0{l7@<sa7ixV1=w0BsJ@dQMC{JsYFL$loAJ+QgjNRClZohdT7i8u^w
z`tXwb4Y7fWo62j?XzQgGw@~Ul*`f3eD9#TIBJJJ@!y?`VFh*K}-yRh#HG!lB_%c+$
zG=C)`pocj$K-nA;pe#iJ{J0DOK)`PM)0)LQxN!5YYWS<Ro{I@jf`FOE-4p1<onE(x
z#1g5DT8G(@qKur*P-vZwkTTN0&w8{K;)c>hoKCRIiKvYn>E*q>{g*rOVSTT<U&U*0
z$~)&w#o@?y4BN3QK1LyMRoQKNcqJYIq<*$BO_PaiCyD9nqK|5b@{u-~D%dkIM;n`_
zd<8xYCHhq|-9_SU64jz5fv7Tlk^Kr^4mdcIZf7}owT2Y$QZ=(Kj4ETZKFnj8j>-A?
z668{*)RvBhy!DOG;P9DaA=AI^Jq4UN{%}5t;mz-!7{r4NMJh1_1sA;z?bWG&Mz3LF
zGQJ4rM_6%#I-}kvSR)u}(87gH4|um}d<W4tUe#2CIPrav<$3YHS1F5TgGOlOKz9i-
zw|(B(iMm)n7Y7*PdJ(C65q{Gy5>kK0U?`@-drRF7S#|{2${*;wyKyEfN0I&^W#LNk
zCHyJhodPQVYA@o|n*6Z!BYTn`))QU}N~~NVD&-Dn+Q9wd0_gK7_0Y@qw^0wFE{}*e
z2F>8lA9_FH`-2Bc1z3hnpqH5d);H?u4CI5K0Y11v(Gx~<zc4#|v+?v(`U4+{jY>`2
zi6V6eyrE)z0;JTHU9r8!INc5GZBHm0-SA;D_683z@`2mSNHBbAUiz>8b^eDucbftX
zJa_{42pI0h*=^5$3;%oa<k467XZz2Ox59r>w6n8~Z`%)d9zS~YcxUI~ldrb6A3xf9
z_*JxZ+Y>4350W9mv7)cAr4ZPAgQ7YtyXyO*c7O14_%$IP?+!<w$Gp!T{onr+?QCuR
zd2MHF`%zS7)EhpI;lPxzEYxN7qhxH?@ffYcJmPq0<`mIeIQroC8z-V)(a(AcK1^wP
z@R`wG6Z$hxm?e**R*PM=F*21gxdq6`m<t32iUS#^*09}!dlM#|VSC0KOfm;K{wH`M
z81}#AV{5Uzvc}#pR42i$0%%?Q?7oVPiLodU3~)HeLpi+l+3wHeEpVEk_*pXBxP_YE
z06>BFQ9Qjk>oHrru|nB||0c4&(M(>;S%XKOy~C!3MdRHycp>4v#N&aO7?|89_#=Fd
zeRcxlmM%J-BQj^hv5tWNC*;>LaLLf{ew}kR?8q+(fbW6}A)6Hd#B~p$ddj%}g_nR>
zLI`w+e9Dc`=3Cpan86`PiHcn{@m<YuT$XM>{Hct5l&SDurc6dIUNU}ypKwMyuyFu7
z<k=Zxa)AM(vz2IaMl~ARme$1ds00TK2OMT6AUm-HBc&aj<CvBKBwVDE?3%AD5d*5;
zl^d0)dK?`!>Tj!iNL#sFKE`i&r{7hZuj_A`5o$EbwdVV%z8{rq@1yUlwY{~d^0y--
z#yF1Z4fb9=JUXcIy_{-o_u$PQlKVt2(X>`?MhDfyY7_l7>k-Ga-l`Qe-H#3{ja}H1
zns8PgRGaT*U-zrX<cXg5>y4-!As0upy8Gs!+=!0eG>+=W6-;6eJ=dzW{RRe-$8%w8
zsMb&zRo=q$7UA`tg9F(Qj97kybu>8BXt#d!zEOSkx*5H$AM8P7jb2tT!Sc(4iVlb6
z?H-h?hmZ)%hviqCWFx9$-m*&8)OkklURPMkQOo$h-6o$p#)5Y1wPpi9*RbA3)3<(y
zyr^qYxlv`_BTj6;QO6(zFB)MC^n<3gigrZM3$*+Wp$eggEQ-F*Xs?1yO0`$VvH`VB
zeYeYq<~bXhz>imfD<IUUJUAQjMrVQ_FIw_rV~|Y0V>f&`xHag3L%oG}9?@jaI`VVl
zqx^i)?Oycz8|^bz#{a>`a_>*QF5h{NR1<yIC?Db8)^YsW-`JY_fF^?PN%U+o1+$oL
zpubERb%I>s=gB7TEoV#77E&l=$Me&1?h9{{L4;&!+P6f<Y)Y_^hS6`FEf;;AKu+t-
z`bfcs@6L_nWt>!flF*P;Ya#?+STM;`1qj%q#FkhV2dM!T{*=xS;lD2Zqyki(u3!>~
zXHnS!6(u|fYpF_f5n@Ta37;IHH9I9A2TKsEzc_WluG<iuIob(KC2;%q`2f?OxCAuD
zbu~h6Jf4lFJzQQ>#f`6TLj=Q@sI=-AyOJsp?j<lrYq)i-MWXMmVmTD<Hdisy3FgXf
z5Q^MtlTQLqD9zh$4KXWjfl8?~Uu%>2WLOHS$)-jW7O}aSPXL-4B+@1K{yNw%Oi&Vf
zv^s!WB+xIkZ<|q07L(^w2pTDOa0bY6R!oqGtq<8{GDY@2@rfWv96{1ZLyNqNIO%LT
z-ib@=puX>s!U^jPTwuYhR<&kWGP&se;~#kb^ji(FB3uv`bNw#odQ_<%{c#^PP~nz3
z%=PGE@;DAvXs^pD?1KWUnP20Vbl?Xepa}abI>DHkCS6FQ9~SLYzOuw*%`K0Z7A{3Y
zhvuqz3{3Yy%z%pqS9r;*U#eqKG9rE}eSr*u8hzx=K~!0QG&k-0MxE0Fe1mykI^z@^
zM9U6hO?wjCa0L!=3IR%~MKxIeZFI(|5w9*<J(nRN7}B9NYRN8vn9mbPxbcAAVp+3L
zFHF3PV1;^~acG!f3@(W1O}m+@-Vtk!${uy?z7~Km#Z6<Wof$(WL?2Aph_kU#u4t0(
ziN)Zuz|pktJAj2tave-D9c|&vJd7+{>5JYN4H?_7>nA@QKX0(m8N_`#i`;YOHWUKv
zgIAUd=JF+-DDtclg7lbRv!|EfACV&56L%`xYcEQCX<xVat^v+>kG?ulBCmhw#!*4A
z5dST}9Y;rg9gf}k0wOXR<Q4`w%>+%CB*4F2mx`KmpQ5c#f7!bCCs@XNT?=ErrKeDh
z<}<zBDzJ4YmGl+dw+c?uxS+`u&rga}C(12ryTGz6%St6;o|zIFOY+Hk6`YI<ISTjA
zEYq&Klih6v=OL!$6Z-A24buT6NOjIaJs(4D*Rd(V*{w#(%Ps-i#uwgTaA_zOFOpW}
z=szidQaCn1NKE1-yqs-dvz2Bdw@QP#gapzz@=GB^)~OreL}dGK^6npjbDbbi1o+dk
zp^;?3UgxdV2zLMG9lN7t0*q-zow4h9&*%g%Gq^3_H9~9@HiD+6OE*J1yXr<s(S8^h
z1X%e!$S@>^1KtyPRWKZHLEwcH&zk+Q>R1s;0k)GVk5A7_EGLEIDE58<HDvq`cd4&P
zN#g3$vsA4k3acm^N%0tqQjs74gHJ}RAoE&LP7eFbkZjLNS;jDE80)iT<gr#Lx{7%(
z9Mpq}*+SAMACUa@*)ybiy`=7&8a>ju?^3J&2TVU~@NgT^-#N&`R~bl7Hh;fO%Kzk@
z0NEfp75Eor!RtQ55gyUM(Ex$$cV#yH@CdysH@h(2g^AAvIsp-cH*x|olP7eL$1wK=
zf%yh*V7@=w+2$lo5|;ZS>7W+1VxT{_NE~qdgY@TfMf)#)C~OJB^qB=aZI=BwpA%WP
zR65S4VV&=OVLsPjO_!x0O^5O<EtgzVC18=qI3s23WQIN|P`WTrfoV;5i~_e8)Jq4>
zBobIu8^&d(;>qNq$j$Mx``fvyej(YhiF<j3p`QUKKsqnKjYKDk&kDJ;0*yr)`Q<J-
z^ZA4x?kX|5Yn?G&Vsb&s{m&9*L_vTU&o2l}gH=h0fY5YeN!A>YpW`K04FW#o`nt4s
zW_7t>bdQH~(_2)LppYmXE-J&_UE~i#dorBe7&aJajuB^KmKcYkCst{jH*@CLcBZxj
zpGfu6&egRW@3dzm+RDZxNwk!V{Btf^NxsM^`Qsxq4~ewWd2DeWJD1i*SdL9Z*LZnt
zL}pa?;{Aj2tK;~veEhwLW`zrQhbZ5Ig)xk85zJqfVW0oz<eLRsk_Kx<lnHX9va!Y|
zg~xk{04CFDJeLYD8hvg=HK%2P0G!rDIfU`berm2D())%^M<Vjh4{&uDc3=w7=GTe7
z{q|asmPVV}Z5rbyi+13lwL)kzV;KsXKm)wI0clVoW)Nv|;zJ<hk&!G#=HpUK-cm72
z;~7#x$6{-IJc*Zpa&aWS+=Y?&0^R4y507}Q)|a|Sj%=xm&hVvjFu8CJ>7syP05c>`
zWB+9u%e6$A4I-OI7o!B!g<tNl++@`l&bGaJBd*q(l>?;d+uBa+YlLQZI2*UyjN`%e
zx=vF1h;10cnvXobs!NsdKQ4+BUAb(GKm94v#z^_{EPDK81=LBcs)Vkh`w+;twz`@B
z9Z&_x%BV#7VD|7KR+ji^LAI9Ma15&bEpAp1Dtk-CE0VRZMF7?1y}Z9pFLSIbKbQC2
z>vV&RIM<p?yt{jR`LZ5e*^^D^>aHFaZilkOAWUe<DNLoUPcQ$nW|}j6K9>?<Vc)w2
zF^a&_?X80Q>_tI}B7lqKBZ#MSp8j1i#?n#O#RE4;6VPSyjWCFuoHS^-h(`xZ`wtX|
zm#!WDIn^SESWq%{>pyILK)?krlEX598|NeajH~>3!AN{*PaduFsw#30Op|kcMFN%E
zEe3Pqh=HYOjBCm?^yuS6h%e>Ebu|p&tqF0@ZNOSUkqBm>HhhY3-{8iQ@e2fGWTsXM
z9VVhEgEd=VnJ^iYfy;<GoQBHJJ@4yQxQm%`fAys$%{(slCpS0xI2qM$OD~$LNPw$z
zLwJ@KA=#<~!@6l|gOy;qs39@EYQv0hO648ugwP*6B<gN@cM!OPz#RncAaDnPI|$rC
z;0^+J5V(WD9R%(ma0h`q2;4#74gz-&xP!nQ1nwYk2Z1{X+(F<D0(TI&gTNgG?jUdn
hfjbD?LEsJocM!OPz#RncAaDnPI|$rC;13Of{|8wta-0AF

literal 0
HcmV?d00001

diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 4bcc5937db39..2f9e954085ec 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -35,6 +35,7 @@
 #include <inttypes.h>
 #include <errno.h>
 #include <sys/ioctl.h>
+#include <signal.h>
 
 #include <drm.h>
 
@@ -71,26 +72,23 @@ static void trigger_reset(int fd)
 	gem_quiescent_gpu(fd);
 }
 
-static void wedge_gpu(int fd)
+static void manual_hang(int drm_fd)
 {
-	/* First idle the GPU then disable GPU resets before injecting a hang */
-	gem_quiescent_gpu(fd);
-
-	igt_require(i915_reset_control(false));
+	int dir = igt_debugfs_dir(drm_fd);
 
-	igt_debug("Wedging GPU by injecting hang\n");
-	igt_post_hang_ring(fd, igt_hang_ring(fd, I915_EXEC_DEFAULT));
+	igt_sysfs_set(dir, "i915_wedged", "-1");
 
-	igt_assert(i915_reset_control(true));
+	close(dir);
 }
 
-static void wedgeme(int drm_fd)
+static void wedge_gpu(int fd)
 {
-	int dir = igt_debugfs_dir(drm_fd);
-
-	igt_sysfs_set(dir, "i915_wedged", "-1");
+	/* First idle the GPU then disable GPU resets before injecting a hang */
+	gem_quiescent_gpu(fd);
 
-	close(dir);
+	igt_require(i915_reset_control(false));
+	manual_hang(fd);
+	igt_assert(i915_reset_control(true));
 }
 
 static int __gem_throttle(int fd)
@@ -149,26 +147,111 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
 	return err;
 }
 
-static void test_wait(int fd)
+static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
+{
+	if (gem_can_store_dword(fd, flags))
+		return __igt_spin_batch_new_poll(fd, ctx, flags);
+	else
+		return __igt_spin_batch_new(fd, ctx, flags, 0);
+}
+
+static void __spin_wait(int fd, igt_spin_t *spin)
+{
+	if (spin->running) {
+		igt_spin_busywait_until_running(spin);
+	} else {
+		igt_debug("__spin_wait - usleep mode\n");
+		usleep(500e3); /* Better than nothing! */
+	}
+}
+
+static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
+{
+	igt_spin_t *spin = __spin_poll(fd, ctx, flags);
+
+	__spin_wait(fd, spin);
+
+	return spin;
+}
+
+static int debugfs_dir = -1;
+
+static void hang_handler(int sig)
+{
+	igt_sysfs_set(debugfs_dir, "i915_wedged", "-1");
+}
+
+static void hang_after(int fd, unsigned int us)
+{
+	struct sigaction sa = { .sa_handler = hang_handler };
+	struct itimerval itv = { };
+
+	debugfs_dir = igt_debugfs_dir(fd);
+	igt_assert_fd(debugfs_dir);
+
+	igt_assert_eq(sigaction(SIGALRM, &sa, NULL), 0);
+
+	itv.it_value.tv_sec = us / 1000000;
+	itv.it_value.tv_usec = us % 1000000;
+	setitimer(ITIMER_REAL, &itv, NULL);
+}
+
+static void cleanup_hang(void)
+{
+	struct itimerval itv = { };
+
+	igt_assert_eq(setitimer(ITIMER_REAL, &itv, NULL), 0);
+
+	igt_assert_fd(debugfs_dir);
+	close(debugfs_dir);
+	debugfs_dir = -1;
+}
+
+static int __check_wait(int fd, uint32_t bo, unsigned int wait)
+{
+	unsigned long wait_timeout = 250e6; /* Some margin for actual reset. */
+	int ret;
+
+	if (wait) {
+		wait_timeout += wait * 2000; /* x2 for safety. */
+		wait_timeout += 250e6; /* Margin for signal delay. */
+		hang_after(fd, wait);
+	} else {
+		manual_hang(fd);
+	}
+
+	ret = __gem_wait(fd, bo, wait_timeout);
+
+	if (wait)
+		cleanup_hang();
+
+	return ret;
+}
+
+#define TEST_WEDGE (1)
+
+static void test_wait(int fd, unsigned int flags, unsigned int wait)
 {
-	igt_hang_t hang;
+	igt_spin_t *hang;
 
 	igt_require_gem(fd);
 
-	/* If the request we wait on completes due to a hang (even for
+	/*
+	 * If the request we wait on completes due to a hang (even for
 	 * that request), the user expects the return value to 0 (success).
 	 */
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
-	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
-	igt_post_hang_ring(fd, hang);
 
-	/* If the GPU is wedged during the wait, again we expect the return
-	 * value to be 0 (success).
-	 */
-	igt_require(i915_reset_control(false));
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
-	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
-	igt_post_hang_ring(fd, hang);
+	if (flags & TEST_WEDGE)
+		igt_require(i915_reset_control(false));
+	else
+		igt_require(i915_reset_control(true));
+
+	hang = spin_sync(fd, 0, I915_EXEC_DEFAULT);
+
+	igt_assert_eq(__check_wait(fd, hang->handle, wait), 0);
+
+	igt_spin_batch_free(fd, hang);
+
 	igt_require(i915_reset_control(true));
 
 	trigger_reset(fd);
@@ -181,7 +264,7 @@ static void test_suspend(int fd, int state)
 
 	/* Check we can suspend when the driver is already wedged */
 	igt_require(i915_reset_control(false));
-	wedgeme(fd);
+	manual_hang(fd);
 
 	igt_system_suspend_autoresume(state, SUSPEND_TEST_DEVICES);
 
@@ -189,7 +272,7 @@ static void test_suspend(int fd, int state)
 	trigger_reset(fd);
 }
 
-static void test_inflight(int fd)
+static void test_inflight(int fd, unsigned int wait)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_exec_object2 obj[2];
@@ -209,11 +292,10 @@ static void test_inflight(int fd)
 		int fence[64]; /* conservative estimate of ring size */
 
 		gem_quiescent_gpu(fd);
-
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		hang = spin_sync(fd, 0, engine);
 		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
@@ -227,7 +309,8 @@ static void test_inflight(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+		igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
@@ -256,7 +339,7 @@ static void test_inflight_suspend(int fd)
 	obj[1].handle = gem_create(fd, 4096);
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = spin_sync(fd, 0, 0);
 	obj[0].handle = hang->handle;
 
 	memset(&execbuf, 0, sizeof(execbuf));
@@ -273,7 +356,8 @@ static void test_inflight_suspend(int fd)
 	igt_set_autoresume_delay(30);
 	igt_system_suspend_autoresume(SUSPEND_STATE_MEM, SUSPEND_TEST_NONE);
 
-	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	igt_assert_eq(__check_wait(fd, obj[1].handle, 10), 0);
+
 	for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 		igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 		close(fence[n]);
@@ -301,7 +385,7 @@ static uint32_t context_create_safe(int i915)
 	return param.ctx_id;
 }
 
-static void test_inflight_contexts(int fd)
+static void test_inflight_contexts(int fd, unsigned int wait)
 {
 	struct drm_i915_gem_exec_object2 obj[2];
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -330,7 +414,7 @@ static void test_inflight_contexts(int fd)
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		hang = spin_sync(fd, 0, engine);
 		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
@@ -345,7 +429,8 @@ static void test_inflight_contexts(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+		igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
@@ -375,7 +460,7 @@ static void test_inflight_external(int fd)
 	fence = igt_cork_plug(&cork, fd);
 
 	igt_require(i915_reset_control(false));
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = __spin_poll(fd, 0, 0);
 
 	memset(&obj, 0, sizeof(obj));
 	obj.handle = gem_create(fd, 4096);
@@ -393,6 +478,9 @@ static void test_inflight_external(int fd)
 	fence = execbuf.rsvd2 >> 32;
 	igt_assert(fence != -1);
 
+	__spin_wait(fd, hang);
+	manual_hang(fd);
+
 	gem_sync(fd, hang->handle); /* wedged, with an unready batch */
 	igt_assert(!gem_bo_busy(fd, hang->handle));
 	igt_assert(gem_bo_busy(fd, obj.handle));
@@ -407,7 +495,7 @@ static void test_inflight_external(int fd)
 	trigger_reset(fd);
 }
 
-static void test_inflight_internal(int fd)
+static void test_inflight_internal(int fd, unsigned int wait)
 {
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 obj[2];
@@ -420,7 +508,7 @@ static void test_inflight_internal(int fd)
 	igt_require(gem_has_exec_fence(fd));
 
 	igt_require(i915_reset_control(false));
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = spin_sync(fd, 0, 0);
 
 	memset(obj, 0, sizeof(obj));
 	obj[0].handle = hang->handle;
@@ -441,7 +529,8 @@ static void test_inflight_internal(int fd)
 		nfence++;
 	}
 
-	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 	while (nfence--) {
 		igt_assert_eq(sync_fence_status(fences[nfence]), -EIO);
 		close(fences[nfence]);
@@ -484,29 +573,46 @@ igt_main
 	igt_subtest("execbuf")
 		test_execbuf(fd);
 
-	igt_subtest("wait")
-		test_wait(fd);
-
 	igt_subtest("suspend")
 		test_suspend(fd, SUSPEND_STATE_MEM);
 
 	igt_subtest("hibernate")
 		test_suspend(fd, SUSPEND_STATE_DISK);
 
-	igt_subtest("in-flight")
-		test_inflight(fd);
-
-	igt_subtest("in-flight-contexts")
-		test_inflight_contexts(fd);
-
 	igt_subtest("in-flight-external")
 		test_inflight_external(fd);
 
-	igt_subtest("in-flight-internal") {
-		igt_skip_on(gem_has_semaphores(fd));
-		test_inflight_internal(fd);
-	}
-
 	igt_subtest("in-flight-suspend")
 		test_inflight_suspend(fd);
+
+	igt_subtest_group {
+		const struct {
+			unsigned int wait;
+			const char *name;
+		} waits[] = {
+			{ .wait = 0, .name = "immediate" },
+			{ .wait = 10, .name = "10us" },
+			{ .wait = 10000, .name = "10ms" },
+		};
+		unsigned int i;
+
+		for (i = 0; i < sizeof(waits) / sizeof(waits[0]); i++) {
+			igt_subtest_f("wait-%s", waits[i].name)
+				test_wait(fd, 0, waits[i].wait);
+
+			igt_subtest_f("wait-wedge-%s", waits[i].name)
+				test_wait(fd, TEST_WEDGE, waits[i].wait);
+
+			igt_subtest_f("in-flight-%s", waits[i].name)
+				test_inflight(fd, waits[i].wait);
+
+			igt_subtest_f("in-flight-contexts-%s", waits[i].name)
+				test_inflight_contexts(fd, waits[i].wait);
+
+			igt_subtest_f("in-flight-internal-%s", waits[i].name) {
+				igt_skip_on(gem_has_semaphores(fd));
+				test_inflight_internal(fd, waits[i].wait);
+			}
+		}
+	}
 }
-- 
2.14.1

_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* [PATCH i-g-t 3/3] tests/perf_pmu: Improve accuracy by waiting on spinner to start
  2018-03-22 17:24 ` [igt-dev] " Tvrtko Ursulin
@ 2018-03-22 17:24   ` Tvrtko Ursulin
  -1 siblings, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2018-03-22 17:24 UTC (permalink / raw)
  To: igt-dev; +Cc: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

More than one test assumes that the spinner is running pretty much
immediately after we have created or submitted it.

In actuality there is a variable delay, especially on execlists platforms,
between submission and the spin batch starting to run on the hardware.

To enable tests which care about this level of timing to account for it,
we add a new spin batch constructor which provides an output field that
can be polled to determine when the batch has actually started running.

This is implemented via MI_STORE_DWORD_IMM from the spin batch, writing into
a memory-mapped page shared with userspace.

Using this facility from perf_pmu, where applicable, should reduce the very
occasional test failures seen across the test set and platforms.
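
Not part of the patch, but as a rough usage sketch: a test built on top of
this can busy-wait on the shared page along these lines (fd, ctx and engine
are placeholders; the helpers are the ones added in this series):

	igt_spin_t *spin = __igt_spin_batch_new_poll(fd, ctx, engine);

	/* spin->running points into the page the spinner batch writes to. */
	while (!READ_ONCE(*spin->running))
		;

	/* The spinner is now known to be executing on the GPU. */
	igt_spin_batch_free(fd, spin);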

v2:
 Chris Wilson:
 * Use caching mapping if available.
 * Handle old gens better.
 * Use gem_can_store_dword.
 * Cache exec obj array in spin_batch_t for easier resubmit.

v3:
 * Forgot I915_EXEC_NO_RELOC. (Chris Wilson)

v4:
 * Mask out all non-engine flags in gem_can_store_dword.
 * Added some debug logging.

v5:
 * Fix relocs and batch munmap. (Chris)
 * Added an assert that the idle spinner batch looks as expected.

v6:
 * Skip accuracy tests when !gem_can_store_dword.

v7:
 * Fix batch recursion reloc address.

v8:
 Chris Wilson:
 * Pull up gem_can_store_dword check before we start submitting.
 * Build spinner batch in a way we can skip store dword when not
   needed so we can run on SandyBridge.

v9:
 * Fix wait on spinner.
 * More tweaks to accuracy test.

v10:
 * Dropped accuracy subtest changes due to problems with RT thread and
   tasklet submission.

v11:
 * Use READ_ONCE.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> # v9
---
 tests/perf_pmu.c | 151 +++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 103 insertions(+), 48 deletions(-)

diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index 19fcc95ffc7f..f27b7ec7d2c2 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -170,6 +170,56 @@ static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
 #define FLAG_LONG (16)
 #define FLAG_HANG (32)
 
+static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
+{
+	if (gem_can_store_dword(fd, flags))
+		return __igt_spin_batch_new_poll(fd, ctx, flags);
+	else
+		return __igt_spin_batch_new(fd, ctx, flags, 0);
+}
+
+static unsigned long __spin_wait(int fd, igt_spin_t *spin)
+{
+	struct timespec start = { };
+
+	igt_nsec_elapsed(&start);
+
+	if (spin->running) {
+		unsigned long timeout = 0;
+
+		while (!READ_ONCE(*spin->running)) {
+			unsigned long t = igt_nsec_elapsed(&start);
+
+			if ((t - timeout) > 250e6) {
+				timeout = t;
+				igt_warn("Spinner not running after %.2fms\n",
+					 (double)t / 1e6);
+			}
+		}
+	} else {
+		igt_debug("__spin_wait - usleep mode\n");
+		usleep(500e3); /* Better than nothing! */
+	}
+
+	return igt_nsec_elapsed(&start);
+}
+
+static igt_spin_t * __spin_sync(int fd, uint32_t ctx, unsigned long flags)
+{
+	igt_spin_t *spin = __spin_poll(fd, ctx, flags);
+
+	__spin_wait(fd, spin);
+
+	return spin;
+}
+
+static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
+{
+	igt_require_gem(fd);
+
+	return __spin_sync(fd, ctx, flags);
+}
+
 static void end_spin(int fd, igt_spin_t *spin, unsigned int flags)
 {
 	if (!spin)
@@ -180,8 +230,25 @@ static void end_spin(int fd, igt_spin_t *spin, unsigned int flags)
 	if (flags & FLAG_SYNC)
 		gem_sync(fd, spin->handle);
 
-	if (flags & TEST_TRAILING_IDLE)
-		usleep(batch_duration_ns / 5000);
+	if (flags & TEST_TRAILING_IDLE) {
+		unsigned long t, timeout = 0;
+		struct timespec start = { };
+
+		igt_nsec_elapsed(&start);
+
+		do {
+			t = igt_nsec_elapsed(&start);
+
+			if (gem_bo_busy(fd, spin->handle) &&
+			    (t - timeout) > 10e6) {
+				timeout = t;
+				igt_warn("Spinner not idle after %.2fms\n",
+					 (double)t / 1e6);
+			}
+
+			usleep(1e3);
+		} while (t < batch_duration_ns / 5);
+	}
 }
 
 static void
@@ -195,7 +262,7 @@ single(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
 	fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
 
 	if (flags & TEST_BUSY)
-		spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+		spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
 	else
 		spin = NULL;
 
@@ -251,13 +318,7 @@ busy_start(int gem_fd, const struct intel_execution_engine2 *e)
 	 */
 	sleep(2);
 
-	spin = __igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
-
-	/*
-	 * Sleep for a bit after making the engine busy to make sure the PMU
-	 * gets enabled when the batch is already running.
-	 */
-	usleep(500e3);
+	spin = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
 
 	fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
 
@@ -300,7 +361,7 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
 	 * re-submission in execlists mode. Make sure busyness is correctly
 	 * reported with the engine busy, and after the engine went idle.
 	 */
-	spin[0] = __igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+	spin[0] = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
 	usleep(500e3);
 	spin[1] = __igt_spin_batch_new(gem_fd, ctx, e2ring(gem_fd, e), 0);
 
@@ -386,7 +447,7 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 
 	igt_assert_eq(i, num_engines);
 
-	spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+	spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
 	pmu_read_multi(fd[0], num_engines, tval[0]);
 	slept = measured_usleep(batch_duration_ns / 1000);
 	if (flags & TEST_TRAILING_IDLE)
@@ -412,15 +473,15 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 }
 
 static void
-__submit_spin_batch(int gem_fd,
-		    struct drm_i915_gem_exec_object2 *obj,
-		    const struct intel_execution_engine2 *e)
+__submit_spin_batch(int gem_fd, igt_spin_t *spin,
+		    const struct intel_execution_engine2 *e,
+		    int offset)
 {
-	struct drm_i915_gem_execbuffer2 eb = {
-		.buffer_count = 1,
-		.buffers_ptr = to_user_pointer(obj),
-		.flags = e2ring(gem_fd, e),
-	};
+	struct drm_i915_gem_execbuffer2 eb = spin->execbuf;
+
+	eb.flags &= ~(0x3f | I915_EXEC_BSD_MASK);
+	eb.flags |= e2ring(gem_fd, e) | I915_EXEC_NO_RELOC;
+	eb.batch_start_offset += offset;
 
 	gem_execbuf(gem_fd, &eb);
 }
@@ -429,7 +490,6 @@ static void
 most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 		    const unsigned int num_engines, unsigned int flags)
 {
-	struct drm_i915_gem_exec_object2 obj = {};
 	const struct intel_execution_engine2 *e_;
 	uint64_t tval[2][num_engines];
 	uint64_t val[num_engines];
@@ -443,15 +503,12 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 		if (!gem_has_engine(gem_fd, e_->class, e_->instance))
 			continue;
 
-		if (e == e_) {
+		if (e == e_)
 			idle_idx = i;
-		} else if (spin) {
-			__submit_spin_batch(gem_fd, &obj, e_);
-		} else {
-			spin = igt_spin_batch_new(gem_fd, 0,
-						  e2ring(gem_fd, e_), 0);
-			obj.handle = spin->handle;
-		}
+		else if (spin)
+			__submit_spin_batch(gem_fd, spin, e_, 64);
+		else
+			spin = __spin_poll(gem_fd, 0, e2ring(gem_fd, e_));
 
 		val[i++] = I915_PMU_ENGINE_BUSY(e_->class, e_->instance);
 	}
@@ -461,6 +518,9 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 	for (i = 0; i < num_engines; i++)
 		fd[i] = open_group(val[i], fd[0]);
 
+	/* Small delay to allow engines to start. */
+	usleep(__spin_wait(gem_fd, spin) * num_engines / 1e3);
+
 	pmu_read_multi(fd[0], num_engines, tval[0]);
 	slept = measured_usleep(batch_duration_ns / 1000);
 	if (flags & TEST_TRAILING_IDLE)
@@ -489,7 +549,6 @@ static void
 all_busy_check_all(int gem_fd, const unsigned int num_engines,
 		   unsigned int flags)
 {
-	struct drm_i915_gem_exec_object2 obj = {};
 	const struct intel_execution_engine2 *e;
 	uint64_t tval[2][num_engines];
 	uint64_t val[num_engines];
@@ -503,13 +562,10 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines,
 		if (!gem_has_engine(gem_fd, e->class, e->instance))
 			continue;
 
-		if (spin) {
-			__submit_spin_batch(gem_fd, &obj, e);
-		} else {
-			spin = igt_spin_batch_new(gem_fd, 0,
-						  e2ring(gem_fd, e), 0);
-			obj.handle = spin->handle;
-		}
+		if (spin)
+			__submit_spin_batch(gem_fd, spin, e, 64);
+		else
+			spin = __spin_poll(gem_fd, 0, e2ring(gem_fd, e));
 
 		val[i++] = I915_PMU_ENGINE_BUSY(e->class, e->instance);
 	}
@@ -519,6 +575,9 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines,
 	for (i = 0; i < num_engines; i++)
 		fd[i] = open_group(val[i], fd[0]);
 
+	/* Small delay to allow engines to start. */
+	usleep(__spin_wait(gem_fd, spin) * num_engines / 1e3);
+
 	pmu_read_multi(fd[0], num_engines, tval[0]);
 	slept = measured_usleep(batch_duration_ns / 1000);
 	if (flags & TEST_TRAILING_IDLE)
@@ -550,7 +609,7 @@ no_sema(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
 	open_group(I915_PMU_ENGINE_WAIT(e->class, e->instance), fd);
 
 	if (flags & TEST_BUSY)
-		spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+		spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
 	else
 		spin = NULL;
 
@@ -884,7 +943,7 @@ multi_client(int gem_fd, const struct intel_execution_engine2 *e)
 	 */
 	fd[1] = open_pmu(config);
 
-	spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+	spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
 
 	val[0] = val[1] = __pmu_read_single(fd[0], &ts[0]);
 	slept[1] = measured_usleep(batch_duration_ns / 1000);
@@ -1248,7 +1307,7 @@ test_frequency(int gem_fd)
 	igt_require(igt_sysfs_get_u32(sysfs, "gt_boost_freq_mhz") == min_freq);
 
 	gem_quiescent_gpu(gem_fd); /* Idle to be sure the change takes effect */
-	spin = igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
+	spin = spin_sync(gem_fd, 0, I915_EXEC_RENDER);
 
 	slept = pmu_read_multi(fd, 2, start);
 	measured_usleep(batch_duration_ns / 1000);
@@ -1274,7 +1333,7 @@ test_frequency(int gem_fd)
 	igt_require(igt_sysfs_get_u32(sysfs, "gt_min_freq_mhz") == max_freq);
 
 	gem_quiescent_gpu(gem_fd);
-	spin = igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
+	spin = spin_sync(gem_fd, 0, I915_EXEC_RENDER);
 
 	slept = pmu_read_multi(fd, 2, start);
 	measured_usleep(batch_duration_ns / 1000);
@@ -1517,7 +1576,6 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
 		const unsigned long timeout[] = {
 			pwm_calibration_us * 1000, test_us * 1000
 		};
-		struct drm_i915_gem_exec_object2 obj = {};
 		uint64_t total_busy_ns = 0, total_idle_ns = 0;
 		igt_spin_t *spin;
 		int ret;
@@ -1531,11 +1589,8 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
 
 		/* Allocate our spin batch and idle it. */
 		spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
-		obj.handle = spin->handle;
-		__submit_spin_batch(gem_fd, &obj, e); /* record its location */
 		igt_spin_batch_end(spin);
-		gem_sync(gem_fd, obj.handle);
-		obj.flags |= EXEC_OBJECT_PINNED;
+		gem_sync(gem_fd, spin->handle);
 
 		/* 1st pass is calibration, second pass is the test. */
 		for (int pass = 0; pass < ARRAY_SIZE(timeout); pass++) {
@@ -1549,7 +1604,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
 
 				/* Restart the spinbatch. */
 				__rearm_spin_batch(spin);
-				__submit_spin_batch(gem_fd, &obj, e);
+				__submit_spin_batch(gem_fd, spin, e, 0);
 
 				/*
 				 * Note that the submission may be delayed to a
@@ -1559,7 +1614,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
 
 				t_busy = measured_usleep(busy_us);
 				igt_spin_batch_end(spin);
-				gem_sync(gem_fd, obj.handle);
+				gem_sync(gem_fd, spin->handle);
 
 				total_busy_ns += t_busy;
 
-- 
2.14.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 21+ messages in thread

* Re: [igt-dev] [PATCH i-g-t 2/3] tests/gem_eio: Speed up test execution
  2018-03-22 17:24   ` [igt-dev] " Tvrtko Ursulin
@ 2018-03-22 17:44     ` Chris Wilson
  -1 siblings, 0 replies; 21+ messages in thread
From: Chris Wilson @ 2018-03-22 17:44 UTC (permalink / raw)
  To: Tvrtko Ursulin, igt-dev; +Cc: Intel-gfx

Quoting Tvrtko Ursulin (2018-03-22 17:24:16)
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> If we stop relying on regular GPU hangs to be detected, but trigger them
> manually as soon as we know our batch of interest is actually executing
> on the GPU, we can dramatically speed up various subtests.
> 
> This is enabled by the pollable spin batch added in the previous patch.
> 
> v2:
>  * Test gem_wait after reset/wedge and with reset/wedge after a few
>    predefined intervals since gem_wait invocation. (Chris Wilson)
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Antonio Argenziano <antonio.argenziano@intel.com>
> ---
>  lib.tar         | Bin 0 -> 102400 bytes
>  tests/gem_eio.c | 214 ++++++++++++++++++++++++++++++++++++++++++--------------
>  2 files changed, 160 insertions(+), 54 deletions(-)
>  create mode 100644 lib.tar
> 
> diff --git a/tests/gem_eio.c b/tests/gem_eio.c
> index 4bcc5937db39..2f9e954085ec 100644
> --- a/tests/gem_eio.c
> +++ b/tests/gem_eio.c
> @@ -35,6 +35,7 @@
>  #include <inttypes.h>
>  #include <errno.h>
>  #include <sys/ioctl.h>
> +#include <signal.h>
>  
>  #include <drm.h>
>  
> @@ -71,26 +72,23 @@ static void trigger_reset(int fd)
>         gem_quiescent_gpu(fd);
>  }
>  
> -static void wedge_gpu(int fd)
> +static void manual_hang(int drm_fd)
>  {
> -       /* First idle the GPU then disable GPU resets before injecting a hang */
> -       gem_quiescent_gpu(fd);
> -
> -       igt_require(i915_reset_control(false));
> +       int dir = igt_debugfs_dir(drm_fd);
>  
> -       igt_debug("Wedging GPU by injecting hang\n");
> -       igt_post_hang_ring(fd, igt_hang_ring(fd, I915_EXEC_DEFAULT));
> +       igt_sysfs_set(dir, "i915_wedged", "-1");
>  
> -       igt_assert(i915_reset_control(true));
> +       close(dir);
>  }
>  
> -static void wedgeme(int drm_fd)
> +static void wedge_gpu(int fd)
>  {
> -       int dir = igt_debugfs_dir(drm_fd);
> -
> -       igt_sysfs_set(dir, "i915_wedged", "-1");
> +       /* First idle the GPU then disable GPU resets before injecting a hang */
> +       gem_quiescent_gpu(fd);
>  
> -       close(dir);
> +       igt_require(i915_reset_control(false));
> +       manual_hang(fd);
> +       igt_assert(i915_reset_control(true));
>  }
>  
>  static int __gem_throttle(int fd)
> @@ -149,26 +147,111 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
>         return err;
>  }
>  
> -static void test_wait(int fd)
> +static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
> +{
> +       if (gem_can_store_dword(fd, flags))
> +               return __igt_spin_batch_new_poll(fd, ctx, flags);
> +       else
> +               return __igt_spin_batch_new(fd, ctx, flags, 0);
> +}
> +
> +static void __spin_wait(int fd, igt_spin_t *spin)
> +{
> +       if (spin->running) {
> +               igt_spin_busywait_until_running(spin);
> +       } else {
> +               igt_debug("__spin_wait - usleep mode\n");
> +               usleep(500e3); /* Better than nothing! */
> +       }
> +}
> +
> +static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
> +{
> +       igt_spin_t *spin = __spin_poll(fd, ctx, flags);
> +
> +       __spin_wait(fd, spin);
> +
> +       return spin;
> +}
> +
> +static int debugfs_dir = -1;
> +
> +static void hang_handler(int sig)
> +{
> +       igt_sysfs_set(debugfs_dir, "i915_wedged", "-1");
> +}
> +
> +static void hang_after(int fd, unsigned int us)
> +{
> +        struct sigaction sa = { .sa_handler = hang_handler };
> +       struct itimerval itv = { };
> +
> +       debugfs_dir = igt_debugfs_dir(fd);
> +       igt_assert_fd(debugfs_dir);
> +
> +       igt_assert_eq(sigaction(SIGALRM, &sa, NULL), 0);
> +
> +       itv.it_value.tv_sec = us / 1000000;

USEC_PER_SEC.
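I.e., assuming a USEC_PER_SEC constant is available here (or defined
locally), presumably something like:

	itv.it_value.tv_sec = us / USEC_PER_SEC;
	itv.it_value.tv_usec = us % USEC_PER_SEC;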

> +       itv.it_value.tv_usec = us % 1000000;
> +       setitimer(ITIMER_REAL, &itv, NULL);

Ok, that gives a single-shot signal.

I would have used
struct sigevent sev = {
	.sigev_notify = SIGEV_THREAD,
	.sigev_value.sival_int = debugfs_dir,
	.sigev_notify_function = hang_handler
};
timer_create(CLOCK_MONOTONIC, &sev, &timer);
timer_settime(timer, 0, &its, NULL);

Then 

static void hang_handler(union sigval arg)
{
	igt_sysfs_set(arg.sival_int, "i915_wedged", "-1");
}

No signals, nor globals required :)

The problem with using a signal is that it interrupts the gem_wait()
and so we don't actually check that it is being woken by the hang
because it is already awake. Gah.
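
For reference, a self-contained sketch of that timer-based variant could
look roughly like this (untested; assumes <time.h>/<signal.h>, -lrt and a
USEC_PER_SEC define, and that the caller keeps the returned timer_t so it
can timer_delete() it during cleanup):

	static void hang_handler(union sigval arg)
	{
		igt_sysfs_set(arg.sival_int, "i915_wedged", "-1");
	}

	static timer_t hang_after(int debugfs_dir, unsigned int us)
	{
		struct sigevent sev = {
			.sigev_notify = SIGEV_THREAD,
			.sigev_value.sival_int = debugfs_dir,
			.sigev_notify_function = hang_handler,
		};
		struct itimerspec its = {
			.it_value.tv_sec = us / USEC_PER_SEC,
			.it_value.tv_nsec = (us % USEC_PER_SEC) * 1000UL,
		};
		timer_t timer;

		igt_assert_eq(timer_create(CLOCK_MONOTONIC, &sev, &timer), 0);
		igt_assert_eq(timer_settime(timer, 0, &its, NULL), 0);

		return timer;
	}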

> +static void cleanup_hang(void)
> +{
> +       struct itimerval itv = { };
> +
> +       igt_assert_eq(setitimer(ITIMER_REAL, &itv, NULL), 0);

You also need a sleep here as it does not flush inflight signals.
(Also timer_delete :)
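
I.e., something like this for the signal-based version (sketch only, with
an arbitrarily chosen grace period):

	static void cleanup_hang(void)
	{
		struct itimerval itv = { };

		igt_assert_eq(setitimer(ITIMER_REAL, &itv, NULL), 0);

		/*
		 * Disarming the timer does not flush an already-queued
		 * SIGALRM, so give it a moment to be delivered while the
		 * debugfs dir it uses is still open.
		 */
		usleep(10000);

		igt_assert_fd(debugfs_dir);
		close(debugfs_dir);
		debugfs_dir = -1;
	}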

> +       igt_assert_fd(debugfs_dir);
> +       close(debugfs_dir);
> +       debugfs_dir = -1;
> +}
> +
> +static int __check_wait(int fd, uint32_t bo, unsigned int wait)
> +{
> +       unsigned long wait_timeout = 250e6; /* Some margin for actual reset. */
> +       int ret;
> +
> +       if (wait) {
> +               wait_timeout += wait * 2000; /* x2 for safety. */
> +               wait_timeout += 250e6; /* Margin for signal delay. */;
> +               hang_after(fd, wait);
> +       } else {
> +               manual_hang(fd);
> +       }
> +
> +       ret = __gem_wait(fd, bo, wait_timeout);

Ok, I understand where the concern about how long it took to recover
from reset came from :)

> +
> +       if (wait)
> +               cleanup_hang();
> +
> +       return ret;
> +}
> +
> +#define TEST_WEDGE (1)
> +
> +static void test_wait(int fd, unsigned int flags, unsigned int wait)
>  {
> -       igt_hang_t hang;
> +       igt_spin_t *hang;
>  
>         igt_require_gem(fd);
>  
> -       /* If the request we wait on completes due to a hang (even for
> +       /*
> +        * If the request we wait on completes due to a hang (even for
>          * that request), the user expects the return value to 0 (success).
>          */
> -       hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
> -       igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
> -       igt_post_hang_ring(fd, hang);
>  
> -       /* If the GPU is wedged during the wait, again we expect the return
> -        * value to be 0 (success).
> -        */
> -       igt_require(i915_reset_control(false));
> -       hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
> -       igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
> -       igt_post_hang_ring(fd, hang);
> +       if (flags & TEST_WEDGE)
> +               igt_require(i915_reset_control(false));
> +       else
> +               igt_require(i915_reset_control(true));
> +
> +       hang = spin_sync(fd, 0, I915_EXEC_DEFAULT);
> +
> +       igt_assert_eq(__check_wait(fd, hang->handle, wait), 0);
> +
> +       igt_spin_batch_free(fd, hang);
> +
>         igt_require(i915_reset_control(true));
>  
>         trigger_reset(fd);
> @@ -181,7 +264,7 @@ static void test_suspend(int fd, int state)
>  
>         /* Check we can suspend when the driver is already wedged */
>         igt_require(i915_reset_control(false));
> -       wedgeme(fd);
> +       manual_hang(fd);
>  
>         igt_system_suspend_autoresume(state, SUSPEND_TEST_DEVICES);
>  
> @@ -189,7 +272,7 @@ static void test_suspend(int fd, int state)
>         trigger_reset(fd);
>  }
>  
> -static void test_inflight(int fd)
> +static void test_inflight(int fd, unsigned int wait)
>  {
>         const uint32_t bbe = MI_BATCH_BUFFER_END;
>         struct drm_i915_gem_exec_object2 obj[2];
> @@ -209,11 +292,10 @@ static void test_inflight(int fd)
>                 int fence[64]; /* conservative estimate of ring size */
>  
>                 gem_quiescent_gpu(fd);
> -
>                 igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
>                 igt_require(i915_reset_control(false));
>  
> -               hang = __igt_spin_batch_new(fd, 0, engine, 0);
> +               hang = spin_sync(fd, 0, engine);
>                 obj[0].handle = hang->handle;
>  
>                 memset(&execbuf, 0, sizeof(execbuf));
> @@ -227,7 +309,8 @@ static void test_inflight(int fd)
>                         igt_assert(fence[n] != -1);
>                 }
>  
> -               igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
> +               igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
> +
>                 for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
>                         igt_assert_eq(sync_fence_status(fence[n]), -EIO);
>                         close(fence[n]);
> @@ -256,7 +339,7 @@ static void test_inflight_suspend(int fd)
>         obj[1].handle = gem_create(fd, 4096);
>         gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
>  
> -       hang = __igt_spin_batch_new(fd, 0, 0, 0);
> +       hang = spin_sync(fd, 0, 0);
>         obj[0].handle = hang->handle;
>  
>         memset(&execbuf, 0, sizeof(execbuf));
> @@ -273,7 +356,8 @@ static void test_inflight_suspend(int fd)
>         igt_set_autoresume_delay(30);
>         igt_system_suspend_autoresume(SUSPEND_STATE_MEM, SUSPEND_TEST_NONE);
>  
> -       igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
> +       igt_assert_eq(__check_wait(fd, obj[1].handle, 10), 0);
> +
>         for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
>                 igt_assert_eq(sync_fence_status(fence[n]), -EIO);
>                 close(fence[n]);
> @@ -301,7 +385,7 @@ static uint32_t context_create_safe(int i915)
>         return param.ctx_id;
>  }
>  
> -static void test_inflight_contexts(int fd)
> +static void test_inflight_contexts(int fd, unsigned int wait)
>  {
>         struct drm_i915_gem_exec_object2 obj[2];
>         const uint32_t bbe = MI_BATCH_BUFFER_END;
> @@ -330,7 +414,7 @@ static void test_inflight_contexts(int fd)
>                 igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
>                 igt_require(i915_reset_control(false));
>  
> -               hang = __igt_spin_batch_new(fd, 0, engine, 0);
> +               hang = spin_sync(fd, 0, engine);
>                 obj[0].handle = hang->handle;
>  
>                 memset(&execbuf, 0, sizeof(execbuf));
> @@ -345,7 +429,8 @@ static void test_inflight_contexts(int fd)
>                         igt_assert(fence[n] != -1);
>                 }
>  
> -               igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
> +               igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
> +
>                 for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
>                         igt_assert_eq(sync_fence_status(fence[n]), -EIO);
>                         close(fence[n]);
> @@ -375,7 +460,7 @@ static void test_inflight_external(int fd)
>         fence = igt_cork_plug(&cork, fd);
>  
>         igt_require(i915_reset_control(false));
> -       hang = __igt_spin_batch_new(fd, 0, 0, 0);
> +       hang = __spin_poll(fd, 0, 0);
>  
>         memset(&obj, 0, sizeof(obj));
>         obj.handle = gem_create(fd, 4096);
> @@ -393,6 +478,9 @@ static void test_inflight_external(int fd)
>         fence = execbuf.rsvd2 >> 32;
>         igt_assert(fence != -1);
>  
> +       __spin_wait(fd, hang);
> +       manual_hang(fd);
> +
>         gem_sync(fd, hang->handle); /* wedged, with an unready batch */
>         igt_assert(!gem_bo_busy(fd, hang->handle));
>         igt_assert(gem_bo_busy(fd, obj.handle));
> @@ -407,7 +495,7 @@ static void test_inflight_external(int fd)
>         trigger_reset(fd);
>  }
>  
> -static void test_inflight_internal(int fd)
> +static void test_inflight_internal(int fd, unsigned int wait)
>  {
>         struct drm_i915_gem_execbuffer2 execbuf;
>         struct drm_i915_gem_exec_object2 obj[2];
> @@ -420,7 +508,7 @@ static void test_inflight_internal(int fd)
>         igt_require(gem_has_exec_fence(fd));
>  
>         igt_require(i915_reset_control(false));
> -       hang = __igt_spin_batch_new(fd, 0, 0, 0);
> +       hang = spin_sync(fd, 0, 0);
>  
>         memset(obj, 0, sizeof(obj));
>         obj[0].handle = hang->handle;
> @@ -441,7 +529,8 @@ static void test_inflight_internal(int fd)
>                 nfence++;
>         }
>  
> -       igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
> +       igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
> +
>         while (nfence--) {
>                 igt_assert_eq(sync_fence_status(fences[nfence]), -EIO);
>                 close(fences[nfence]);
> @@ -484,29 +573,46 @@ igt_main
>         igt_subtest("execbuf")
>                 test_execbuf(fd);
>  
> -       igt_subtest("wait")
> -               test_wait(fd);
> -
>         igt_subtest("suspend")
>                 test_suspend(fd, SUSPEND_STATE_MEM);
>  
>         igt_subtest("hibernate")
>                 test_suspend(fd, SUSPEND_STATE_DISK);
>  
> -       igt_subtest("in-flight")
> -               test_inflight(fd);
> -
> -       igt_subtest("in-flight-contexts")
> -               test_inflight_contexts(fd);
> -
>         igt_subtest("in-flight-external")
>                 test_inflight_external(fd);
>  
> -       igt_subtest("in-flight-internal") {
> -               igt_skip_on(gem_has_semaphores(fd));
> -               test_inflight_internal(fd);
> -       }
> -
>         igt_subtest("in-flight-suspend")
>                 test_inflight_suspend(fd);
> +
> +       igt_subtest_group {
> +               const struct {
> +                       unsigned int wait;
> +                       const char *name;
> +               } waits[] = {
> +                       { .wait = 0, .name = "immediate" },
> +                       { .wait = 10, .name = "10us" },

i915_request_spin is set to 2us currently :| I guess that's a really hard
window to hit reliably. Maybe we should spin for 200ms just to make
testing easier!

> +                       { .wait = 10000, .name = "10ms" },
> +               };
> +               unsigned int i;
> +
> +               for (i = 0; i < sizeof(waits) / sizeof(waits[0]); i++) {
> +                       igt_subtest_f("wait-%s", waits[i].name)
> +                               test_wait(fd, 0, waits[i].wait);
> +
> +                       igt_subtest_f("wait-wedge-%s", waits[i].name)
> +                               test_wait(fd, TEST_WEDGE, waits[i].wait);

Ok.

> +
> +                       igt_subtest_f("in-flight-%s", waits[i].name)
> +                               test_inflight(fd, waits[i].wait);
> +
> +                       igt_subtest_f("in-flight-contexts-%s", waits[i].name)
> +                               test_inflight_contexts(fd, waits[i].wait);
> +
> +                       igt_subtest_f("in-flight-internal-%s", waits[i].name) {
> +                               igt_skip_on(gem_has_semaphores(fd));
> +                               test_inflight_internal(fd, waits[i].wait);

And ok.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 21+ messages in thread

> +                               test_wait(fd, TEST_WEDGE, waits[i].wait);

Ok.

> +
> +                       igt_subtest_f("in-flight-%s", waits[i].name)
> +                               test_inflight(fd, waits[i].wait);
> +
> +                       igt_subtest_f("in-flight-contexts-%s", waits[i].name)
> +                               test_inflight_contexts(fd, waits[i].wait);
> +
> +                       igt_subtest_f("in-flight-internal-%s", waits[i].name) {
> +                               igt_skip_on(gem_has_semaphores(fd));
> +                               test_inflight_internal(fd, waits[i].wait);

And ok.
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [igt-dev] ✓ Fi.CI.BAT: success for series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch
  2018-03-22 17:24 ` [igt-dev] " Tvrtko Ursulin
                   ` (2 preceding siblings ...)
  (?)
@ 2018-03-22 19:27 ` Patchwork
  -1 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2018-03-22 19:27 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: igt-dev

== Series Details ==

Series: series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch
URL   : https://patchwork.freedesktop.org/series/40498/
State : success

== Summary ==

IGT patchset tested on top of latest successful build
0d5665783284fee1750bc4a9d7a0378cb5ce77fe lib: Run gem_test_engine() in an isolated context

with latest DRM-Tip kernel build CI_DRM_3968
40fcdd23bec7 drm-tip: 2018y-03m-22d-15h-28m-32s UTC integration manifest

Testlist changes:
+igt@gem_eio@in-flight-10ms
+igt@gem_eio@in-flight-10us
+igt@gem_eio@in-flight-contexts-10ms
+igt@gem_eio@in-flight-contexts-10us
+igt@gem_eio@in-flight-contexts-immediate
+igt@gem_eio@in-flight-immediate
+igt@gem_eio@in-flight-internal-10ms
+igt@gem_eio@in-flight-internal-10us
+igt@gem_eio@in-flight-internal-immediate
+igt@gem_eio@wait-10ms
+igt@gem_eio@wait-10us
+igt@gem_eio@wait-immediate
+igt@gem_eio@wait-wedge-10ms
+igt@gem_eio@wait-wedge-10us
+igt@gem_eio@wait-wedge-immediate
-igt@gem_eio@in-flight
-igt@gem_eio@in-flight-contexts
-igt@gem_eio@in-flight-internal
-igt@gem_eio@wait

---- Known issues:

Test debugfs_test:
        Subgroup read_all_entries:
                incomplete -> PASS       (fi-snb-2520m) fdo#103713 +1
Test gem_mmap_gtt:
        Subgroup basic-small-bo-tiledx:
                fail       -> PASS       (fi-gdg-551) fdo#102575
Test kms_pipe_crc_basic:
        Subgroup nonblocking-crc-pipe-b-frame-sequence:
                incomplete -> PASS       (fi-cnl-y3) fdo#103191

fdo#103713 
fdo#102575 
fdo#103191 

fi-bdw-5557u     total:285  pass:264  dwarn:0   dfail:0   fail:0   skip:21  time:431s
fi-bdw-gvtdvm    total:285  pass:261  dwarn:0   dfail:0   fail:0   skip:24  time:445s
fi-blb-e6850     total:285  pass:220  dwarn:1   dfail:0   fail:0   skip:64  time:382s
fi-bsw-n3050     total:285  pass:239  dwarn:0   dfail:0   fail:0   skip:46  time:540s
fi-bwr-2160      total:285  pass:180  dwarn:0   dfail:0   fail:0   skip:105 time:297s
fi-bxt-dsi       total:285  pass:255  dwarn:0   dfail:0   fail:0   skip:30  time:514s
fi-bxt-j4205     total:285  pass:256  dwarn:0   dfail:0   fail:0   skip:29  time:514s
fi-byt-j1900     total:285  pass:250  dwarn:0   dfail:0   fail:0   skip:35  time:521s
fi-byt-n2820     total:285  pass:246  dwarn:0   dfail:0   fail:0   skip:39  time:505s
fi-cfl-8700k     total:285  pass:257  dwarn:0   dfail:0   fail:0   skip:28  time:411s
fi-cfl-u         total:285  pass:259  dwarn:0   dfail:0   fail:0   skip:26  time:510s
fi-cnl-drrs      total:285  pass:254  dwarn:3   dfail:0   fail:0   skip:28  time:541s
fi-cnl-y3        total:285  pass:259  dwarn:0   dfail:0   fail:0   skip:26  time:585s
fi-elk-e7500     total:285  pass:225  dwarn:1   dfail:0   fail:0   skip:59  time:428s
fi-gdg-551       total:285  pass:177  dwarn:0   dfail:0   fail:0   skip:108 time:318s
fi-glk-1         total:285  pass:257  dwarn:0   dfail:0   fail:0   skip:28  time:535s
fi-hsw-4770      total:285  pass:258  dwarn:0   dfail:0   fail:0   skip:27  time:403s
fi-ilk-650       total:285  pass:225  dwarn:0   dfail:0   fail:0   skip:60  time:422s
fi-ivb-3520m     total:285  pass:256  dwarn:0   dfail:0   fail:0   skip:29  time:467s
fi-ivb-3770      total:285  pass:252  dwarn:0   dfail:0   fail:0   skip:33  time:432s
fi-kbl-7500u     total:285  pass:260  dwarn:1   dfail:0   fail:0   skip:24  time:478s
fi-kbl-7567u     total:285  pass:265  dwarn:0   dfail:0   fail:0   skip:20  time:465s
fi-kbl-r         total:285  pass:258  dwarn:0   dfail:0   fail:0   skip:27  time:518s
fi-pnv-d510      total:285  pass:219  dwarn:1   dfail:0   fail:0   skip:65  time:654s
fi-skl-6260u     total:285  pass:265  dwarn:0   dfail:0   fail:0   skip:20  time:436s
fi-skl-6600u     total:285  pass:258  dwarn:0   dfail:0   fail:0   skip:27  time:530s
fi-skl-6700k2    total:285  pass:261  dwarn:0   dfail:0   fail:0   skip:24  time:507s
fi-skl-6770hq    total:285  pass:265  dwarn:0   dfail:0   fail:0   skip:20  time:497s
fi-skl-guc       total:285  pass:257  dwarn:0   dfail:0   fail:0   skip:28  time:431s
fi-skl-gvtdvm    total:285  pass:262  dwarn:0   dfail:0   fail:0   skip:23  time:449s
fi-snb-2520m     total:242  pass:208  dwarn:0   dfail:0   fail:0   skip:33 
fi-snb-2600      total:285  pass:245  dwarn:0   dfail:0   fail:0   skip:40  time:399s

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1183/issues.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [igt-dev] ✗ Fi.CI.IGT: failure for series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch
  2018-03-22 17:24 ` [igt-dev] " Tvrtko Ursulin
                   ` (3 preceding siblings ...)
  (?)
@ 2018-03-22 21:15 ` Patchwork
  -1 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2018-03-22 21:15 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: igt-dev

== Series Details ==

Series: series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch
URL   : https://patchwork.freedesktop.org/series/40498/
State : failure

== Summary ==

---- Possible new issues:

Test gem_pwrite:
        Subgroup big-cpu-forwards:
                pass       -> SKIP       (shard-apl)
Test kms_vblank:
        Subgroup pipe-a-ts-continuation-modeset-hang:
                pass       -> INCOMPLETE (shard-hsw)

---- Known issues:

Test kms_flip:
        Subgroup basic-flip-vs-wf_vblank:
                pass       -> FAIL       (shard-hsw) fdo#100368
        Subgroup dpms-vs-vblank-race:
                fail       -> PASS       (shard-hsw) fdo#103060 +2

fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
fdo#103060 https://bugs.freedesktop.org/show_bug.cgi?id=103060

shard-apl        total:3495 pass:1830 dwarn:1   dfail:0   fail:7   skip:1656 time:12915s
shard-hsw        total:3486 pass:1777 dwarn:1   dfail:0   fail:3   skip:1703 time:11184s
shard-snb        total:3495 pass:1374 dwarn:1   dfail:0   fail:3   skip:2117 time:7197s
Blacklisted hosts:
shard-kbl        total:3477 pass:1926 dwarn:8   dfail:0   fail:10  skip:1532 time:9450s

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1183/shards.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [igt-dev] [PATCH i-g-t 2/3] tests/gem_eio: Speed up test execution
  2018-03-22 17:44     ` Chris Wilson
@ 2018-03-23  9:46       ` Tvrtko Ursulin
  -1 siblings, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2018-03-23  9:46 UTC (permalink / raw)
  To: Chris Wilson, Tvrtko Ursulin, igt-dev; +Cc: Intel-gfx


On 22/03/2018 17:44, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2018-03-22 17:24:16)
>> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>
>> If we stop relying on regular GPU hangs to be detected, but trigger them
>> manually as soon as we know our batch of interest is actually executing
>> on the GPU, we can dramatically speed up various subtests.
>>
>> This is enabled by the pollable spin batch added in the previous patch.
>>
>> v2:
>>   * Test gem_wait after reset/wedge and with reset/wedge after a few
>>     predefined intervals since gem_wait invocation. (Chris Wilson)
>>
>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>> Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
>> Cc: Antonio Argenziano <antonio.argenziano@intel.com>
>> ---
>>   lib.tar         | Bin 0 -> 102400 bytes
>>   tests/gem_eio.c | 214 ++++++++++++++++++++++++++++++++++++++++++--------------
>>   2 files changed, 160 insertions(+), 54 deletions(-)
>>   create mode 100644 lib.tar
>>
>> diff --git a/tests/gem_eio.c b/tests/gem_eio.c
>> index 4bcc5937db39..2f9e954085ec 100644
>> --- a/tests/gem_eio.c
>> +++ b/tests/gem_eio.c
>> @@ -35,6 +35,7 @@
>>   #include <inttypes.h>
>>   #include <errno.h>
>>   #include <sys/ioctl.h>
>> +#include <signal.h>
>>   
>>   #include <drm.h>
>>   
>> @@ -71,26 +72,23 @@ static void trigger_reset(int fd)
>>          gem_quiescent_gpu(fd);
>>   }
>>   
>> -static void wedge_gpu(int fd)
>> +static void manual_hang(int drm_fd)
>>   {
>> -       /* First idle the GPU then disable GPU resets before injecting a hang */
>> -       gem_quiescent_gpu(fd);
>> -
>> -       igt_require(i915_reset_control(false));
>> +       int dir = igt_debugfs_dir(drm_fd);
>>   
>> -       igt_debug("Wedging GPU by injecting hang\n");
>> -       igt_post_hang_ring(fd, igt_hang_ring(fd, I915_EXEC_DEFAULT));
>> +       igt_sysfs_set(dir, "i915_wedged", "-1");
>>   
>> -       igt_assert(i915_reset_control(true));
>> +       close(dir);
>>   }
>>   
>> -static void wedgeme(int drm_fd)
>> +static void wedge_gpu(int fd)
>>   {
>> -       int dir = igt_debugfs_dir(drm_fd);
>> -
>> -       igt_sysfs_set(dir, "i915_wedged", "-1");
>> +       /* First idle the GPU then disable GPU resets before injecting a hang */
>> +       gem_quiescent_gpu(fd);
>>   
>> -       close(dir);
>> +       igt_require(i915_reset_control(false));
>> +       manual_hang(fd);
>> +       igt_assert(i915_reset_control(true));
>>   }
>>   
>>   static int __gem_throttle(int fd)
>> @@ -149,26 +147,111 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
>>          return err;
>>   }
>>   
>> -static void test_wait(int fd)
>> +static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
>> +{
>> +       if (gem_can_store_dword(fd, flags))
>> +               return __igt_spin_batch_new_poll(fd, ctx, flags);
>> +       else
>> +               return __igt_spin_batch_new(fd, ctx, flags, 0);
>> +}
>> +
>> +static void __spin_wait(int fd, igt_spin_t *spin)
>> +{
>> +       if (spin->running) {
>> +               igt_spin_busywait_until_running(spin);
>> +       } else {
>> +               igt_debug("__spin_wait - usleep mode\n");
>> +               usleep(500e3); /* Better than nothing! */
>> +       }
>> +}
>> +
>> +static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
>> +{
>> +       igt_spin_t *spin = __spin_poll(fd, ctx, flags);
>> +
>> +       __spin_wait(fd, spin);
>> +
>> +       return spin;
>> +}
>> +
>> +static int debugfs_dir = -1;
>> +
>> +static void hang_handler(int sig)
>> +{
>> +       igt_sysfs_set(debugfs_dir, "i915_wedged", "-1");
>> +}
>> +
>> +static void hang_after(int fd, unsigned int us)
>> +{
>> +        struct sigaction sa = { .sa_handler = hang_handler };
>> +       struct itimerval itv = { };
>> +
>> +       debugfs_dir = igt_debugfs_dir(fd);
>> +       igt_assert_fd(debugfs_dir);
>> +
>> +       igt_assert_eq(sigaction(SIGALRM, &sa, NULL), 0);
>> +
>> +       itv.it_value.tv_sec = us / 1000000;
> 
> USEC_PER_SEC.
> 
>> +       itv.it_value.tv_usec = us % 1000000;
>> +       setitimer(ITIMER_REAL, &itv, NULL);
> 
> Ok, that gives a single shot signal.
> 
> I would have used
> struct sigevent sev = {
> 	.sigev_notify = SIGEV_THREAD,
> 	.sigev_value.sigval_int = debugfs_dir
> 	.sigev_notify_function = hang_handler
> };
> timer_create(CLOCK_MONOTONIC, &sec, &timer);
> timer_settime(timer, 0, &its, NULL);
> 
> Then
> 
> static void hang_handler(union sigval arg)
> {
> 	igt_sysfs_set(arg.sival_int, "i915_wedged", 1);
> }
> 
> No signals, nor globals required :)

I wasn't familiar with this facility.

It creates a new thread, so any hopes for small microsecond delays might 
be ruined. I can try it if you think it is still worth it?
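
A minimal, untested sketch of that timer_create() approach with the POSIX
spellings fixed up (the union member is sigev_value.sival_int, and
timer_create() takes &sev), reusing igt_sysfs_set(), igt_debugfs_dir() and
USEC_PER_SEC as used elsewhere in this series. It is essentially what the
v3 patch further down ends up doing, minus the timer/fd cleanup in the
handler, and needs -lrt at link time:

	#include <signal.h>
	#include <time.h>

	/* Runs on a helper thread; sival_int carries the debugfs dir fd. */
	static void hang_handler(union sigval arg)
	{
		igt_sysfs_set(arg.sival_int, "i915_wedged", "-1");
	}

	static void hang_after(int fd, unsigned int us)
	{
		struct sigevent sev = {
			.sigev_notify = SIGEV_THREAD,
			.sigev_notify_function = hang_handler,
			.sigev_value.sival_int = igt_debugfs_dir(fd),
		};
		struct itimerspec its = {
			.it_value.tv_sec = us / USEC_PER_SEC,
			.it_value.tv_nsec = (us % USEC_PER_SEC) * 1000,
		};
		timer_t timer;

		igt_assert_eq(timer_create(CLOCK_MONOTONIC, &sev, &timer), 0);
		igt_assert_eq(timer_settime(timer, 0, &its, NULL), 0);
	}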

> The problem with using a signal is that it interrupts the gem_wait()
> and so we don't actually check that it is being woken by the hang
> because it is already awake. Gah.

Hm... if I am following correctly, we end up with -ERESTARTSYS and the
ioctl can get restarted for us if I set SA_RESTART.

At the moment it happens to work because drmIoctl restarts the ioctl
after the signal.

>> +static void cleanup_hang(void)
>> +{
>> +       struct itimerval itv = { };
>> +
>> +       igt_assert_eq(setitimer(ITIMER_REAL, &itv, NULL), 0);
> 
> You also need a sleep here as it does not flush inflight signals.
> (Also timer_destroy :)

I always pass a longer timeout to gem_wait than the signal delay, so it 
should be guaranteed that the signal has fired before gem_wait exits. 
(With the margins in __check_wait above, wait = 10ms gives the timer 10ms 
to fire while gem_wait is allowed roughly 250ms + 2 * 10ms + 250ms.)

Regards,

Tvrtko

> 
>> +       igt_assert_fd(debugfs_dir);
>> +       close(debugfs_dir);
>> +       debugfs_dir = -1;
>> +}
>> +
>> +static int __check_wait(int fd, uint32_t bo, unsigned int wait)
>> +{
>> +       unsigned long wait_timeout = 250e6; /* Some margin for actual reset. */
>> +       int ret;
>> +
>> +       if (wait) {
>> +               wait_timeout += wait * 2000; /* x2 for safety. */
>> +               wait_timeout += 250e6; /* Margin for signal delay. */;
>> +               hang_after(fd, wait);
>> +       } else {
>> +               manual_hang(fd);
>> +       }
>> +
>> +       ret = __gem_wait(fd, bo, wait_timeout);
> 
> Ok, I understand where the concerned about how long it took to recover
> from reset came from :)
> 
>> +
>> +       if (wait)
>> +               cleanup_hang();
>> +
>> +       return ret;
>> +}
>> +
>> +#define TEST_WEDGE (1)
>> +
>> +static void test_wait(int fd, unsigned int flags, unsigned int wait)
>>   {
>> -       igt_hang_t hang;
>> +       igt_spin_t *hang;
>>   
>>          igt_require_gem(fd);
>>   
>> -       /* If the request we wait on completes due to a hang (even for
>> +       /*
>> +        * If the request we wait on completes due to a hang (even for
>>           * that request), the user expects the return value to 0 (success).
>>           */
>> -       hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
>> -       igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
>> -       igt_post_hang_ring(fd, hang);
>>   
>> -       /* If the GPU is wedged during the wait, again we expect the return
>> -        * value to be 0 (success).
>> -        */
>> -       igt_require(i915_reset_control(false));
>> -       hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
>> -       igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
>> -       igt_post_hang_ring(fd, hang);
>> +       if (flags & TEST_WEDGE)
>> +               igt_require(i915_reset_control(false));
>> +       else
>> +               igt_require(i915_reset_control(true));
>> +
>> +       hang = spin_sync(fd, 0, I915_EXEC_DEFAULT);
>> +
>> +       igt_assert_eq(__check_wait(fd, hang->handle, wait), 0);
>> +
>> +       igt_spin_batch_free(fd, hang);
>> +
>>          igt_require(i915_reset_control(true));
>>   
>>          trigger_reset(fd);
>> @@ -181,7 +264,7 @@ static void test_suspend(int fd, int state)
>>   
>>          /* Check we can suspend when the driver is already wedged */
>>          igt_require(i915_reset_control(false));
>> -       wedgeme(fd);
>> +       manual_hang(fd);
>>   
>>          igt_system_suspend_autoresume(state, SUSPEND_TEST_DEVICES);
>>   
>> @@ -189,7 +272,7 @@ static void test_suspend(int fd, int state)
>>          trigger_reset(fd);
>>   }
>>   
>> -static void test_inflight(int fd)
>> +static void test_inflight(int fd, unsigned int wait)
>>   {
>>          const uint32_t bbe = MI_BATCH_BUFFER_END;
>>          struct drm_i915_gem_exec_object2 obj[2];
>> @@ -209,11 +292,10 @@ static void test_inflight(int fd)
>>                  int fence[64]; /* conservative estimate of ring size */
>>   
>>                  gem_quiescent_gpu(fd);
>> -
>>                  igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
>>                  igt_require(i915_reset_control(false));
>>   
>> -               hang = __igt_spin_batch_new(fd, 0, engine, 0);
>> +               hang = spin_sync(fd, 0, engine);
>>                  obj[0].handle = hang->handle;
>>   
>>                  memset(&execbuf, 0, sizeof(execbuf));
>> @@ -227,7 +309,8 @@ static void test_inflight(int fd)
>>                          igt_assert(fence[n] != -1);
>>                  }
>>   
>> -               igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
>> +               igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
>> +
>>                  for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
>>                          igt_assert_eq(sync_fence_status(fence[n]), -EIO);
>>                          close(fence[n]);
>> @@ -256,7 +339,7 @@ static void test_inflight_suspend(int fd)
>>          obj[1].handle = gem_create(fd, 4096);
>>          gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
>>   
>> -       hang = __igt_spin_batch_new(fd, 0, 0, 0);
>> +       hang = spin_sync(fd, 0, 0);
>>          obj[0].handle = hang->handle;
>>   
>>          memset(&execbuf, 0, sizeof(execbuf));
>> @@ -273,7 +356,8 @@ static void test_inflight_suspend(int fd)
>>          igt_set_autoresume_delay(30);
>>          igt_system_suspend_autoresume(SUSPEND_STATE_MEM, SUSPEND_TEST_NONE);
>>   
>> -       igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
>> +       igt_assert_eq(__check_wait(fd, obj[1].handle, 10), 0);
>> +
>>          for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
>>                  igt_assert_eq(sync_fence_status(fence[n]), -EIO);
>>                  close(fence[n]);
>> @@ -301,7 +385,7 @@ static uint32_t context_create_safe(int i915)
>>          return param.ctx_id;
>>   }
>>   
>> -static void test_inflight_contexts(int fd)
>> +static void test_inflight_contexts(int fd, unsigned int wait)
>>   {
>>          struct drm_i915_gem_exec_object2 obj[2];
>>          const uint32_t bbe = MI_BATCH_BUFFER_END;
>> @@ -330,7 +414,7 @@ static void test_inflight_contexts(int fd)
>>                  igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
>>                  igt_require(i915_reset_control(false));
>>   
>> -               hang = __igt_spin_batch_new(fd, 0, engine, 0);
>> +               hang = spin_sync(fd, 0, engine);
>>                  obj[0].handle = hang->handle;
>>   
>>                  memset(&execbuf, 0, sizeof(execbuf));
>> @@ -345,7 +429,8 @@ static void test_inflight_contexts(int fd)
>>                          igt_assert(fence[n] != -1);
>>                  }
>>   
>> -               igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
>> +               igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
>> +
>>                  for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
>>                          igt_assert_eq(sync_fence_status(fence[n]), -EIO);
>>                          close(fence[n]);
>> @@ -375,7 +460,7 @@ static void test_inflight_external(int fd)
>>          fence = igt_cork_plug(&cork, fd);
>>   
>>          igt_require(i915_reset_control(false));
>> -       hang = __igt_spin_batch_new(fd, 0, 0, 0);
>> +       hang = __spin_poll(fd, 0, 0);
>>   
>>          memset(&obj, 0, sizeof(obj));
>>          obj.handle = gem_create(fd, 4096);
>> @@ -393,6 +478,9 @@ static void test_inflight_external(int fd)
>>          fence = execbuf.rsvd2 >> 32;
>>          igt_assert(fence != -1);
>>   
>> +       __spin_wait(fd, hang);
>> +       manual_hang(fd);
>> +
>>          gem_sync(fd, hang->handle); /* wedged, with an unready batch */
>>          igt_assert(!gem_bo_busy(fd, hang->handle));
>>          igt_assert(gem_bo_busy(fd, obj.handle));
>> @@ -407,7 +495,7 @@ static void test_inflight_external(int fd)
>>          trigger_reset(fd);
>>   }
>>   
>> -static void test_inflight_internal(int fd)
>> +static void test_inflight_internal(int fd, unsigned int wait)
>>   {
>>          struct drm_i915_gem_execbuffer2 execbuf;
>>          struct drm_i915_gem_exec_object2 obj[2];
>> @@ -420,7 +508,7 @@ static void test_inflight_internal(int fd)
>>          igt_require(gem_has_exec_fence(fd));
>>   
>>          igt_require(i915_reset_control(false));
>> -       hang = __igt_spin_batch_new(fd, 0, 0, 0);
>> +       hang = spin_sync(fd, 0, 0);
>>   
>>          memset(obj, 0, sizeof(obj));
>>          obj[0].handle = hang->handle;
>> @@ -441,7 +529,8 @@ static void test_inflight_internal(int fd)
>>                  nfence++;
>>          }
>>   
>> -       igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
>> +       igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
>> +
>>          while (nfence--) {
>>                  igt_assert_eq(sync_fence_status(fences[nfence]), -EIO);
>>                  close(fences[nfence]);
>> @@ -484,29 +573,46 @@ igt_main
>>          igt_subtest("execbuf")
>>                  test_execbuf(fd);
>>   
>> -       igt_subtest("wait")
>> -               test_wait(fd);
>> -
>>          igt_subtest("suspend")
>>                  test_suspend(fd, SUSPEND_STATE_MEM);
>>   
>>          igt_subtest("hibernate")
>>                  test_suspend(fd, SUSPEND_STATE_DISK);
>>   
>> -       igt_subtest("in-flight")
>> -               test_inflight(fd);
>> -
>> -       igt_subtest("in-flight-contexts")
>> -               test_inflight_contexts(fd);
>> -
>>          igt_subtest("in-flight-external")
>>                  test_inflight_external(fd);
>>   
>> -       igt_subtest("in-flight-internal") {
>> -               igt_skip_on(gem_has_semaphores(fd));
>> -               test_inflight_internal(fd);
>> -       }
>> -
>>          igt_subtest("in-flight-suspend")
>>                  test_inflight_suspend(fd);
>> +
>> +       igt_subtest_group {
>> +               const struct {
>> +                       unsigned int wait;
>> +                       const char *name;
>> +               } waits[] = {
>> +                       { .wait = 0, .name = "immediate" },
>> +                       { .wait = 10, .name = "10us" },
> 
> i915_request_spin is set to 2us currently :| I guess that's a really hard
> window to hit reliably. Maybe we should spin for 200ms just to make
> testing easier!
> 
>> +                       { .wait = 10000, .name = "10ms" },
>> +               };
>> +               unsigned int i;
>> +
>> +               for (i = 0; i < sizeof(waits) / sizeof(waits[0]); i++) {
>> +                       igt_subtest_f("wait-%s", waits[i].name)
>> +                               test_wait(fd, 0, waits[i].wait);
>> +
>> +                       igt_subtest_f("wait-wedge-%s", waits[i].name)
>> +                               test_wait(fd, TEST_WEDGE, waits[i].wait);
> 
> Ok.
> 
>> +
>> +                       igt_subtest_f("in-flight-%s", waits[i].name)
>> +                               test_inflight(fd, waits[i].wait);
>> +
>> +                       igt_subtest_f("in-flight-contexts-%s", waits[i].name)
>> +                               test_inflight_contexts(fd, waits[i].wait);
>> +
>> +                       igt_subtest_f("in-flight-internal-%s", waits[i].name) {
>> +                               igt_skip_on(gem_has_semaphores(fd));
>> +                               test_inflight_internal(fd, waits[i].wait);
> 
> And ok.
> -Chris
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 21+ messages in thread

* Re: [igt-dev] [PATCH i-g-t 2/3] tests/gem_eio: Speed up test execution
  2018-03-23  9:46       ` [igt-dev] [Intel-gfx] " Tvrtko Ursulin
@ 2018-03-23  9:54         ` Chris Wilson
  -1 siblings, 0 replies; 21+ messages in thread
From: Chris Wilson @ 2018-03-23  9:54 UTC (permalink / raw)
  To: Tvrtko Ursulin, Tvrtko Ursulin, igt-dev; +Cc: Intel-gfx

Quoting Tvrtko Ursulin (2018-03-23 09:46:49)
> 
> On 22/03/2018 17:44, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2018-03-22 17:24:16)
> >> +       itv.it_value.tv_usec = us % 1000000;
> >> +       setitimer(ITIMER_REAL, &itv, NULL);
> > 
> > Ok, that gives a single shot signal.
> > 
> > I would have used
> > struct sigevent sev = {
> >       .sigev_notify = SIGEV_THREAD,
> >       .sigev_value.sigval_int = debugfs_dir
> >       .sigev_notify_function = hang_handler
> > };
> > timer_create(CLOCK_MONOTONIC, &sec, &timer);
> > timer_settime(timer, 0, &its, NULL);
> > 
> > Then
> > 
> > static void hang_handler(union sigval arg)
> > {
> >       igt_sysfs_set(arg.sival_int, "i915_wedged", 1);
> > }
> > 
> > No signals, nor globals required :)
> 
> I wasn't familiar with this facility.
> 
> It creates a new thread, so any hopes for small microsecond delays might 
> be ruined. I can try it if you think it is still worth it?

Yes, still worth a shot. We can always poke glibc for some smarts. Or
chase it ourselves with custom threading. We need an igt_debug() to
actually tell us how long it took to respond (from spin start to
gem_wait return).
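
Something along these lines should do; igt_nsec_elapsed() records the
start time on the first call against a zeroed timespec, the same way
trigger_reset() in v3 below uses it, and the names here are only
illustrative:

	struct timespec ts = { };

	igt_nsec_elapsed(&ts);	/* start the clock just before spin_sync() */
	/* ... spin_sync(), hang_after(wait), __gem_wait() ... */
	igt_debug("response time = %.2fus\n", igt_nsec_elapsed(&ts) / 1000.0);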

> > The problem with using a signal is that it interrupts the gem_wait()
> > and so we don't actually check that it is being woken by the hang
> > because it is already awake. Gah.
> 
> Hm... if I am following correctly, we end up with -ERESTARTSYS and the
> ioctl can get restarted for us if I set SA_RESTART.
> 
> At the moment it happens to work because drmIoctl restarts the ioctl
> after the signal.

Yes. But even automatic -ERESTARTSYS handling still implies we break
i915_request_wait() due to signal_pending_state().

> >> +static void cleanup_hang(void)
> >> +{
> >> +       struct itimerval itv = { };
> >> +
> >> +       igt_assert_eq(setitimer(ITIMER_REAL, &itv, NULL), 0);
> > 
> > You also need a sleep here as it does not flush inflight signals.
> > (Also timer_destroy :)
> 
> I always pass a longer timeout to gem_wait than the signal delay, so it 
> should be guaranteed that the signal has fired before gem_wait exits.

I hadn't considered that. Please leave a comment as the timer
cancellation leaving pending signals inflight is something that keeps
catching me out.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 21+ messages in thread

* [PATCH i-g-t v3 2/3] tests/gem_eio: Speed up test execution
  2018-03-23  9:54         ` [igt-dev] [Intel-gfx] " Chris Wilson
@ 2018-03-23 11:54           ` Tvrtko Ursulin
  -1 siblings, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2018-03-23 11:54 UTC (permalink / raw)
  To: igt-dev; +Cc: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

If we stop relying on regular GPU hangs to be detected, but trigger them
manually as soon as we know our batch of interest is actually executing
on the GPU, we can dramatically speed up various subtests.

This is enabled by the pollable spin batch added in the previous patch.

v2:
 * Test gem_wait after reset/wedge and with reset/wedge after a few
   predefined intervals since gem_wait invocation. (Chris Wilson)

v3:
 Chris Wilson:
 * Decrease short test to 1us.
 * Use POSIX timers instead of signals to avoid interrupting gem_wait.
 * Improve comment.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
---
 tests/Makefile.am |   1 +
 tests/gem_eio.c   | 235 +++++++++++++++++++++++++++++++++++++++++-------------
 tests/meson.build |   8 +-
 3 files changed, 189 insertions(+), 55 deletions(-)

diff --git a/tests/Makefile.am b/tests/Makefile.am
index dbc7be722eb9..f41ad5096349 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -130,6 +130,7 @@ gem_userptr_blits_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
 gem_userptr_blits_LDADD = $(LDADD) -lpthread
 perf_pmu_LDADD = $(LDADD) $(top_builddir)/lib/libigt_perf.la
 
+gem_eio_LDADD = $(LDADD) -lrt
 gem_wait_LDADD = $(LDADD) -lrt
 kms_flip_LDADD = $(LDADD) -lrt -lpthread
 pm_rc6_residency_LDADD = $(LDADD) -lrt
diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 4bcc5937db39..b824d9d4c9c0 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -35,6 +35,8 @@
 #include <inttypes.h>
 #include <errno.h>
 #include <sys/ioctl.h>
+#include <signal.h>
+#include <time.h>
 
 #include <drm.h>
 
@@ -62,6 +64,10 @@ static bool i915_reset_control(bool enable)
 
 static void trigger_reset(int fd)
 {
+	struct timespec ts = { };
+
+	igt_nsec_elapsed(&ts);
+
 	igt_force_gpu_reset(fd);
 
 	/* And just check the gpu is indeed running again */
@@ -69,22 +75,12 @@ static void trigger_reset(int fd)
 	gem_test_engine(fd, ALL_ENGINES);
 
 	gem_quiescent_gpu(fd);
-}
-
-static void wedge_gpu(int fd)
-{
-	/* First idle the GPU then disable GPU resets before injecting a hang */
-	gem_quiescent_gpu(fd);
-
-	igt_require(i915_reset_control(false));
-
-	igt_debug("Wedging GPU by injecting hang\n");
-	igt_post_hang_ring(fd, igt_hang_ring(fd, I915_EXEC_DEFAULT));
 
-	igt_assert(i915_reset_control(true));
+	/* We expect forced reset and health check to be quick. */
+	igt_assert(igt_seconds_elapsed(&ts) < 2);
 }
 
-static void wedgeme(int drm_fd)
+static void manual_hang(int drm_fd)
 {
 	int dir = igt_debugfs_dir(drm_fd);
 
@@ -93,6 +89,16 @@ static void wedgeme(int drm_fd)
 	close(dir);
 }
 
+static void wedge_gpu(int fd)
+{
+	/* First idle the GPU then disable GPU resets before injecting a hang */
+	gem_quiescent_gpu(fd);
+
+	igt_require(i915_reset_control(false));
+	manual_hang(fd);
+	igt_assert(i915_reset_control(true));
+}
+
 static int __gem_throttle(int fd)
 {
 	int err = 0;
@@ -149,26 +155,124 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
 	return err;
 }
 
-static void test_wait(int fd)
+static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
 {
-	igt_hang_t hang;
+	if (gem_can_store_dword(fd, flags))
+		return __igt_spin_batch_new_poll(fd, ctx, flags);
+	else
+		return __igt_spin_batch_new(fd, ctx, flags, 0);
+}
+
+static void __spin_wait(int fd, igt_spin_t *spin)
+{
+	if (spin->running) {
+		igt_spin_busywait_until_running(spin);
+	} else {
+		igt_debug("__spin_wait - usleep mode\n");
+		usleep(500e3); /* Better than nothing! */
+	}
+}
+
+static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
+{
+	igt_spin_t *spin = __spin_poll(fd, ctx, flags);
+
+	__spin_wait(fd, spin);
+
+	return spin;
+}
+
+struct hang_ctx {
+	int debugfs;
+	struct timespec ts;
+	timer_t timer;
+};
+
+static void hang_handler(union sigval arg)
+{
+	struct hang_ctx *ctx = arg.sival_ptr;
+
+	igt_debug("hang delay = %.2fus\n", igt_nsec_elapsed(&ctx->ts) / 1000.0);
+
+	igt_assert(igt_sysfs_set(ctx->debugfs, "i915_wedged", "-1"));
+
+	igt_assert_eq(timer_delete(ctx->timer), 0);
+	close(ctx->debugfs);
+	free(ctx);
+}
+
+static void hang_after(int fd, unsigned int us)
+{
+	struct sigevent sev = {
+		.sigev_notify = SIGEV_THREAD,
+		.sigev_notify_function = hang_handler
+	};
+	struct itimerspec its = {
+		.it_value.tv_sec = us / USEC_PER_SEC,
+		.it_value.tv_nsec = us % USEC_PER_SEC * 1000,
+	};
+	struct hang_ctx *ctx;
+
+	ctx = calloc(1, sizeof(*ctx));
+	igt_assert(ctx);
+
+	ctx->debugfs = igt_debugfs_dir(fd);
+	igt_assert_fd(ctx->debugfs);
+
+	sev.sigev_value.sival_ptr = ctx;
+
+	igt_assert_eq(timer_create(CLOCK_MONOTONIC, &sev, &ctx->timer), 0);
+
+	igt_nsec_elapsed(&ctx->ts);
+
+	igt_assert_eq(timer_settime(ctx->timer, 0, &its, NULL), 0);
+}
+
+static int __check_wait(int fd, uint32_t bo, unsigned int wait)
+{
+	unsigned long wait_timeout = 250e6; /* Some margin for actual reset. */
+	int ret;
+
+	if (wait) {
+		/*
+		 * Double the wait plus some fixed margin to ensure gem_wait
+		 * can never time out before the async hang runs.
+		 */
+		wait_timeout += wait * 2000 + 250e6;
+		hang_after(fd, wait);
+	} else {
+		manual_hang(fd);
+	}
+
+	ret = __gem_wait(fd, bo, wait_timeout);
+
+	return ret;
+}
+
+#define TEST_WEDGE (1)
+
+static void test_wait(int fd, unsigned int flags, unsigned int wait)
+{
+	igt_spin_t *hang;
 
 	igt_require_gem(fd);
 
-	/* If the request we wait on completes due to a hang (even for
+	/*
+	 * If the request we wait on completes due to a hang (even for
 	 * that request), the user expects the return value to 0 (success).
 	 */
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
-	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
-	igt_post_hang_ring(fd, hang);
 
-	/* If the GPU is wedged during the wait, again we expect the return
-	 * value to be 0 (success).
-	 */
-	igt_require(i915_reset_control(false));
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
-	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
-	igt_post_hang_ring(fd, hang);
+	if (flags & TEST_WEDGE)
+		igt_require(i915_reset_control(false));
+	else
+		igt_require(i915_reset_control(true));
+
+	hang = spin_sync(fd, 0, I915_EXEC_DEFAULT);
+
+	igt_assert_eq(__check_wait(fd, hang->handle, wait), 0);
+
+	igt_spin_batch_free(fd, hang);
+
 	igt_require(i915_reset_control(true));
 
 	trigger_reset(fd);
@@ -181,7 +285,7 @@ static void test_suspend(int fd, int state)
 
 	/* Check we can suspend when the driver is already wedged */
 	igt_require(i915_reset_control(false));
-	wedgeme(fd);
+	manual_hang(fd);
 
 	igt_system_suspend_autoresume(state, SUSPEND_TEST_DEVICES);
 
@@ -189,7 +293,7 @@ static void test_suspend(int fd, int state)
 	trigger_reset(fd);
 }
 
-static void test_inflight(int fd)
+static void test_inflight(int fd, unsigned int wait)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_exec_object2 obj[2];
@@ -209,11 +313,10 @@ static void test_inflight(int fd)
 		int fence[64]; /* conservative estimate of ring size */
 
 		gem_quiescent_gpu(fd);
-
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		hang = spin_sync(fd, 0, engine);
 		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
@@ -227,7 +330,8 @@ static void test_inflight(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+		igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
@@ -256,7 +360,7 @@ static void test_inflight_suspend(int fd)
 	obj[1].handle = gem_create(fd, 4096);
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = spin_sync(fd, 0, 0);
 	obj[0].handle = hang->handle;
 
 	memset(&execbuf, 0, sizeof(execbuf));
@@ -273,7 +377,8 @@ static void test_inflight_suspend(int fd)
 	igt_set_autoresume_delay(30);
 	igt_system_suspend_autoresume(SUSPEND_STATE_MEM, SUSPEND_TEST_NONE);
 
-	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	igt_assert_eq(__check_wait(fd, obj[1].handle, 10), 0);
+
 	for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 		igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 		close(fence[n]);
@@ -301,7 +406,7 @@ static uint32_t context_create_safe(int i915)
 	return param.ctx_id;
 }
 
-static void test_inflight_contexts(int fd)
+static void test_inflight_contexts(int fd, unsigned int wait)
 {
 	struct drm_i915_gem_exec_object2 obj[2];
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -330,7 +435,7 @@ static void test_inflight_contexts(int fd)
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		hang = spin_sync(fd, 0, engine);
 		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
@@ -345,7 +450,8 @@ static void test_inflight_contexts(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+		igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
@@ -375,7 +481,7 @@ static void test_inflight_external(int fd)
 	fence = igt_cork_plug(&cork, fd);
 
 	igt_require(i915_reset_control(false));
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = __spin_poll(fd, 0, 0);
 
 	memset(&obj, 0, sizeof(obj));
 	obj.handle = gem_create(fd, 4096);
@@ -393,6 +499,9 @@ static void test_inflight_external(int fd)
 	fence = execbuf.rsvd2 >> 32;
 	igt_assert(fence != -1);
 
+	__spin_wait(fd, hang);
+	manual_hang(fd);
+
 	gem_sync(fd, hang->handle); /* wedged, with an unready batch */
 	igt_assert(!gem_bo_busy(fd, hang->handle));
 	igt_assert(gem_bo_busy(fd, obj.handle));
@@ -407,7 +516,7 @@ static void test_inflight_external(int fd)
 	trigger_reset(fd);
 }
 
-static void test_inflight_internal(int fd)
+static void test_inflight_internal(int fd, unsigned int wait)
 {
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 obj[2];
@@ -420,7 +529,7 @@ static void test_inflight_internal(int fd)
 	igt_require(gem_has_exec_fence(fd));
 
 	igt_require(i915_reset_control(false));
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = spin_sync(fd, 0, 0);
 
 	memset(obj, 0, sizeof(obj));
 	obj[0].handle = hang->handle;
@@ -441,7 +550,8 @@ static void test_inflight_internal(int fd)
 		nfence++;
 	}
 
-	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 	while (nfence--) {
 		igt_assert_eq(sync_fence_status(fences[nfence]), -EIO);
 		close(fences[nfence]);
@@ -484,29 +594,46 @@ igt_main
 	igt_subtest("execbuf")
 		test_execbuf(fd);
 
-	igt_subtest("wait")
-		test_wait(fd);
-
 	igt_subtest("suspend")
 		test_suspend(fd, SUSPEND_STATE_MEM);
 
 	igt_subtest("hibernate")
 		test_suspend(fd, SUSPEND_STATE_DISK);
 
-	igt_subtest("in-flight")
-		test_inflight(fd);
-
-	igt_subtest("in-flight-contexts")
-		test_inflight_contexts(fd);
-
 	igt_subtest("in-flight-external")
 		test_inflight_external(fd);
 
-	igt_subtest("in-flight-internal") {
-		igt_skip_on(gem_has_semaphores(fd));
-		test_inflight_internal(fd);
-	}
-
 	igt_subtest("in-flight-suspend")
 		test_inflight_suspend(fd);
+
+	igt_subtest_group {
+		const struct {
+			unsigned int wait;
+			const char *name;
+		} waits[] = {
+			{ .wait = 0, .name = "immediate" },
+			{ .wait = 1, .name = "1us" },
+			{ .wait = 10000, .name = "10ms" },
+		};
+		unsigned int i;
+
+		for (i = 0; i < sizeof(waits) / sizeof(waits[0]); i++) {
+			igt_subtest_f("wait-%s", waits[i].name)
+				test_wait(fd, 0, waits[i].wait);
+
+			igt_subtest_f("wait-wedge-%s", waits[i].name)
+				test_wait(fd, TEST_WEDGE, waits[i].wait);
+
+			igt_subtest_f("in-flight-%s", waits[i].name)
+				test_inflight(fd, waits[i].wait);
+
+			igt_subtest_f("in-flight-contexts-%s", waits[i].name)
+				test_inflight_contexts(fd, waits[i].wait);
+
+			igt_subtest_f("in-flight-internal-%s", waits[i].name) {
+				igt_skip_on(gem_has_semaphores(fd));
+				test_inflight_internal(fd, waits[i].wait);
+			}
+		}
+	}
 }
diff --git a/tests/meson.build b/tests/meson.build
index 122aefabe0f0..4720dfe21a66 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -40,7 +40,6 @@ test_progs = [
 	'gem_ctx_switch',
 	'gem_ctx_thrash',
 	'gem_double_irq_loop',
-	'gem_eio',
 	'gem_evict_alignment',
 	'gem_evict_everything',
 	'gem_exec_alignment',
@@ -289,6 +288,13 @@ foreach prog : test_progs
 		   install : true)
 endforeach
 
+test_executables += executable('gem_eio', 'gem_eio.c',
+	   dependencies : test_deps + [ realtime ],
+	   install_dir : libexecdir,
+	   install_rpath : rpathdir,
+	   install : true)
+test_progs += 'gem_eio'
+
 test_executables += executable('perf_pmu', 'perf_pmu.c',
 	   dependencies : test_deps + [ lib_igt_perf ],
 	   install_dir : libexecdir,
-- 
2.14.1

* [Intel-gfx] [PATCH i-g-t v3 2/3] tests/gem_eio: Speed up test execution
@ 2018-03-23 11:54           ` Tvrtko Ursulin
  0 siblings, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2018-03-23 11:54 UTC (permalink / raw)
  To: igt-dev; +Cc: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

If we stop relying on regular GPU hangs to be detected, but trigger them
manually as soon as we know our batch of interest is actually executing
on the GPU, we can dramatically speed up various subtests.

This is enabled by the pollable spin batch added in the previous patch.
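
In outline, the approach looks roughly like this (a minimal sketch built
from the helpers this series adds, not the literal test code; the function
name is made up and __gem_wait() stands in for the local ioctl wrapper in
gem_eio.c):

	static void sketch_wedge_while_spinning(int fd)
	{
		igt_spin_t *spin;
		int dir;

		/* Submit a spinner and busy-wait until it is really
		 * executing on the GPU, not merely queued in the driver. */
		spin = __igt_spin_batch_new_poll(fd, 0, I915_EXEC_DEFAULT);
		igt_spin_busywait_until_running(spin);

		/* Declare the GPU wedged immediately instead of waiting
		 * for hangcheck to notice the stuck batch. */
		dir = igt_debugfs_dir(fd);
		igt_sysfs_set(dir, "i915_wedged", "-1");
		close(dir);

		/* The stuck request must still complete successfully. */
		igt_assert_eq(__gem_wait(fd, spin->handle, 250e6), 0);

		igt_spin_batch_free(fd, spin);
	}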

v2:
 * Test gem_wait after reset/wedge and with reset/wedge after a few
   predefined intervals since gem_wait invocation. (Chris Wilson)

v3:
 Chris Wilson:
 * Decrease short test to 1us.
 * Use POSIX timers instead of signals to avoid interrupting gem_wait.
 * Improve comment.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Antonio Argenziano <antonio.argenziano@intel.com>
---
 tests/Makefile.am |   1 +
 tests/gem_eio.c   | 235 +++++++++++++++++++++++++++++++++++++++++-------------
 tests/meson.build |   8 +-
 3 files changed, 189 insertions(+), 55 deletions(-)

diff --git a/tests/Makefile.am b/tests/Makefile.am
index dbc7be722eb9..f41ad5096349 100644
--- a/tests/Makefile.am
+++ b/tests/Makefile.am
@@ -130,6 +130,7 @@ gem_userptr_blits_CFLAGS = $(AM_CFLAGS) $(THREAD_CFLAGS)
 gem_userptr_blits_LDADD = $(LDADD) -lpthread
 perf_pmu_LDADD = $(LDADD) $(top_builddir)/lib/libigt_perf.la
 
+gem_eio_LDADD = $(LDADD) -lrt
 gem_wait_LDADD = $(LDADD) -lrt
 kms_flip_LDADD = $(LDADD) -lrt -lpthread
 pm_rc6_residency_LDADD = $(LDADD) -lrt
diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 4bcc5937db39..b824d9d4c9c0 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -35,6 +35,8 @@
 #include <inttypes.h>
 #include <errno.h>
 #include <sys/ioctl.h>
+#include <signal.h>
+#include <time.h>
 
 #include <drm.h>
 
@@ -62,6 +64,10 @@ static bool i915_reset_control(bool enable)
 
 static void trigger_reset(int fd)
 {
+	struct timespec ts = { };
+
+	igt_nsec_elapsed(&ts);
+
 	igt_force_gpu_reset(fd);
 
 	/* And just check the gpu is indeed running again */
@@ -69,22 +75,12 @@ static void trigger_reset(int fd)
 	gem_test_engine(fd, ALL_ENGINES);
 
 	gem_quiescent_gpu(fd);
-}
-
-static void wedge_gpu(int fd)
-{
-	/* First idle the GPU then disable GPU resets before injecting a hang */
-	gem_quiescent_gpu(fd);
-
-	igt_require(i915_reset_control(false));
-
-	igt_debug("Wedging GPU by injecting hang\n");
-	igt_post_hang_ring(fd, igt_hang_ring(fd, I915_EXEC_DEFAULT));
 
-	igt_assert(i915_reset_control(true));
+	/* We expect forced reset and health check to be quick. */
+	igt_assert(igt_seconds_elapsed(&ts) < 2);
 }
 
-static void wedgeme(int drm_fd)
+static void manual_hang(int drm_fd)
 {
 	int dir = igt_debugfs_dir(drm_fd);
 
@@ -93,6 +89,16 @@ static void wedgeme(int drm_fd)
 	close(dir);
 }
 
+static void wedge_gpu(int fd)
+{
+	/* First idle the GPU then disable GPU resets before injecting a hang */
+	gem_quiescent_gpu(fd);
+
+	igt_require(i915_reset_control(false));
+	manual_hang(fd);
+	igt_assert(i915_reset_control(true));
+}
+
 static int __gem_throttle(int fd)
 {
 	int err = 0;
@@ -149,26 +155,124 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
 	return err;
 }
 
-static void test_wait(int fd)
+static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
 {
-	igt_hang_t hang;
+	if (gem_can_store_dword(fd, flags))
+		return __igt_spin_batch_new_poll(fd, ctx, flags);
+	else
+		return __igt_spin_batch_new(fd, ctx, flags, 0);
+}
+
+static void __spin_wait(int fd, igt_spin_t *spin)
+{
+	if (spin->running) {
+		igt_spin_busywait_until_running(spin);
+	} else {
+		igt_debug("__spin_wait - usleep mode\n");
+		usleep(500e3); /* Better than nothing! */
+	}
+}
+
+static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
+{
+	igt_spin_t *spin = __spin_poll(fd, ctx, flags);
+
+	__spin_wait(fd, spin);
+
+	return spin;
+}
+
+struct hang_ctx {
+	int debugfs;
+	struct timespec ts;
+	timer_t timer;
+};
+
+static void hang_handler(union sigval arg)
+{
+	struct hang_ctx *ctx = arg.sival_ptr;
+
+	igt_debug("hang delay = %.2fus\n", igt_nsec_elapsed(&ctx->ts) / 1000.0);
+
+	igt_assert(igt_sysfs_set(ctx->debugfs, "i915_wedged", "-1"));
+
+	igt_assert_eq(timer_delete(ctx->timer), 0);
+	close(ctx->debugfs);
+	free(ctx);
+}
+
+static void hang_after(int fd, unsigned int us)
+{
+	struct sigevent sev = {
+		.sigev_notify = SIGEV_THREAD,
+		.sigev_notify_function = hang_handler
+	};
+	struct itimerspec its = {
+		.it_value.tv_sec = us / USEC_PER_SEC,
+		.it_value.tv_nsec = us % USEC_PER_SEC * 1000,
+	};
+	struct hang_ctx *ctx;
+
+	ctx = calloc(1, sizeof(*ctx));
+	igt_assert(ctx);
+
+	ctx->debugfs = igt_debugfs_dir(fd);
+	igt_assert_fd(ctx->debugfs);
+
+	sev.sigev_value.sival_ptr = ctx;
+
+	igt_assert_eq(timer_create(CLOCK_MONOTONIC, &sev, &ctx->timer), 0);
+
+	igt_nsec_elapsed(&ctx->ts);
+
+	igt_assert_eq(timer_settime(ctx->timer, 0, &its, NULL), 0);
+}
+
+static int __check_wait(int fd, uint32_t bo, unsigned int wait)
+{
+	unsigned long wait_timeout = 250e6; /* Some margin for actual reset. */
+	int ret;
+
+	if (wait) {
+		/*
+		 * Double the wait plus some fixed margin to ensure gem_wait
+		 * can never time out before the async hang runs.
+		 */
+		wait_timeout += wait * 2000 + 250e6;
+		hang_after(fd, wait);
+	} else {
+		manual_hang(fd);
+	}
+
+	ret = __gem_wait(fd, bo, wait_timeout);
+
+	return ret;
+}
+
+#define TEST_WEDGE (1)
+
+static void test_wait(int fd, unsigned int flags, unsigned int wait)
+{
+	igt_spin_t *hang;
 
 	igt_require_gem(fd);
 
-	/* If the request we wait on completes due to a hang (even for
+	/*
+	 * If the request we wait on completes due to a hang (even for
 	 * that request), the user expects the return value to 0 (success).
 	 */
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
-	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
-	igt_post_hang_ring(fd, hang);
 
-	/* If the GPU is wedged during the wait, again we expect the return
-	 * value to be 0 (success).
-	 */
-	igt_require(i915_reset_control(false));
-	hang = igt_hang_ring(fd, I915_EXEC_DEFAULT);
-	igt_assert_eq(__gem_wait(fd, hang.handle, -1), 0);
-	igt_post_hang_ring(fd, hang);
+	if (flags & TEST_WEDGE)
+		igt_require(i915_reset_control(false));
+	else
+		igt_require(i915_reset_control(true));
+
+	hang = spin_sync(fd, 0, I915_EXEC_DEFAULT);
+
+	igt_assert_eq(__check_wait(fd, hang->handle, wait), 0);
+
+	igt_spin_batch_free(fd, hang);
+
 	igt_require(i915_reset_control(true));
 
 	trigger_reset(fd);
@@ -181,7 +285,7 @@ static void test_suspend(int fd, int state)
 
 	/* Check we can suspend when the driver is already wedged */
 	igt_require(i915_reset_control(false));
-	wedgeme(fd);
+	manual_hang(fd);
 
 	igt_system_suspend_autoresume(state, SUSPEND_TEST_DEVICES);
 
@@ -189,7 +293,7 @@ static void test_suspend(int fd, int state)
 	trigger_reset(fd);
 }
 
-static void test_inflight(int fd)
+static void test_inflight(int fd, unsigned int wait)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_exec_object2 obj[2];
@@ -209,11 +313,10 @@ static void test_inflight(int fd)
 		int fence[64]; /* conservative estimate of ring size */
 
 		gem_quiescent_gpu(fd);
-
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		hang = spin_sync(fd, 0, engine);
 		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
@@ -227,7 +330,8 @@ static void test_inflight(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+		igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
@@ -256,7 +360,7 @@ static void test_inflight_suspend(int fd)
 	obj[1].handle = gem_create(fd, 4096);
 	gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
 
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = spin_sync(fd, 0, 0);
 	obj[0].handle = hang->handle;
 
 	memset(&execbuf, 0, sizeof(execbuf));
@@ -273,7 +377,8 @@ static void test_inflight_suspend(int fd)
 	igt_set_autoresume_delay(30);
 	igt_system_suspend_autoresume(SUSPEND_STATE_MEM, SUSPEND_TEST_NONE);
 
-	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	igt_assert_eq(__check_wait(fd, obj[1].handle, 10), 0);
+
 	for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 		igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 		close(fence[n]);
@@ -301,7 +406,7 @@ static uint32_t context_create_safe(int i915)
 	return param.ctx_id;
 }
 
-static void test_inflight_contexts(int fd)
+static void test_inflight_contexts(int fd, unsigned int wait)
 {
 	struct drm_i915_gem_exec_object2 obj[2];
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -330,7 +435,7 @@ static void test_inflight_contexts(int fd)
 		igt_debug("Starting %s on engine '%s'\n", __func__, e__->name);
 		igt_require(i915_reset_control(false));
 
-		hang = __igt_spin_batch_new(fd, 0, engine, 0);
+		hang = spin_sync(fd, 0, engine);
 		obj[0].handle = hang->handle;
 
 		memset(&execbuf, 0, sizeof(execbuf));
@@ -345,7 +450,8 @@ static void test_inflight_contexts(int fd)
 			igt_assert(fence[n] != -1);
 		}
 
-		igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+		igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 		for (unsigned int n = 0; n < ARRAY_SIZE(fence); n++) {
 			igt_assert_eq(sync_fence_status(fence[n]), -EIO);
 			close(fence[n]);
@@ -375,7 +481,7 @@ static void test_inflight_external(int fd)
 	fence = igt_cork_plug(&cork, fd);
 
 	igt_require(i915_reset_control(false));
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = __spin_poll(fd, 0, 0);
 
 	memset(&obj, 0, sizeof(obj));
 	obj.handle = gem_create(fd, 4096);
@@ -393,6 +499,9 @@ static void test_inflight_external(int fd)
 	fence = execbuf.rsvd2 >> 32;
 	igt_assert(fence != -1);
 
+	__spin_wait(fd, hang);
+	manual_hang(fd);
+
 	gem_sync(fd, hang->handle); /* wedged, with an unready batch */
 	igt_assert(!gem_bo_busy(fd, hang->handle));
 	igt_assert(gem_bo_busy(fd, obj.handle));
@@ -407,7 +516,7 @@ static void test_inflight_external(int fd)
 	trigger_reset(fd);
 }
 
-static void test_inflight_internal(int fd)
+static void test_inflight_internal(int fd, unsigned int wait)
 {
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct drm_i915_gem_exec_object2 obj[2];
@@ -420,7 +529,7 @@ static void test_inflight_internal(int fd)
 	igt_require(gem_has_exec_fence(fd));
 
 	igt_require(i915_reset_control(false));
-	hang = __igt_spin_batch_new(fd, 0, 0, 0);
+	hang = spin_sync(fd, 0, 0);
 
 	memset(obj, 0, sizeof(obj));
 	obj[0].handle = hang->handle;
@@ -441,7 +550,8 @@ static void test_inflight_internal(int fd)
 		nfence++;
 	}
 
-	igt_assert_eq(__gem_wait(fd, obj[1].handle, -1), 0);
+	igt_assert_eq(__check_wait(fd, obj[1].handle, wait), 0);
+
 	while (nfence--) {
 		igt_assert_eq(sync_fence_status(fences[nfence]), -EIO);
 		close(fences[nfence]);
@@ -484,29 +594,46 @@ igt_main
 	igt_subtest("execbuf")
 		test_execbuf(fd);
 
-	igt_subtest("wait")
-		test_wait(fd);
-
 	igt_subtest("suspend")
 		test_suspend(fd, SUSPEND_STATE_MEM);
 
 	igt_subtest("hibernate")
 		test_suspend(fd, SUSPEND_STATE_DISK);
 
-	igt_subtest("in-flight")
-		test_inflight(fd);
-
-	igt_subtest("in-flight-contexts")
-		test_inflight_contexts(fd);
-
 	igt_subtest("in-flight-external")
 		test_inflight_external(fd);
 
-	igt_subtest("in-flight-internal") {
-		igt_skip_on(gem_has_semaphores(fd));
-		test_inflight_internal(fd);
-	}
-
 	igt_subtest("in-flight-suspend")
 		test_inflight_suspend(fd);
+
+	igt_subtest_group {
+		const struct {
+			unsigned int wait;
+			const char *name;
+		} waits[] = {
+			{ .wait = 0, .name = "immediate" },
+			{ .wait = 1, .name = "1us" },
+			{ .wait = 10000, .name = "10ms" },
+		};
+		unsigned int i;
+
+		for (i = 0; i < sizeof(waits) / sizeof(waits[0]); i++) {
+			igt_subtest_f("wait-%s", waits[i].name)
+				test_wait(fd, 0, waits[i].wait);
+
+			igt_subtest_f("wait-wedge-%s", waits[i].name)
+				test_wait(fd, TEST_WEDGE, waits[i].wait);
+
+			igt_subtest_f("in-flight-%s", waits[i].name)
+				test_inflight(fd, waits[i].wait);
+
+			igt_subtest_f("in-flight-contexts-%s", waits[i].name)
+				test_inflight_contexts(fd, waits[i].wait);
+
+			igt_subtest_f("in-flight-internal-%s", waits[i].name) {
+				igt_skip_on(gem_has_semaphores(fd));
+				test_inflight_internal(fd, waits[i].wait);
+			}
+		}
+	}
 }
diff --git a/tests/meson.build b/tests/meson.build
index 122aefabe0f0..4720dfe21a66 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -40,7 +40,6 @@ test_progs = [
 	'gem_ctx_switch',
 	'gem_ctx_thrash',
 	'gem_double_irq_loop',
-	'gem_eio',
 	'gem_evict_alignment',
 	'gem_evict_everything',
 	'gem_exec_alignment',
@@ -289,6 +288,13 @@ foreach prog : test_progs
 		   install : true)
 endforeach
 
+test_executables += executable('gem_eio', 'gem_eio.c',
+	   dependencies : test_deps + [ realtime ],
+	   install_dir : libexecdir,
+	   install_rpath : rpathdir,
+	   install : true)
+test_progs += 'gem_eio'
+
 test_executables += executable('perf_pmu', 'perf_pmu.c',
 	   dependencies : test_deps + [ lib_igt_perf ],
 	   install_dir : libexecdir,
-- 
2.14.1

* Re: [PATCH i-g-t v3 2/3] tests/gem_eio: Speed up test execution
  2018-03-23 11:54           ` [Intel-gfx] " Tvrtko Ursulin
@ 2018-03-23 12:05             ` Chris Wilson
  -1 siblings, 0 replies; 21+ messages in thread
From: Chris Wilson @ 2018-03-23 12:05 UTC (permalink / raw)
  To: Tvrtko Ursulin, igt-dev; +Cc: Intel-gfx

Quoting Tvrtko Ursulin (2018-03-23 11:54:28)
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> If we stop relying on regular GPU hangs to be detected, but trigger them
> manually as soon as we know our batch of interest is actually executing
> on the GPU, we can dramatically speed up various subtests.
> 
> This is enabled by the pollable spin batch added in the previous patch.
> 
> v2:
>  * Test gem_wait after reset/wedge and with reset/wedge after a few
>    predefined intervals since gem_wait invocation. (Chris Wilson)
> 
> v3:
>  Chris Wilson:
>  * Decrease short test to 1us.
>  * Use POSIX timers instead of signals to avoid interrupting gem_wait.
>  * Improve comment.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Antonio Argenziano <antonio.argenziano@intel.com>

I can't think of anything else that might go wrong, so
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris


* [igt-dev] ✓ Fi.CI.BAT: success for series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch (rev2)
  2018-03-22 17:24 ` [igt-dev] " Tvrtko Ursulin
                   ` (4 preceding siblings ...)
  (?)
@ 2018-03-23 20:04 ` Patchwork
  -1 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2018-03-23 20:04 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: igt-dev

== Series Details ==

Series: series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch (rev2)
URL   : https://patchwork.freedesktop.org/series/40498/
State : success

== Summary ==

IGT patchset tested on top of latest successful build
c2ee90774496a9772f17a7a359d7a670bf7d6b85 meson: Chamelium depends on GSL

with latest DRM-Tip kernel build CI_DRM_3976
101f8aec6229 drm-tip: 2018y-03m-23d-17h-52m-01s UTC integration manifest

Testlist changes:
+igt@gem_eio@in-flight-1us
+igt@gem_eio@in-flight-10ms
+igt@gem_eio@in-flight-contexts-1us
+igt@gem_eio@in-flight-contexts-10ms
+igt@gem_eio@in-flight-contexts-immediate
+igt@gem_eio@in-flight-immediate
+igt@gem_eio@in-flight-internal-1us
+igt@gem_eio@in-flight-internal-10ms
+igt@gem_eio@in-flight-internal-immediate
+igt@gem_eio@wait-1us
+igt@gem_eio@wait-10ms
+igt@gem_eio@wait-immediate
+igt@gem_eio@wait-wedge-1us
+igt@gem_eio@wait-wedge-10ms
+igt@gem_eio@wait-wedge-immediate
-igt@gem_eio@in-flight
-igt@gem_eio@in-flight-contexts
-igt@gem_eio@in-flight-internal
-igt@gem_eio@wait

---- Known issues:

Test debugfs_test:
        Subgroup read_all_entries:
                incomplete -> PASS       (fi-snb-2520m) fdo#103713
Test gem_mmap_gtt:
        Subgroup basic-small-bo-tiledx:
                pass       -> FAIL       (fi-gdg-551) fdo#102575
Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-b:
                pass       -> DMESG-WARN (fi-cnl-y3) fdo#104951
Test prime_vgem:
        Subgroup basic-fence-flip:
                fail       -> PASS       (fi-ilk-650) fdo#104008

fdo#103713 https://bugs.freedesktop.org/show_bug.cgi?id=103713
fdo#102575 https://bugs.freedesktop.org/show_bug.cgi?id=102575
fdo#104951 https://bugs.freedesktop.org/show_bug.cgi?id=104951
fdo#104008 https://bugs.freedesktop.org/show_bug.cgi?id=104008

fi-bdw-5557u     total:285  pass:264  dwarn:0   dfail:0   fail:0   skip:21  time:435s
fi-bdw-gvtdvm    total:285  pass:261  dwarn:0   dfail:0   fail:0   skip:24  time:444s
fi-blb-e6850     total:285  pass:220  dwarn:1   dfail:0   fail:0   skip:64  time:385s
fi-bsw-n3050     total:285  pass:239  dwarn:0   dfail:0   fail:0   skip:46  time:539s
fi-bwr-2160      total:285  pass:180  dwarn:0   dfail:0   fail:0   skip:105 time:296s
fi-bxt-dsi       total:285  pass:255  dwarn:0   dfail:0   fail:0   skip:30  time:512s
fi-bxt-j4205     total:285  pass:256  dwarn:0   dfail:0   fail:0   skip:29  time:517s
fi-byt-j1900     total:285  pass:250  dwarn:0   dfail:0   fail:0   skip:35  time:526s
fi-byt-n2820     total:285  pass:246  dwarn:0   dfail:0   fail:0   skip:39  time:510s
fi-cfl-8700k     total:285  pass:257  dwarn:0   dfail:0   fail:0   skip:28  time:410s
fi-cfl-u         total:285  pass:259  dwarn:0   dfail:0   fail:0   skip:26  time:513s
fi-cnl-y3        total:285  pass:258  dwarn:1   dfail:0   fail:0   skip:26  time:592s
fi-elk-e7500     total:285  pass:225  dwarn:1   dfail:0   fail:0   skip:59  time:429s
fi-gdg-551       total:285  pass:176  dwarn:0   dfail:0   fail:1   skip:108 time:318s
fi-glk-1         total:285  pass:257  dwarn:0   dfail:0   fail:0   skip:28  time:539s
fi-hsw-4770      total:285  pass:258  dwarn:0   dfail:0   fail:0   skip:27  time:408s
fi-ilk-650       total:285  pass:225  dwarn:0   dfail:0   fail:0   skip:60  time:423s
fi-ivb-3520m     total:285  pass:256  dwarn:0   dfail:0   fail:0   skip:29  time:475s
fi-ivb-3770      total:285  pass:252  dwarn:0   dfail:0   fail:0   skip:33  time:432s
fi-kbl-7500u     total:285  pass:260  dwarn:1   dfail:0   fail:0   skip:24  time:473s
fi-kbl-7567u     total:285  pass:265  dwarn:0   dfail:0   fail:0   skip:20  time:468s
fi-kbl-r         total:285  pass:258  dwarn:0   dfail:0   fail:0   skip:27  time:518s
fi-pnv-d510      total:285  pass:219  dwarn:1   dfail:0   fail:0   skip:65  time:655s
fi-skl-6260u     total:285  pass:265  dwarn:0   dfail:0   fail:0   skip:20  time:441s
fi-skl-6600u     total:285  pass:258  dwarn:0   dfail:0   fail:0   skip:27  time:531s
fi-skl-6700k2    total:285  pass:261  dwarn:0   dfail:0   fail:0   skip:24  time:506s
fi-skl-6770hq    total:285  pass:265  dwarn:0   dfail:0   fail:0   skip:20  time:498s
fi-skl-guc       total:285  pass:257  dwarn:0   dfail:0   fail:0   skip:28  time:427s
fi-skl-gvtdvm    total:285  pass:262  dwarn:0   dfail:0   fail:0   skip:23  time:448s
fi-snb-2520m     total:285  pass:245  dwarn:0   dfail:0   fail:0   skip:40  time:575s
fi-snb-2600      total:285  pass:245  dwarn:0   dfail:0   fail:0   skip:40  time:408s
Blacklisted hosts:
fi-cfl-s3        total:285  pass:259  dwarn:0   dfail:0   fail:0   skip:26  time:569s
fi-cnl-psr       total:224  pass:198  dwarn:0   dfail:0   fail:1   skip:24 
fi-glk-j4005     total:285  pass:256  dwarn:0   dfail:0   fail:0   skip:29  time:493s

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1185/issues.html

* [igt-dev] ✓ Fi.CI.IGT: success for series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch (rev2)
  2018-03-22 17:24 ` [igt-dev] " Tvrtko Ursulin
                   ` (5 preceding siblings ...)
  (?)
@ 2018-03-24  1:09 ` Patchwork
  -1 siblings, 0 replies; 21+ messages in thread
From: Patchwork @ 2018-03-24  1:09 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: igt-dev

== Series Details ==

Series: series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch (rev2)
URL   : https://patchwork.freedesktop.org/series/40498/
State : success

== Summary ==

---- Possible new issues:

Test pm_rc6_residency:
        Subgroup rc6-accuracy:
                skip       -> PASS       (shard-snb)

---- Known issues:

Test kms_flip:
        Subgroup 2x-plain-flip-fb-recreate-interruptible:
                fail       -> PASS       (shard-hsw) fdo#100368 +2
Test kms_rotation_crc:
        Subgroup sprite-rotation-180:
                pass       -> FAIL       (shard-snb) fdo#103925

fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
fdo#103925 https://bugs.freedesktop.org/show_bug.cgi?id=103925

shard-apl        total:3495 pass:1831 dwarn:1   dfail:0   fail:7   skip:1655 time:12860s
shard-hsw        total:3495 pass:1783 dwarn:1   dfail:0   fail:1   skip:1709 time:11791s
shard-snb        total:3495 pass:1374 dwarn:1   dfail:0   fail:4   skip:2116 time:7026s
Blacklisted hosts:
shard-kbl        total:3495 pass:1952 dwarn:1   dfail:0   fail:10  skip:1532 time:9745s

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1185/shards.html

* [PATCH i-g-t 1/3] lib/dummyload: Add pollable spin batch
@ 2018-03-22 11:17 Tvrtko Ursulin
  0 siblings, 0 replies; 21+ messages in thread
From: Tvrtko Ursulin @ 2018-03-22 11:17 UTC (permalink / raw)
  To: igt-dev; +Cc: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Pollable spin batch exports a spin->running pointer which can be checked
by dereferencing it to see if the spinner is actually executing on the
GPU.

This is useful for tests which want to make sure they do not proceed with
their next step whilst the spinner is potentially only being processed by
the driver and not actually executing.

A pollable spinner can be created with igt_spin_batch_new_poll or
__igt_spin_batch_new_poll, after which a loop similar to:

	while (!*(volatile bool *)spin->running)
		;

can be used to wait until the spinner starts executing.
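
For example (illustrative sketch only; the engine choice is arbitrary):

	igt_spin_t *spin = igt_spin_batch_new_poll(fd, 0, I915_EXEC_RENDER);

	/* Do not proceed until the spinner is running on the GPU. */
	while (!*(volatile bool *)spin->running)
		;

	/* ... exercise whatever needs a guaranteed-busy engine ... */

	igt_spin_batch_end(spin);
	igt_spin_batch_free(fd, spin);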

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 lib/igt_dummyload.c  | 192 ++++++++++++++++++++++++++++++++++++++++-----------
 lib/igt_dummyload.h  |  11 +++
 lib/igt_gt.c         |   2 +-
 lib/ioctl_wrappers.c |   2 +-
 lib/ioctl_wrappers.h |   1 +
 5 files changed, 167 insertions(+), 41 deletions(-)

diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index dbc92e8f2951..98ab7ac2c6e9 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -74,35 +74,48 @@ fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
 	reloc->write_domain = write_domains;
 }
 
-static int emit_recursive_batch(igt_spin_t *spin,
-				int fd, uint32_t ctx, unsigned engine,
-				uint32_t dep, bool out_fence)
+#define OUT_FENCE	(1 << 0)
+#define POLL_RUN	(1 << 1)
+
+static int
+emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
+		     uint32_t dep, unsigned int flags)
 {
 #define SCRATCH 0
 #define BATCH 1
 	const int gen = intel_gen(intel_get_drm_devid(fd));
-	struct drm_i915_gem_exec_object2 obj[2];
-	struct drm_i915_gem_relocation_entry relocs[2];
-	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_relocation_entry relocs[2], *r;
+	struct drm_i915_gem_execbuffer2 *execbuf;
+	struct drm_i915_gem_exec_object2 *obj;
 	unsigned int engines[16];
 	unsigned int nengine;
 	int fence_fd = -1;
-	uint32_t *batch;
+	uint32_t *batch, *batch_start;
 	int i;
 
 	nengine = 0;
 	if (engine == ALL_ENGINES) {
-		for_each_engine(fd, engine)
-			if (engine)
+		for_each_engine(fd, engine) {
+			if (engine) {
+			if (flags & POLL_RUN)
+				igt_require(!(flags & POLL_RUN) ||
+					    gem_can_store_dword(fd, engine));
+
 				engines[nengine++] = engine;
+			}
+		}
 	} else {
 		gem_require_ring(fd, engine);
+		igt_require(!(flags & POLL_RUN) ||
+			    gem_can_store_dword(fd, engine));
 		engines[nengine++] = engine;
 	}
 	igt_require(nengine);
 
-	memset(&execbuf, 0, sizeof(execbuf));
-	memset(obj, 0, sizeof(obj));
+	memset(&spin->execbuf, 0, sizeof(spin->execbuf));
+	execbuf = &spin->execbuf;
+	memset(spin->obj, 0, sizeof(spin->obj));
+	obj = spin->obj;
 	memset(relocs, 0, sizeof(relocs));
 
 	obj[BATCH].handle = gem_create(fd, BATCH_SIZE);
@@ -113,19 +126,66 @@ static int emit_recursive_batch(igt_spin_t *spin,
 				       	BATCH_SIZE, PROT_WRITE);
 	gem_set_domain(fd, obj[BATCH].handle,
 			I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
-	execbuf.buffer_count++;
+	execbuf->buffer_count++;
+	batch_start = batch;
 
 	if (dep) {
+		igt_assert(!(flags & POLL_RUN));
+
 		/* dummy write to dependency */
 		obj[SCRATCH].handle = dep;
 		fill_reloc(&relocs[obj[BATCH].relocation_count++],
 			   dep, 1020,
 			   I915_GEM_DOMAIN_RENDER,
 			   I915_GEM_DOMAIN_RENDER);
-		execbuf.buffer_count++;
+		execbuf->buffer_count++;
+	} else if (flags & POLL_RUN) {
+		unsigned int offset;
+
+		igt_assert(!dep);
+
+		if (gen == 4 || gen == 5)
+			execbuf->flags |= I915_EXEC_SECURE;
+
+		spin->poll_handle = gem_create(fd, 4096);
+
+		if (__gem_set_caching(fd, spin->poll_handle,
+				      I915_CACHING_CACHED) == 0)
+			spin->running = __gem_mmap__cpu(fd, spin->poll_handle,
+							0, 4096,
+							PROT_READ | PROT_WRITE);
+		else
+			spin->running = __gem_mmap__wc(fd, spin->poll_handle,
+						       0, 4096,
+						       PROT_READ | PROT_WRITE);
+		igt_assert(spin->running);
+		igt_assert_eq(*spin->running, 0);
+
+		*batch++ = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+
+		if (gen >= 8) {
+			offset = 1;
+			*batch++ = 0;
+			*batch++ = 0;
+		} else if (gen >= 4) {
+			offset = 2;
+			*batch++ = 0;
+			*batch++ = 0;
+		} else {
+			offset = 1;
+			batch[-1]--;
+			*batch++ = 0;
+		}
+
+		*batch++ = 1;
+
+		obj[SCRATCH].handle = spin->poll_handle;
+		fill_reloc(&relocs[obj[BATCH].relocation_count++],
+			   spin->poll_handle, offset, 0, 0);
+		execbuf->buffer_count++;
 	}
 
-	spin->batch = batch;
+	spin->batch = batch = batch_start + 64 / sizeof(*batch);
 	spin->handle = obj[BATCH].handle;
 
 	/* Allow ourselves to be preempted */
@@ -145,40 +205,42 @@ static int emit_recursive_batch(igt_spin_t *spin,
 	batch += 1000;
 
 	/* recurse */
-	fill_reloc(&relocs[obj[BATCH].relocation_count],
-		   obj[BATCH].handle, (batch - spin->batch) + 1,
-		   I915_GEM_DOMAIN_COMMAND, 0);
+	r = &relocs[obj[BATCH].relocation_count++];
+	r->target_handle = obj[BATCH].handle;
+	r->offset = (batch + 1 - batch_start) * sizeof(*batch);
+	r->read_domains = I915_GEM_DOMAIN_COMMAND;
+	r->delta = 64;
 	if (gen >= 8) {
 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8 | 1;
-		*batch++ = 0;
+		*batch++ = r->delta;
 		*batch++ = 0;
 	} else if (gen >= 6) {
 		*batch++ = MI_BATCH_BUFFER_START | 1 << 8;
-		*batch++ = 0;
+		*batch++ = r->delta;
 	} else {
 		*batch++ = MI_BATCH_BUFFER_START | 2 << 6;
-		*batch = 0;
-		if (gen < 4) {
-			*batch |= 1;
-			relocs[obj[BATCH].relocation_count].delta = 1;
-		}
+		if (gen < 4)
+			r->delta |= 1;
+		*batch = r->delta;
 		batch++;
 	}
-	obj[BATCH].relocation_count++;
 	obj[BATCH].relocs_ptr = to_user_pointer(relocs);
 
-	execbuf.buffers_ptr = to_user_pointer(obj + (2 - execbuf.buffer_count));
-	execbuf.rsvd1 = ctx;
+	execbuf->buffers_ptr = to_user_pointer(obj +
+					       (2 - execbuf->buffer_count));
+	execbuf->rsvd1 = ctx;
 
-	if (out_fence)
-		execbuf.flags |= I915_EXEC_FENCE_OUT;
+	if (flags & OUT_FENCE)
+		execbuf->flags |= I915_EXEC_FENCE_OUT;
 
 	for (i = 0; i < nengine; i++) {
-		execbuf.flags &= ~ENGINE_MASK;
-		execbuf.flags |= engines[i];
-		gem_execbuf_wr(fd, &execbuf);
-		if (out_fence) {
-			int _fd = execbuf.rsvd2 >> 32;
+		execbuf->flags &= ~ENGINE_MASK;
+		execbuf->flags |= engines[i];
+
+		gem_execbuf_wr(fd, execbuf);
+
+		if (flags & OUT_FENCE) {
+			int _fd = execbuf->rsvd2 >> 32;
 
 			igt_assert(_fd >= 0);
 			if (fence_fd == -1) {
@@ -194,12 +256,20 @@ static int emit_recursive_batch(igt_spin_t *spin,
 		}
 	}
 
+	/* Make it easier for callers to resubmit. */
+
+	obj[BATCH].relocation_count = 0;
+	obj[BATCH].relocs_ptr = 0;
+
+	obj[SCRATCH].flags = EXEC_OBJECT_PINNED;
+	obj[BATCH].flags = EXEC_OBJECT_PINNED;
+
 	return fence_fd;
 }
 
 static igt_spin_t *
 ___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
-		      int out_fence)
+		      unsigned int flags)
 {
 	igt_spin_t *spin;
 
@@ -207,7 +277,7 @@ ___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
 	igt_assert(spin);
 
 	spin->out_fence = emit_recursive_batch(spin, fd, ctx, engine, dep,
-					       out_fence);
+					       flags);
 
 	pthread_mutex_lock(&list_lock);
 	igt_list_add(&spin->link, &spin_list);
@@ -219,7 +289,7 @@ ___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
 igt_spin_t *
 __igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
 {
-	return ___igt_spin_batch_new(fd, ctx, engine, dep, false);
+	return ___igt_spin_batch_new(fd, ctx, engine, dep, 0);
 }
 
 /**
@@ -253,7 +323,7 @@ igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
 igt_spin_t *
 __igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine)
 {
-	return ___igt_spin_batch_new(fd, ctx, engine, 0, true);
+	return ___igt_spin_batch_new(fd, ctx, engine, 0, OUT_FENCE);
 }
 
 /**
@@ -286,6 +356,42 @@ igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine)
 	return spin;
 }
 
+igt_spin_t *
+__igt_spin_batch_new_poll(int fd, uint32_t ctx, unsigned engine)
+{
+	return ___igt_spin_batch_new(fd, ctx, engine, 0, POLL_RUN);
+}
+
+/**
+ * igt_spin_batch_new_poll:
+ * @fd: open i915 drm file descriptor
+ * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
+ *          than 0, execute on all available rings.
+ *
+ * Start a recursive batch on a ring. Immediately returns a #igt_spin_t that
+ * contains the batch's handle that can be waited upon. The returned structure
+ * must be passed to igt_spin_batch_free() for post-processing.
+ *
+ * igt_spin_t->running will contain a pointer whose target will change from
+ * zero to one once the spinner actually starts executing on the GPU.
+ *
+ * Returns:
+ * Structure with helper internal state for igt_spin_batch_free().
+ */
+igt_spin_t *
+igt_spin_batch_new_poll(int fd, uint32_t ctx, unsigned engine)
+{
+	igt_spin_t *spin;
+
+	igt_require_gem(fd);
+	igt_require(gem_mmap__has_wc(fd));
+
+	spin = __igt_spin_batch_new_poll(fd, ctx, engine);
+	igt_assert(gem_bo_busy(fd, spin->handle));
+
+	return spin;
+}
+
 static void notify(union sigval arg)
 {
 	igt_spin_t *spin = arg.sival_ptr;
@@ -340,6 +446,8 @@ void igt_spin_batch_end(igt_spin_t *spin)
 	if (!spin)
 		return;
 
+	igt_assert(*spin->batch == MI_ARB_CHK ||
+		   *spin->batch == MI_BATCH_BUFFER_END);
 	*spin->batch = MI_BATCH_BUFFER_END;
 	__sync_synchronize();
 }
@@ -365,7 +473,13 @@ void igt_spin_batch_free(int fd, igt_spin_t *spin)
 		timer_delete(spin->timer);
 
 	igt_spin_batch_end(spin);
-	gem_munmap(spin->batch, BATCH_SIZE);
+	gem_munmap((void *)((unsigned long)spin->batch & (~4095UL)),
+		   BATCH_SIZE);
+
+	if (spin->running) {
+		gem_munmap(spin->running, 4096);
+		gem_close(fd, spin->poll_handle);
+	}
 
 	gem_close(fd, spin->handle);
 
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index 4103e4ab9e36..3103935a309b 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -36,6 +36,10 @@ typedef struct igt_spin {
 	struct igt_list link;
 	uint32_t *batch;
 	int out_fence;
+	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_execbuffer2 execbuf;
+	uint32_t poll_handle;
+	bool *running;
 } igt_spin_t;
 
 igt_spin_t *__igt_spin_batch_new(int fd,
@@ -55,6 +59,13 @@ igt_spin_t *igt_spin_batch_new_fence(int fd,
 				     uint32_t ctx,
 				     unsigned engine);
 
+igt_spin_t *__igt_spin_batch_new_poll(int fd,
+				       uint32_t ctx,
+				       unsigned engine);
+igt_spin_t *igt_spin_batch_new_poll(int fd,
+				    uint32_t ctx,
+				    unsigned engine);
+
 void igt_spin_batch_set_timeout(igt_spin_t *spin, int64_t ns);
 void igt_spin_batch_end(igt_spin_t *spin);
 void igt_spin_batch_free(int fd, igt_spin_t *spin);
diff --git a/lib/igt_gt.c b/lib/igt_gt.c
index 01aebc670862..4569fd36bd85 100644
--- a/lib/igt_gt.c
+++ b/lib/igt_gt.c
@@ -609,7 +609,7 @@ bool gem_can_store_dword(int fd, unsigned int engine)
 	if (gen == 3 && (info->is_grantsdale || info->is_alviso))
 		return false; /* only supports physical addresses */
 
-	if (gen == 6 && (engine & ~(3<<13)) == I915_EXEC_BSD)
+	if (gen == 6 && ((engine & 0x3f) == I915_EXEC_BSD))
 		return false; /* kills the machine! */
 
 	if (info->is_broadwater)
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 8748cfcfc04f..4e1a08bf06b4 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -198,7 +198,7 @@ void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride)
 	igt_assert(__gem_set_tiling(fd, handle, tiling, stride) == 0);
 }
 
-static int __gem_set_caching(int fd, uint32_t handle, uint32_t caching)
+int __gem_set_caching(int fd, uint32_t handle, uint32_t caching)
 {
 	struct drm_i915_gem_caching arg;
 	int err;
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index 13fbe3c103c0..b966f72c90a8 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -61,6 +61,7 @@ bool gem_get_tiling(int fd, uint32_t handle, uint32_t *tiling, uint32_t *swizzle
 void gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride);
 int __gem_set_tiling(int fd, uint32_t handle, uint32_t tiling, uint32_t stride);
 
+int __gem_set_caching(int fd, uint32_t handle, uint32_t caching);
 void gem_set_caching(int fd, uint32_t handle, uint32_t caching);
 uint32_t gem_get_caching(int fd, uint32_t handle);
 uint32_t gem_flink(int fd, uint32_t handle);
-- 
2.14.1


end of thread, other threads:[~2018-03-24  1:09 UTC | newest]

Thread overview: 21+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-03-22 17:24 [PATCH i-g-t 1/3] lib/dummyload: Add pollable spin batch Tvrtko Ursulin
2018-03-22 17:24 ` [igt-dev] " Tvrtko Ursulin
2018-03-22 17:24 ` [PATCH i-g-t 2/3] tests/gem_eio: Speed up test execution Tvrtko Ursulin
2018-03-22 17:24   ` [igt-dev] " Tvrtko Ursulin
2018-03-22 17:44   ` Chris Wilson
2018-03-22 17:44     ` Chris Wilson
2018-03-23  9:46     ` Tvrtko Ursulin
2018-03-23  9:46       ` [igt-dev] [Intel-gfx] " Tvrtko Ursulin
2018-03-23  9:54       ` [igt-dev] " Chris Wilson
2018-03-23  9:54         ` [igt-dev] [Intel-gfx] " Chris Wilson
2018-03-23 11:54         ` [PATCH i-g-t v3 " Tvrtko Ursulin
2018-03-23 11:54           ` [Intel-gfx] " Tvrtko Ursulin
2018-03-23 12:05           ` Chris Wilson
2018-03-23 12:05             ` [igt-dev] [Intel-gfx] " Chris Wilson
2018-03-22 17:24 ` [PATCH i-g-t 3/3] tests/perf_pmu: Improve accuracy by waiting on spinner to start Tvrtko Ursulin
2018-03-22 17:24   ` [Intel-gfx] " Tvrtko Ursulin
2018-03-22 19:27 ` [igt-dev] ✓ Fi.CI.BAT: success for series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch Patchwork
2018-03-22 21:15 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
2018-03-23 20:04 ` [igt-dev] ✓ Fi.CI.BAT: success for series starting with [i-g-t,1/3] lib/dummyload: Add pollable spin batch (rev2) Patchwork
2018-03-24  1:09 ` [igt-dev] ✓ Fi.CI.IGT: " Patchwork
  -- strict thread matches above, loose matches on Subject: below --
2018-03-22 11:17 [PATCH i-g-t 1/3] lib/dummyload: Add pollable spin batch Tvrtko Ursulin
