* [PATCH i-g-t 1/2] lib: Convert spin batch constructor to a factory
@ 2018-06-25 13:13 ` Chris Wilson
From: Chris Wilson @ 2018-06-25 13:13 UTC
  To: intel-gfx; +Cc: igt-dev

In order to make adding more options easier, expose the full set of
options to the caller.
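
For example (an illustrative sketch rather than part of the diff below; fd,
ctx and ring stand for whatever the test already has in scope), callers now
name only the options they care about instead of passing a fixed positional
list:

	spin = __igt_spin_batch_new(fd,
				    .ctx = ctx,
				    .engine = ring);

	spin = igt_spin_batch_new(fd,
				  .engine = ring,
				  .flags = IGT_SPIN_FENCE_OUT);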

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 lib/igt_dummyload.c            | 147 +++++++++------------------------
 lib/igt_dummyload.h            |  42 +++++-----
 tests/drv_missed_irq.c         |   2 +-
 tests/gem_busy.c               |  17 ++--
 tests/gem_ctx_isolation.c      |  26 +++---
 tests/gem_eio.c                |  13 ++-
 tests/gem_exec_fence.c         |  16 ++--
 tests/gem_exec_latency.c       |  18 +++-
 tests/gem_exec_nop.c           |   4 +-
 tests/gem_exec_reloc.c         |  10 ++-
 tests/gem_exec_schedule.c      |  27 ++++--
 tests/gem_exec_suspend.c       |   2 +-
 tests/gem_fenced_exec_thrash.c |   2 +-
 tests/gem_shrink.c             |   4 +-
 tests/gem_spin_batch.c         |   4 +-
 tests/gem_sync.c               |   5 +-
 tests/gem_wait.c               |   4 +-
 tests/kms_busy.c               |  10 ++-
 tests/kms_cursor_legacy.c      |   7 +-
 tests/perf_pmu.c               |  33 +++++---
 tests/pm_rps.c                 |   9 +-
 21 files changed, 189 insertions(+), 213 deletions(-)

diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 3809b4e61..94efdf745 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -75,12 +75,9 @@ fill_reloc(struct drm_i915_gem_relocation_entry *reloc,
 	reloc->write_domain = write_domains;
 }
 
-#define OUT_FENCE	(1 << 0)
-#define POLL_RUN	(1 << 1)
-
 static int
-emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
-		     uint32_t dep, unsigned int flags)
+emit_recursive_batch(igt_spin_t *spin,
+		     int fd, const struct igt_spin_factory *opts)
 {
 #define SCRATCH 0
 #define BATCH 1
@@ -95,21 +92,18 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 	int i;
 
 	nengine = 0;
-	if (engine == ALL_ENGINES) {
-		for_each_engine(fd, engine) {
-			if (engine) {
-			if (flags & POLL_RUN)
-				igt_require(!(flags & POLL_RUN) ||
-					    gem_can_store_dword(fd, engine));
-
-				engines[nengine++] = engine;
-			}
+	if (opts->engine == ALL_ENGINES) {
+		unsigned int engine;
+
+		for_each_physical_engine(fd, engine) {
+			if (opts->flags & IGT_SPIN_POLL_RUN &&
+			    !gem_can_store_dword(fd, engine))
+				continue;
+
+			engines[nengine++] = engine;
 		}
 	} else {
-		gem_require_ring(fd, engine);
-		igt_require(!(flags & POLL_RUN) ||
-			    gem_can_store_dword(fd, engine));
-		engines[nengine++] = engine;
+		engines[nengine++] = opts->engine;
 	}
 	igt_require(nengine);
 
@@ -130,20 +124,20 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 	execbuf->buffer_count++;
 	batch_start = batch;
 
-	if (dep) {
-		igt_assert(!(flags & POLL_RUN));
+	if (opts->dependency) {
+		igt_assert(!(opts->flags & IGT_SPIN_POLL_RUN));
 
 		/* dummy write to dependency */
-		obj[SCRATCH].handle = dep;
+		obj[SCRATCH].handle = opts->dependency;
 		fill_reloc(&relocs[obj[BATCH].relocation_count++],
-			   dep, 1020,
+			   opts->dependency, 1020,
 			   I915_GEM_DOMAIN_RENDER,
 			   I915_GEM_DOMAIN_RENDER);
 		execbuf->buffer_count++;
-	} else if (flags & POLL_RUN) {
+	} else if (opts->flags & IGT_SPIN_POLL_RUN) {
 		unsigned int offset;
 
-		igt_assert(!dep);
+		igt_assert(!opts->dependency);
 
 		if (gen == 4 || gen == 5) {
 			execbuf->flags |= I915_EXEC_SECURE;
@@ -231,9 +225,9 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 
 	execbuf->buffers_ptr = to_user_pointer(obj +
 					       (2 - execbuf->buffer_count));
-	execbuf->rsvd1 = ctx;
+	execbuf->rsvd1 = opts->ctx;
 
-	if (flags & OUT_FENCE)
+	if (opts->flags & IGT_SPIN_FENCE_OUT)
 		execbuf->flags |= I915_EXEC_FENCE_OUT;
 
 	for (i = 0; i < nengine; i++) {
@@ -242,7 +236,7 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 
 		gem_execbuf_wr(fd, execbuf);
 
-		if (flags & OUT_FENCE) {
+		if (opts->flags & IGT_SPIN_FENCE_OUT) {
 			int _fd = execbuf->rsvd2 >> 32;
 
 			igt_assert(_fd >= 0);
@@ -271,16 +265,14 @@ emit_recursive_batch(igt_spin_t *spin, int fd, uint32_t ctx, unsigned engine,
 }
 
 static igt_spin_t *
-___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
-		      unsigned int flags)
+spin_batch_create(int fd, const struct igt_spin_factory *opts)
 {
 	igt_spin_t *spin;
 
 	spin = calloc(1, sizeof(struct igt_spin));
 	igt_assert(spin);
 
-	spin->out_fence = emit_recursive_batch(spin, fd, ctx, engine, dep,
-					       flags);
+	spin->out_fence = emit_recursive_batch(spin, fd, opts);
 
 	pthread_mutex_lock(&list_lock);
 	igt_list_add(&spin->link, &spin_list);
@@ -290,18 +282,15 @@ ___igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep,
 }
 
 igt_spin_t *
-__igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
+__igt_spin_batch_factory(int fd, const struct igt_spin_factory *opts)
 {
-	return ___igt_spin_batch_new(fd, ctx, engine, dep, 0);
+	return spin_batch_create(fd, opts);
 }
 
 /**
- * igt_spin_batch_new:
+ * igt_spin_batch_factory:
  * @fd: open i915 drm file descriptor
- * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
- *          than 0, execute on all available rings.
- * @dep: handle to a buffer object dependency. If greater than 0, add a
- *              relocation entry to this buffer within the batch.
+ * @opts: controlling options such as context, engine, dependencies etc
  *
  * Start a recursive batch on a ring. Immediately returns a #igt_spin_t that
  * contains the batch's handle that can be waited upon. The returned structure
@@ -311,86 +300,26 @@ __igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
  * Structure with helper internal state for igt_spin_batch_free().
  */
 igt_spin_t *
-igt_spin_batch_new(int fd, uint32_t ctx, unsigned engine, uint32_t dep)
+igt_spin_batch_factory(int fd, const struct igt_spin_factory *opts)
 {
 	igt_spin_t *spin;
 
 	igt_require_gem(fd);
 
-	spin = __igt_spin_batch_new(fd, ctx, engine, dep);
-	igt_assert(gem_bo_busy(fd, spin->handle));
-
-	return spin;
-}
-
-igt_spin_t *
-__igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine)
-{
-	return ___igt_spin_batch_new(fd, ctx, engine, 0, OUT_FENCE);
-}
+	if (opts->engine != ALL_ENGINES) {
+		gem_require_ring(fd, opts->engine);
+		if (opts->flags & IGT_SPIN_POLL_RUN)
+			igt_require(gem_can_store_dword(fd, opts->engine));
+	}
 
-/**
- * igt_spin_batch_new_fence:
- * @fd: open i915 drm file descriptor
- * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
- *          than 0, execute on all available rings.
- *
- * Start a recursive batch on a ring. Immediately returns a #igt_spin_t that
- * contains the batch's handle that can be waited upon. The returned structure
- * must be passed to igt_spin_batch_free() for post-processing.
- *
- * igt_spin_t will contain an output fence associtated with this batch.
- *
- * Returns:
- * Structure with helper internal state for igt_spin_batch_free().
- */
-igt_spin_t *
-igt_spin_batch_new_fence(int fd, uint32_t ctx, unsigned engine)
-{
-	igt_spin_t *spin;
+	spin = spin_batch_create(fd, opts);
 
-	igt_require_gem(fd);
-	igt_require(gem_has_exec_fence(fd));
-
-	spin = __igt_spin_batch_new_fence(fd, ctx, engine);
 	igt_assert(gem_bo_busy(fd, spin->handle));
-	igt_assert(poll(&(struct pollfd){spin->out_fence, POLLIN}, 1, 0) == 0);
-
-	return spin;
-}
-
-igt_spin_t *
-__igt_spin_batch_new_poll(int fd, uint32_t ctx, unsigned engine)
-{
-	return ___igt_spin_batch_new(fd, ctx, engine, 0, POLL_RUN);
-}
+	if (opts->flags & IGT_SPIN_FENCE_OUT) {
+		struct pollfd pfd = { spin->out_fence, POLLIN };
 
-/**
- * igt_spin_batch_new_poll:
- * @fd: open i915 drm file descriptor
- * @engine: Ring to execute batch OR'd with execbuf flags. If value is less
- *          than 0, execute on all available rings.
- *
- * Start a recursive batch on a ring. Immediately returns a #igt_spin_t that
- * contains the batch's handle that can be waited upon. The returned structure
- * must be passed to igt_spin_batch_free() for post-processing.
- *
- * igt_spin_t->running will containt a pointer which target will change from
- * zero to one once the spinner actually starts executing on the GPU.
- *
- * Returns:
- * Structure with helper internal state for igt_spin_batch_free().
- */
-igt_spin_t *
-igt_spin_batch_new_poll(int fd, uint32_t ctx, unsigned engine)
-{
-	igt_spin_t *spin;
-
-	igt_require_gem(fd);
-	igt_require(gem_mmap__has_wc(fd));
-
-	spin = __igt_spin_batch_new_poll(fd, ctx, engine);
-	igt_assert(gem_bo_busy(fd, spin->handle));
+		igt_assert(poll(&pfd, 1, 0) == 0);
+	}
 
 	return spin;
 }
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index c6ccc2936..c794f2544 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -43,29 +43,25 @@ typedef struct igt_spin {
 	bool *running;
 } igt_spin_t;
 
-igt_spin_t *__igt_spin_batch_new(int fd,
-				 uint32_t ctx,
-				 unsigned engine,
-				 uint32_t  dep);
-igt_spin_t *igt_spin_batch_new(int fd,
-			       uint32_t ctx,
-			       unsigned engine,
-			       uint32_t  dep);
-
-igt_spin_t *__igt_spin_batch_new_fence(int fd,
-				       uint32_t ctx,
-				       unsigned engine);
-
-igt_spin_t *igt_spin_batch_new_fence(int fd,
-				     uint32_t ctx,
-				     unsigned engine);
-
-igt_spin_t *__igt_spin_batch_new_poll(int fd,
-				       uint32_t ctx,
-				       unsigned engine);
-igt_spin_t *igt_spin_batch_new_poll(int fd,
-				    uint32_t ctx,
-				    unsigned engine);
+struct igt_spin_factory {
+	uint32_t ctx;
+	uint32_t dependency;
+	unsigned int engine;
+	unsigned int flags;
+};
+
+#define IGT_SPIN_FENCE_OUT (1 << 0)
+#define IGT_SPIN_POLL_RUN  (1 << 1)
+
+igt_spin_t *
+__igt_spin_batch_factory(int fd, const struct igt_spin_factory *opts);
+igt_spin_t *
+igt_spin_batch_factory(int fd, const struct igt_spin_factory *opts);
+
+#define __igt_spin_batch_new(fd, ...) \
+	__igt_spin_batch_factory(fd, &((struct igt_spin_factory){__VA_ARGS__}))
+#define igt_spin_batch_new(fd, ...) \
+	igt_spin_batch_factory(fd, &((struct igt_spin_factory){__VA_ARGS__}))
 
 void igt_spin_batch_set_timeout(igt_spin_t *spin, int64_t ns);
 void igt_spin_batch_end(igt_spin_t *spin);
diff --git a/tests/drv_missed_irq.c b/tests/drv_missed_irq.c
index 791ee51fb..78690c36a 100644
--- a/tests/drv_missed_irq.c
+++ b/tests/drv_missed_irq.c
@@ -33,7 +33,7 @@ IGT_TEST_DESCRIPTION("Inject missed interrupts and make sure they are caught");
 
 static void trigger_missed_interrupt(int fd, unsigned ring)
 {
-	igt_spin_t *spin = __igt_spin_batch_new(fd, 0, ring, 0);
+	igt_spin_t *spin = __igt_spin_batch_new(fd, .engine = ring);
 	uint32_t go;
 	int link[2];
 
diff --git a/tests/gem_busy.c b/tests/gem_busy.c
index f564651ba..76b44a5d4 100644
--- a/tests/gem_busy.c
+++ b/tests/gem_busy.c
@@ -114,7 +114,9 @@ static void semaphore(int fd, unsigned ring, uint32_t flags)
 
 	/* Create a long running batch which we can use to hog the GPU */
 	handle[BUSY] = gem_create(fd, 4096);
-	spin = igt_spin_batch_new(fd, 0, ring, handle[BUSY]);
+	spin = igt_spin_batch_new(fd,
+				  .engine = ring,
+				  .dependency = handle[BUSY]);
 
 	/* Queue a batch after the busy, it should block and remain "busy" */
 	igt_assert(exec_noop(fd, handle, ring | flags, false));
@@ -363,17 +365,16 @@ static void close_race(int fd)
 		igt_assert(sched_setscheduler(getpid(), SCHED_RR, &rt) == 0);
 
 		for (i = 0; i < nhandles; i++) {
-			spin[i] = __igt_spin_batch_new(fd, 0,
-						       engines[rand() % nengine], 0);
+			spin[i] = __igt_spin_batch_new(fd,
+						       .engine = engines[rand() % nengine]);
 			handles[i] = spin[i]->handle;
 		}
 
 		igt_until_timeout(20) {
 			for (i = 0; i < nhandles; i++) {
 				igt_spin_batch_free(fd, spin[i]);
-				spin[i] = __igt_spin_batch_new(fd, 0,
-							       engines[rand() % nengine],
-							       0);
+				spin[i] = __igt_spin_batch_new(fd,
+							       .engine = engines[rand() % nengine]);
 				handles[i] = spin[i]->handle;
 				__sync_synchronize();
 			}
@@ -415,7 +416,7 @@ static bool has_semaphores(int fd)
 
 static bool has_extended_busy_ioctl(int fd)
 {
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, I915_EXEC_RENDER, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd, .engine = I915_EXEC_RENDER);
 	uint32_t read, write;
 
 	__gem_busy(fd, spin->handle, &read, &write);
@@ -426,7 +427,7 @@ static bool has_extended_busy_ioctl(int fd)
 
 static void basic(int fd, unsigned ring, unsigned flags)
 {
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, ring, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd, .engine = ring);
 	struct timespec tv;
 	int timeout;
 	bool busy;
diff --git a/tests/gem_ctx_isolation.c b/tests/gem_ctx_isolation.c
index fe7d3490c..2e19e8c03 100644
--- a/tests/gem_ctx_isolation.c
+++ b/tests/gem_ctx_isolation.c
@@ -502,7 +502,7 @@ static void isolation(int fd,
 		ctx[0] = gem_context_create(fd);
 		regs[0] = read_regs(fd, ctx[0], e, flags);
 
-		spin = igt_spin_batch_new(fd, ctx[0], engine, 0);
+		spin = igt_spin_batch_new(fd, .ctx = ctx[0], .engine = engine);
 
 		if (flags & DIRTY1) {
 			igt_debug("%s[%d]: Setting all registers of ctx 0 to 0x%08x\n",
@@ -557,8 +557,11 @@ static void isolation(int fd,
 
 static void inject_reset_context(int fd, unsigned int engine)
 {
+	struct igt_spin_factory opts = {
+		.ctx = gem_context_create(fd),
+		.engine = engine,
+	};
 	igt_spin_t *spin;
-	uint32_t ctx;
 
 	/*
 	 * Force a context switch before triggering the reset, or else
@@ -566,19 +569,20 @@ static void inject_reset_context(int fd, unsigned int engine)
 	 * HW for screwing up if the context was already broken.
 	 */
 
-	ctx = gem_context_create(fd);
-	if (gem_can_store_dword(fd, engine)) {
-		spin = __igt_spin_batch_new_poll(fd, ctx, engine);
+	if (gem_can_store_dword(fd, engine))
+		opts.flags |= IGT_SPIN_POLL_RUN;
+
+	spin = __igt_spin_batch_factory(fd, &opts);
+
+	if (spin->running)
 		igt_spin_busywait_until_running(spin);
-	} else {
-		spin = __igt_spin_batch_new(fd, ctx, engine, 0);
+	else
 		usleep(1000); /* better than nothing */
-	}
 
 	igt_force_gpu_reset(fd);
 
 	igt_spin_batch_free(fd, spin);
-	gem_context_destroy(fd, ctx);
+	gem_context_destroy(fd, opts.ctx);
 }
 
 static void preservation(int fd,
@@ -604,7 +608,7 @@ static void preservation(int fd,
 	gem_quiescent_gpu(fd);
 
 	ctx[num_values] = gem_context_create(fd);
-	spin = igt_spin_batch_new(fd, ctx[num_values], engine, 0);
+	spin = igt_spin_batch_new(fd, .ctx = ctx[num_values], .engine = engine);
 	regs[num_values][0] = read_regs(fd, ctx[num_values], e, flags);
 	for (int v = 0; v < num_values; v++) {
 		ctx[v] = gem_context_create(fd);
@@ -644,7 +648,7 @@ static void preservation(int fd,
 		break;
 	}
 
-	spin = igt_spin_batch_new(fd, ctx[num_values], engine, 0);
+	spin = igt_spin_batch_new(fd, .ctx = ctx[num_values], .engine = engine);
 	for (int v = 0; v < num_values; v++)
 		regs[v][1] = read_regs(fd, ctx[v], e, flags);
 	regs[num_values][1] = read_regs(fd, ctx[num_values], e, flags);
diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 5faf7502b..0ec1aaec9 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -157,10 +157,15 @@ static int __gem_wait(int fd, uint32_t handle, int64_t timeout)
 
 static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
 {
-	if (gem_can_store_dword(fd, flags))
-		return __igt_spin_batch_new_poll(fd, ctx, flags);
-	else
-		return __igt_spin_batch_new(fd, ctx, flags, 0);
+	struct igt_spin_factory opts = {
+		.ctx = ctx,
+		.engine = flags,
+	};
+
+	if (gem_can_store_dword(fd, opts.engine))
+		opts.flags |= IGT_SPIN_POLL_RUN;
+
+	return __igt_spin_batch_factory(fd, &opts);
 }
 
 static void __spin_wait(int fd, igt_spin_t *spin)
diff --git a/tests/gem_exec_fence.c b/tests/gem_exec_fence.c
index eb93308d1..ba46595d3 100644
--- a/tests/gem_exec_fence.c
+++ b/tests/gem_exec_fence.c
@@ -468,7 +468,7 @@ static void test_parallel(int fd, unsigned int master)
 	/* Fill the queue with many requests so that the next one has to
 	 * wait before it can be executed by the hardware.
 	 */
-	spin = igt_spin_batch_new(fd, 0, master, plug);
+	spin = igt_spin_batch_new(fd, .engine = master, .dependency = plug);
 	resubmit(fd, spin->handle, master, 16);
 
 	/* Now queue the master request and its secondaries */
@@ -651,7 +651,7 @@ static void test_keep_in_fence(int fd, unsigned int engine, unsigned int flags)
 	igt_spin_t *spin;
 	int fence;
 
-	spin = igt_spin_batch_new(fd, 0, engine, 0);
+	spin = igt_spin_batch_new(fd, .engine = engine);
 
 	gem_execbuf_wr(fd, &execbuf);
 	fence = upper_32_bits(execbuf.rsvd2);
@@ -1070,7 +1070,7 @@ static void test_syncobj_unused_fence(int fd)
 	struct local_gem_exec_fence fence = {
 		.handle = syncobj_create(fd),
 	};
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 
 	/* sanity check our syncobj_to_sync_file interface */
 	igt_assert_eq(__syncobj_to_sync_file(fd, 0), -ENOENT);
@@ -1162,7 +1162,7 @@ static void test_syncobj_signal(int fd)
 	struct local_gem_exec_fence fence = {
 		.handle = syncobj_create(fd),
 	};
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 
 	/* Check that the syncobj is signaled only when our request/fence is */
 
@@ -1212,7 +1212,7 @@ static void test_syncobj_wait(int fd)
 
 	gem_quiescent_gpu(fd);
 
-	spin = igt_spin_batch_new(fd, 0, 0, 0);
+	spin = igt_spin_batch_new(fd);
 
 	memset(&execbuf, 0, sizeof(execbuf));
 	execbuf.buffers_ptr = to_user_pointer(&obj);
@@ -1282,7 +1282,7 @@ static void test_syncobj_export(int fd)
 		.handle = syncobj_create(fd),
 	};
 	int export[2];
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 
 	/* Check that if we export the syncobj prior to use it picks up
 	 * the later fence. This allows a syncobj to establish a channel
@@ -1340,7 +1340,7 @@ static void test_syncobj_repeat(int fd)
 	struct drm_i915_gem_execbuffer2 execbuf;
 	struct local_gem_exec_fence *fence;
 	int export;
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 
 	/* Check that we can wait on the same fence multiple times */
 	fence = calloc(nfences, sizeof(*fence));
@@ -1395,7 +1395,7 @@ static void test_syncobj_import(int fd)
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
 	struct drm_i915_gem_exec_object2 obj;
 	struct drm_i915_gem_execbuffer2 execbuf;
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, 0, 0);
+	igt_spin_t *spin = igt_spin_batch_new(fd);
 	uint32_t sync = syncobj_create(fd);
 	int fence;
 
diff --git a/tests/gem_exec_latency.c b/tests/gem_exec_latency.c
index ea2e4c681..75811f325 100644
--- a/tests/gem_exec_latency.c
+++ b/tests/gem_exec_latency.c
@@ -63,6 +63,10 @@ static unsigned int ring_size;
 static void
 poll_ring(int fd, unsigned ring, const char *name)
 {
+	const struct igt_spin_factory opts = {
+		.engine = ring,
+		.flags = IGT_SPIN_POLL_RUN,
+	};
 	struct timespec tv = {};
 	unsigned long cycles;
 	igt_spin_t *spin[2];
@@ -72,11 +76,11 @@ poll_ring(int fd, unsigned ring, const char *name)
 	gem_require_ring(fd, ring);
 	igt_require(gem_can_store_dword(fd, ring));
 
-	spin[0] = __igt_spin_batch_new_poll(fd, 0, ring);
+	spin[0] = __igt_spin_batch_factory(fd, &opts);
 	igt_assert(spin[0]->running);
 	cmd = *spin[0]->batch;
 
-	spin[1] = __igt_spin_batch_new_poll(fd, 0, ring);
+	spin[1] = __igt_spin_batch_factory(fd, &opts);
 	igt_assert(spin[1]->running);
 	igt_assert(cmd == *spin[1]->batch);
 
@@ -312,7 +316,9 @@ static void latency_from_ring(int fd,
 			       I915_GEM_DOMAIN_GTT);
 
 		if (flags & PREEMPT)
-			spin = __igt_spin_batch_new(fd, ctx[0], ring, 0);
+			spin = __igt_spin_batch_new(fd,
+						    .ctx = ctx[0],
+						    .engine = ring);
 
 		if (flags & CORK) {
 			obj[0].handle = igt_cork_plug(&c, fd);
@@ -456,6 +462,10 @@ rthog_latency_on_ring(int fd, unsigned int engine, const char *name, unsigned in
 	};
 #define NPASS ARRAY_SIZE(passname)
 #define MMAP_SZ (64 << 10)
+	const struct igt_spin_factory opts = {
+		.engine = engine,
+		.flags = IGT_SPIN_POLL_RUN,
+	};
 	struct rt_pkt *results;
 	unsigned int engines[16];
 	const char *names[16];
@@ -513,7 +523,7 @@ rthog_latency_on_ring(int fd, unsigned int engine, const char *name, unsigned in
 
 			usleep(250);
 
-			spin = __igt_spin_batch_new_poll(fd, 0, engine);
+			spin = __igt_spin_batch_factory(fd, &opts);
 			if (!spin) {
 				igt_warn("Failed to create spinner! (%s)\n",
 					 passname[pass]);
diff --git a/tests/gem_exec_nop.c b/tests/gem_exec_nop.c
index 0523b1c02..74d27522d 100644
--- a/tests/gem_exec_nop.c
+++ b/tests/gem_exec_nop.c
@@ -709,7 +709,9 @@ static void preempt(int fd, uint32_t handle,
 	clock_gettime(CLOCK_MONOTONIC, &start);
 	do {
 		igt_spin_t *spin =
-			__igt_spin_batch_new(fd, ctx[0], ring_id, 0);
+			__igt_spin_batch_new(fd,
+					     .ctx = ctx[0],
+					     .engine = ring_id);
 
 		for (int loop = 0; loop < 1024; loop++)
 			gem_execbuf(fd, &execbuf);
diff --git a/tests/gem_exec_reloc.c b/tests/gem_exec_reloc.c
index 91c6691af..837f60a6c 100644
--- a/tests/gem_exec_reloc.c
+++ b/tests/gem_exec_reloc.c
@@ -388,7 +388,9 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
 		}
 
 		if (flags & ACTIVE) {
-			spin = igt_spin_batch_new(fd, 0, I915_EXEC_DEFAULT, obj.handle);
+			spin = igt_spin_batch_new(fd,
+						  .engine = I915_EXEC_DEFAULT,
+						  .dependency = obj.handle);
 			if (!(flags & HANG))
 				igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
 			igt_assert(gem_bo_busy(fd, obj.handle));
@@ -454,7 +456,9 @@ static void basic_reloc(int fd, unsigned before, unsigned after, unsigned flags)
 		}
 
 		if (flags & ACTIVE) {
-			spin = igt_spin_batch_new(fd, 0, I915_EXEC_DEFAULT, obj.handle);
+			spin = igt_spin_batch_new(fd,
+						  .engine = I915_EXEC_DEFAULT,
+						  .dependency = obj.handle);
 			if (!(flags & HANG))
 				igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
 			igt_assert(gem_bo_busy(fd, obj.handle));
@@ -581,7 +585,7 @@ static void basic_range(int fd, unsigned flags)
 	execbuf.buffer_count = n + 1;
 
 	if (flags & ACTIVE) {
-		spin = igt_spin_batch_new(fd, 0, 0, obj[n].handle);
+		spin = igt_spin_batch_new(fd, .dependency = obj[n].handle);
 		if (!(flags & HANG))
 			igt_spin_batch_set_timeout(spin, NSEC_PER_SEC/100);
 		igt_assert(gem_bo_busy(fd, obj[n].handle));
diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index 1f43147f7..35a44ab10 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -132,9 +132,12 @@ static void unplug_show_queue(int fd, struct igt_cork *c, unsigned int engine)
 	igt_spin_t *spin[MAX_ELSP_QLEN];
 
 	for (int n = 0; n < ARRAY_SIZE(spin); n++) {
-		uint32_t ctx = create_highest_priority(fd);
-		spin[n] = __igt_spin_batch_new(fd, ctx, engine, 0);
-		gem_context_destroy(fd, ctx);
+		const struct igt_spin_factory opts = {
+			.ctx = create_highest_priority(fd),
+			.engine = engine,
+		};
+		spin[n] = __igt_spin_batch_factory(fd, &opts);
+		gem_context_destroy(fd, opts.ctx);
 	}
 
 	igt_cork_unplug(c); /* batches will now be queued on the engine */
@@ -196,7 +199,7 @@ static void independent(int fd, unsigned int engine)
 			continue;
 
 		if (spin == NULL) {
-			spin = __igt_spin_batch_new(fd, 0, other, 0);
+			spin = __igt_spin_batch_new(fd, .engine = other);
 		} else {
 			struct drm_i915_gem_exec_object2 obj = {
 				.handle = spin->handle,
@@ -428,7 +431,9 @@ static void preempt(int fd, unsigned ring, unsigned flags)
 			ctx[LO] = gem_context_create(fd);
 			gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
 		}
-		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
+		spin[n] = __igt_spin_batch_new(fd,
+					       .ctx = ctx[LO],
+					       .engine = ring);
 		igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
 
 		store_dword(fd, ctx[HI], ring, result, 0, n + 1, 0, I915_GEM_DOMAIN_RENDER);
@@ -462,7 +467,9 @@ static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
 
 	for_each_physical_engine(fd, other) {
 		if (spin == NULL) {
-			spin = __igt_spin_batch_new(fd, ctx, other, 0);
+			spin = __igt_spin_batch_new(fd,
+						    .ctx = ctx,
+						    .engine = other);
 		} else {
 			struct drm_i915_gem_exec_object2 obj = {
 				.handle = spin->handle,
@@ -672,7 +679,9 @@ static void preempt_self(int fd, unsigned ring)
 	n = 0;
 	gem_context_set_priority(fd, ctx[HI], MIN_PRIO);
 	for_each_physical_engine(fd, other) {
-		spin[n] = __igt_spin_batch_new(fd, ctx[NOISE], other, 0);
+		spin[n] = __igt_spin_batch_new(fd,
+					       .ctx = ctx[NOISE],
+					       .engine = other);
 		store_dword(fd, ctx[HI], other,
 			    result, (n + 1)*sizeof(uint32_t), n + 1,
 			    0, I915_GEM_DOMAIN_RENDER);
@@ -714,7 +723,9 @@ static void preemptive_hang(int fd, unsigned ring)
 		ctx[LO] = gem_context_create(fd);
 		gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
 
-		spin[n] = __igt_spin_batch_new(fd, ctx[LO], ring, 0);
+		spin[n] = __igt_spin_batch_new(fd,
+					       .ctx = ctx[LO],
+					       .engine = ring);
 
 		gem_context_destroy(fd, ctx[LO]);
 	}
diff --git a/tests/gem_exec_suspend.c b/tests/gem_exec_suspend.c
index db2bca262..43c52d105 100644
--- a/tests/gem_exec_suspend.c
+++ b/tests/gem_exec_suspend.c
@@ -189,7 +189,7 @@ static void run_test(int fd, unsigned engine, unsigned flags)
 	}
 
 	if (flags & HANG)
-		spin = igt_spin_batch_new(fd, 0, engine, 0);
+		spin = igt_spin_batch_new(fd, .engine = engine);
 
 	switch (mode(flags)) {
 	case NOSLEEP:
diff --git a/tests/gem_fenced_exec_thrash.c b/tests/gem_fenced_exec_thrash.c
index 385790ada..7248d310d 100644
--- a/tests/gem_fenced_exec_thrash.c
+++ b/tests/gem_fenced_exec_thrash.c
@@ -132,7 +132,7 @@ static void run_test(int fd, int num_fences, int expected_errno,
 			igt_spin_t *spin = NULL;
 
 			if (flags & BUSY_LOAD)
-				spin = __igt_spin_batch_new(fd, 0, 0, 0);
+				spin = __igt_spin_batch_new(fd);
 
 			igt_while_interruptible(flags & INTERRUPTIBLE) {
 				igt_assert_eq(__gem_execbuf(fd, &execbuf[i]),
diff --git a/tests/gem_shrink.c b/tests/gem_shrink.c
index 3d33453aa..929e0426a 100644
--- a/tests/gem_shrink.c
+++ b/tests/gem_shrink.c
@@ -346,9 +346,9 @@ static void reclaim(unsigned engine, int timeout)
 		} while (!*shared);
 	}
 
-	spin = igt_spin_batch_new(fd, 0, engine, 0);
+	spin = igt_spin_batch_new(fd, .engine = engine);
 	igt_until_timeout(timeout) {
-		igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
+		igt_spin_t *next = __igt_spin_batch_new(fd, .engine = engine);
 
 		igt_spin_batch_set_timeout(spin, timeout_100ms);
 		gem_sync(fd, spin->handle);
diff --git a/tests/gem_spin_batch.c b/tests/gem_spin_batch.c
index cffeb6d71..52410010b 100644
--- a/tests/gem_spin_batch.c
+++ b/tests/gem_spin_batch.c
@@ -41,9 +41,9 @@ static void spin(int fd, unsigned int engine, unsigned int timeout_sec)
 	struct timespec itv = { };
 	uint64_t elapsed;
 
-	spin = __igt_spin_batch_new(fd, 0, engine, 0);
+	spin = __igt_spin_batch_new(fd, .engine = engine);
 	while ((elapsed = igt_nsec_elapsed(&tv)) >> 30 < timeout_sec) {
-		igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
+		igt_spin_t *next = __igt_spin_batch_new(fd, .engine = engine);
 
 		igt_spin_batch_set_timeout(spin,
 					   timeout_100ms - igt_nsec_elapsed(&itv));
diff --git a/tests/gem_sync.c b/tests/gem_sync.c
index 1e2e089a1..2fcb9aa01 100644
--- a/tests/gem_sync.c
+++ b/tests/gem_sync.c
@@ -715,9 +715,8 @@ preempt(int fd, unsigned ring, int num_children, int timeout)
 		do {
 			igt_spin_t *spin =
 				__igt_spin_batch_new(fd,
-						     ctx[0],
-						     execbuf.flags,
-						     0);
+						     .ctx = ctx[0],
+						     .engine = execbuf.flags);
 
 			do {
 				gem_execbuf(fd, &execbuf);
diff --git a/tests/gem_wait.c b/tests/gem_wait.c
index 61d8a4059..7914c9365 100644
--- a/tests/gem_wait.c
+++ b/tests/gem_wait.c
@@ -74,7 +74,9 @@ static void basic(int fd, unsigned engine, unsigned flags)
 	IGT_CORK_HANDLE(cork);
 	uint32_t plug =
 		flags & (WRITE | AWAIT) ? igt_cork_plug(&cork, fd) : 0;
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, engine, plug);
+	igt_spin_t *spin = igt_spin_batch_new(fd,
+					      .engine = engine,
+					      .dependency = plug);
 	struct drm_i915_gem_wait wait = {
 		flags & WRITE ? plug : spin->handle
 	};
diff --git a/tests/kms_busy.c b/tests/kms_busy.c
index 4a4e0e156..abf39828b 100644
--- a/tests/kms_busy.c
+++ b/tests/kms_busy.c
@@ -84,7 +84,8 @@ static void flip_to_fb(igt_display_t *dpy, int pipe,
 	struct drm_event_vblank ev;
 
 	igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
-					   0, ring, fb->gem_handle);
+					   .engine = ring,
+					   .dependency = fb->gem_handle);
 
 	if (modeset) {
 		/*
@@ -200,7 +201,8 @@ static void test_atomic_commit_hang(igt_display_t *dpy, igt_plane_t *primary,
 				    struct igt_fb *busy_fb, unsigned ring)
 {
 	igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
-					   0, ring, busy_fb->gem_handle);
+					   .engine = ring,
+					   .dependency = busy_fb->gem_handle);
 	struct pollfd pfd = { .fd = dpy->drm_fd, .events = POLLIN };
 	unsigned flags = 0;
 	struct drm_event_vblank ev;
@@ -287,7 +289,9 @@ static void test_pageflip_modeset_hang(igt_display_t *dpy,
 
 	igt_display_commit2(dpy, dpy->is_atomic ? COMMIT_ATOMIC : COMMIT_LEGACY);
 
-	t = igt_spin_batch_new(dpy->drm_fd, 0, ring, fb.gem_handle);
+	t = igt_spin_batch_new(dpy->drm_fd,
+			       .engine = ring,
+			       .dependency = fb.gem_handle);
 
 	do_or_die(drmModePageFlip(dpy->drm_fd, dpy->pipes[pipe].crtc_id, fb.fb_id, DRM_MODE_PAGE_FLIP_EVENT, &fb));
 
diff --git a/tests/kms_cursor_legacy.c b/tests/kms_cursor_legacy.c
index d0a28b3c4..85340d43e 100644
--- a/tests/kms_cursor_legacy.c
+++ b/tests/kms_cursor_legacy.c
@@ -532,7 +532,8 @@ static void basic_flip_cursor(igt_display_t *display,
 
 		spin = NULL;
 		if (flags & BASIC_BUSY)
-			spin = igt_spin_batch_new(display->drm_fd, 0, 0, fb_info.gem_handle);
+			spin = igt_spin_batch_new(display->drm_fd,
+						  .dependency = fb_info.gem_handle);
 
 		/* Start with a synchronous query to align with the vblank */
 		vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
@@ -1323,8 +1324,8 @@ static void flip_vs_cursor_busy_crc(igt_display_t *display, bool atomic)
 	for (int i = 1; i >= 0; i--) {
 		igt_spin_t *spin;
 
-		spin = igt_spin_batch_new(display->drm_fd, 0, 0,
-					  fb_info[1].gem_handle);
+		spin = igt_spin_batch_new(display->drm_fd,
+					  .dependency = fb_info[1].gem_handle);
 
 		vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
 
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index 4570f926d..a1d36ac4f 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -172,10 +172,15 @@ static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
 
 static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
 {
+	struct igt_spin_factory opts = {
+		.ctx = ctx,
+		.engine = flags,
+	};
+
 	if (gem_can_store_dword(fd, flags))
-		return __igt_spin_batch_new_poll(fd, ctx, flags);
-	else
-		return __igt_spin_batch_new(fd, ctx, flags, 0);
+		opts.flags |= IGT_SPIN_POLL_RUN;
+
+	return __igt_spin_batch_factory(fd, &opts);
 }
 
 static unsigned long __spin_wait(int fd, igt_spin_t *spin)
@@ -356,7 +361,9 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
 	 */
 	spin[0] = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
 	usleep(500e3);
-	spin[1] = __igt_spin_batch_new(gem_fd, ctx, e2ring(gem_fd, e), 0);
+	spin[1] = __igt_spin_batch_new(gem_fd,
+				       .ctx = ctx,
+				       .engine = e2ring(gem_fd, e));
 
 	/*
 	 * Open PMU as fast as possible after the second spin batch in attempt
@@ -1045,8 +1052,8 @@ static void cpu_hotplug(int gem_fd)
 	 * Create two spinners so test can ensure shorter gaps in engine
 	 * busyness as it is terminating one and re-starting the other.
 	 */
-	spin[0] = igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
-	spin[1] = __igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
+	spin[0] = igt_spin_batch_new(gem_fd, .engine = I915_EXEC_RENDER);
+	spin[1] = __igt_spin_batch_new(gem_fd, .engine = I915_EXEC_RENDER);
 
 	val = __pmu_read_single(fd, &ts[0]);
 
@@ -1129,8 +1136,8 @@ static void cpu_hotplug(int gem_fd)
 			break;
 
 		igt_spin_batch_free(gem_fd, spin[cur]);
-		spin[cur] = __igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER,
-						 0);
+		spin[cur] = __igt_spin_batch_new(gem_fd,
+						 .engine = I915_EXEC_RENDER);
 		cur ^= 1;
 	}
 
@@ -1167,8 +1174,9 @@ test_interrupts(int gem_fd)
 
 	/* Queue spinning batches. */
 	for (int i = 0; i < target; i++) {
-		spin[i] = __igt_spin_batch_new_fence(gem_fd,
-						     0, I915_EXEC_RENDER);
+		spin[i] = __igt_spin_batch_new(gem_fd,
+					       .engine = I915_EXEC_RENDER,
+					       .flags = IGT_SPIN_FENCE_OUT);
 		if (i == 0) {
 			fence_fd = spin[i]->out_fence;
 		} else {
@@ -1229,7 +1237,8 @@ test_interrupts_sync(int gem_fd)
 
 	/* Queue spinning batches. */
 	for (int i = 0; i < target; i++)
-		spin[i] = __igt_spin_batch_new_fence(gem_fd, 0, 0);
+		spin[i] = __igt_spin_batch_new(gem_fd,
+					       .flags = IGT_SPIN_FENCE_OUT);
 
 	/* Wait for idle state. */
 	idle = pmu_read_single(fd);
@@ -1550,7 +1559,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
 		igt_spin_t *spin;
 
 		/* Allocate our spin batch and idle it. */
-		spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+		spin = igt_spin_batch_new(gem_fd, .engine = e2ring(gem_fd, e));
 		igt_spin_batch_end(spin);
 		gem_sync(gem_fd, spin->handle);
 
diff --git a/tests/pm_rps.c b/tests/pm_rps.c
index 006d084b8..202132b1c 100644
--- a/tests/pm_rps.c
+++ b/tests/pm_rps.c
@@ -235,9 +235,9 @@ static void load_helper_run(enum load load)
 
 		igt_debug("Applying %s load...\n", lh.load ? "high" : "low");
 
-		spin[0] = __igt_spin_batch_new(drm_fd, 0, 0, 0);
+		spin[0] = __igt_spin_batch_new(drm_fd);
 		if (lh.load == HIGH)
-			spin[1] = __igt_spin_batch_new(drm_fd, 0, 0, 0);
+			spin[1] = __igt_spin_batch_new(drm_fd);
 		while (!lh.exit) {
 			handle = spin[0]->handle;
 			igt_spin_batch_end(spin[0]);
@@ -248,8 +248,7 @@ static void load_helper_run(enum load load)
 			usleep(100);
 
 			spin[0] = spin[1];
-			spin[lh.load == HIGH] =
-				__igt_spin_batch_new(drm_fd, 0, 0, 0);
+			spin[lh.load == HIGH] = __igt_spin_batch_new(drm_fd);
 		}
 
 		handle = spin[0]->handle;
@@ -510,7 +509,7 @@ static void boost_freq(int fd, int *boost_freqs)
 	int64_t timeout = 1;
 	igt_spin_t *load;
 
-	load = igt_spin_batch_new(fd, 0, 0, 0);
+	load = igt_spin_batch_new(fd);
 	resubmit_batch(fd, load->handle, 16);
 
 	/* Waiting will grant us a boost to maximum */
-- 
2.18.0

+		spin[n] = __igt_spin_batch_new(fd,
+					       .ctx = ctx[LO],
+					       .engine = ring);
 
 		gem_context_destroy(fd, ctx[LO]);
 	}
diff --git a/tests/gem_exec_suspend.c b/tests/gem_exec_suspend.c
index db2bca262..43c52d105 100644
--- a/tests/gem_exec_suspend.c
+++ b/tests/gem_exec_suspend.c
@@ -189,7 +189,7 @@ static void run_test(int fd, unsigned engine, unsigned flags)
 	}
 
 	if (flags & HANG)
-		spin = igt_spin_batch_new(fd, 0, engine, 0);
+		spin = igt_spin_batch_new(fd, .engine = engine);
 
 	switch (mode(flags)) {
 	case NOSLEEP:
diff --git a/tests/gem_fenced_exec_thrash.c b/tests/gem_fenced_exec_thrash.c
index 385790ada..7248d310d 100644
--- a/tests/gem_fenced_exec_thrash.c
+++ b/tests/gem_fenced_exec_thrash.c
@@ -132,7 +132,7 @@ static void run_test(int fd, int num_fences, int expected_errno,
 			igt_spin_t *spin = NULL;
 
 			if (flags & BUSY_LOAD)
-				spin = __igt_spin_batch_new(fd, 0, 0, 0);
+				spin = __igt_spin_batch_new(fd);
 
 			igt_while_interruptible(flags & INTERRUPTIBLE) {
 				igt_assert_eq(__gem_execbuf(fd, &execbuf[i]),
diff --git a/tests/gem_shrink.c b/tests/gem_shrink.c
index 3d33453aa..929e0426a 100644
--- a/tests/gem_shrink.c
+++ b/tests/gem_shrink.c
@@ -346,9 +346,9 @@ static void reclaim(unsigned engine, int timeout)
 		} while (!*shared);
 	}
 
-	spin = igt_spin_batch_new(fd, 0, engine, 0);
+	spin = igt_spin_batch_new(fd, .engine = engine);
 	igt_until_timeout(timeout) {
-		igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
+		igt_spin_t *next = __igt_spin_batch_new(fd, .engine = engine);
 
 		igt_spin_batch_set_timeout(spin, timeout_100ms);
 		gem_sync(fd, spin->handle);
diff --git a/tests/gem_spin_batch.c b/tests/gem_spin_batch.c
index cffeb6d71..52410010b 100644
--- a/tests/gem_spin_batch.c
+++ b/tests/gem_spin_batch.c
@@ -41,9 +41,9 @@ static void spin(int fd, unsigned int engine, unsigned int timeout_sec)
 	struct timespec itv = { };
 	uint64_t elapsed;
 
-	spin = __igt_spin_batch_new(fd, 0, engine, 0);
+	spin = __igt_spin_batch_new(fd, .engine = engine);
 	while ((elapsed = igt_nsec_elapsed(&tv)) >> 30 < timeout_sec) {
-		igt_spin_t *next = __igt_spin_batch_new(fd, 0, engine, 0);
+		igt_spin_t *next = __igt_spin_batch_new(fd, .engine = engine);
 
 		igt_spin_batch_set_timeout(spin,
 					   timeout_100ms - igt_nsec_elapsed(&itv));
diff --git a/tests/gem_sync.c b/tests/gem_sync.c
index 1e2e089a1..2fcb9aa01 100644
--- a/tests/gem_sync.c
+++ b/tests/gem_sync.c
@@ -715,9 +715,8 @@ preempt(int fd, unsigned ring, int num_children, int timeout)
 		do {
 			igt_spin_t *spin =
 				__igt_spin_batch_new(fd,
-						     ctx[0],
-						     execbuf.flags,
-						     0);
+						     .ctx = ctx[0],
+						     .engine = execbuf.flags);
 
 			do {
 				gem_execbuf(fd, &execbuf);
diff --git a/tests/gem_wait.c b/tests/gem_wait.c
index 61d8a4059..7914c9365 100644
--- a/tests/gem_wait.c
+++ b/tests/gem_wait.c
@@ -74,7 +74,9 @@ static void basic(int fd, unsigned engine, unsigned flags)
 	IGT_CORK_HANDLE(cork);
 	uint32_t plug =
 		flags & (WRITE | AWAIT) ? igt_cork_plug(&cork, fd) : 0;
-	igt_spin_t *spin = igt_spin_batch_new(fd, 0, engine, plug);
+	igt_spin_t *spin = igt_spin_batch_new(fd,
+					      .engine = engine,
+					      .dependency = plug);
 	struct drm_i915_gem_wait wait = {
 		flags & WRITE ? plug : spin->handle
 	};
diff --git a/tests/kms_busy.c b/tests/kms_busy.c
index 4a4e0e156..abf39828b 100644
--- a/tests/kms_busy.c
+++ b/tests/kms_busy.c
@@ -84,7 +84,8 @@ static void flip_to_fb(igt_display_t *dpy, int pipe,
 	struct drm_event_vblank ev;
 
 	igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
-					   0, ring, fb->gem_handle);
+					   .engine = ring,
+					   .dependency = fb->gem_handle);
 
 	if (modeset) {
 		/*
@@ -200,7 +201,8 @@ static void test_atomic_commit_hang(igt_display_t *dpy, igt_plane_t *primary,
 				    struct igt_fb *busy_fb, unsigned ring)
 {
 	igt_spin_t *t = igt_spin_batch_new(dpy->drm_fd,
-					   0, ring, busy_fb->gem_handle);
+					   .engine = ring,
+					   .dependency = busy_fb->gem_handle);
 	struct pollfd pfd = { .fd = dpy->drm_fd, .events = POLLIN };
 	unsigned flags = 0;
 	struct drm_event_vblank ev;
@@ -287,7 +289,9 @@ static void test_pageflip_modeset_hang(igt_display_t *dpy,
 
 	igt_display_commit2(dpy, dpy->is_atomic ? COMMIT_ATOMIC : COMMIT_LEGACY);
 
-	t = igt_spin_batch_new(dpy->drm_fd, 0, ring, fb.gem_handle);
+	t = igt_spin_batch_new(dpy->drm_fd,
+			       .engine = ring,
+			       .dependency = fb.gem_handle);
 
 	do_or_die(drmModePageFlip(dpy->drm_fd, dpy->pipes[pipe].crtc_id, fb.fb_id, DRM_MODE_PAGE_FLIP_EVENT, &fb));
 
diff --git a/tests/kms_cursor_legacy.c b/tests/kms_cursor_legacy.c
index d0a28b3c4..85340d43e 100644
--- a/tests/kms_cursor_legacy.c
+++ b/tests/kms_cursor_legacy.c
@@ -532,7 +532,8 @@ static void basic_flip_cursor(igt_display_t *display,
 
 		spin = NULL;
 		if (flags & BASIC_BUSY)
-			spin = igt_spin_batch_new(display->drm_fd, 0, 0, fb_info.gem_handle);
+			spin = igt_spin_batch_new(display->drm_fd,
+						  .dependency = fb_info.gem_handle);
 
 		/* Start with a synchronous query to align with the vblank */
 		vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
@@ -1323,8 +1324,8 @@ static void flip_vs_cursor_busy_crc(igt_display_t *display, bool atomic)
 	for (int i = 1; i >= 0; i--) {
 		igt_spin_t *spin;
 
-		spin = igt_spin_batch_new(display->drm_fd, 0, 0,
-					  fb_info[1].gem_handle);
+		spin = igt_spin_batch_new(display->drm_fd,
+					  .dependency = fb_info[1].gem_handle);
 
 		vblank_start = get_vblank(display->drm_fd, pipe, DRM_VBLANK_NEXTONMISS);
 
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index 4570f926d..a1d36ac4f 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -172,10 +172,15 @@ static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
 
 static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
 {
+	struct igt_spin_factory opts = {
+		.ctx = ctx,
+		.engine = flags,
+	};
+
 	if (gem_can_store_dword(fd, flags))
-		return __igt_spin_batch_new_poll(fd, ctx, flags);
-	else
-		return __igt_spin_batch_new(fd, ctx, flags, 0);
+		opts.flags |= IGT_SPIN_POLL_RUN;
+
+	return __igt_spin_batch_factory(fd, &opts);
 }
 
 static unsigned long __spin_wait(int fd, igt_spin_t *spin)
@@ -356,7 +361,9 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
 	 */
 	spin[0] = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
 	usleep(500e3);
-	spin[1] = __igt_spin_batch_new(gem_fd, ctx, e2ring(gem_fd, e), 0);
+	spin[1] = __igt_spin_batch_new(gem_fd,
+				       .ctx = ctx,
+				       .engine = e2ring(gem_fd, e));
 
 	/*
 	 * Open PMU as fast as possible after the second spin batch in attempt
@@ -1045,8 +1052,8 @@ static void cpu_hotplug(int gem_fd)
 	 * Create two spinners so test can ensure shorter gaps in engine
 	 * busyness as it is terminating one and re-starting the other.
 	 */
-	spin[0] = igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
-	spin[1] = __igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER, 0);
+	spin[0] = igt_spin_batch_new(gem_fd, .engine = I915_EXEC_RENDER);
+	spin[1] = __igt_spin_batch_new(gem_fd, .engine = I915_EXEC_RENDER);
 
 	val = __pmu_read_single(fd, &ts[0]);
 
@@ -1129,8 +1136,8 @@ static void cpu_hotplug(int gem_fd)
 			break;
 
 		igt_spin_batch_free(gem_fd, spin[cur]);
-		spin[cur] = __igt_spin_batch_new(gem_fd, 0, I915_EXEC_RENDER,
-						 0);
+		spin[cur] = __igt_spin_batch_new(gem_fd,
+						 .engine = I915_EXEC_RENDER);
 		cur ^= 1;
 	}
 
@@ -1167,8 +1174,9 @@ test_interrupts(int gem_fd)
 
 	/* Queue spinning batches. */
 	for (int i = 0; i < target; i++) {
-		spin[i] = __igt_spin_batch_new_fence(gem_fd,
-						     0, I915_EXEC_RENDER);
+		spin[i] = __igt_spin_batch_new(gem_fd,
+					       .engine = I915_EXEC_RENDER,
+					       .flags = IGT_SPIN_FENCE_OUT);
 		if (i == 0) {
 			fence_fd = spin[i]->out_fence;
 		} else {
@@ -1229,7 +1237,8 @@ test_interrupts_sync(int gem_fd)
 
 	/* Queue spinning batches. */
 	for (int i = 0; i < target; i++)
-		spin[i] = __igt_spin_batch_new_fence(gem_fd, 0, 0);
+		spin[i] = __igt_spin_batch_new(gem_fd,
+					       .flags = IGT_SPIN_FENCE_OUT);
 
 	/* Wait for idle state. */
 	idle = pmu_read_single(fd);
@@ -1550,7 +1559,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
 		igt_spin_t *spin;
 
 		/* Allocate our spin batch and idle it. */
-		spin = igt_spin_batch_new(gem_fd, 0, e2ring(gem_fd, e), 0);
+		spin = igt_spin_batch_new(gem_fd, .engine = e2ring(gem_fd, e));
 		igt_spin_batch_end(spin);
 		gem_sync(gem_fd, spin->handle);
 
diff --git a/tests/pm_rps.c b/tests/pm_rps.c
index 006d084b8..202132b1c 100644
--- a/tests/pm_rps.c
+++ b/tests/pm_rps.c
@@ -235,9 +235,9 @@ static void load_helper_run(enum load load)
 
 		igt_debug("Applying %s load...\n", lh.load ? "high" : "low");
 
-		spin[0] = __igt_spin_batch_new(drm_fd, 0, 0, 0);
+		spin[0] = __igt_spin_batch_new(drm_fd);
 		if (lh.load == HIGH)
-			spin[1] = __igt_spin_batch_new(drm_fd, 0, 0, 0);
+			spin[1] = __igt_spin_batch_new(drm_fd);
 		while (!lh.exit) {
 			handle = spin[0]->handle;
 			igt_spin_batch_end(spin[0]);
@@ -248,8 +248,7 @@ static void load_helper_run(enum load load)
 			usleep(100);
 
 			spin[0] = spin[1];
-			spin[lh.load == HIGH] =
-				__igt_spin_batch_new(drm_fd, 0, 0, 0);
+			spin[lh.load == HIGH] = __igt_spin_batch_new(drm_fd);
 		}
 
 		handle = spin[0]->handle;
@@ -510,7 +509,7 @@ static void boost_freq(int fd, int *boost_freqs)
 	int64_t timeout = 1;
 	igt_spin_t *load;
 
-	load = igt_spin_batch_new(fd, 0, 0, 0);
+	load = igt_spin_batch_new(fd);
 	resubmit_batch(fd, load->handle, 16);
 
 	/* Waiting will grant us a boost to maximum */
-- 
2.18.0

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH i-g-t 2/2] lib: Spin fast, retire early
  2018-06-25 13:13 ` [Intel-gfx] " Chris Wilson
@ 2018-06-25 13:13   ` Chris Wilson
  -1 siblings, 0 replies; 6+ messages in thread
From: Chris Wilson @ 2018-06-25 13:13 UTC (permalink / raw)
  To: intel-gfx; +Cc: igt-dev

When using the pollable spinner, we often want to use it as a means of
ensuring the task is running on the GPU before switching to something
else. In that case we don't want to add extra delay inside the spinner,
but the current 1000 NOPs add on the order of 5us, which is often larger
than the target latency.
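
As an aside (not part of the patch): a minimal sketch of how a test
might combine the new IGT_SPIN_FAST flag with IGT_SPIN_POLL_RUN through
the spin factory from patch 1/2. The helper name and the busy-wait on
spin->running are illustrative assumptions modelled on the existing
pollable-spinner tests, not code taken from this series.

	#include "igt.h"

	static igt_spin_t *start_fast_spinner(int fd, unsigned int engine)
	{
		const struct igt_spin_factory opts = {
			.engine = engine,
			/* skip the NOP padding so the spinner starts ASAP */
			.flags = IGT_SPIN_POLL_RUN | IGT_SPIN_FAST,
		};
		igt_spin_t *spin = __igt_spin_batch_factory(fd, &opts);

		/* poll the shared page until the batch reports it is running */
		igt_assert(spin->running);
		while (!*(volatile bool *)spin->running)
			;

		return spin;
	}

A caller would submit its real workload behind the spinner and then
release it with igt_spin_batch_free() once the measurement is done.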

v2: Don't change perf_pmu as that is sensitive to the extra CPU latency
from a tight GPU spinner.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Antonio Argenziano <antonio.argenziano@intel.com> #v1
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> #v1
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 lib/igt_dummyload.c       | 3 ++-
 lib/igt_dummyload.h       | 1 +
 tests/gem_ctx_isolation.c | 1 +
 tests/gem_eio.c           | 1 +
 tests/gem_exec_latency.c  | 4 ++--
 5 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 94efdf745..7beb66244 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -199,7 +199,8 @@ emit_recursive_batch(igt_spin_t *spin,
 	 * between function calls, that appears enough to keep SNB out of
 	 * trouble. See https://bugs.freedesktop.org/show_bug.cgi?id=102262
 	 */
-	batch += 1000;
+	if (!(opts->flags & IGT_SPIN_FAST))
+		batch += 1000;
 
 	/* recurse */
 	r = &relocs[obj[BATCH].relocation_count++];
diff --git a/lib/igt_dummyload.h b/lib/igt_dummyload.h
index c794f2544..e80a12451 100644
--- a/lib/igt_dummyload.h
+++ b/lib/igt_dummyload.h
@@ -52,6 +52,7 @@ struct igt_spin_factory {
 
 #define IGT_SPIN_FENCE_OUT (1 << 0)
 #define IGT_SPIN_POLL_RUN  (1 << 1)
+#define IGT_SPIN_FAST      (1 << 2)
 
 igt_spin_t *
 __igt_spin_batch_factory(int fd, const struct igt_spin_factory *opts);
diff --git a/tests/gem_ctx_isolation.c b/tests/gem_ctx_isolation.c
index 2e19e8c03..4325e1c28 100644
--- a/tests/gem_ctx_isolation.c
+++ b/tests/gem_ctx_isolation.c
@@ -560,6 +560,7 @@ static void inject_reset_context(int fd, unsigned int engine)
 	struct igt_spin_factory opts = {
 		.ctx = gem_context_create(fd),
 		.engine = engine,
+		.flags = IGT_SPIN_FAST,
 	};
 	igt_spin_t *spin;
 
diff --git a/tests/gem_eio.c b/tests/gem_eio.c
index 0ec1aaec9..3162a3170 100644
--- a/tests/gem_eio.c
+++ b/tests/gem_eio.c
@@ -160,6 +160,7 @@ static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
 	struct igt_spin_factory opts = {
 		.ctx = ctx,
 		.engine = flags,
+		.flags = IGT_SPIN_FAST,
 	};
 
 	if (gem_can_store_dword(fd, opts.engine))
diff --git a/tests/gem_exec_latency.c b/tests/gem_exec_latency.c
index 75811f325..de16322a6 100644
--- a/tests/gem_exec_latency.c
+++ b/tests/gem_exec_latency.c
@@ -65,7 +65,7 @@ poll_ring(int fd, unsigned ring, const char *name)
 {
 	const struct igt_spin_factory opts = {
 		.engine = ring,
-		.flags = IGT_SPIN_POLL_RUN,
+		.flags = IGT_SPIN_POLL_RUN | IGT_SPIN_FAST,
 	};
 	struct timespec tv = {};
 	unsigned long cycles;
@@ -464,7 +464,7 @@ rthog_latency_on_ring(int fd, unsigned int engine, const char *name, unsigned in
 #define MMAP_SZ (64 << 10)
 	const struct igt_spin_factory opts = {
 		.engine = engine,
-		.flags = IGT_SPIN_POLL_RUN,
+		.flags = IGT_SPIN_POLL_RUN | IGT_SPIN_FAST,
 	};
 	struct rt_pkt *results;
 	unsigned int engines[16];
-- 
2.18.0

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [igt-dev] ✓ Fi.CI.BAT: success for series starting with [i-g-t,1/2] lib: Convert spin batch constructor to a factory
  2018-06-25 13:13 ` [Intel-gfx] " Chris Wilson
  (?)
  (?)
@ 2018-06-25 13:46 ` Patchwork
  -1 siblings, 0 replies; 6+ messages in thread
From: Patchwork @ 2018-06-25 13:46 UTC (permalink / raw)
  To: Chris Wilson; +Cc: igt-dev

== Series Details ==

Series: series starting with [i-g-t,1/2] lib: Convert spin batch constructor to a factory
URL   : https://patchwork.freedesktop.org/series/45337/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_4373 -> IGTPW_1501 =

== Summary - SUCCESS ==

  No regressions found.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/45337/revisions/1/mbox/

== Known issues ==

  Here are the changes found in IGTPW_1501 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@gem_ringfill@basic-default-hang:
      fi-pnv-d510:        NOTRUN -> DMESG-WARN (fdo#101600)

    igt@kms_chamelium@dp-crc-fast:
      fi-kbl-7500u:       PASS -> DMESG-FAIL (fdo#103841)

    
    ==== Possible fixes ====

    igt@gem_exec_gttfill@basic:
      fi-byt-n2820:       FAIL (fdo#106744) -> PASS

    igt@gem_exec_suspend@basic-s4-devices:
      fi-kbl-7500u:       DMESG-WARN (fdo#105128) -> PASS

    
  fdo#101600 https://bugs.freedesktop.org/show_bug.cgi?id=101600
  fdo#103841 https://bugs.freedesktop.org/show_bug.cgi?id=103841
  fdo#105128 https://bugs.freedesktop.org/show_bug.cgi?id=105128
  fdo#106744 https://bugs.freedesktop.org/show_bug.cgi?id=106744


== Participating hosts (43 -> 39) ==

  Additional (1): fi-pnv-d510 
  Missing    (5): fi-ctg-p8600 fi-ilk-m540 fi-byt-squawks fi-bsw-cyan fi-hsw-4200u 


== Build changes ==

    * IGT: IGT_4529 -> IGTPW_1501

  CI_DRM_4373: be7193758db79443ad5dc45072a166746819ba7e @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_1501: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1501/
  IGT_4529: 23d50a49413aff619d00ec50fc2e051e9b45baa5 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1501/issues.html

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [igt-dev] ✓ Fi.CI.IGT: success for series starting with [i-g-t,1/2] lib: Convert spin batch constructor to a factory
  2018-06-25 13:13 ` [Intel-gfx] " Chris Wilson
                   ` (2 preceding siblings ...)
  (?)
@ 2018-06-25 18:35 ` Patchwork
  -1 siblings, 0 replies; 6+ messages in thread
From: Patchwork @ 2018-06-25 18:35 UTC (permalink / raw)
  To: Chris Wilson; +Cc: igt-dev

== Series Details ==

Series: series starting with [i-g-t,1/2] lib: Convert spin batch constructor to a factory
URL   : https://patchwork.freedesktop.org/series/45337/
State : success

== Summary ==

= CI Bug Log - changes from IGT_4529_full -> IGTPW_1501_full =

== Summary - WARNING ==

  Minor unknown changes coming with IGTPW_1501_full need to be verified
  manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in IGTPW_1501_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/45337/revisions/1/mbox/

== Possible new issues ==

  Here are the unknown changes that may have been introduced in IGTPW_1501_full:

  === IGT changes ===

    ==== Warnings ====

    igt@gem_exec_schedule@deep-bsd1:
      shard-kbl:          PASS -> SKIP +3

    
== Known issues ==

  Here are the changes found in IGTPW_1501_full that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@drv_selftest@live_gtt:
      shard-glk:          PASS -> FAIL (fdo#105347)

    igt@gem_exec_fence@basic-wait-default:
      shard-snb:          PASS -> INCOMPLETE (fdo#105411)

    igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic:
      shard-hsw:          PASS -> FAIL (fdo#105767)

    igt@kms_flip@2x-flip-vs-expired-vblank:
      shard-hsw:          PASS -> FAIL (fdo#102887)

    igt@kms_flip@2x-plain-flip-fb-recreate-interruptible:
      shard-glk:          PASS -> FAIL (fdo#100368)

    igt@kms_flip_tiling@flip-to-x-tiled:
      shard-glk:          PASS -> FAIL (fdo#104724, fdo#103822) +1

    igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-indfb-draw-blt:
      shard-glk:          PASS -> FAIL (fdo#103167, fdo#104724)

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b:
      shard-kbl:          PASS -> INCOMPLETE (fdo#103665) +1

    igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes:
      shard-apl:          PASS -> FAIL (fdo#103375)

    igt@perf_pmu@rc6-runtime-pm-long:
      shard-apl:          PASS -> FAIL (fdo#105010)
      shard-glk:          PASS -> FAIL (fdo#105010)

    
    ==== Possible fixes ====

    igt@drv_selftest@live_hangcheck:
      shard-apl:          DMESG-FAIL (fdo#106560, fdo#106947) -> PASS

    igt@gem_exec_params@rs-invalid-on-bsd-ring:
      shard-snb:          INCOMPLETE (fdo#105411) -> SKIP

    igt@kms_cursor_crc@cursor-64x64-onscreen:
      shard-glk:          INCOMPLETE (k.org#198133, fdo#103359) -> PASS

    igt@kms_flip@2x-plain-flip-fb-recreate:
      shard-glk:          FAIL (fdo#100368) -> PASS

    igt@kms_flip@dpms-vs-vblank-race:
      shard-glk:          FAIL (fdo#103060) -> PASS

    igt@kms_flip@flip-vs-expired-vblank-interruptible:
      shard-glk:          FAIL (fdo#105363) -> PASS

    igt@kms_flip_tiling@flip-to-y-tiled:
      shard-glk:          FAIL (fdo#104724, fdo#103822) -> PASS

    igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-render:
      shard-snb:          FAIL (fdo#103167, fdo#104724) -> PASS

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-c:
      shard-glk:          DMESG-WARN (fdo#106247) -> PASS

    igt@kms_rotation_crc@primary-rotation-180:
      shard-hsw:          FAIL (fdo#104724, fdo#103925) -> PASS

    igt@kms_rotation_crc@sprite-rotation-180:
      shard-snb:          FAIL (fdo#104724, fdo#103925) -> PASS

    
  fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
  fdo#102887 https://bugs.freedesktop.org/show_bug.cgi?id=102887
  fdo#103060 https://bugs.freedesktop.org/show_bug.cgi?id=103060
  fdo#103167 https://bugs.freedesktop.org/show_bug.cgi?id=103167
  fdo#103359 https://bugs.freedesktop.org/show_bug.cgi?id=103359
  fdo#103375 https://bugs.freedesktop.org/show_bug.cgi?id=103375
  fdo#103665 https://bugs.freedesktop.org/show_bug.cgi?id=103665
  fdo#103822 https://bugs.freedesktop.org/show_bug.cgi?id=103822
  fdo#103925 https://bugs.freedesktop.org/show_bug.cgi?id=103925
  fdo#104724 https://bugs.freedesktop.org/show_bug.cgi?id=104724
  fdo#105010 https://bugs.freedesktop.org/show_bug.cgi?id=105010
  fdo#105347 https://bugs.freedesktop.org/show_bug.cgi?id=105347
  fdo#105363 https://bugs.freedesktop.org/show_bug.cgi?id=105363
  fdo#105411 https://bugs.freedesktop.org/show_bug.cgi?id=105411
  fdo#105767 https://bugs.freedesktop.org/show_bug.cgi?id=105767
  fdo#106247 https://bugs.freedesktop.org/show_bug.cgi?id=106247
  fdo#106560 https://bugs.freedesktop.org/show_bug.cgi?id=106560
  fdo#106947 https://bugs.freedesktop.org/show_bug.cgi?id=106947
  k.org#198133 https://bugzilla.kernel.org/show_bug.cgi?id=198133


== Participating hosts (5 -> 5) ==

  No changes in participating hosts


== Build changes ==

    * IGT: IGT_4529 -> IGTPW_1501
    * Linux: CI_DRM_4371 -> CI_DRM_4373

  CI_DRM_4371: 9094e9d97a6e13db8c1a444d08c0988adad9a002 @ git://anongit.freedesktop.org/gfx-ci/linux
  CI_DRM_4373: be7193758db79443ad5dc45072a166746819ba7e @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_1501: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1501/
  IGT_4529: 23d50a49413aff619d00ec50fc2e051e9b45baa5 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1501/shards.html

^ permalink raw reply	[flat|nested] 6+ messages in thread

