All of lore.kernel.org
 help / color / mirror / Atom feed
From: Antonio Argenziano <antonio.argenziano@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [RFC i-g-t] igt/gem_ringfill: Adds full ringbuffer preemption test
Date: Tue, 15 Aug 2017 14:44:21 -0700	[thread overview]
Message-ID: <20170815214421.2136-1-antonio.argenziano@intel.com> (raw)

Sending as RFC to get feedback on what would be the correct behaviour of
the driver and, therefore, if the test is valid.

We do a wait while holding the mutex if we are adding a request and figure
out that there is no more space in the ring buffer.
Is that considered a bug? In the current driver all goes well
because even if you are waiting on a hanging request eventually
hangcheck will come in and kill the request and since the new request
would have waited there anyway it is not a big deal. But, when
preemption is going to be added it will cause a high priority
(preemptive) request to wait for a long time before actually preempting.

The testcase added here stimulates this scenario where a high priority
request is sent while another process keeps submitting requests and
filling its ringbuffer.

Cc: Chris Wilson <chris@chris-wilson.co.uk>

Signed-off-by: Antonio Argenziano <antonio.argenziano@intel.com>
---
 tests/gem_ringfill.c | 140 +++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 129 insertions(+), 11 deletions(-)

diff --git a/tests/gem_ringfill.c b/tests/gem_ringfill.c
index b52996a4..6ca8352e 100644
--- a/tests/gem_ringfill.c
+++ b/tests/gem_ringfill.c
@@ -47,8 +47,31 @@
 #define HIBERNATE 0x40
 #define NEWFD 0x80
 
+#define MAX_PRIO 1023
+#define LOCAL_CONTEXT_PARAM_PRIORITY 0x7
+
+IGT_TEST_DESCRIPTION("Check that we can manage the ringbuffer when full");
+
 static unsigned int ring_size;
 
+static int __ctx_set_priority(int fd, uint32_t ctx, int prio)
+{
+	struct local_i915_gem_context_param param;
+
+	memset(&param, 0, sizeof(param));
+	param.context = ctx;
+	param.size = 0;
+	param.param = LOCAL_CONTEXT_PARAM_PRIORITY;
+	param.value = prio;
+
+	return __gem_context_set_param(fd, &param);
+}
+
+/* Hard-assert variant: the priority set-param must succeed. */
+static void ctx_set_priority(int fd, uint32_t ctx, int prio)
+{
+	int err = __ctx_set_priority(fd, ctx, prio);
+
+	igt_assert_eq(err, 0);
+}
+
 static void check_bo(int fd, uint32_t handle)
 {
 	uint32_t *map;
@@ -324,6 +347,88 @@ static unsigned int measure_ring_size(int fd)
 	return count;
 }
 
+/* Submit a trivial one-dword batch on @engine in @ctx; returns its handle. */
+static void exec_noop(int fd, uint32_t ctx, uint32_t engine, uint32_t *handle)
+{
+	const uint32_t bbe = MI_BATCH_BUFFER_END;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 obj;
+
+	gem_require_ring(fd, engine);
+
+	/* The batch contains nothing but MI_BATCH_BUFFER_END. */
+	memset(&obj, 0, sizeof(obj));
+	obj.handle = gem_create(fd, 4096);
+	*handle = obj.handle;
+	gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = to_user_pointer(&obj);
+	execbuf.buffer_count = 1;
+	execbuf.flags = engine;
+	execbuf.rsvd1 = ctx;
+
+	gem_execbuf(fd, &execbuf);
+}
+
+/*
+ * Wait up to 1s for @handle to retire; on timeout, force a GPU reset so
+ * subsequent tests start from a clean state, then fail the test.
+ */
+static void wait_batch(int fd, uint32_t handle)
+{
+	int64_t timeout = 1ull * NSEC_PER_SEC; /* 1s */
+
+	if (gem_wait(fd, handle, &timeout) != 0) {
+		/* Force reset and fail the test */
+		igt_force_gpu_reset(fd);
+		igt_assert_f(0, "[0x%x] batch did not complete!\n", handle);
+	}
+}
+
+/*
+ * This test checks that it is possible for a high priority request to trigger
+ * preemption if another context has filled its ringbuffer.
+ * The aim of the test is to make sure that high priority requests cannot be
+ * stalled by low priority tasks.
+ */
+static void preempt_while_ringbuffer_full(int fd, uint32_t engine)
+{
+	uint32_t hp_ctx, lp_ctx;
+	uint32_t hp_batch;
+	igt_spin_t *lp_batch;
+
+	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_relocation_entry reloc[1024];
+	struct drm_i915_gem_execbuffer2 execbuf;
+	const unsigned timeout = 10;
+
+	lp_ctx = gem_context_create(fd);
+	ctx_set_priority(fd, lp_ctx, -MAX_PRIO);
+
+	hp_ctx = gem_context_create(fd);
+	ctx_set_priority(fd, hp_ctx, MAX_PRIO);
+
+	igt_require(setup_execbuf(fd, &execbuf, obj, reloc, engine) == 0);
+	execbuf.rsvd1 = lp_ctx;
+
+	/* Stall execution and fill ring */
+	lp_batch = igt_spin_batch_new(fd, lp_ctx, engine, 0);
+	igt_fork(child_no, 1) {
+		fill_ring(fd, &execbuf, 0, timeout);
+	}
+
+	/* We don't know when the ring will be full so keep sending in a loop */
+	igt_until_timeout(1) {
+		exec_noop(fd, hp_ctx, engine, &hp_batch);
+
+		/* Preemption expected, if HP batch doesn't complete test fails */
+		wait_batch(fd, hp_batch);
+		igt_assert(gem_bo_busy(fd, lp_batch->handle));
+		gem_close(fd, hp_batch);
+
+		usleep(100);
+	}
+
+	igt_terminate_spin_batches();
+	igt_waitchildren();
+
+	/* Release the spinner and both contexts; they were leaked before. */
+	igt_spin_batch_free(fd, lp_batch);
+	gem_context_destroy(fd, lp_ctx);
+	gem_context_destroy(fd, hp_ctx);
+}
+
 igt_main
 {
 	const struct {
@@ -363,21 +468,34 @@ igt_main
 		igt_require(ring_size);
 	}
 
-	for (m = modes; m->suffix; m++) {
-		const struct intel_execution_engine *e;
-
-		for (e = intel_execution_engines; e->name; e++) {
-			igt_subtest_f("%s%s%s",
-				      m->basic && !e->exec_id ? "basic-" : "",
-				      e->name,
-				      m->suffix) {
-				igt_skip_on(m->flags & NEWFD && master);
-				run_test(fd, e->exec_id | e->flags,
-					 m->flags, m->timeout);
+	igt_subtest_group {
+		for (m = modes; m->suffix; m++) {
+			const struct intel_execution_engine *e;
+
+			for (e = intel_execution_engines; e->name; e++) {
+				igt_subtest_f("%s%s%s",
+						m->basic && !e->exec_id ? "basic-" : "",
+						e->name,
+						m->suffix) {
+					igt_skip_on(m->flags & NEWFD && master);
+					run_test(fd, e->exec_id | e->flags,
+							m->flags, m->timeout);
+				}
 			}
 		}
 	}
 
+	igt_subtest_group {
+		/* One preempt-while-full subtest per engine, guarded by the
+		 * hang detector so a stuck preemption is reported as a hang.
+		 */
+		for (const struct intel_execution_engine *e =
+		     intel_execution_engines; e->name; e++) {
+			igt_subtest_f("preempt-while-full-%s", e->name) {
+				igt_fork_hang_detector(fd);
+				preempt_while_ringbuffer_full(fd,
+							      e->exec_id |
+							      e->flags);
+				igt_stop_hang_detector();
+			}
+		}
+	}
+
 	igt_fixture
 		close(fd);
 }
-- 
2.14.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

             reply	other threads:[~2017-08-15 21:44 UTC|newest]

Thread overview: 7+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2017-08-15 21:44 Antonio Argenziano [this message]
2017-08-15 21:47 ` ✗ Fi.CI.BAT: failure for igt/gem_ringfill: Adds full ringbuffer preemption test Patchwork
2017-08-15 22:35 ` [RFC i-g-t] " Chris Wilson
2017-08-16  7:39   ` Chris Wilson
2017-08-17 15:14   ` Antonio Argenziano
2017-08-17 15:38     ` Chris Wilson
2017-08-15 23:32 ` Chris Wilson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20170815214421.2136-1-antonio.argenziano@intel.com \
    --to=antonio.argenziano@intel.com \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.