* [Intel-gfx] [PATCH i-g-t 0/3] IGT fixes for priority management + capture with GuC submission
@ 2021-08-04  1:23 ` Matthew Brost
  0 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2021-08-04  1:23 UTC (permalink / raw)
  To: igt-dev; +Cc: intel-gfx

Signed-off-by: Matthew Brost <matthew.brost@intel.com>

Matthew Brost (2):
  i915/gem_exec_schedule: Make gem_exec_schedule understand static
    priority mapping
  i915/gem_ctx_shared: Make gem_ctx_shared understand static priority
    mapping

John Harrison (1):
  i915/gem_exec_capture: Update to support GuC based resets

 lib/i915/gem_scheduler.c       | 14 ++++++++
 lib/i915/gem_scheduler.h       |  1 +
 lib/i915/i915_drm_local.h      | 10 ++++++
 lib/igt_gt.c                   | 44 ++++++++++++++++--------
 lib/igt_gt.h                   |  1 +
 tests/i915/gem_ctx_shared.c    | 12 ++++---
 tests/i915/gem_exec_capture.c  | 52 +++++++++++++++++++++++-----
 tests/i915/gem_exec_schedule.c | 62 +++++++++++++++++++++-------------
 8 files changed, 144 insertions(+), 52 deletions(-)

-- 
2.28.0


* [Intel-gfx] [PATCH i-g-t 1/3] i915/gem_exec_schedule: Make gem_exec_schedule understand static priority mapping
  2021-08-04  1:23 ` [igt-dev] " Matthew Brost
@ 2021-08-04  1:23   ` Matthew Brost
  -1 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2021-08-04  1:23 UTC (permalink / raw)
  To: igt-dev; +Cc: intel-gfx

The i915 currently exposes 2k visible priority levels, all of which are
unique. This is changing to statically map these 2k levels into 3
buckets:

low: < 0
mid: 0
high: > 0

Update gem_exec_schedule to understand this. This entails updating the
promotion test to use 3 levels that map into different buckets, and
adding a small delay after releasing a cork before completing the
spinners.

Also skip any tests that rely on having more than 3 priority levels.

v2: Add a delay between releasing the cork and completing the spinners
in the promotion test; add a local define for the static priority
mapping capability
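
For reference only (not part of the patch), a minimal sketch of how a
user-space priority collapses into the three buckets above, and how a
test that genuinely needs more than three distinct levels can gate on
the new capability helper:

/* Sketch: bucketing implied by I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP. */
static int static_priority_bucket(int prio)
{
	if (prio < 0)
		return -1;	/* low bucket */
	if (prio > 0)
		return 1;	/* high bucket */
	return 0;		/* normal bucket */
}

/* In a subtest fixture, skip tests that need finer-grained priorities: */
igt_require(!gem_scheduler_has_static_priority(fd));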

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 lib/i915/gem_scheduler.c       | 14 ++++++++
 lib/i915/gem_scheduler.h       |  1 +
 lib/i915/i915_drm_local.h      | 10 ++++++
 tests/i915/gem_exec_schedule.c | 62 +++++++++++++++++++++-------------
 4 files changed, 63 insertions(+), 24 deletions(-)

diff --git a/lib/i915/gem_scheduler.c b/lib/i915/gem_scheduler.c
index cdddf42ad..d006b8676 100644
--- a/lib/i915/gem_scheduler.c
+++ b/lib/i915/gem_scheduler.c
@@ -28,6 +28,7 @@
 #include "igt_core.h"
 #include "ioctl_wrappers.h"
 
+#include "i915/i915_drm_local.h"
 #include "i915/gem_scheduler.h"
 #include "i915/gem_submission.h"
 
@@ -90,6 +91,19 @@ bool gem_scheduler_has_ctx_priority(int fd)
 		I915_SCHEDULER_CAP_PRIORITY;
 }
 
+/**
+ * gem_scheduler_has_static_priority:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether the driver statically maps priorities
+ * assigned from user space into 3 buckets.
+ */
+bool gem_scheduler_has_static_priority(int fd)
+{
+	return gem_scheduler_capability(fd) &
+		I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
+}
+
 /**
  * gem_scheduler_has_preemption:
  * @fd: open i915 drm file descriptor
diff --git a/lib/i915/gem_scheduler.h b/lib/i915/gem_scheduler.h
index d43e84bd2..b00804f70 100644
--- a/lib/i915/gem_scheduler.h
+++ b/lib/i915/gem_scheduler.h
@@ -29,6 +29,7 @@
 unsigned gem_scheduler_capability(int fd);
 bool gem_scheduler_enabled(int fd);
 bool gem_scheduler_has_ctx_priority(int fd);
+bool gem_scheduler_has_static_priority(int fd);
 bool gem_scheduler_has_preemption(int fd);
 bool gem_scheduler_has_semaphores(int fd);
 bool gem_scheduler_has_engine_busy_stats(int fd);
diff --git a/lib/i915/i915_drm_local.h b/lib/i915/i915_drm_local.h
index dd646aedf..a1527ff21 100644
--- a/lib/i915/i915_drm_local.h
+++ b/lib/i915/i915_drm_local.h
@@ -20,6 +20,16 @@ extern "C" {
  * clean these up when kernel uapi headers are sync'd.
  */
 
+/*
+ * Indicates the 2k user priority levels are statically mapped into 3 buckets as
+ * follows:
+ *
+ * -1k to -1	Low priority
+ * 0		Normal priority
+ * 1 to 1k	Highest priority
+ */
+#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index e5fb45982..bb9fb6c14 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -199,7 +199,8 @@ create_highest_priority(int fd, const intel_ctx_cfg_t *cfg)
 
 static void unplug_show_queue(int fd, struct igt_cork *c,
 			      const intel_ctx_cfg_t *cfg,
-			      unsigned int engine)
+			      unsigned int engine,
+			      unsigned usec_delay)
 {
 	igt_spin_t *spin[MAX_ELSP_QLEN];
 	int max = MAX_ELSP_QLEN;
@@ -216,6 +217,7 @@ static void unplug_show_queue(int fd, struct igt_cork *c,
 
 	igt_cork_unplug(c); /* batches will now be queued on the engine */
 	igt_debugfs_dump(fd, "i915_engine_info");
+	usleep(usec_delay);
 
 	for (int n = 0; n < max; n++)
 		igt_spin_free(fd, spin[n]);
@@ -237,7 +239,7 @@ static void fifo(int fd, const intel_ctx_t *ctx, unsigned ring)
 	store_dword_fenced(fd, ctx, ring, scratch, 0, 1, fence, 0);
 	store_dword_fenced(fd, ctx, ring, scratch, 0, 2, fence, 0);
 
-	unplug_show_queue(fd, &cork, &ctx->cfg, ring);
+	unplug_show_queue(fd, &cork, &ctx->cfg, ring, 0);
 	close(fence);
 
 	result =  __sync_read_u32(fd, scratch, 0);
@@ -298,7 +300,7 @@ static void implicit_rw(int i915, const intel_ctx_t *ctx, unsigned int ring,
 				   ring, scratch, 0, ring,
 				   fence, I915_GEM_DOMAIN_RENDER);
 
-	unplug_show_queue(i915, &cork, &ctx->cfg, ring);
+	unplug_show_queue(i915, &cork, &ctx->cfg, ring, 0);
 	close(fence);
 
 	result =  __sync_read_u32(i915, scratch, 0);
@@ -355,7 +357,7 @@ static void independent(int fd, const intel_ctx_t *ctx, unsigned int engine,
 	/* Same priority, but different timeline (as different engine) */
 	batch = __store_dword(fd, ctx, engine, scratch, 0, engine, 0, fence, 0);
 
-	unplug_show_queue(fd, &cork, &ctx->cfg, engine);
+	unplug_show_queue(fd, &cork, &ctx->cfg, engine, 0);
 	close(fence);
 
 	gem_sync(fd, batch);
@@ -1326,7 +1328,7 @@ static void reorder(int fd, const intel_ctx_cfg_t *cfg,
 	store_dword_fenced(fd, ctx[LO], ring, scratch, 0, ctx[LO]->id, fence, 0);
 	store_dword_fenced(fd, ctx[HI], ring, scratch, 0, ctx[HI]->id, fence, 0);
 
-	unplug_show_queue(fd, &cork, cfg, ring);
+	unplug_show_queue(fd, &cork, cfg, ring, 0);
 	close(fence);
 
 	result =  __sync_read_u32(fd, scratch, 0);
@@ -1353,10 +1355,10 @@ static void promotion(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
 	gem_context_set_priority(fd, ctx[LO]->id, MIN_PRIO);
 
 	ctx[HI] = intel_ctx_create(fd, cfg);
-	gem_context_set_priority(fd, ctx[HI]->id, 0);
+	gem_context_set_priority(fd, ctx[HI]->id, MAX_PRIO);
 
 	ctx[NOISE] = intel_ctx_create(fd, cfg);
-	gem_context_set_priority(fd, ctx[NOISE]->id, MIN_PRIO/2);
+	gem_context_set_priority(fd, ctx[NOISE]->id, 0);
 
 	result = gem_create(fd, 4096);
 	dep = gem_create(fd, 4096);
@@ -1377,7 +1379,7 @@ static void promotion(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
 
 	store_dword(fd, ctx[HI], ring, result, 0, ctx[HI]->id, 0);
 
-	unplug_show_queue(fd, &cork, cfg, ring);
+	unplug_show_queue(fd, &cork, cfg, ring, 250000);
 	close(fence);
 
 	dep_read = __sync_read_u32(fd, dep, 0);
@@ -1893,7 +1895,7 @@ static void deep(int fd, const intel_ctx_cfg_t *cfg,
 	igt_info("Second deptree: %d requests [%.3fs]\n",
 		 n * XS, 1e-9*igt_nsec_elapsed(&tv));
 
-	unplug_show_queue(fd, &cork, cfg, ring);
+	unplug_show_queue(fd, &cork, cfg, ring, 0);
 	gem_close(fd, plug);
 	igt_require(expected); /* too slow */
 
@@ -1962,7 +1964,7 @@ static void wide(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
 	igt_info("Submitted %d requests over %d contexts in %.1fms\n",
 		 count, MAX_CONTEXTS, igt_nsec_elapsed(&tv) * 1e-6);
 
-	unplug_show_queue(fd, &cork, cfg, ring);
+	unplug_show_queue(fd, &cork, cfg, ring, 0);
 	close(fence);
 
 	__sync_read_u32_count(fd, result, result_read, sizeof(result_read));
@@ -2067,7 +2069,7 @@ static void reorder_wide(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
 		intel_ctx_destroy(fd, tmp_ctx);
 	}
 
-	unplug_show_queue(fd, &cork, cfg, ring);
+	unplug_show_queue(fd, &cork, cfg, ring, 0);
 	close(fence);
 
 	__sync_read_u32_count(fd, result, result_read, sizeof(result_read));
@@ -2963,19 +2965,25 @@ igt_main
 			test_each_engine_store("preempt-other-chain", fd, ctx, e)
 				preempt_other(fd, &ctx->cfg, e->flags, CHAIN);
 
-			test_each_engine_store("preempt-queue", fd, ctx, e)
-				preempt_queue(fd, &ctx->cfg, e->flags, 0);
+			test_each_engine_store("preempt-engines", fd, ctx, e)
+				preempt_engines(fd, e, 0);
 
-			test_each_engine_store("preempt-queue-chain", fd, ctx, e)
-				preempt_queue(fd, &ctx->cfg, e->flags, CHAIN);
-			test_each_engine_store("preempt-queue-contexts", fd, ctx, e)
-				preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS);
+			igt_subtest_group {
+				igt_fixture {
+					igt_require(!gem_scheduler_has_static_priority(fd));
+				}
 
-			test_each_engine_store("preempt-queue-contexts-chain", fd, ctx, e)
-				preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS | CHAIN);
+				test_each_engine_store("preempt-queue", fd, ctx, e)
+					preempt_queue(fd, &ctx->cfg, e->flags, 0);
 
-			test_each_engine_store("preempt-engines", fd, ctx, e)
-				preempt_engines(fd, e, 0);
+				test_each_engine_store("preempt-queue-chain", fd, ctx, e)
+					preempt_queue(fd, &ctx->cfg, e->flags, CHAIN);
+				test_each_engine_store("preempt-queue-contexts", fd, ctx, e)
+					preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS);
+
+				test_each_engine_store("preempt-queue-contexts-chain", fd, ctx, e)
+					preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS | CHAIN);
+			}
 
 			igt_subtest_group {
 				igt_hang_t hang;
@@ -3017,11 +3025,17 @@ igt_main
 		test_each_engine_store("wide", fd, ctx, e)
 			wide(fd, &ctx->cfg, e->flags);
 
-		test_each_engine_store("reorder-wide", fd, ctx, e)
-			reorder_wide(fd, &ctx->cfg, e->flags);
-
 		test_each_engine_store("smoketest", fd, ctx, e)
 			smoketest(fd, &ctx->cfg, e->flags, 5);
+
+		igt_subtest_group {
+			igt_fixture {
+				igt_require(!gem_scheduler_has_static_priority(fd));
+			}
+
+			test_each_engine_store("reorder-wide", fd, ctx, e)
+				reorder_wide(fd, &ctx->cfg, e->flags);
+		}
 	}
 
 	igt_subtest_group {
-- 
2.28.0


* [Intel-gfx] [PATCH i-g-t 2/3] i915/gem_ctx_shared: Make gem_ctx_shared understand static priority mapping
  2021-08-04  1:23 ` [igt-dev] " Matthew Brost
@ 2021-08-04  1:23   ` Matthew Brost
  -1 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2021-08-04  1:23 UTC (permalink / raw)
  To: igt-dev; +Cc: intel-gfx

The i915 currently exposes 2k visible priority levels, all of which are
unique. This is changing to statically map these 2k levels into 3
buckets:

low: < 0
mid: 0
high: > 0

Update gem_ctx_shared to understand this. This entails updating the
promotion test to use 3 levels that map into different buckets, and
adding a small delay after releasing a cork before completing the
spinners.

v2: Add a delay between releasing the cork and completing the spinners
in the promotion test
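
For reference only (not part of the patch), a sketch of how the
promotion test's three contexts now land in distinct static buckets
(MIN_PRIO/MAX_PRIO are the existing IGT context priority limits):

gem_context_set_priority(i915, ctx[LO]->id, MIN_PRIO);	/* < 0  -> low    */
gem_context_set_priority(i915, ctx[NOISE]->id, 0);	/* == 0 -> normal */
gem_context_set_priority(i915, ctx[HI]->id, MAX_PRIO);	/* > 0  -> high   */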

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 tests/i915/gem_ctx_shared.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/tests/i915/gem_ctx_shared.c b/tests/i915/gem_ctx_shared.c
index 4441e6eb7..b3a156ca4 100644
--- a/tests/i915/gem_ctx_shared.c
+++ b/tests/i915/gem_ctx_shared.c
@@ -571,7 +571,8 @@ create_highest_priority(int i915, const intel_ctx_cfg_t *cfg)
 }
 
 static void unplug_show_queue(int i915, struct igt_cork *c,
-			      const intel_ctx_cfg_t *cfg, unsigned int engine)
+			      const intel_ctx_cfg_t *cfg, unsigned int engine,
+			      unsigned int usec_delay)
 {
 	igt_spin_t *spin[MAX_ELSP_QLEN];
 
@@ -583,6 +584,7 @@ static void unplug_show_queue(int i915, struct igt_cork *c,
 
 	igt_cork_unplug(c); /* batches will now be queued on the engine */
 	igt_debugfs_dump(i915, "i915_engine_info");
+	usleep(usec_delay);
 
 	for (int n = 0; n < ARRAY_SIZE(spin); n++)
 		igt_spin_free(i915, spin[n]);
@@ -734,7 +736,7 @@ static void reorder(int i915, const intel_ctx_cfg_t *cfg,
 	store_dword(i915, ctx[LO], ring, scratch, 0, ctx[LO]->id, plug, 0);
 	store_dword(i915, ctx[HI], ring, scratch, 0, ctx[HI]->id, plug, 0);
 
-	unplug_show_queue(i915, &cork, &q_cfg, ring);
+	unplug_show_queue(i915, &cork, &q_cfg, ring, 0);
 	gem_close(i915, plug);
 
 	ptr = gem_mmap__device_coherent(i915, scratch, 0, 4096, PROT_READ);
@@ -771,10 +773,10 @@ static void promotion(int i915, const intel_ctx_cfg_t *cfg, unsigned ring)
 	gem_context_set_priority(i915, ctx[LO]->id, MIN_PRIO);
 
 	ctx[HI] = intel_ctx_create(i915, &q_cfg);
-	gem_context_set_priority(i915, ctx[HI]->id, 0);
+	gem_context_set_priority(i915, ctx[HI]->id, MAX_PRIO);
 
 	ctx[NOISE] = intel_ctx_create(i915, &q_cfg);
-	gem_context_set_priority(i915, ctx[NOISE]->id, MIN_PRIO/2);
+	gem_context_set_priority(i915, ctx[NOISE]->id, 0);
 
 	result = gem_create(i915, 4096);
 	dep = gem_create(i915, 4096);
@@ -795,7 +797,7 @@ static void promotion(int i915, const intel_ctx_cfg_t *cfg, unsigned ring)
 
 	store_dword(i915, ctx[HI], ring, result, 0, ctx[HI]->id, 0, 0);
 
-	unplug_show_queue(i915, &cork, &q_cfg, ring);
+	unplug_show_queue(i915, &cork, &q_cfg, ring, 250000);
 	gem_close(i915, plug);
 
 	ptr = gem_mmap__device_coherent(i915, dep, 0, 4096, PROT_READ);
-- 
2.28.0


* [Intel-gfx] [PATCH i-g-t 3/3] i915/gem_exec_capture: Update to support GuC based resets
  2021-08-04  1:23 ` [igt-dev] " Matthew Brost
@ 2021-08-04  1:23   ` Matthew Brost
  -1 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2021-08-04  1:23 UTC (permalink / raw)
  To: igt-dev; +Cc: intel-gfx

From: "Signed-off-by: John Harrison" <John.C.Harrison@Intel.com>

When GuC submission is enabled, GuC itself manages hang detection and
recovery. Therefore, any test that relies on being able to trigger an
engine reset in the driver will fail. Full GT resets can still be
triggered by the driver; however, in that situation the specific
context that caused the hang cannot be identified, as the driver has
no information about what is actually running on the hardware at any
given time.

So update the test to trigger a reset via the hangcheck mechanism by
submitting a hanging batch and waiting. That way it is guaranteed to
exercise the correct reset code paths for the current platform,
whether GuC submission is enabled or not.
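
In outline (a sketch of the approach in the diff below, not a drop-in
snippet), the test now opts in to engine resets and bounds the wait for
whichever reset path kills the hanging batch by polling the execbuf
output fence:

igt_allow_hang(fd, 0, HANG_ALLOW_CAPTURE | HANG_WANT_ENGINE_RESET);

/* After submitting the hanging batch with I915_EXEC_FENCE_OUT: */
gettimeofday(&before, NULL);
while (poll(&(struct pollfd){ fence_out, POLLIN }, 1, 0) == 0) {
	gettimeofday(&after, NULL);
	timersub(&after, &before, &delta);
	igt_assert(delta.tv_sec < MAX_RESET_TIME);
	sched_yield();
}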

Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 lib/igt_gt.c                  | 44 +++++++++++++++++++----------
 lib/igt_gt.h                  |  1 +
 tests/i915/gem_exec_capture.c | 52 +++++++++++++++++++++++++++++------
 3 files changed, 74 insertions(+), 23 deletions(-)

diff --git a/lib/igt_gt.c b/lib/igt_gt.c
index c049477db..ec548d501 100644
--- a/lib/igt_gt.c
+++ b/lib/igt_gt.c
@@ -56,23 +56,28 @@
  * engines.
  */
 
+static int reset_query_once = -1;
+
 static bool has_gpu_reset(int fd)
 {
-	static int once = -1;
-	if (once < 0) {
-		struct drm_i915_getparam gp;
-		int val = 0;
-
-		memset(&gp, 0, sizeof(gp));
-		gp.param = 35; /* HAS_GPU_RESET */
-		gp.value = &val;
-
-		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
-			once = intel_gen(intel_get_drm_devid(fd)) >= 5;
-		else
-			once = val > 0;
+	if (reset_query_once < 0) {
+		reset_query_once = gem_gpu_reset_type(fd);
+
+		/* Very old kernels did not support the query */
+		if (reset_query_once == -1)
+			reset_query_once =
+			      (intel_gen(intel_get_drm_devid(fd)) >= 5) ? 1 : 0;
 	}
-	return once;
+
+	return reset_query_once > 0;
+}
+
+static bool has_engine_reset(int fd)
+{
+	if (reset_query_once < 0)
+		has_gpu_reset(fd);
+
+	return reset_query_once > 1;
 }
 
 static void eat_error_state(int dev)
@@ -176,7 +181,11 @@ igt_hang_t igt_allow_hang(int fd, unsigned ctx, unsigned flags)
 		igt_skip("hang injection disabled by user [IGT_HANG=0]\n");
 	gem_context_require_bannable(fd);
 
-	allow_reset = 1;
+	if (flags & HANG_WANT_ENGINE_RESET)
+		allow_reset = 2;
+	else
+		allow_reset = 1;
+
 	if ((flags & HANG_ALLOW_CAPTURE) == 0) {
 		param.param = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE;
 		param.value = 1;
@@ -187,11 +196,16 @@ igt_hang_t igt_allow_hang(int fd, unsigned ctx, unsigned flags)
 		__gem_context_set_param(fd, &param);
 		allow_reset = INT_MAX; /* any reset method */
 	}
+
 	igt_require(igt_params_set(fd, "reset", "%d", allow_reset));
+	reset_query_once = -1;  /* Re-query after changing param */
 
 	if (!igt_check_boolean_env_var("IGT_HANG_WITHOUT_RESET", false))
 		igt_require(has_gpu_reset(fd));
 
+	if (flags & HANG_WANT_ENGINE_RESET)
+		igt_require(has_engine_reset(fd));
+
 	ban = context_get_ban(fd, ctx);
 	if ((flags & HANG_ALLOW_BAN) == 0)
 		context_set_ban(fd, ctx, 0);
diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index d87fae2d3..d806c4b80 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -48,6 +48,7 @@ void igt_disallow_hang(int fd, igt_hang_t arg);
 igt_hang_t igt_hang_ctx(int fd, uint32_t ctx, int ring, unsigned flags);
 #define HANG_ALLOW_BAN 1
 #define HANG_ALLOW_CAPTURE 2
+#define HANG_WANT_ENGINE_RESET 4
 
 igt_hang_t igt_hang_ring(int fd, int ring);
 void igt_post_hang_ring(int fd, igt_hang_t arg);
diff --git a/tests/i915/gem_exec_capture.c b/tests/i915/gem_exec_capture.c
index f59cb09da..6ae4208ce 100644
--- a/tests/i915/gem_exec_capture.c
+++ b/tests/i915/gem_exec_capture.c
@@ -23,6 +23,7 @@
 
 #include <sys/poll.h>
 #include <zlib.h>
+#include <sched.h>
 
 #include "i915/gem.h"
 #include "i915/gem_create.h"
@@ -31,8 +32,16 @@
 #include "igt_rand.h"
 #include "igt_sysfs.h"
 
+#define MAX_RESET_TIME	120
+
 IGT_TEST_DESCRIPTION("Check that we capture the user specified objects on a hang");
 
+static void configure_engine(int fd, const char *name)
+{
+	gem_engine_property_printf(fd, name, "preempt_timeout_ms", "%d", 250);
+	gem_engine_property_printf(fd, name, "heartbeat_interval_ms", "%d", 500);
+}
+
 static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
 {
 	char *error, *str;
@@ -61,8 +70,13 @@ static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
 	igt_assert(found);
 }
 
+static bool fence_busy(int fence)
+{
+	return poll(&(struct pollfd){fence, POLLIN}, 1, 0) == 0;
+}
+
 static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
-		       unsigned ring, uint32_t target)
+		       unsigned ring, uint32_t target, const char *name)
 {
 	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
 	struct drm_i915_gem_exec_object2 obj[4];
@@ -74,6 +88,10 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
 	struct drm_i915_gem_execbuffer2 execbuf;
 	uint32_t *batch, *seqno;
 	int i;
+	int fence_out;
+	struct timeval before, after, delta;
+
+	configure_engine(fd, name);
 
 	memset(obj, 0, sizeof(obj));
 	obj[SCRATCH].handle = gem_create(fd, 4096);
@@ -149,18 +167,34 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
 	execbuf.flags = ring;
 	if (gen > 3 && gen < 6)
 		execbuf.flags |= I915_EXEC_SECURE;
+	execbuf.flags |= I915_EXEC_FENCE_OUT;
+	execbuf.rsvd2 = ~0UL;
 	execbuf.rsvd1 = ctx->id;
 
 	igt_assert(!READ_ONCE(*seqno));
-	gem_execbuf(fd, &execbuf);
+	gem_execbuf_wr(fd, &execbuf);
+
+	fence_out = execbuf.rsvd2 >> 32;
+	igt_assert(fence_out >= 0);
 
 	/* Wait for the request to start */
 	while (READ_ONCE(*seqno) != 0xc0ffee)
 		igt_assert(gem_bo_busy(fd, obj[SCRATCH].handle));
 	munmap(seqno, 4096);
 
+	/* Wait for a reset to occur */
+	gettimeofday(&before, NULL);
+	while (fence_busy(fence_out)) {
+		gettimeofday(&after, NULL);
+		timersub(&after, &before, &delta);
+		igt_assert(delta.tv_sec < MAX_RESET_TIME);
+		sched_yield();
+	}
+	gettimeofday(&after, NULL);
+	timersub(&after, &before, &delta);
+	igt_info("Target died after %ld.%06lds\n", delta.tv_sec, delta.tv_usec);
+
 	/* Check that only the buffer we marked is reported in the error */
-	igt_force_gpu_reset(fd);
 	check_error_state(dir, &obj[CAPTURE]);
 
 	gem_sync(fd, obj[BATCH].handle);
@@ -170,12 +204,13 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
 	gem_close(fd, obj[SCRATCH].handle);
 }
 
-static void capture(int fd, int dir, const intel_ctx_t *ctx, unsigned ring)
+static void capture(int fd, int dir, const intel_ctx_t *ctx,
+		    const struct intel_execution_engine2 *e)
 {
 	uint32_t handle;
 
 	handle = gem_create(fd, 4096);
-	__capture1(fd, dir, ctx, ring, handle);
+	__capture1(fd, dir, ctx, e->flags, handle, e->name);
 	gem_close(fd, handle);
 }
 
@@ -577,7 +612,7 @@ static void userptr(int fd, int dir)
 	igt_assert(posix_memalign(&ptr, 4096, 4096) == 0);
 	igt_require(__gem_userptr(fd, ptr, 4096, 0, 0, &handle) == 0);
 
-	__capture1(fd, dir, intel_ctx_0(fd), 0, handle);
+	__capture1(fd, dir, intel_ctx_0(fd), 0, handle, "bcs0");
 
 	gem_close(fd, handle);
 	free(ptr);
@@ -626,7 +661,8 @@ igt_main
 		gem_require_mmap_wc(fd);
 		igt_require(has_capture(fd));
 		ctx = intel_ctx_create_all_physical(fd);
-		igt_allow_hang(fd, ctx->id, HANG_ALLOW_CAPTURE);
+		igt_allow_hang(fd, 0, HANG_ALLOW_CAPTURE |
+			       HANG_WANT_ENGINE_RESET);
 
 		dir = igt_sysfs_open(fd);
 		igt_require(igt_sysfs_set(dir, "error", "Begone!"));
@@ -634,7 +670,7 @@ igt_main
 	}
 
 	test_each_engine("capture", fd, ctx, e)
-		capture(fd, dir, ctx, e->flags);
+		capture(fd, dir, ctx, e);
 
 	igt_subtest_f("many-4K-zero") {
 		igt_require(gem_can_store_dword(fd, 0));
-- 
2.28.0


* [igt-dev] ✓ Fi.CI.BAT: success for IGT fixes for priority management + capture with GuC submission
  2021-08-04  1:23 ` [igt-dev] " Matthew Brost
                   ` (3 preceding siblings ...)
  (?)
@ 2021-08-04  1:47 ` Patchwork
  -1 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2021-08-04  1:47 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev


== Series Details ==

Series: IGT fixes for priority management + capture with GuC submission
URL   : https://patchwork.freedesktop.org/series/93365/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_10444 -> IGTPW_6087
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/index.html

Known issues
------------

  Here are the changes found in IGTPW_6087 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_suspend@basic-s0:
    - fi-tgl-1115g4:      [PASS][1] -> [FAIL][2] ([i915#1888])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/fi-tgl-1115g4/igt@gem_exec_suspend@basic-s0.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/fi-tgl-1115g4/igt@gem_exec_suspend@basic-s0.html

  * igt@i915_selftest@live@gt_heartbeat:
    - fi-cfl-8700k:       [PASS][3] -> [DMESG-FAIL][4] ([i915#2291] / [i915#541])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/fi-cfl-8700k/igt@i915_selftest@live@gt_heartbeat.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/fi-cfl-8700k/igt@i915_selftest@live@gt_heartbeat.html

  * igt@kms_chamelium@dp-crc-fast:
    - fi-kbl-7500u:       [PASS][5] -> [FAIL][6] ([i915#1372])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html

  * igt@prime_vgem@basic-userptr:
    - fi-pnv-d510:        NOTRUN -> [SKIP][7] ([fdo#109271]) +53 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/fi-pnv-d510/igt@prime_vgem@basic-userptr.html

  
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [i915#1372]: https://gitlab.freedesktop.org/drm/intel/issues/1372
  [i915#1888]: https://gitlab.freedesktop.org/drm/intel/issues/1888
  [i915#2291]: https://gitlab.freedesktop.org/drm/intel/issues/2291
  [i915#541]: https://gitlab.freedesktop.org/drm/intel/issues/541


Participating hosts (34 -> 33)
------------------------------

  Additional (1): fi-pnv-d510 
  Missing    (2): fi-bsw-cyan fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * IGT: IGT_6159 -> IGTPW_6087

  CI-20190529: 20190529
  CI_DRM_10444: 1c14496fa6184006fdae96f9a30435c0a2a697cc @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_6087: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/index.html
  IGT_6159: 6135b9cc319ed965e3aafb5b2ae2abf4762a06b2 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/index.html


* [igt-dev] ✓ Fi.CI.IGT: success for IGT fixes for priority management + capture with GuC submission
  2021-08-04  1:23 ` [igt-dev] " Matthew Brost
                   ` (4 preceding siblings ...)
  (?)
@ 2021-08-05 11:46 ` Patchwork
  -1 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2021-08-05 11:46 UTC (permalink / raw)
  To: Matthew Brost; +Cc: igt-dev


== Series Details ==

Series: IGT fixes for priority management + capture with GuC submission
URL   : https://patchwork.freedesktop.org/series/93365/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_10444_full -> IGTPW_6087_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/index.html

Known issues
------------

  Here are the changes found in IGTPW_6087_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_persistence@legacy-engines-persistence:
    - shard-snb:          NOTRUN -> [SKIP][1] ([fdo#109271] / [i915#1099]) +3 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-snb2/igt@gem_ctx_persistence@legacy-engines-persistence.html

  * igt@gem_eio@in-flight-suspend:
    - shard-apl:          [PASS][2] -> [DMESG-WARN][3] ([i915#180]) +5 similar issues
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-apl6/igt@gem_eio@in-flight-suspend.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl1/igt@gem_eio@in-flight-suspend.html

  * igt@gem_eio@unwedge-stress:
    - shard-tglb:         [PASS][4] -> [TIMEOUT][5] ([i915#2369] / [i915#3063] / [i915#3648])
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-tglb1/igt@gem_eio@unwedge-stress.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb3/igt@gem_eio@unwedge-stress.html

  * igt@gem_exec_capture@userptr:
    - shard-snb:          [PASS][6] -> [SKIP][7] ([fdo#109271])
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-snb7/igt@gem_exec_capture@userptr.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-snb5/igt@gem_exec_capture@userptr.html

  * igt@gem_exec_fair@basic-deadline:
    - shard-apl:          NOTRUN -> [FAIL][8] ([i915#2846])
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl7/igt@gem_exec_fair@basic-deadline.html

  * igt@gem_exec_fair@basic-none-share@rcs0:
    - shard-kbl:          [PASS][9] -> [FAIL][10] ([i915#2842]) +4 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-kbl2/igt@gem_exec_fair@basic-none-share@rcs0.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl6/igt@gem_exec_fair@basic-none-share@rcs0.html

  * igt@gem_exec_fair@basic-none@vcs0:
    - shard-glk:          [PASS][11] -> [FAIL][12] ([i915#2842]) +2 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-glk6/igt@gem_exec_fair@basic-none@vcs0.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk9/igt@gem_exec_fair@basic-none@vcs0.html

  * igt@gem_exec_fair@basic-none@vcs1:
    - shard-iclb:         NOTRUN -> [FAIL][13] ([i915#2842])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb4/igt@gem_exec_fair@basic-none@vcs1.html

  * igt@gem_pread@exhaustion:
    - shard-apl:          NOTRUN -> [WARN][14] ([i915#2658])
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl7/igt@gem_pread@exhaustion.html

  * igt@gem_userptr_blits@dmabuf-sync:
    - shard-apl:          NOTRUN -> [SKIP][15] ([fdo#109271] / [i915#3323])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl2/igt@gem_userptr_blits@dmabuf-sync.html

  * igt@gem_userptr_blits@input-checking:
    - shard-apl:          NOTRUN -> [DMESG-WARN][16] ([i915#3002])
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl3/igt@gem_userptr_blits@input-checking.html

  * igt@gem_userptr_blits@invalid-mmap-offset-unsync:
    - shard-iclb:         NOTRUN -> [SKIP][17] ([i915#3297])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb7/igt@gem_userptr_blits@invalid-mmap-offset-unsync.html
    - shard-tglb:         NOTRUN -> [SKIP][18] ([i915#3297])
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb2/igt@gem_userptr_blits@invalid-mmap-offset-unsync.html

  * igt@gem_userptr_blits@vma-merge:
    - shard-apl:          NOTRUN -> [FAIL][19] ([i915#3318])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl7/igt@gem_userptr_blits@vma-merge.html

  * igt@gen9_exec_parse@bb-start-cmd:
    - shard-tglb:         NOTRUN -> [SKIP][20] ([i915#2856])
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb2/igt@gen9_exec_parse@bb-start-cmd.html
    - shard-iclb:         NOTRUN -> [SKIP][21] ([i915#2856])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb5/igt@gen9_exec_parse@bb-start-cmd.html

  * igt@i915_pm_rpm@modeset-non-lpsp:
    - shard-iclb:         NOTRUN -> [SKIP][22] ([i915#579])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb1/igt@i915_pm_rpm@modeset-non-lpsp.html
    - shard-tglb:         NOTRUN -> [SKIP][23] ([i915#579])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb1/igt@i915_pm_rpm@modeset-non-lpsp.html

  * igt@kms_addfb_basic@invalid-smem-bo-on-discrete:
    - shard-tglb:         NOTRUN -> [SKIP][24] ([i915#3826])
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb5/igt@kms_addfb_basic@invalid-smem-bo-on-discrete.html
    - shard-iclb:         NOTRUN -> [SKIP][25] ([i915#3826])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb4/igt@kms_addfb_basic@invalid-smem-bo-on-discrete.html

  * igt@kms_big_fb@linear-64bpp-rotate-90:
    - shard-iclb:         NOTRUN -> [SKIP][26] ([fdo#110725] / [fdo#111614])
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb8/igt@kms_big_fb@linear-64bpp-rotate-90.html
    - shard-tglb:         NOTRUN -> [SKIP][27] ([fdo#111614])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb5/igt@kms_big_fb@linear-64bpp-rotate-90.html

  * igt@kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0-hflip:
    - shard-kbl:          NOTRUN -> [SKIP][28] ([fdo#109271] / [i915#3777])
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl6/igt@kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0-hflip.html
    - shard-glk:          NOTRUN -> [SKIP][29] ([fdo#109271] / [i915#3777])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk1/igt@kms_big_fb@x-tiled-max-hw-stride-32bpp-rotate-0-hflip.html

  * igt@kms_big_fb@y-tiled-32bpp-rotate-0:
    - shard-glk:          [PASS][30] -> [DMESG-WARN][31] ([i915#118] / [i915#95]) +1 similar issue
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-glk5/igt@kms_big_fb@y-tiled-32bpp-rotate-0.html
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk4/igt@kms_big_fb@y-tiled-32bpp-rotate-0.html

  * igt@kms_big_fb@yf-tiled-addfb:
    - shard-tglb:         NOTRUN -> [SKIP][32] ([fdo#111615])
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb6/igt@kms_big_fb@yf-tiled-addfb.html

  * igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip:
    - shard-apl:          NOTRUN -> [SKIP][33] ([fdo#109271] / [i915#3777]) +3 similar issues
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl2/igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip.html

  * igt@kms_ccs@pipe-a-ccs-on-another-bo-y_tiled_gen12_mc_ccs:
    - shard-apl:          NOTRUN -> [SKIP][34] ([fdo#109271] / [i915#3886]) +6 similar issues
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl2/igt@kms_ccs@pipe-a-ccs-on-another-bo-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-a-crc-primary-rotation-180-y_tiled_gen12_mc_ccs:
    - shard-tglb:         NOTRUN -> [SKIP][35] ([i915#3689] / [i915#3886]) +1 similar issue
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb5/igt@kms_ccs@pipe-a-crc-primary-rotation-180-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-a-crc-sprite-planes-basic-y_tiled_ccs:
    - shard-tglb:         NOTRUN -> [SKIP][36] ([i915#3689]) +4 similar issues
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb7/igt@kms_ccs@pipe-a-crc-sprite-planes-basic-y_tiled_ccs.html

  * igt@kms_ccs@pipe-b-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc:
    - shard-kbl:          NOTRUN -> [SKIP][37] ([fdo#109271] / [i915#3886]) +1 similar issue
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl3/igt@kms_ccs@pipe-b-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc.html
    - shard-glk:          NOTRUN -> [SKIP][38] ([fdo#109271] / [i915#3886]) +1 similar issue
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk6/igt@kms_ccs@pipe-b-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc.html
    - shard-iclb:         NOTRUN -> [SKIP][39] ([fdo#109278] / [i915#3886]) +2 similar issues
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb5/igt@kms_ccs@pipe-b-missing-ccs-buffer-y_tiled_gen12_rc_ccs_cc.html

  * igt@kms_ccs@pipe-d-missing-ccs-buffer-y_tiled_gen12_rc_ccs:
    - shard-kbl:          NOTRUN -> [SKIP][40] ([fdo#109271]) +41 similar issues
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl2/igt@kms_ccs@pipe-d-missing-ccs-buffer-y_tiled_gen12_rc_ccs.html

  * igt@kms_chamelium@dp-frame-dump:
    - shard-iclb:         NOTRUN -> [SKIP][41] ([fdo#109284] / [fdo#111827]) +5 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb5/igt@kms_chamelium@dp-frame-dump.html
    - shard-glk:          NOTRUN -> [SKIP][42] ([fdo#109271] / [fdo#111827]) +4 similar issues
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk6/igt@kms_chamelium@dp-frame-dump.html

  * igt@kms_chamelium@hdmi-mode-timings:
    - shard-snb:          NOTRUN -> [SKIP][43] ([fdo#109271] / [fdo#111827]) +20 similar issues
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-snb2/igt@kms_chamelium@hdmi-mode-timings.html
    - shard-kbl:          NOTRUN -> [SKIP][44] ([fdo#109271] / [fdo#111827]) +4 similar issues
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl6/igt@kms_chamelium@hdmi-mode-timings.html

  * igt@kms_color_chamelium@pipe-a-ctm-limited-range:
    - shard-apl:          NOTRUN -> [SKIP][45] ([fdo#109271] / [fdo#111827]) +14 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl3/igt@kms_color_chamelium@pipe-a-ctm-limited-range.html

  * igt@kms_color_chamelium@pipe-b-ctm-0-75:
    - shard-tglb:         NOTRUN -> [SKIP][46] ([fdo#109284] / [fdo#111827]) +5 similar issues
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb2/igt@kms_color_chamelium@pipe-b-ctm-0-75.html

  * igt@kms_content_protection@dp-mst-lic-type-1:
    - shard-iclb:         NOTRUN -> [SKIP][47] ([i915#3116])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb8/igt@kms_content_protection@dp-mst-lic-type-1.html
    - shard-tglb:         NOTRUN -> [SKIP][48] ([i915#3116])
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb5/igt@kms_content_protection@dp-mst-lic-type-1.html

  * igt@kms_content_protection@legacy:
    - shard-tglb:         NOTRUN -> [SKIP][49] ([fdo#111828])
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb1/igt@kms_content_protection@legacy.html

  * igt@kms_content_protection@lic:
    - shard-apl:          NOTRUN -> [TIMEOUT][50] ([i915#1319])
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl3/igt@kms_content_protection@lic.html

  * igt@kms_content_protection@uevent:
    - shard-apl:          NOTRUN -> [FAIL][51] ([i915#2105])
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl2/igt@kms_content_protection@uevent.html

  * igt@kms_cursor_crc@pipe-a-cursor-max-size-rapid-movement:
    - shard-tglb:         NOTRUN -> [SKIP][52] ([i915#3359]) +1 similar issue
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb2/igt@kms_cursor_crc@pipe-a-cursor-max-size-rapid-movement.html

  * igt@kms_cursor_crc@pipe-b-cursor-512x512-sliding:
    - shard-iclb:         NOTRUN -> [SKIP][53] ([fdo#109278] / [fdo#109279])
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb6/igt@kms_cursor_crc@pipe-b-cursor-512x512-sliding.html
    - shard-tglb:         NOTRUN -> [SKIP][54] ([fdo#109279] / [i915#3359])
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb5/igt@kms_cursor_crc@pipe-b-cursor-512x512-sliding.html

  * igt@kms_cursor_crc@pipe-c-cursor-32x32-sliding:
    - shard-tglb:         NOTRUN -> [SKIP][55] ([i915#3319])
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb8/igt@kms_cursor_crc@pipe-c-cursor-32x32-sliding.html

  * igt@kms_cursor_edge_walk@pipe-d-128x128-right-edge:
    - shard-snb:          NOTRUN -> [SKIP][56] ([fdo#109271]) +447 similar issues
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-snb5/igt@kms_cursor_edge_walk@pipe-d-128x128-right-edge.html

  * igt@kms_cursor_legacy@cursorb-vs-flipa-atomic-transitions-varying-size:
    - shard-iclb:         NOTRUN -> [SKIP][57] ([fdo#109274] / [fdo#109278])
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb2/igt@kms_cursor_legacy@cursorb-vs-flipa-atomic-transitions-varying-size.html

  * igt@kms_cursor_legacy@pipe-d-single-bo:
    - shard-apl:          NOTRUN -> [SKIP][58] ([fdo#109271] / [i915#533])
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl7/igt@kms_cursor_legacy@pipe-d-single-bo.html

  * igt@kms_cursor_legacy@pipe-d-single-move:
    - shard-iclb:         NOTRUN -> [SKIP][59] ([fdo#109278]) +10 similar issues
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb4/igt@kms_cursor_legacy@pipe-d-single-move.html

  * igt@kms_flip@2x-flip-vs-panning:
    - shard-iclb:         NOTRUN -> [SKIP][60] ([fdo#109274]) +2 similar issues
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb8/igt@kms_flip@2x-flip-vs-panning.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible@a-hdmi-a1:
    - shard-glk:          [PASS][61] -> [FAIL][62] ([i915#79])
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-glk5/igt@kms_flip@flip-vs-expired-vblank-interruptible@a-hdmi-a1.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk3/igt@kms_flip@flip-vs-expired-vblank-interruptible@a-hdmi-a1.html

  * igt@kms_flip@flip-vs-suspend-interruptible@c-dp1:
    - shard-apl:          NOTRUN -> [DMESG-WARN][63] ([i915#180]) +1 similar issue
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl8/igt@kms_flip@flip-vs-suspend-interruptible@c-dp1.html

  * igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytilegen12rcccs:
    - shard-apl:          NOTRUN -> [SKIP][64] ([fdo#109271] / [i915#2672])
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl6/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytilegen12rcccs.html

  * igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile:
    - shard-tglb:         NOTRUN -> [SKIP][65] ([i915#2587])
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb2/igt@kms_flip_scaled_crc@flip-32bpp-ytileccs-to-64bpp-ytile.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render:
    - shard-kbl:          [PASS][66] -> [FAIL][67] ([i915#2546])
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-kbl3/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl3/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html
    - shard-glk:          [PASS][68] -> [FAIL][69] ([i915#2546])
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-glk6/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk2/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html

  * igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-cpu:
    - shard-iclb:         NOTRUN -> [SKIP][70] ([fdo#109280]) +11 similar issues
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-pri-shrfb-draw-mmap-cpu.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-mmap-cpu:
    - shard-glk:          NOTRUN -> [SKIP][71] ([fdo#109271]) +35 similar issues
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk2/igt@kms_frontbuffer_tracking@fbcpsr-1p-offscren-pri-indfb-draw-mmap-cpu.html

  * igt@kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-render:
    - shard-tglb:         NOTRUN -> [SKIP][72] ([fdo#111825]) +14 similar issues
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb2/igt@kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-render.html

  * igt@kms_plane_alpha_blend@pipe-b-alpha-7efc:
    - shard-apl:          NOTRUN -> [FAIL][73] ([fdo#108145] / [i915#265]) +1 similar issue
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl1/igt@kms_plane_alpha_blend@pipe-b-alpha-7efc.html

  * igt@kms_plane_alpha_blend@pipe-b-alpha-transparent-fb:
    - shard-apl:          NOTRUN -> [FAIL][74] ([i915#265])
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl8/igt@kms_plane_alpha_blend@pipe-b-alpha-transparent-fb.html

  * igt@kms_psr2_sf@overlay-plane-update-sf-dmg-area-2:
    - shard-apl:          NOTRUN -> [SKIP][75] ([fdo#109271] / [i915#658]) +3 similar issues
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl3/igt@kms_psr2_sf@overlay-plane-update-sf-dmg-area-2.html

  * igt@kms_psr@psr2_cursor_blt:
    - shard-iclb:         [PASS][76] -> [SKIP][77] ([fdo#109441]) +2 similar issues
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-iclb2/igt@kms_psr@psr2_cursor_blt.html
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb5/igt@kms_psr@psr2_cursor_blt.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         NOTRUN -> [SKIP][78] ([fdo#109441]) +1 similar issue
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb4/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@kms_psr@psr2_cursor_plane_onoff:
    - shard-tglb:         NOTRUN -> [FAIL][79] ([i915#132] / [i915#3467]) +1 similar issue
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb1/igt@kms_psr@psr2_cursor_plane_onoff.html

  * igt@kms_sysfs_edid_timing:
    - shard-kbl:          NOTRUN -> [FAIL][80] ([IGT#2])
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl3/igt@kms_sysfs_edid_timing.html

  * igt@kms_vblank@pipe-d-ts-continuation-idle:
    - shard-apl:          NOTRUN -> [SKIP][81] ([fdo#109271]) +230 similar issues
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl1/igt@kms_vblank@pipe-d-ts-continuation-idle.html

  * igt@kms_writeback@writeback-pixel-formats:
    - shard-apl:          NOTRUN -> [SKIP][82] ([fdo#109271] / [i915#2437])
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl7/igt@kms_writeback@writeback-pixel-formats.html

  * igt@nouveau_crc@pipe-a-ctx-flip-skip-current-frame:
    - shard-tglb:         NOTRUN -> [SKIP][83] ([i915#2530])
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb1/igt@nouveau_crc@pipe-a-ctx-flip-skip-current-frame.html
    - shard-iclb:         NOTRUN -> [SKIP][84] ([i915#2530])
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb1/igt@nouveau_crc@pipe-a-ctx-flip-skip-current-frame.html

  * igt@prime_nv_pcopy@test3_1:
    - shard-tglb:         NOTRUN -> [SKIP][85] ([fdo#109291]) +1 similar issue
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb3/igt@prime_nv_pcopy@test3_1.html

  * igt@prime_nv_test@nv_write_i915_gtt_mmap_read:
    - shard-iclb:         NOTRUN -> [SKIP][86] ([fdo#109291]) +1 similar issue
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb3/igt@prime_nv_test@nv_write_i915_gtt_mmap_read.html

  * igt@sysfs_clients@sema-50:
    - shard-iclb:         NOTRUN -> [SKIP][87] ([i915#2994])
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb8/igt@sysfs_clients@sema-50.html
    - shard-kbl:          NOTRUN -> [SKIP][88] ([fdo#109271] / [i915#2994])
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl3/igt@sysfs_clients@sema-50.html
    - shard-glk:          NOTRUN -> [SKIP][89] ([fdo#109271] / [i915#2994])
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk5/igt@sysfs_clients@sema-50.html
    - shard-tglb:         NOTRUN -> [SKIP][90] ([i915#2994])
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb6/igt@sysfs_clients@sema-50.html

  
#### Possible fixes ####

  * igt@gem_ctx_persistence@many-contexts:
    - shard-tglb:         [FAIL][91] ([i915#2410]) -> [PASS][92]
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-tglb2/igt@gem_ctx_persistence@many-contexts.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-tglb1/igt@gem_ctx_persistence@many-contexts.html

  * igt@gem_exec_fair@basic-deadline:
    - shard-kbl:          [FAIL][93] ([i915#2846]) -> [PASS][94]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-kbl4/igt@gem_exec_fair@basic-deadline.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl7/igt@gem_exec_fair@basic-deadline.html
    - shard-glk:          [FAIL][95] ([i915#2846]) -> [PASS][96]
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-glk7/igt@gem_exec_fair@basic-deadline.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk4/igt@gem_exec_fair@basic-deadline.html

  * igt@gem_exec_fair@basic-pace-solo@rcs0:
    - shard-kbl:          [FAIL][97] ([i915#2842]) -> [PASS][98]
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-kbl7/igt@gem_exec_fair@basic-pace-solo@rcs0.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl1/igt@gem_exec_fair@basic-pace-solo@rcs0.html

  * igt@gem_exec_fair@basic-pace@vecs0:
    - shard-iclb:         [FAIL][99] ([i915#2842]) -> [PASS][100]
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-iclb4/igt@gem_exec_fair@basic-pace@vecs0.html
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb3/igt@gem_exec_fair@basic-pace@vecs0.html

  * igt@gem_mmap_gtt@cpuset-medium-copy:
    - shard-iclb:         [FAIL][101] ([i915#307]) -> [PASS][102] +1 similar issue
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-iclb7/igt@gem_mmap_gtt@cpuset-medium-copy.html
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb3/igt@gem_mmap_gtt@cpuset-medium-copy.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible@c-dp1:
    - shard-kbl:          [FAIL][103] ([i915#79]) -> [PASS][104]
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-kbl6/igt@kms_flip@flip-vs-expired-vblank-interruptible@c-dp1.html
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl4/igt@kms_flip@flip-vs-expired-vblank-interruptible@c-dp1.html

  * igt@kms_flip@flip-vs-expired-vblank@c-hdmi-a2:
    - shard-glk:          [FAIL][105] ([i915#79]) -> [PASS][106]
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-glk5/igt@kms_flip@flip-vs-expired-vblank@c-hdmi-a2.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-glk1/igt@kms_flip@flip-vs-expired-vblank@c-hdmi-a2.html

  * igt@kms_flip_event_leak:
    - shard-snb:          [SKIP][107] ([fdo#109271]) -> [PASS][108]
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-snb2/igt@kms_flip_event_leak.html
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-snb7/igt@kms_flip_event_leak.html

  * igt@kms_psr@psr2_cursor_render:
    - shard-iclb:         [SKIP][109] ([fdo#109441]) -> [PASS][110] +2 similar issues
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-iclb5/igt@kms_psr@psr2_cursor_render.html
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb2/igt@kms_psr@psr2_cursor_render.html

  * igt@perf@polling-parameterized:
    - shard-iclb:         [FAIL][111] ([i915#1542]) -> [PASS][112]
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-iclb7/igt@perf@polling-parameterized.html
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb3/igt@perf@polling-parameterized.html

  
#### Warnings ####

  * igt@i915_pm_rc6_residency@rc6-idle:
    - shard-iclb:         [WARN][113] ([i915#1804] / [i915#2684]) -> [WARN][114] ([i915#2684])
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-iclb7/igt@i915_pm_rc6_residency@rc6-idle.html
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb5/igt@i915_pm_rc6_residency@rc6-idle.html

  * igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-2:
    - shard-iclb:         [SKIP][115] ([i915#2920]) -> [SKIP][116] ([i915#658]) +1 similar issue
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-iclb2/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-2.html
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb8/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-2.html

  * igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-1:
    - shard-iclb:         [SKIP][117] ([i915#658]) -> [SKIP][118] ([i915#2920])
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-iclb6/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-1.html
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-iclb2/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-1.html

  * igt@runner@aborted:
    - shard-kbl:          ([FAIL][119], [FAIL][120]) ([i915#3002] / [i915#3363]) -> ([FAIL][121], [FAIL][122]) ([i915#2505] / [i915#3002] / [i915#3363])
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-kbl4/igt@runner@aborted.html
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-kbl2/igt@runner@aborted.html
   [121]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl2/igt@runner@aborted.html
   [122]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-kbl6/igt@runner@aborted.html
    - shard-apl:          ([FAIL][123], [FAIL][124], [FAIL][125]) ([i915#180] / [i915#3363]) -> ([FAIL][126], [FAIL][127], [FAIL][128], [FAIL][129], [FAIL][130], [FAIL][131], [FAIL][132]) ([fdo#109271] / [i915#1610] / [i915#180] / [i915#1814] / [i915#2292] / [i915#3002] / [i915#3363])
   [123]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-apl8/igt@runner@aborted.html
   [124]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-apl6/igt@runner@aborted.html
   [125]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10444/shard-apl1/igt@runner@aborted.html
   [126]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl1/igt@runner@aborted.html
   [127]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl8/igt@runner@aborted.html
   [128]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl3/igt@runner@aborted.html
   [129]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl8/igt@runner@aborted.html
   [130]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl8/igt@runner@aborted.html
   [131]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl6/igt@runner@aborted.html
   [132]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/shard-apl1/igt@runner@aborted.html

  
  [IGT#2]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/2
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109274]: https://bugs.freedesktop.org/show_bug.cgi?id=109274
  [fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
  [fdo#109279]: https://bugs.freedesktop.org/show_bug.cgi?id=109279
  [fdo#109280]: https://bugs.freedesktop.org/show_bug.cgi?id=109280
  [fdo#109284]: https://bugs.freedesktop.org/show_bug.cgi?id=109284
  [fdo#109291]: https://bugs.freedesktop.org/show_bug.cgi?id=109291
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110725]: https://bugs.freedesktop.org/show_bug.cgi?id=110725
  [fdo#111614]: https://bugs.freedesktop.org/show_bug.cgi?id=111614
  [fdo#111615]: https://bugs.freedesktop.org/show_bug.cgi?id=111615
  [fdo#111825]: https://bugs.freedesktop.org/show_bug.cgi?id=111825
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [fdo#111828]: https://bugs.freedesktop.org/show_bug.cgi?id=111828
  [i915#1099]: https://gitlab.freedesktop.org/drm/intel/issues/1099
  [i915#118]: https://gitlab.freedesktop.org/drm/intel/issues/118
  [i915#1319]: https://gitlab.freedesktop.org/drm/intel/issues/1319
  [i915#132]: https://gitlab.freedesktop.org/drm/intel/issues/132
  [i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
  [i915#1610]: https://gitlab.freedesktop.org/drm/intel/issues/1610
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#1804]: https://gitlab.freedesktop.org/drm/intel/issues/1804
  [i915#1814]: https://gitlab.freedesktop.org/drm/intel/issues/1814
  [i915#2105]: https://gitlab.freedesktop.org/drm/intel/issues/2105
  [i915#2292]: https://gitlab.freedesktop.org/drm/intel/issues/2292
  [i915#2369]: https://gitlab.freedesktop.org/drm/intel/issues/2369
  [i915#2410]: https://gitlab.freedesktop.org/drm/intel/issues/2410
  [i915#2437]: https://gitlab.freedesktop.org/drm/intel/issues/2437
  [i915#2505]: https://gitlab.freedesktop.org/drm/intel/issues/2505
  [i915#2530]: https://gitlab.freedesktop.org/drm/intel/issues/2530
  [i915#2546]: https://gitlab.freedesktop.org/drm/intel/issues/2546
  [i915#2587]: https://gitlab.freedesktop.org/drm/intel/issues/2587
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#2658]: https://gitlab.freedesktop.org/drm/intel/issues/2658
  [i915#2672]: https://gitlab.freedesktop.org/drm/intel/issues/2672
  [i915#2684]: https://gitlab.freedesktop.org/drm/intel/issues/2684
  [i915#2842]: https://gitlab.freedesktop.org/drm/intel/issues/2842

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_6087/index.html


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [Intel-gfx] [PATCH i-g-t 1/3] i915/gem_exec_schedule: Make gem_exec_schedule understand static priority mapping
  2021-08-04  1:23   ` Matthew Brost
@ 2021-08-13 23:24     ` Daniele Ceraolo Spurio
  -1 siblings, 0 replies; 16+ messages in thread
From: Daniele Ceraolo Spurio @ 2021-08-13 23:24 UTC (permalink / raw)
  To: Matthew Brost, igt-dev; +Cc: intel-gfx



On 8/3/2021 6:23 PM, Matthew Brost wrote:
> The i915 currently has 2k visible priority levels, all of which are
> unique. This is changing to statically map these 2k levels into 3
> buckets:
>
> low: < 0
> mid: 0
> high: > 0
>
> Update gem_exec_schedule to understand this. This entails updating the
> promotion test to use 3 levels that will map into different buckets and
> also adding a bit of delay after releasing a cork before completing the
> spinners.

This needs a line about why we add the delay, something like "to give 
time to the i915 scheduler to process the fence release and queue the 
requests" or something.
BTW, any reason not to just add the delay unconditionally in 
unplug_show_queue, instead of only in one test? Other tests might suffer 
from the same problem even if they're not hitting it at the moment.

Daniele
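
A minimal sketch of that alternative, for illustration only (the helper
name unplug_and_settle() and the 250 ms value are made up here, not part
of the posted patch; igt_cork_unplug() and igt_debugfs_dump() are the
calls the existing helper already makes):

	/* Give the scheduler a fixed, unconditional window to process the
	 * fence release and queue the released requests, so callers no
	 * longer need to thread a usec_delay argument through. */
	#define UNPLUG_SETTLE_USEC	(250 * 1000)	/* illustrative value */

	static void unplug_and_settle(int fd, struct igt_cork *c)
	{
		igt_cork_unplug(c); /* batches will now be queued on the engine */
		igt_debugfs_dump(fd, "i915_engine_info");
		usleep(UNPLUG_SETTLE_USEC);
	}

unplug_show_queue() could then use this in place of its unplug/dump/sleep
sequence and drop the new parameter entirely.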

>
> Also skip any tests that rely on having more than 3 priority levels.
>
> v2: Add a delay between starting releasing spinner and cork in
> promotion, add local define for static mapping engine info
>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   lib/i915/gem_scheduler.c       | 14 ++++++++
>   lib/i915/gem_scheduler.h       |  1 +
>   lib/i915/i915_drm_local.h      | 10 ++++++
>   tests/i915/gem_exec_schedule.c | 62 +++++++++++++++++++++-------------
>   4 files changed, 63 insertions(+), 24 deletions(-)
>
> diff --git a/lib/i915/gem_scheduler.c b/lib/i915/gem_scheduler.c
> index cdddf42ad..d006b8676 100644
> --- a/lib/i915/gem_scheduler.c
> +++ b/lib/i915/gem_scheduler.c
> @@ -28,6 +28,7 @@
>   #include "igt_core.h"
>   #include "ioctl_wrappers.h"
>   
> +#include "i915/i915_drm_local.h"
>   #include "i915/gem_scheduler.h"
>   #include "i915/gem_submission.h"
>   
> @@ -90,6 +91,19 @@ bool gem_scheduler_has_ctx_priority(int fd)
>   		I915_SCHEDULER_CAP_PRIORITY;
>   }
>   
> +/**
> + * gem_scheduler_has_static_priority:
> + * @fd: open i915 drm file descriptor
> + *
> + * Feature test macro to query whether priorities assigned from user space
> + * are statically mapped into 3 buckets by the driver.
> + */
> +bool gem_scheduler_has_static_priority(int fd)
> +{
> +	return gem_scheduler_capability(fd) &
> +		I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
> +}
> +
>   /**
>    * gem_scheduler_has_preemption:
>    * @fd: open i915 drm file descriptor
> diff --git a/lib/i915/gem_scheduler.h b/lib/i915/gem_scheduler.h
> index d43e84bd2..b00804f70 100644
> --- a/lib/i915/gem_scheduler.h
> +++ b/lib/i915/gem_scheduler.h
> @@ -29,6 +29,7 @@
>   unsigned gem_scheduler_capability(int fd);
>   bool gem_scheduler_enabled(int fd);
>   bool gem_scheduler_has_ctx_priority(int fd);
> +bool gem_scheduler_has_static_priority(int fd);
>   bool gem_scheduler_has_preemption(int fd);
>   bool gem_scheduler_has_semaphores(int fd);
>   bool gem_scheduler_has_engine_busy_stats(int fd);
> diff --git a/lib/i915/i915_drm_local.h b/lib/i915/i915_drm_local.h
> index dd646aedf..a1527ff21 100644
> --- a/lib/i915/i915_drm_local.h
> +++ b/lib/i915/i915_drm_local.h
> @@ -20,6 +20,16 @@ extern "C" {
>    * clean these up when kernel uapi headers are sync'd.
>    */
>   
> +/*
> + * Indicates the 2k user priority levels are statically mapped into 3 buckets as
> + * follows:
> + *
> + * -1k to -1	Low priority
> + * 0		Normal priority
> + * 1 to 1k	Highest priority
> + */
> +#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
> +
>   #if defined(__cplusplus)
>   }
>   #endif
> diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
> index e5fb45982..bb9fb6c14 100644
> --- a/tests/i915/gem_exec_schedule.c
> +++ b/tests/i915/gem_exec_schedule.c
> @@ -199,7 +199,8 @@ create_highest_priority(int fd, const intel_ctx_cfg_t *cfg)
>   
>   static void unplug_show_queue(int fd, struct igt_cork *c,
>   			      const intel_ctx_cfg_t *cfg,
> -			      unsigned int engine)
> +			      unsigned int engine,
> +			      unsigned usec_delay)
>   {
>   	igt_spin_t *spin[MAX_ELSP_QLEN];
>   	int max = MAX_ELSP_QLEN;
> @@ -216,6 +217,7 @@ static void unplug_show_queue(int fd, struct igt_cork *c,
>   
>   	igt_cork_unplug(c); /* batches will now be queued on the engine */
>   	igt_debugfs_dump(fd, "i915_engine_info");
> +	usleep(usec_delay);
>   
>   	for (int n = 0; n < max; n++)
>   		igt_spin_free(fd, spin[n]);
> @@ -237,7 +239,7 @@ static void fifo(int fd, const intel_ctx_t *ctx, unsigned ring)
>   	store_dword_fenced(fd, ctx, ring, scratch, 0, 1, fence, 0);
>   	store_dword_fenced(fd, ctx, ring, scratch, 0, 2, fence, 0);
>   
> -	unplug_show_queue(fd, &cork, &ctx->cfg, ring);
> +	unplug_show_queue(fd, &cork, &ctx->cfg, ring, 0);
>   	close(fence);
>   
>   	result =  __sync_read_u32(fd, scratch, 0);
> @@ -298,7 +300,7 @@ static void implicit_rw(int i915, const intel_ctx_t *ctx, unsigned int ring,
>   				   ring, scratch, 0, ring,
>   				   fence, I915_GEM_DOMAIN_RENDER);
>   
> -	unplug_show_queue(i915, &cork, &ctx->cfg, ring);
> +	unplug_show_queue(i915, &cork, &ctx->cfg, ring, 0);
>   	close(fence);
>   
>   	result =  __sync_read_u32(i915, scratch, 0);
> @@ -355,7 +357,7 @@ static void independent(int fd, const intel_ctx_t *ctx, unsigned int engine,
>   	/* Same priority, but different timeline (as different engine) */
>   	batch = __store_dword(fd, ctx, engine, scratch, 0, engine, 0, fence, 0);
>   
> -	unplug_show_queue(fd, &cork, &ctx->cfg, engine);
> +	unplug_show_queue(fd, &cork, &ctx->cfg, engine, 0);
>   	close(fence);
>   
>   	gem_sync(fd, batch);
> @@ -1326,7 +1328,7 @@ static void reorder(int fd, const intel_ctx_cfg_t *cfg,
>   	store_dword_fenced(fd, ctx[LO], ring, scratch, 0, ctx[LO]->id, fence, 0);
>   	store_dword_fenced(fd, ctx[HI], ring, scratch, 0, ctx[HI]->id, fence, 0);
>   
> -	unplug_show_queue(fd, &cork, cfg, ring);
> +	unplug_show_queue(fd, &cork, cfg, ring, 0);
>   	close(fence);
>   
>   	result =  __sync_read_u32(fd, scratch, 0);
> @@ -1353,10 +1355,10 @@ static void promotion(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
>   	gem_context_set_priority(fd, ctx[LO]->id, MIN_PRIO);
>   
>   	ctx[HI] = intel_ctx_create(fd, cfg);
> -	gem_context_set_priority(fd, ctx[HI]->id, 0);
> +	gem_context_set_priority(fd, ctx[HI]->id, MAX_PRIO);
>   
>   	ctx[NOISE] = intel_ctx_create(fd, cfg);
> -	gem_context_set_priority(fd, ctx[NOISE]->id, MIN_PRIO/2);
> +	gem_context_set_priority(fd, ctx[NOISE]->id, 0);
>   
>   	result = gem_create(fd, 4096);
>   	dep = gem_create(fd, 4096);
> @@ -1377,7 +1379,7 @@ static void promotion(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
>   
>   	store_dword(fd, ctx[HI], ring, result, 0, ctx[HI]->id, 0);
>   
> -	unplug_show_queue(fd, &cork, cfg, ring);
> +	unplug_show_queue(fd, &cork, cfg, ring, 250000);
>   	close(fence);
>   
>   	dep_read = __sync_read_u32(fd, dep, 0);
> @@ -1893,7 +1895,7 @@ static void deep(int fd, const intel_ctx_cfg_t *cfg,
>   	igt_info("Second deptree: %d requests [%.3fs]\n",
>   		 n * XS, 1e-9*igt_nsec_elapsed(&tv));
>   
> -	unplug_show_queue(fd, &cork, cfg, ring);
> +	unplug_show_queue(fd, &cork, cfg, ring, 0);
>   	gem_close(fd, plug);
>   	igt_require(expected); /* too slow */
>   
> @@ -1962,7 +1964,7 @@ static void wide(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
>   	igt_info("Submitted %d requests over %d contexts in %.1fms\n",
>   		 count, MAX_CONTEXTS, igt_nsec_elapsed(&tv) * 1e-6);
>   
> -	unplug_show_queue(fd, &cork, cfg, ring);
> +	unplug_show_queue(fd, &cork, cfg, ring, 0);
>   	close(fence);
>   
>   	__sync_read_u32_count(fd, result, result_read, sizeof(result_read));
> @@ -2067,7 +2069,7 @@ static void reorder_wide(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
>   		intel_ctx_destroy(fd, tmp_ctx);
>   	}
>   
> -	unplug_show_queue(fd, &cork, cfg, ring);
> +	unplug_show_queue(fd, &cork, cfg, ring, 0);
>   	close(fence);
>   
>   	__sync_read_u32_count(fd, result, result_read, sizeof(result_read));
> @@ -2963,19 +2965,25 @@ igt_main
>   			test_each_engine_store("preempt-other-chain", fd, ctx, e)
>   				preempt_other(fd, &ctx->cfg, e->flags, CHAIN);
>   
> -			test_each_engine_store("preempt-queue", fd, ctx, e)
> -				preempt_queue(fd, &ctx->cfg, e->flags, 0);
> +			test_each_engine_store("preempt-engines", fd, ctx, e)
> +				preempt_engines(fd, e, 0);
>   
> -			test_each_engine_store("preempt-queue-chain", fd, ctx, e)
> -				preempt_queue(fd, &ctx->cfg, e->flags, CHAIN);
> -			test_each_engine_store("preempt-queue-contexts", fd, ctx, e)
> -				preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS);
> +			igt_subtest_group {
> +				igt_fixture {
> +					igt_require(!gem_scheduler_has_static_priority(fd));
> +				}
>   
> -			test_each_engine_store("preempt-queue-contexts-chain", fd, ctx, e)
> -				preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS | CHAIN);
> +				test_each_engine_store("preempt-queue", fd, ctx, e)
> +					preempt_queue(fd, &ctx->cfg, e->flags, 0);
>   
> -			test_each_engine_store("preempt-engines", fd, ctx, e)
> -				preempt_engines(fd, e, 0);
> +				test_each_engine_store("preempt-queue-chain", fd, ctx, e)
> +					preempt_queue(fd, &ctx->cfg, e->flags, CHAIN);
> +				test_each_engine_store("preempt-queue-contexts", fd, ctx, e)
> +					preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS);
> +
> +				test_each_engine_store("preempt-queue-contexts-chain", fd, ctx, e)
> +					preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS | CHAIN);
> +			}
>   
>   			igt_subtest_group {
>   				igt_hang_t hang;
> @@ -3017,11 +3025,17 @@ igt_main
>   		test_each_engine_store("wide", fd, ctx, e)
>   			wide(fd, &ctx->cfg, e->flags);
>   
> -		test_each_engine_store("reorder-wide", fd, ctx, e)
> -			reorder_wide(fd, &ctx->cfg, e->flags);
> -
>   		test_each_engine_store("smoketest", fd, ctx, e)
>   			smoketest(fd, &ctx->cfg, e->flags, 5);
> +
> +		igt_subtest_group {
> +			igt_fixture {
> +				igt_require(!gem_scheduler_has_static_priority(fd));
> +			}
> +
> +			test_each_engine_store("reorder-wide", fd, ctx, e)
> +				reorder_wide(fd, &ctx->cfg, e->flags);
> +		}
>   	}
>   
>   	igt_subtest_group {


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [Intel-gfx] [PATCH i-g-t 3/3] i915/gem_exec_capture: Update to support GuC based resets
  2021-08-04  1:23   ` Matthew Brost
@ 2021-08-14  0:28     ` Daniele Ceraolo Spurio
  -1 siblings, 0 replies; 16+ messages in thread
From: Daniele Ceraolo Spurio @ 2021-08-14  0:28 UTC (permalink / raw)
  To: Matthew Brost, igt-dev; +Cc: intel-gfx



On 8/3/2021 6:23 PM, Matthew Brost wrote:
> From: "Signed-off-by: John Harrison" <John.C.Harrison@Intel.com>
>
> When GuC submission is enabled, GuC itself manages hang detection and
> recovery. Therefore, any test that relies on being able to trigger an
> engine reset in the driver will fail. Full GT resets can still be
> triggered by the driver, however in that situation detecting the
> specific context that caused a hang is not possible as the driver has
> no information about what is actually running on the hardware at any
> given time.
>
> So update the test to cause a reset via the hangcheck mechanism by
> submitting a hanging batch and waiting. That way it is guaranteed to
> be testing the correct reset code paths for the current platform,
> whether that is GuC enabled or not.
>
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   lib/igt_gt.c                  | 44 +++++++++++++++++++----------
>   lib/igt_gt.h                  |  1 +
>   tests/i915/gem_exec_capture.c | 52 +++++++++++++++++++++++++++++------
>   3 files changed, 74 insertions(+), 23 deletions(-)
>
> diff --git a/lib/igt_gt.c b/lib/igt_gt.c
> index c049477db..ec548d501 100644
> --- a/lib/igt_gt.c
> +++ b/lib/igt_gt.c
> @@ -56,23 +56,28 @@
>    * engines.
>    */
>   
> +static int reset_query_once = -1;
> +
>   static bool has_gpu_reset(int fd)
>   {
> -	static int once = -1;
> -	if (once < 0) {
> -		struct drm_i915_getparam gp;
> -		int val = 0;
> -
> -		memset(&gp, 0, sizeof(gp));
> -		gp.param = 35; /* HAS_GPU_RESET */
> -		gp.value = &val;
> -
> -		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
> -			once = intel_gen(intel_get_drm_devid(fd)) >= 5;
> -		else
> -			once = val > 0;
> +	if (reset_query_once < 0) {
> +		reset_query_once = gem_gpu_reset_type(fd);
> +
> +		/* Very old kernels did not support the query */
> +		if (reset_query_once == -1)
> +			reset_query_once =
> +			      (intel_gen(intel_get_drm_devid(fd)) >= 5) ? 1 : 0;
>   	}
> -	return once;
> +
> +	return reset_query_once > 0;
> +}
> +
> +static bool has_engine_reset(int fd)
> +{
> +	if (reset_query_once < 0)
> +		has_gpu_reset(fd);
> +
> +	return reset_query_once > 1;
>   }
>   
>   static void eat_error_state(int dev)
> @@ -176,7 +181,11 @@ igt_hang_t igt_allow_hang(int fd, unsigned ctx, unsigned flags)
>   		igt_skip("hang injection disabled by user [IGT_HANG=0]\n");
>   	gem_context_require_bannable(fd);
>   
> -	allow_reset = 1;
> +	if (flags & HANG_WANT_ENGINE_RESET)
> +		allow_reset = 2;
> +	else
> +		allow_reset = 1;
> +
>   	if ((flags & HANG_ALLOW_CAPTURE) == 0) {
>   		param.param = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE;
>   		param.value = 1;
> @@ -187,11 +196,16 @@ igt_hang_t igt_allow_hang(int fd, unsigned ctx, unsigned flags)
>   		__gem_context_set_param(fd, &param);
>   		allow_reset = INT_MAX; /* any reset method */
>   	}
> +
>   	igt_require(igt_params_set(fd, "reset", "%d", allow_reset));
> +	reset_query_once = -1;  /* Re-query after changing param */
>   
>   	if (!igt_check_boolean_env_var("IGT_HANG_WITHOUT_RESET", false))
>   		igt_require(has_gpu_reset(fd));
>   
> +	if (flags & HANG_WANT_ENGINE_RESET)
> +		igt_require(has_engine_reset(fd));
> +
>   	ban = context_get_ban(fd, ctx);
>   	if ((flags & HANG_ALLOW_BAN) == 0)
>   		context_set_ban(fd, ctx, 0);
> diff --git a/lib/igt_gt.h b/lib/igt_gt.h
> index d87fae2d3..d806c4b80 100644
> --- a/lib/igt_gt.h
> +++ b/lib/igt_gt.h
> @@ -48,6 +48,7 @@ void igt_disallow_hang(int fd, igt_hang_t arg);
>   igt_hang_t igt_hang_ctx(int fd, uint32_t ctx, int ring, unsigned flags);
>   #define HANG_ALLOW_BAN 1
>   #define HANG_ALLOW_CAPTURE 2
> +#define HANG_WANT_ENGINE_RESET 4
>   
>   igt_hang_t igt_hang_ring(int fd, int ring);
>   void igt_post_hang_ring(int fd, igt_hang_t arg);
> diff --git a/tests/i915/gem_exec_capture.c b/tests/i915/gem_exec_capture.c
> index f59cb09da..6ae4208ce 100644
> --- a/tests/i915/gem_exec_capture.c
> +++ b/tests/i915/gem_exec_capture.c
> @@ -23,6 +23,7 @@
>   
>   #include <sys/poll.h>
>   #include <zlib.h>
> +#include <sched.h>
>   
>   #include "i915/gem.h"
>   #include "i915/gem_create.h"
> @@ -31,8 +32,16 @@
>   #include "igt_rand.h"
>   #include "igt_sysfs.h"
>   
> +#define MAX_RESET_TIME	120

Isn't this too big when setting the heartbeat interval to 500 ms and the 
preempt one to 250? 5 seconds should be more than enough.

> +
>   IGT_TEST_DESCRIPTION("Check that we capture the user specified objects on a hang");
>   
> +static void configure_engine(int fd, const char *name)
> +{
> +	gem_engine_property_printf(fd, name, "preempt_timeout_ms", "%d", 250);
> +	gem_engine_property_printf(fd, name, "heartbeat_interval_ms", "%d", 500);

Shouldn't we put these entries back to their original values after the 
test is done?
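
A rough sketch of the save/restore being asked for here (illustrative
only: the read helper gem_engine_property_scanf() and the restore call
site are assumptions, not something the posted patch provides):

	static int orig_preempt_ms, orig_heartbeat_ms;

	static void configure_engine(int fd, const char *name)
	{
		/* remember the current tunables before overriding them */
		gem_engine_property_scanf(fd, name, "preempt_timeout_ms",
					  "%d", &orig_preempt_ms);
		gem_engine_property_scanf(fd, name, "heartbeat_interval_ms",
					  "%d", &orig_heartbeat_ms);

		gem_engine_property_printf(fd, name, "preempt_timeout_ms",
					   "%d", 250);
		gem_engine_property_printf(fd, name, "heartbeat_interval_ms",
					   "%d", 500);
	}

	static void restore_engine(int fd, const char *name)
	{
		gem_engine_property_printf(fd, name, "preempt_timeout_ms",
					   "%d", orig_preempt_ms);
		gem_engine_property_printf(fd, name, "heartbeat_interval_ms",
					   "%d", orig_heartbeat_ms);
	}

__capture1() would then call restore_engine() next to its existing
cleanup, or the restore could be hooked up via igt_install_exit_handler()
so it also runs when the test aborts.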

> +}
> +
>   static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
>   {
>   	char *error, *str;
> @@ -61,8 +70,13 @@ static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
>   	igt_assert(found);
>   }
>   
> +static bool fence_busy(int fence)
> +{
> +	return poll(&(struct pollfd){fence, POLLIN}, 1, 0) == 0;
> +}
> +
>   static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
> -		       unsigned ring, uint32_t target)
> +		       unsigned ring, uint32_t target, const char *name)
>   {
>   	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
>   	struct drm_i915_gem_exec_object2 obj[4];
> @@ -74,6 +88,10 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
>   	struct drm_i915_gem_execbuffer2 execbuf;
>   	uint32_t *batch, *seqno;
>   	int i;
> +	int fence_out;
> +	struct timeval before, after, delta;
> +
> +	configure_engine(fd, name);
>   
>   	memset(obj, 0, sizeof(obj));
>   	obj[SCRATCH].handle = gem_create(fd, 4096);
> @@ -149,18 +167,34 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
>   	execbuf.flags = ring;
>   	if (gen > 3 && gen < 6)
>   		execbuf.flags |= I915_EXEC_SECURE;
> +	execbuf.flags |= I915_EXEC_FENCE_OUT;
> +	execbuf.rsvd2 = ~0UL;
>   	execbuf.rsvd1 = ctx->id;
>   
>   	igt_assert(!READ_ONCE(*seqno));
> -	gem_execbuf(fd, &execbuf);
> +	gem_execbuf_wr(fd, &execbuf);
> +
> +	fence_out = execbuf.rsvd2 >> 32;
> +	igt_assert(fence_out >= 0);
>   
>   	/* Wait for the request to start */
>   	while (READ_ONCE(*seqno) != 0xc0ffee)
>   		igt_assert(gem_bo_busy(fd, obj[SCRATCH].handle));
>   	munmap(seqno, 4096);
>   
> +	/* Wait for a reset to occur */
> +	gettimeofday(&before, NULL);
> +	while (fence_busy(fence_out)) {
> +		gettimeofday(&after, NULL);
> +		timersub(&after, &before, &delta);
> +		igt_assert(delta.tv_sec < MAX_RESET_TIME);
> +		sched_yield();
> +	}
> +	gettimeofday(&after, NULL);
> +	timersub(&after, &before, &delta);
> +	igt_info("Target died after %ld.%06lds\n", delta.tv_sec, delta.tv_usec);

What does "died" mean here? Does it indicate that a reset correctly 
happened? IMO this needs better wording.

> +
>   	/* Check that only the buffer we marked is reported in the error */
> -	igt_force_gpu_reset(fd);
>   	check_error_state(dir, &obj[CAPTURE]);
>   
>   	gem_sync(fd, obj[BATCH].handle);
> @@ -170,12 +204,13 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
>   	gem_close(fd, obj[SCRATCH].handle);
>   }
>   
> -static void capture(int fd, int dir, const intel_ctx_t *ctx, unsigned ring)
> +static void capture(int fd, int dir, const intel_ctx_t *ctx,
> +		    const struct intel_execution_engine2 *e)
>   {
>   	uint32_t handle;
>   
>   	handle = gem_create(fd, 4096);
> -	__capture1(fd, dir, ctx, ring, handle);
> +	__capture1(fd, dir, ctx, e->flags, handle, e->name);
>   	gem_close(fd, handle);
>   }
>   
> @@ -577,7 +612,7 @@ static void userptr(int fd, int dir)
>   	igt_assert(posix_memalign(&ptr, 4096, 4096) == 0);
>   	igt_require(__gem_userptr(fd, ptr, 4096, 0, 0, &handle) == 0);
>   
> -	__capture1(fd, dir, intel_ctx_0(fd), 0, handle);
> +	__capture1(fd, dir, intel_ctx_0(fd), 0, handle, "bcs0");

Why does ring = 0 match BCS0?

>   
>   	gem_close(fd, handle);
>   	free(ptr);
> @@ -626,7 +661,8 @@ igt_main
>   		gem_require_mmap_wc(fd);
>   		igt_require(has_capture(fd));
>   		ctx = intel_ctx_create_all_physical(fd);
> -		igt_allow_hang(fd, ctx->id, HANG_ALLOW_CAPTURE);
> +		igt_allow_hang(fd, 0, HANG_ALLOW_CAPTURE |
> +			       HANG_WANT_ENGINE_RESET);

This is going to make the test skip on platforms that have full gt reset 
but not engine reset. You can see in the CI results that it is now 
skipping on snb-shards (the change in result is being caught by a 
catch-all fdo, so not reported as a failure).

Daniele
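
One way to avoid that skip, sketched here for illustration (not part of
the patch; it reuses gem_gpu_reset_type() from the quoted igt_gt.c change,
where a value above 1 means per-engine reset is available):

	unsigned int hang_flags = HANG_ALLOW_CAPTURE;

	/* only insist on per-engine resets where the driver reports them,
	 * so full-GT-reset-only platforms keep running the test */
	if (gem_gpu_reset_type(fd) > 1)
		hang_flags |= HANG_WANT_ENGINE_RESET;

	igt_allow_hang(fd, 0, hang_flags);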

>   
>   		dir = igt_sysfs_open(fd);
>   		igt_require(igt_sysfs_set(dir, "error", "Begone!"));
> @@ -634,7 +670,7 @@ igt_main
>   	}
>   
>   	test_each_engine("capture", fd, ctx, e)
> -		capture(fd, dir, ctx, e->flags);
> +		capture(fd, dir, ctx, e);
>   
>   	igt_subtest_f("many-4K-zero") {
>   		igt_require(gem_can_store_dword(fd, 0));


^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [igt-dev] [Intel-gfx] [PATCH i-g-t 3/3] i915/gem_exec_capture: Update to support GuC based resets
@ 2021-08-14  0:28     ` Daniele Ceraolo Spurio
  0 siblings, 0 replies; 16+ messages in thread
From: Daniele Ceraolo Spurio @ 2021-08-14  0:28 UTC (permalink / raw)
  To: Matthew Brost, igt-dev; +Cc: intel-gfx



On 8/3/2021 6:23 PM, Matthew Brost wrote:
> From: "Signed-off-by: John Harrison" <John.C.Harrison@Intel.com>
>
> When GuC submission is enabled, GuC itself manages hang detection and
> recovery. Therefore, any test that relies on being able to trigger an
> engine reset in the driver will fail. Full GT resets can still be
> triggered by the driver, however in that situation detecting the
> specific context that caused a hang is not possible as the driver has
> no information about what is actually running on the hardware at any
> given time.
>
> So update the test to cause a reset via a the hangcheck mechanism by
> submitting a hanging batch and waiting. That way it is guaranteed to
> be testing the correct reset code paths for the current platform,
> whether that is GuC enabled or not.
>
> Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   lib/igt_gt.c                  | 44 +++++++++++++++++++----------
>   lib/igt_gt.h                  |  1 +
>   tests/i915/gem_exec_capture.c | 52 +++++++++++++++++++++++++++++------
>   3 files changed, 74 insertions(+), 23 deletions(-)
>
> diff --git a/lib/igt_gt.c b/lib/igt_gt.c
> index c049477db..ec548d501 100644
> --- a/lib/igt_gt.c
> +++ b/lib/igt_gt.c
> @@ -56,23 +56,28 @@
>    * engines.
>    */
>   
> +static int reset_query_once = -1;
> +
>   static bool has_gpu_reset(int fd)
>   {
> -	static int once = -1;
> -	if (once < 0) {
> -		struct drm_i915_getparam gp;
> -		int val = 0;
> -
> -		memset(&gp, 0, sizeof(gp));
> -		gp.param = 35; /* HAS_GPU_RESET */
> -		gp.value = &val;
> -
> -		if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
> -			once = intel_gen(intel_get_drm_devid(fd)) >= 5;
> -		else
> -			once = val > 0;
> +	if (reset_query_once < 0) {
> +		reset_query_once = gem_gpu_reset_type(fd);
> +
> +		/* Very old kernels did not support the query */
> +		if (reset_query_once == -1)
> +			reset_query_once =
> +			      (intel_gen(intel_get_drm_devid(fd)) >= 5) ? 1 : 0;
>   	}
> -	return once;
> +
> +	return reset_query_once > 0;
> +}
> +
> +static bool has_engine_reset(int fd)
> +{
> +	if (reset_query_once < 0)
> +		has_gpu_reset(fd);
> +
> +	return reset_query_once > 1;
>   }
>   
>   static void eat_error_state(int dev)
> @@ -176,7 +181,11 @@ igt_hang_t igt_allow_hang(int fd, unsigned ctx, unsigned flags)
>   		igt_skip("hang injection disabled by user [IGT_HANG=0]\n");
>   	gem_context_require_bannable(fd);
>   
> -	allow_reset = 1;
> +	if (flags & HANG_WANT_ENGINE_RESET)
> +		allow_reset = 2;
> +	else
> +		allow_reset = 1;
> +
>   	if ((flags & HANG_ALLOW_CAPTURE) == 0) {
>   		param.param = I915_CONTEXT_PARAM_NO_ERROR_CAPTURE;
>   		param.value = 1;
> @@ -187,11 +196,16 @@ igt_hang_t igt_allow_hang(int fd, unsigned ctx, unsigned flags)
>   		__gem_context_set_param(fd, &param);
>   		allow_reset = INT_MAX; /* any reset method */
>   	}
> +
>   	igt_require(igt_params_set(fd, "reset", "%d", allow_reset));
> +	reset_query_once = -1;  /* Re-query after changing param */
>   
>   	if (!igt_check_boolean_env_var("IGT_HANG_WITHOUT_RESET", false))
>   		igt_require(has_gpu_reset(fd));
>   
> +	if (flags & HANG_WANT_ENGINE_RESET)
> +		igt_require(has_engine_reset(fd));
> +
>   	ban = context_get_ban(fd, ctx);
>   	if ((flags & HANG_ALLOW_BAN) == 0)
>   		context_set_ban(fd, ctx, 0);
> diff --git a/lib/igt_gt.h b/lib/igt_gt.h
> index d87fae2d3..d806c4b80 100644
> --- a/lib/igt_gt.h
> +++ b/lib/igt_gt.h
> @@ -48,6 +48,7 @@ void igt_disallow_hang(int fd, igt_hang_t arg);
>   igt_hang_t igt_hang_ctx(int fd, uint32_t ctx, int ring, unsigned flags);
>   #define HANG_ALLOW_BAN 1
>   #define HANG_ALLOW_CAPTURE 2
> +#define HANG_WANT_ENGINE_RESET 4
>   
>   igt_hang_t igt_hang_ring(int fd, int ring);
>   void igt_post_hang_ring(int fd, igt_hang_t arg);
> diff --git a/tests/i915/gem_exec_capture.c b/tests/i915/gem_exec_capture.c
> index f59cb09da..6ae4208ce 100644
> --- a/tests/i915/gem_exec_capture.c
> +++ b/tests/i915/gem_exec_capture.c
> @@ -23,6 +23,7 @@
>   
>   #include <sys/poll.h>
>   #include <zlib.h>
> +#include <sched.h>
>   
>   #include "i915/gem.h"
>   #include "i915/gem_create.h"
> @@ -31,8 +32,16 @@
>   #include "igt_rand.h"
>   #include "igt_sysfs.h"
>   
> +#define MAX_RESET_TIME	120

isn't this too big when setting the heartbeat interval to 500 ms and the 
preempt one to 250? 5 secs should be more than enough.

> +
>   IGT_TEST_DESCRIPTION("Check that we capture the user specified objects on a hang");
>   
> +static void configure_engine(int fd, const char *name)
> +{
> +	gem_engine_property_printf(fd, name, "preempt_timeout_ms", "%d", 250);
> +	gem_engine_property_printf(fd, name, "heartbeat_interval_ms", "%d", 500);

Shouldn't we put these entries back to their original values after the 
test is done?

> +}
> +
>   static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
>   {
>   	char *error, *str;
> @@ -61,8 +70,13 @@ static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
>   	igt_assert(found);
>   }
>   
> +static bool fence_busy(int fence)
> +{
> +	return poll(&(struct pollfd){fence, POLLIN}, 1, 0) == 0;
> +}
> +
>   static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
> -		       unsigned ring, uint32_t target)
> +		       unsigned ring, uint32_t target, const char *name)
>   {
>   	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
>   	struct drm_i915_gem_exec_object2 obj[4];
> @@ -74,6 +88,10 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
>   	struct drm_i915_gem_execbuffer2 execbuf;
>   	uint32_t *batch, *seqno;
>   	int i;
> +	int fence_out;
> +	struct timeval before, after, delta;
> +
> +	configure_engine(fd, name);
>   
>   	memset(obj, 0, sizeof(obj));
>   	obj[SCRATCH].handle = gem_create(fd, 4096);
> @@ -149,18 +167,34 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
>   	execbuf.flags = ring;
>   	if (gen > 3 && gen < 6)
>   		execbuf.flags |= I915_EXEC_SECURE;
> +	execbuf.flags |= I915_EXEC_FENCE_OUT;
> +	execbuf.rsvd2 = ~0UL;
>   	execbuf.rsvd1 = ctx->id;
>   
>   	igt_assert(!READ_ONCE(*seqno));
> -	gem_execbuf(fd, &execbuf);
> +	gem_execbuf_wr(fd, &execbuf);
> +
> +	fence_out = execbuf.rsvd2 >> 32;
> +	igt_assert(fence_out >= 0);
>   
>   	/* Wait for the request to start */
>   	while (READ_ONCE(*seqno) != 0xc0ffee)
>   		igt_assert(gem_bo_busy(fd, obj[SCRATCH].handle));
>   	munmap(seqno, 4096);
>   
> +	/* Wait for a reset to occur */
> +	gettimeofday(&before, NULL);
> +	while (fence_busy(fence_out)) {
> +		gettimeofday(&after, NULL);
> +		timersub(&after, &before, &delta);
> +		igt_assert(delta.tv_sec < MAX_RESET_TIME);
> +		sched_yield();
> +	}
> +	gettimeofday(&after, NULL);
> +	timersub(&after, &before, &delta);
> +	igt_info("Target died after %ld.%06lds\n", delta.tv_sec, delta.tv_usec);

What does "died" mean here? Does it indicate that a reset correctly
happened? IMO this needs better wording.

> +
>   	/* Check that only the buffer we marked is reported in the error */
> -	igt_force_gpu_reset(fd);
>   	check_error_state(dir, &obj[CAPTURE]);
>   
>   	gem_sync(fd, obj[BATCH].handle);
> @@ -170,12 +204,13 @@ static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
>   	gem_close(fd, obj[SCRATCH].handle);
>   }
>   
> -static void capture(int fd, int dir, const intel_ctx_t *ctx, unsigned ring)
> +static void capture(int fd, int dir, const intel_ctx_t *ctx,
> +		    const struct intel_execution_engine2 *e)
>   {
>   	uint32_t handle;
>   
>   	handle = gem_create(fd, 4096);
> -	__capture1(fd, dir, ctx, ring, handle);
> +	__capture1(fd, dir, ctx, e->flags, handle, e->name);
>   	gem_close(fd, handle);
>   }
>   
> @@ -577,7 +612,7 @@ static void userptr(int fd, int dir)
>   	igt_assert(posix_memalign(&ptr, 4096, 4096) == 0);
>   	igt_require(__gem_userptr(fd, ptr, 4096, 0, 0, &handle) == 0);
>   
> -	__capture1(fd, dir, intel_ctx_0(fd), 0, handle);
> +	__capture1(fd, dir, intel_ctx_0(fd), 0, handle, "bcs0");

Why does ring = 0 match BCS0?

>   
>   	gem_close(fd, handle);
>   	free(ptr);
> @@ -626,7 +661,8 @@ igt_main
>   		gem_require_mmap_wc(fd);
>   		igt_require(has_capture(fd));
>   		ctx = intel_ctx_create_all_physical(fd);
> -		igt_allow_hang(fd, ctx->id, HANG_ALLOW_CAPTURE);
> +		igt_allow_hang(fd, 0, HANG_ALLOW_CAPTURE |
> +			       HANG_WANT_ENGINE_RESET);

This is going to make the test skip on platforms that have full gt reset 
but not engine reset. You can see in the CI results that it is now 
skipping on snb-shards (the change in result is being caught by a 
catch-all fdo, so not reported as a failure).

Daniele

>   
>   		dir = igt_sysfs_open(fd);
>   		igt_require(igt_sysfs_set(dir, "error", "Begone!"));
> @@ -634,7 +670,7 @@ igt_main
>   	}
>   
>   	test_each_engine("capture", fd, ctx, e)
> -		capture(fd, dir, ctx, e->flags);
> +		capture(fd, dir, ctx, e);
>   
>   	igt_subtest_f("many-4K-zero") {
>   		igt_require(gem_can_store_dword(fd, 0));

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [Intel-gfx] [PATCH i-g-t 1/3] i915/gem_exec_schedule: Make gem_exec_schedule understand static priority mapping
  2021-08-13 23:24     ` [igt-dev] " Daniele Ceraolo Spurio
@ 2021-08-16 16:39       ` Matthew Brost
  -1 siblings, 0 replies; 16+ messages in thread
From: Matthew Brost @ 2021-08-16 16:39 UTC (permalink / raw)
  To: Daniele Ceraolo Spurio; +Cc: igt-dev, intel-gfx

On Fri, Aug 13, 2021 at 04:24:37PM -0700, Daniele Ceraolo Spurio wrote:
> 
> 
> On 8/3/2021 6:23 PM, Matthew Brost wrote:
> > The i915 currently has 2k visible priority levels which are currently
> > unique. This is changing to statically map these 2k levels into 3
> > buckets:
> > 
> > low: < 0
> > mid: 0
> > high: > 0
> > 
> > Update gem_exec_schedule to understand this. This entails updating
> > promotion test to use 3 levels that will map into different buckets and
> > also add bit of delay after releasing a cork beforing completing the
> > spinners.
> 
> This needs a line about why we add the delay, something like "to give time
> to the i915 scheduler to process the fence release and queue the requests"
> or something.

Will reword, have a typo here too.

> BTW, any reason not to just add the delay unconditionally in
> unplug_show_queue, instead of only in one test? Other tests might suffer
> from the same problem even if they're not hitting it at the moment.
>

Yeah, probably a better approach to future-proof this, as I could see
other sections randomly failing in CI and wasting our time. Will fix
this and the subsequent patch too.

Matt
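
As a rough sketch of what folding the delay unconditionally into
unplug_show_queue() might look like (the 250 ms value is the one the patch
uses for the promotion case; whether a fixed sleep is the right mechanism
here is left open):

	igt_cork_unplug(c); /* batches will now be queued on the engine */
	igt_debugfs_dump(fd, "i915_engine_info");

	/*
	 * Give the scheduler time to process the fence release and queue
	 * the unplugged requests before the spinners are freed, for every
	 * caller rather than just the promotion test.
	 */
	usleep(250 * 1000);

	for (int n = 0; n < max; n++)
		igt_spin_free(fd, spin[n]);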
 
> Daniele
> 
> > 
> > Also skip any tests that rely on having more than 3 priority levels.
> > 
> > v2: Add a delay between starting releasing spinner and cork in
> > promotion, add local define for static mapping engine info
> > 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >   lib/i915/gem_scheduler.c       | 14 ++++++++
> >   lib/i915/gem_scheduler.h       |  1 +
> >   lib/i915/i915_drm_local.h      | 10 ++++++
> >   tests/i915/gem_exec_schedule.c | 62 +++++++++++++++++++++-------------
> >   4 files changed, 63 insertions(+), 24 deletions(-)
> > 
> > diff --git a/lib/i915/gem_scheduler.c b/lib/i915/gem_scheduler.c
> > index cdddf42ad..d006b8676 100644
> > --- a/lib/i915/gem_scheduler.c
> > +++ b/lib/i915/gem_scheduler.c
> > @@ -28,6 +28,7 @@
> >   #include "igt_core.h"
> >   #include "ioctl_wrappers.h"
> > +#include "i915/i915_drm_local.h"
> >   #include "i915/gem_scheduler.h"
> >   #include "i915/gem_submission.h"
> > @@ -90,6 +91,19 @@ bool gem_scheduler_has_ctx_priority(int fd)
> >   		I915_SCHEDULER_CAP_PRIORITY;
> >   }
> > +/**
> > + * gem_scheduler_has_static_priority:
> > + * @fd: open i915 drm file descriptor
> > + *
> > + * Feature test macro to query whether the driver supports priority assigned
> > + * from user space are statically mapping into 3 buckets.
> > + */
> > +bool gem_scheduler_has_static_priority(int fd)
> > +{
> > +	return gem_scheduler_capability(fd) &
> > +		I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP;
> > +}
> > +
> >   /**
> >    * gem_scheduler_has_preemption:
> >    * @fd: open i915 drm file descriptor
> > diff --git a/lib/i915/gem_scheduler.h b/lib/i915/gem_scheduler.h
> > index d43e84bd2..b00804f70 100644
> > --- a/lib/i915/gem_scheduler.h
> > +++ b/lib/i915/gem_scheduler.h
> > @@ -29,6 +29,7 @@
> >   unsigned gem_scheduler_capability(int fd);
> >   bool gem_scheduler_enabled(int fd);
> >   bool gem_scheduler_has_ctx_priority(int fd);
> > +bool gem_scheduler_has_static_priority(int fd);
> >   bool gem_scheduler_has_preemption(int fd);
> >   bool gem_scheduler_has_semaphores(int fd);
> >   bool gem_scheduler_has_engine_busy_stats(int fd);
> > diff --git a/lib/i915/i915_drm_local.h b/lib/i915/i915_drm_local.h
> > index dd646aedf..a1527ff21 100644
> > --- a/lib/i915/i915_drm_local.h
> > +++ b/lib/i915/i915_drm_local.h
> > @@ -20,6 +20,16 @@ extern "C" {
> >    * clean these up when kernel uapi headers are sync'd.
> >    */
> > +/*
> > + * Indicates the 2k user priority levels are statically mapped into 3 buckets as
> > + * follows:
> > + *
> > + * -1k to -1	Low priority
> > + * 0		Normal priority
> > + * 1 to 1k	Highest priority
> > + */
> > +#define   I915_SCHEDULER_CAP_STATIC_PRIORITY_MAP	(1ul << 5)
> > +
> >   #if defined(__cplusplus)
> >   }
> >   #endif
> > diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
> > index e5fb45982..bb9fb6c14 100644
> > --- a/tests/i915/gem_exec_schedule.c
> > +++ b/tests/i915/gem_exec_schedule.c
> > @@ -199,7 +199,8 @@ create_highest_priority(int fd, const intel_ctx_cfg_t *cfg)
> >   static void unplug_show_queue(int fd, struct igt_cork *c,
> >   			      const intel_ctx_cfg_t *cfg,
> > -			      unsigned int engine)
> > +			      unsigned int engine,
> > +			      unsigned usec_delay)
> >   {
> >   	igt_spin_t *spin[MAX_ELSP_QLEN];
> >   	int max = MAX_ELSP_QLEN;
> > @@ -216,6 +217,7 @@ static void unplug_show_queue(int fd, struct igt_cork *c,
> >   	igt_cork_unplug(c); /* batches will now be queued on the engine */
> >   	igt_debugfs_dump(fd, "i915_engine_info");
> > +	usleep(usec_delay);
> >   	for (int n = 0; n < max; n++)
> >   		igt_spin_free(fd, spin[n]);
> > @@ -237,7 +239,7 @@ static void fifo(int fd, const intel_ctx_t *ctx, unsigned ring)
> >   	store_dword_fenced(fd, ctx, ring, scratch, 0, 1, fence, 0);
> >   	store_dword_fenced(fd, ctx, ring, scratch, 0, 2, fence, 0);
> > -	unplug_show_queue(fd, &cork, &ctx->cfg, ring);
> > +	unplug_show_queue(fd, &cork, &ctx->cfg, ring, 0);
> >   	close(fence);
> >   	result =  __sync_read_u32(fd, scratch, 0);
> > @@ -298,7 +300,7 @@ static void implicit_rw(int i915, const intel_ctx_t *ctx, unsigned int ring,
> >   				   ring, scratch, 0, ring,
> >   				   fence, I915_GEM_DOMAIN_RENDER);
> > -	unplug_show_queue(i915, &cork, &ctx->cfg, ring);
> > +	unplug_show_queue(i915, &cork, &ctx->cfg, ring, 0);
> >   	close(fence);
> >   	result =  __sync_read_u32(i915, scratch, 0);
> > @@ -355,7 +357,7 @@ static void independent(int fd, const intel_ctx_t *ctx, unsigned int engine,
> >   	/* Same priority, but different timeline (as different engine) */
> >   	batch = __store_dword(fd, ctx, engine, scratch, 0, engine, 0, fence, 0);
> > -	unplug_show_queue(fd, &cork, &ctx->cfg, engine);
> > +	unplug_show_queue(fd, &cork, &ctx->cfg, engine, 0);
> >   	close(fence);
> >   	gem_sync(fd, batch);
> > @@ -1326,7 +1328,7 @@ static void reorder(int fd, const intel_ctx_cfg_t *cfg,
> >   	store_dword_fenced(fd, ctx[LO], ring, scratch, 0, ctx[LO]->id, fence, 0);
> >   	store_dword_fenced(fd, ctx[HI], ring, scratch, 0, ctx[HI]->id, fence, 0);
> > -	unplug_show_queue(fd, &cork, cfg, ring);
> > +	unplug_show_queue(fd, &cork, cfg, ring, 0);
> >   	close(fence);
> >   	result =  __sync_read_u32(fd, scratch, 0);
> > @@ -1353,10 +1355,10 @@ static void promotion(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
> >   	gem_context_set_priority(fd, ctx[LO]->id, MIN_PRIO);
> >   	ctx[HI] = intel_ctx_create(fd, cfg);
> > -	gem_context_set_priority(fd, ctx[HI]->id, 0);
> > +	gem_context_set_priority(fd, ctx[HI]->id, MAX_PRIO);
> >   	ctx[NOISE] = intel_ctx_create(fd, cfg);
> > -	gem_context_set_priority(fd, ctx[NOISE]->id, MIN_PRIO/2);
> > +	gem_context_set_priority(fd, ctx[NOISE]->id, 0);
> >   	result = gem_create(fd, 4096);
> >   	dep = gem_create(fd, 4096);
> > @@ -1377,7 +1379,7 @@ static void promotion(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
> >   	store_dword(fd, ctx[HI], ring, result, 0, ctx[HI]->id, 0);
> > -	unplug_show_queue(fd, &cork, cfg, ring);
> > +	unplug_show_queue(fd, &cork, cfg, ring, 250000);
> >   	close(fence);
> >   	dep_read = __sync_read_u32(fd, dep, 0);
> > @@ -1893,7 +1895,7 @@ static void deep(int fd, const intel_ctx_cfg_t *cfg,
> >   	igt_info("Second deptree: %d requests [%.3fs]\n",
> >   		 n * XS, 1e-9*igt_nsec_elapsed(&tv));
> > -	unplug_show_queue(fd, &cork, cfg, ring);
> > +	unplug_show_queue(fd, &cork, cfg, ring, 0);
> >   	gem_close(fd, plug);
> >   	igt_require(expected); /* too slow */
> > @@ -1962,7 +1964,7 @@ static void wide(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
> >   	igt_info("Submitted %d requests over %d contexts in %.1fms\n",
> >   		 count, MAX_CONTEXTS, igt_nsec_elapsed(&tv) * 1e-6);
> > -	unplug_show_queue(fd, &cork, cfg, ring);
> > +	unplug_show_queue(fd, &cork, cfg, ring, 0);
> >   	close(fence);
> >   	__sync_read_u32_count(fd, result, result_read, sizeof(result_read));
> > @@ -2067,7 +2069,7 @@ static void reorder_wide(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
> >   		intel_ctx_destroy(fd, tmp_ctx);
> >   	}
> > -	unplug_show_queue(fd, &cork, cfg, ring);
> > +	unplug_show_queue(fd, &cork, cfg, ring, 0);
> >   	close(fence);
> >   	__sync_read_u32_count(fd, result, result_read, sizeof(result_read));
> > @@ -2963,19 +2965,25 @@ igt_main
> >   			test_each_engine_store("preempt-other-chain", fd, ctx, e)
> >   				preempt_other(fd, &ctx->cfg, e->flags, CHAIN);
> > -			test_each_engine_store("preempt-queue", fd, ctx, e)
> > -				preempt_queue(fd, &ctx->cfg, e->flags, 0);
> > +			test_each_engine_store("preempt-engines", fd, ctx, e)
> > +				preempt_engines(fd, e, 0);
> > -			test_each_engine_store("preempt-queue-chain", fd, ctx, e)
> > -				preempt_queue(fd, &ctx->cfg, e->flags, CHAIN);
> > -			test_each_engine_store("preempt-queue-contexts", fd, ctx, e)
> > -				preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS);
> > +			igt_subtest_group {
> > +				igt_fixture {
> > +					igt_require(!gem_scheduler_has_static_priority(fd));
> > +				}
> > -			test_each_engine_store("preempt-queue-contexts-chain", fd, ctx, e)
> > -				preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS | CHAIN);
> > +				test_each_engine_store("preempt-queue", fd, ctx, e)
> > +					preempt_queue(fd, &ctx->cfg, e->flags, 0);
> > -			test_each_engine_store("preempt-engines", fd, ctx, e)
> > -				preempt_engines(fd, e, 0);
> > +				test_each_engine_store("preempt-queue-chain", fd, ctx, e)
> > +					preempt_queue(fd, &ctx->cfg, e->flags, CHAIN);
> > +				test_each_engine_store("preempt-queue-contexts", fd, ctx, e)
> > +					preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS);
> > +
> > +				test_each_engine_store("preempt-queue-contexts-chain", fd, ctx, e)
> > +					preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS | CHAIN);
> > +			}
> >   			igt_subtest_group {
> >   				igt_hang_t hang;
> > @@ -3017,11 +3025,17 @@ igt_main
> >   		test_each_engine_store("wide", fd, ctx, e)
> >   			wide(fd, &ctx->cfg, e->flags);
> > -		test_each_engine_store("reorder-wide", fd, ctx, e)
> > -			reorder_wide(fd, &ctx->cfg, e->flags);
> > -
> >   		test_each_engine_store("smoketest", fd, ctx, e)
> >   			smoketest(fd, &ctx->cfg, e->flags, 5);
> > +
> > +		igt_subtest_group {
> > +			igt_fixture {
> > +				igt_require(!gem_scheduler_has_static_priority(fd));
> > +			}
> > +
> > +			test_each_engine_store("reorder-wide", fd, ctx, e)
> > +				reorder_wide(fd, &ctx->cfg, e->flags);
> > +		}
> >   	}
> >   	igt_subtest_group {
> 

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2021-08-16 16:44 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-08-04  1:23 [Intel-gfx] [PATCH i-g-t 0/3] IGT fixes for priority management + capture with GuC submission Matthew Brost
2021-08-04  1:23 ` [igt-dev] " Matthew Brost
2021-08-04  1:23 ` [Intel-gfx] [PATCH i-g-t 1/3] i915/gem_exec_schedule: Make gem_exec_schedule understand static priority mapping Matthew Brost
2021-08-04  1:23   ` Matthew Brost
2021-08-13 23:24   ` Daniele Ceraolo Spurio
2021-08-13 23:24     ` [igt-dev] " Daniele Ceraolo Spurio
2021-08-16 16:39     ` Matthew Brost
2021-08-16 16:39       ` [igt-dev] " Matthew Brost
2021-08-04  1:23 ` [Intel-gfx] [PATCH i-g-t 2/3] i915/gem_ctx_shared: Make gem_ctx_shared " Matthew Brost
2021-08-04  1:23   ` [igt-dev] " Matthew Brost
2021-08-04  1:23 ` [Intel-gfx] [PATCH i-g-t 3/3] i915/gem_exec_capture: Update to support GuC based resets Matthew Brost
2021-08-04  1:23   ` Matthew Brost
2021-08-14  0:28   ` Daniele Ceraolo Spurio
2021-08-14  0:28     ` [igt-dev] " Daniele Ceraolo Spurio
2021-08-04  1:47 ` [igt-dev] ✓ Fi.CI.BAT: success for IGT fixes for priority management + capture with GuC submission Patchwork
2021-08-05 11:46 ` [igt-dev] ✓ Fi.CI.IGT: " Patchwork
