* [Intel-gfx] [PATCH i-g-t 1/2] lib/i915: Report scheduler caps for timeslicing
@ 2020-05-13 17:02 ` Chris Wilson
0 siblings, 0 replies; 10+ messages in thread
From: Chris Wilson @ 2020-05-13 17:02 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev, Chris Wilson
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
include/drm-uapi/i915_drm.h | 8 +++++---
lib/i915/gem_scheduler.c | 15 +++++++++++++++
lib/i915/gem_scheduler.h | 1 +
3 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/include/drm-uapi/i915_drm.h b/include/drm-uapi/i915_drm.h
index 2b55af13a..a222b6bfb 100644
--- a/include/drm-uapi/i915_drm.h
+++ b/include/drm-uapi/i915_drm.h
@@ -523,6 +523,7 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
+#define I915_SCHEDULER_CAP_TIMESLICING (1ul << 5)
#define I915_PARAM_HUC_STATUS 42
@@ -1040,9 +1041,10 @@ struct drm_i915_gem_exec_fence {
*/
__u32 handle;
-#define I915_EXEC_FENCE_WAIT (1<<0)
-#define I915_EXEC_FENCE_SIGNAL (1<<1)
-#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
+#define I915_EXEC_FENCE_WAIT (1u << 0)
+#define I915_EXEC_FENCE_SIGNAL (1u << 1)
+#define I915_EXEC_FENCE_WAIT_SUBMIT (1u << 2)
+#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_WAIT_SUBMIT << 1))
__u32 flags;
};
diff --git a/lib/i915/gem_scheduler.c b/lib/i915/gem_scheduler.c
index 184da8436..7873766ae 100644
--- a/lib/i915/gem_scheduler.c
+++ b/lib/i915/gem_scheduler.c
@@ -129,6 +129,19 @@ bool gem_scheduler_has_engine_busy_stats(int fd)
I915_SCHEDULER_CAP_ENGINE_BUSY_STATS;
}
+/**
+ * gem_scheduler_has_timeslicing:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether the driver supports using HW preemption
+ * to implement timeslicing of userspace batches. This allows userspace to
+ * implement micro-level scheduling within their own batches.
+ */
+bool gem_scheduler_has_timeslicing(int fd)
+{
+ return gem_scheduler_capability(fd) & I915_SCHEDULER_CAP_TIMESLICING;
+}
+
/**
* gem_scheduler_print_capability:
* @fd: open i915 drm file descriptor
@@ -149,6 +162,8 @@ void gem_scheduler_print_capability(int fd)
igt_info(" - With preemption enabled\n");
if (caps & I915_SCHEDULER_CAP_SEMAPHORES)
igt_info(" - With HW semaphores enabled\n");
+ if (caps & I915_SCHEDULER_CAP_TIMESLICING)
+ igt_info(" - With user timeslicing enabled\n");
if (caps & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
igt_info(" - With engine busy statistics\n");
}
diff --git a/lib/i915/gem_scheduler.h b/lib/i915/gem_scheduler.h
index 14bd4cac4..d43e84bd2 100644
--- a/lib/i915/gem_scheduler.h
+++ b/lib/i915/gem_scheduler.h
@@ -32,6 +32,7 @@ bool gem_scheduler_has_ctx_priority(int fd);
bool gem_scheduler_has_preemption(int fd);
bool gem_scheduler_has_semaphores(int fd);
bool gem_scheduler_has_engine_busy_stats(int fd);
+bool gem_scheduler_has_timeslicing(int fd);
void gem_scheduler_print_capability(int fd);
#endif /* GEM_SCHEDULER_H */
--
2.26.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [igt-dev] [PATCH i-g-t 1/2] lib/i915: Report scheduler caps for timeslicing
@ 2020-05-13 17:02 ` Chris Wilson
0 siblings, 0 replies; 10+ messages in thread
From: Chris Wilson @ 2020-05-13 17:02 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev, Chris Wilson
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
include/drm-uapi/i915_drm.h | 8 +++++---
lib/i915/gem_scheduler.c | 15 +++++++++++++++
lib/i915/gem_scheduler.h | 1 +
3 files changed, 21 insertions(+), 3 deletions(-)
diff --git a/include/drm-uapi/i915_drm.h b/include/drm-uapi/i915_drm.h
index 2b55af13a..a222b6bfb 100644
--- a/include/drm-uapi/i915_drm.h
+++ b/include/drm-uapi/i915_drm.h
@@ -523,6 +523,7 @@ typedef struct drm_i915_irq_wait {
#define I915_SCHEDULER_CAP_PREEMPTION (1ul << 2)
#define I915_SCHEDULER_CAP_SEMAPHORES (1ul << 3)
#define I915_SCHEDULER_CAP_ENGINE_BUSY_STATS (1ul << 4)
+#define I915_SCHEDULER_CAP_TIMESLICING (1ul << 5)
#define I915_PARAM_HUC_STATUS 42
@@ -1040,9 +1041,10 @@ struct drm_i915_gem_exec_fence {
*/
__u32 handle;
-#define I915_EXEC_FENCE_WAIT (1<<0)
-#define I915_EXEC_FENCE_SIGNAL (1<<1)
-#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_SIGNAL << 1))
+#define I915_EXEC_FENCE_WAIT (1u << 0)
+#define I915_EXEC_FENCE_SIGNAL (1u << 1)
+#define I915_EXEC_FENCE_WAIT_SUBMIT (1u << 2)
+#define __I915_EXEC_FENCE_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_WAIT_SUBMIT << 1))
__u32 flags;
};
diff --git a/lib/i915/gem_scheduler.c b/lib/i915/gem_scheduler.c
index 184da8436..7873766ae 100644
--- a/lib/i915/gem_scheduler.c
+++ b/lib/i915/gem_scheduler.c
@@ -129,6 +129,19 @@ bool gem_scheduler_has_engine_busy_stats(int fd)
I915_SCHEDULER_CAP_ENGINE_BUSY_STATS;
}
+/**
+ * gem_scheduler_has_timeslicing:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to query whether the driver supports using HW preemption
+ * to implement timeslicing of userspace batches. This allows userspace to
+ * implement micro-level scheduling within their own batches.
+ */
+bool gem_scheduler_has_timeslicing(int fd)
+{
+ return gem_scheduler_capability(fd) & I915_SCHEDULER_CAP_TIMESLICING;
+}
+
/**
* gem_scheduler_print_capability:
* @fd: open i915 drm file descriptor
@@ -149,6 +162,8 @@ void gem_scheduler_print_capability(int fd)
igt_info(" - With preemption enabled\n");
if (caps & I915_SCHEDULER_CAP_SEMAPHORES)
igt_info(" - With HW semaphores enabled\n");
+ if (caps & I915_SCHEDULER_CAP_TIMESLICING)
+ igt_info(" - With user timeslicing enabled\n");
if (caps & I915_SCHEDULER_CAP_ENGINE_BUSY_STATS)
igt_info(" - With engine busy statistics\n");
}
diff --git a/lib/i915/gem_scheduler.h b/lib/i915/gem_scheduler.h
index 14bd4cac4..d43e84bd2 100644
--- a/lib/i915/gem_scheduler.h
+++ b/lib/i915/gem_scheduler.h
@@ -32,6 +32,7 @@ bool gem_scheduler_has_ctx_priority(int fd);
bool gem_scheduler_has_preemption(int fd);
bool gem_scheduler_has_semaphores(int fd);
bool gem_scheduler_has_engine_busy_stats(int fd);
+bool gem_scheduler_has_timeslicing(int fd);
void gem_scheduler_print_capability(int fd);
#endif /* GEM_SCHEDULER_H */
--
2.26.2
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Intel-gfx] [PATCH i-g-t 2/2] i915/gem_exec_fence: Teach invalid-wait about invalid future fences
2020-05-13 17:02 ` [igt-dev] " Chris Wilson
@ 2020-05-13 17:02 ` Chris Wilson
-1 siblings, 0 replies; 10+ messages in thread
From: Chris Wilson @ 2020-05-13 17:02 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev, Chris Wilson
When we allow a wait on a future fence, it must autoexpire if the
fence is never signaled by userspace. Also put future fences to work, as
the intention is to use them, along with WAIT_SUBMIT and semaphores, for
userspace to perform its own fine-grained scheduling. Or simply run
concurrent clients without having to flush batches between context
switches.
v2: Verify deadlock detection
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
tests/i915/gem_exec_fence.c | 680 +++++++++++++++++++++++++++++++++++-
1 file changed, 677 insertions(+), 3 deletions(-)
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index 4140bff24..eb1165080 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -1123,11 +1123,12 @@ static void test_syncobj_invalid_wait(int fd)
struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd, 0),
};
+ int out;
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = I915_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
@@ -1135,14 +1136,59 @@ static void test_syncobj_invalid_wait(int fd)
obj.handle = gem_create(fd, 4096);
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
- /* waiting before the fence is set is invalid */
+ /* waiting before the fence is set is^W may be invalid */
fence.flags = I915_EXEC_FENCE_WAIT;
- igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+ if (__gem_execbuf_wr(fd, &execbuf)) {
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+ return;
+ }
+
+ /* If we do allow the wait on a future fence, it should autoexpire */
+ gem_sync(fd, obj.handle);
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -ETIMEDOUT);
+ close(out);
gem_close(fd, obj.handle);
syncobj_destroy(fd, fence.handle);
}
+static void test_syncobj_incomplete_wait_submit(int i915)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = batch_create(i915),
+ };
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ .flags = I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT,
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+
+ .flags = I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT,
+ };
+ int out;
+
+ /* waiting before the fence is set is^W may be invalid */
+ if (__gem_execbuf_wr(i915, &execbuf)) {
+ igt_assert_eq(__gem_execbuf(i915, &execbuf), -EINVAL);
+ return;
+ }
+
+ /* If we do allow the wait on a future fence, it should autoexpire */
+ gem_sync(i915, obj.handle);
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -ETIMEDOUT);
+ close(out);
+
+ gem_close(i915, obj.handle);
+ syncobj_destroy(i915, fence.handle);
+}
+
static void test_syncobj_invalid_flags(int fd)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -1289,6 +1335,517 @@ static void test_syncobj_wait(int fd)
}
}
+static uint32_t future_batch(int i915, uint32_t offset)
+{
+ uint32_t handle = gem_create(i915, 4096);
+ const int gen = intel_gen(intel_get_drm_devid(i915));
+ uint32_t cs[16];
+ int i = 0;
+
+ cs[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ cs[++i] = offset + 4000;
+ cs[++i] = 0;
+ } else if (gen >= 4) {
+ cs[++i] = 0;
+ cs[++i] = offset + 4000;
+ } else {
+ cs[i]--;
+ cs[++i] = offset + 4000;
+ }
+ cs[++i] = 1;
+ cs[i + 1] = MI_BATCH_BUFFER_END;
+ gem_write(i915, handle, 0, cs, sizeof(cs));
+
+ cs[i] = 2;
+ gem_write(i915, handle, 64, cs, sizeof(cs));
+
+ return handle;
+}
+
+static void test_syncobj_future(int i915, unsigned int engine)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence = {};
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ };
+ const struct intel_execution_engine2 *e;
+
+ /*
+ * Client A is waiting on a future fence from B. So even though its
+ * execbuf is called first, we need to hold it in a queue waiting on
+ * B.
+ */
+ igt_require(gem_scheduler_enabled(i915));
+
+ __for_each_physical_engine(i915, e) {
+ uint32_t result;
+
+ igt_debug("waiting on future %s\n", e->name);
+ fence.handle = syncobj_create(i915, 0);
+
+ fence.flags = I915_EXEC_FENCE_WAIT;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags = engine | I915_EXEC_FENCE_ARRAY;
+ execbuf.rsvd1 = 0;
+ gem_execbuf(i915, &execbuf); /* writes 1 */
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 64;
+ execbuf.flags = e->flags | I915_EXEC_FENCE_ARRAY;
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ gem_execbuf(i915, &execbuf); /* writes 2 */
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence.handle);
+ gem_sync(i915, obj.handle); /* write hazard lies */
+ gem_read(i915, obj.handle, 4000, &result, sizeof(result));
+ igt_assert_eq(result, 1);
+ }
+
+ gem_close(i915, obj.handle);
+}
+
+static uint32_t future_submit_batch(int i915, uint32_t offset)
+{
+ uint32_t handle = gem_create(i915, 4096);
+ const int gen = intel_gen(intel_get_drm_devid(i915));
+ uint32_t cs[16];
+ int i = 0;
+
+ cs[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ cs[++i] = offset + 4000;
+ cs[++i] = 0;
+ } else if (gen >= 4) {
+ cs[++i] = 0;
+ cs[++i] = offset + 4000;
+ } else {
+ cs[i]--;
+ cs[++i] = offset + 4000;
+ }
+ cs[++i] = 1;
+ cs[i + 1] = MI_BATCH_BUFFER_END;
+ igt_assert(i + 1 < ARRAY_SIZE(cs));
+ gem_write(i915, handle, 0, cs, sizeof(cs));
+
+ i = 0;
+ cs[i++] =
+ MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD |
+ (4 - 2);
+ cs[i++] = 1;
+ cs[i++] = offset + 4000;
+ cs[i++] = 0;
+
+ cs[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ cs[++i] = offset + 4000;
+ cs[++i] = 0;
+ } else if (gen >= 4) {
+ cs[++i] = 0;
+ cs[++i] = offset + 4000;
+ } else {
+ cs[i]--;
+ cs[++i] = offset + 4000;
+ }
+ cs[++i] = 2;
+ cs[++i] = MI_BATCH_BUFFER_END;
+ igt_assert(i < ARRAY_SIZE(cs));
+
+ gem_write(i915, handle, 64, cs, sizeof(cs));
+
+ return handle;
+}
+
+static void test_syncobj_future_submit(int i915, unsigned int engine)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_submit_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ const struct intel_execution_engine2 *e;
+
+ /*
+ * Here we submit client A waiting on client B, but internally client
+ * B has a semaphore that waits on client A. This relies on timeslicing
+ * to reorder B before A, even though userspace has asked to submit
+ * A & B simultaneously (and due to the sequence we will submit B
+ * then A).
+ */
+ igt_require(gem_scheduler_has_timeslicing(i915));
+
+ __for_each_physical_engine(i915, e) {
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = engine | I915_EXEC_FENCE_ARRAY,
+ };
+ uint32_t result;
+ int out;
+
+ igt_debug("waiting on future %s\n", e->name);
+
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ fence.flags = I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0); /* writes 1 */
+ execbuf.flags &= ~I915_EXEC_FENCE_OUT;
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 64;
+ execbuf.flags &= ~I915_EXEC_RING_MASK;
+ execbuf.flags |= e->flags;
+ gem_execbuf(i915, &execbuf); /* writes 2 */
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence.handle);
+ gem_sync(i915, obj.handle); /* write hazard lies */
+ gem_read(i915, obj.handle, 4000, &result, sizeof(result));
+ igt_assert_eq(result, 2);
+
+ /* check we didn't autotimeout */
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), 1);
+ close(out);
+ }
+
+ gem_close(i915, obj.handle);
+}
+
+static void test_syncobj_future_past(int i915, unsigned int engine, int flags)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = engine | I915_EXEC_FENCE_ARRAY,
+ };
+ uint32_t result;
+ int out;
+
+ fence.flags = I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_SIGNAL | flags;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0); /* writes 1 */
+ execbuf.flags &= ~I915_EXEC_FENCE_OUT;
+
+ gem_sync(i915, obj.handle); /* write hazard lies */
+ gem_read(i915, obj.handle, 4000, &result, sizeof(result));
+ igt_assert_eq(result, 1);
+
+ /* check we didn't autotimeout */
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), 1);
+ close(out);
+
+ gem_close(i915, obj.handle);
+ syncobj_destroy(i915, fence.handle);
+}
+
+static void test_syncobj_future_self(int i915, unsigned int engine, int flags)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = engine | I915_EXEC_FENCE_ARRAY,
+ };
+ int out;
+
+ fence.flags = I915_EXEC_FENCE_WAIT | flags;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0); /* writes 1 */
+ execbuf.flags &= ~I915_EXEC_FENCE_OUT;
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 64;
+ gem_execbuf(i915, &execbuf); /* writes 2 */
+
+ gem_sync(i915, obj.handle); /* write hazard lies */
+
+ gem_close(i915, obj.handle);
+ syncobj_destroy(i915, fence.handle);
+
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+}
+
+static void test_syncobj_future_pair(int i915, unsigned int engine)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = submitN_batches(i915, 24 << 20, 2),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence[2] = {
+ { .handle = syncobj_create(i915, 0) },
+ { .handle = syncobj_create(i915, 0) }
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(fence),
+ .num_cliprects = 2,
+ };
+ const struct intel_execution_engine2 *e;
+
+ __for_each_physical_engine(i915, e) {
+ int out = 0;
+
+ gem_write(i915, obj.handle, 0, &out, sizeof(out));
+ fence[0].handle = syncobj_create(i915, 0);
+ fence[1].handle = syncobj_create(i915, 0);
+
+ fence[0].flags = I915_EXEC_FENCE_SIGNAL;
+ fence[1].flags =
+ I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ execbuf.batch_start_offset = 1024;
+ execbuf.flags =
+ engine | I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = gem_context_create(i915);
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0);
+ gem_context_destroy(i915, execbuf.rsvd1);
+ execbuf.rsvd2 >>= 32;
+
+ fence[0].flags =
+ I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ fence[1].flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 2048;
+ execbuf.flags =
+ e->flags | I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = gem_context_create(i915);
+ gem_execbuf_wr(i915, &execbuf);
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence[0].handle);
+ syncobj_destroy(i915, fence[1].handle);
+
+ gem_sync(i915, obj.handle); /* write hazard lies */
+
+ out = execbuf.rsvd2;
+ igt_assert_eq(sync_fence_status(out), 1);
+ close(out);
+
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), 1);
+ close(out);
+
+ gem_read(i915, obj.handle, 0, &out, sizeof(out));
+ igt_assert_eq(out, 16);
+ }
+
+ gem_close(i915, obj.handle);
+}
+
+static void test_syncobj_future_group(int i915, unsigned int engine, int count)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = submitN_batches(i915, 24 << 20, count),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence[count];
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(fence),
+ .num_cliprects = count,
+ .flags = engine | I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT,
+ };
+ int out[count];
+ uint32_t result;
+
+ for (int i = 0; i < count; i++) {
+ fence[i].handle = syncobj_create(i915, 0);
+ fence[i].flags =
+ I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ }
+
+ for (int i = 0; i < count; i++) {
+ fence[i].flags = I915_EXEC_FENCE_SIGNAL;
+
+ execbuf.batch_start_offset = 1024 * (i + 1);
+ execbuf.rsvd1 = gem_context_create(i915);
+ gem_execbuf_wr(i915, &execbuf);
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ out[i] = execbuf.rsvd2 >> 32;
+ fence[i].flags =
+ I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ }
+ gem_sync(i915, obj.handle); /* write hazard lies */
+
+ /* As both batches were waiting for the other to start -- deadlock? */
+ for (int i = 0; i < count; i++) {
+ syncobj_destroy(i915, fence[i].handle);
+ igt_assert_eq(sync_fence_status(out[i]), 1);
+ close(out[i]);
+ }
+
+ /* Nevertheless, we ignored^Wresolved the deadlock and let them run */
+ gem_read(i915, obj.handle, 0, &result, sizeof(result));
+ igt_assert_eq(result, 8 * count);
+ gem_close(i915, obj.handle);
+}
+
+
+static void
+test_syncobj_future_deadlock(int i915, unsigned int engine, int flags)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence = {};
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ };
+ const struct intel_execution_engine2 *e;
+
+ __for_each_physical_engine(i915, e) {
+ int out;
+
+ fence.handle = syncobj_create(i915, 0),
+
+ fence.flags = I915_EXEC_FENCE_WAIT | flags;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags = engine | I915_EXEC_FENCE_ARRAY,
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = 0;
+ gem_execbuf_wr(i915, &execbuf); /* writes 1 */
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 64;
+ execbuf.flags = e->flags | I915_EXEC_FENCE_ARRAY,
+ execbuf.flags |= I915_EXEC_FENCE_OUT | I915_EXEC_FENCE_IN;
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ execbuf.rsvd2 >>= 32;
+ gem_execbuf_wr(i915, &execbuf); /* writes 2 */
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence.handle);
+ gem_sync(i915, obj.handle);
+
+ /* How should this deadlock be resolved? */
+ out = execbuf.rsvd2;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+ }
+
+ gem_close(i915, obj.handle);
+}
+
+static void
+test_syncobj_future_cycle(int i915, unsigned int engine, int flags)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ const struct intel_execution_engine2 *e1, *e2;
+
+ __for_each_physical_engine(i915, e1) {
+ __for_each_physical_engine(i915, e2) {
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = engine | I915_EXEC_FENCE_ARRAY,
+ };
+ int out;
+
+ fence.flags = I915_EXEC_FENCE_WAIT | flags;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0);
+
+ fence.flags = 0;
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ execbuf.rsvd2 >>= 32;
+ execbuf.flags &= ~I915_EXEC_RING_MASK;
+ execbuf.flags |= e1->flags | I915_EXEC_FENCE_IN;
+ gem_execbuf_wr(i915, &execbuf);
+ gem_context_destroy(i915, execbuf.rsvd1);
+ close(execbuf.rsvd2);
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ execbuf.rsvd2 >>= 32;
+ execbuf.flags &= ~I915_EXEC_RING_MASK;
+ execbuf.flags |= e2->flags;
+ execbuf.batch_start_offset = 64;
+ gem_execbuf_wr(i915, &execbuf); /* writes 2 */
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence.handle);
+ gem_sync(i915, obj.handle);
+
+
+ /* How should this deadlock be resolved? */
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+
+ out = execbuf.rsvd2;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+ }}
+
+ gem_close(i915, obj.handle);
+}
+
static void test_syncobj_export(int fd)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -1574,6 +2131,117 @@ static void test_syncobj_channel(int fd)
syncobj_destroy(fd, syncobj[i]);
}
+static bool has_future_syncobj(int i915)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = batch_create(i915),
+ };
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ .flags = I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_SIGNAL,
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = I915_EXEC_FENCE_ARRAY,
+ };
+ bool result;
+
+ result = __gem_execbuf(i915, &execbuf) == 0;
+ gem_close(i915, obj.handle);
+
+ return result;
+}
+
+static void syncobj_futures(int i915)
+{
+ const struct intel_execution_engine2 *e;
+
+ igt_fixture {
+ igt_require(gem_scheduler_enabled(i915));
+ igt_require(has_future_syncobj(i915));
+ }
+
+ igt_subtest_with_dynamic("syncobj-future")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future(i915, e->flags);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-past")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_past(i915, e->flags, 0);
+ }
+
+
+ igt_subtest_with_dynamic("syncobj-future-submit")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_submit(i915, e->flags);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-submit-past")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_past(i915, e->flags,
+ I915_EXEC_FENCE_WAIT_SUBMIT);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-self")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_self(i915, e->flags, 0);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-self-submit")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_self(i915, e->flags,
+ I915_EXEC_FENCE_WAIT_SUBMIT);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-pair")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_pair(i915, e->flags);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-group")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_group(i915, e->flags, 67);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-deadlock")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_deadlock(i915, e->flags, 0);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-submit-deadlock")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_deadlock(i915, e->flags,
+ I915_EXEC_FENCE_WAIT_SUBMIT);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-cycle")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_cycle(i915, e->flags, 0);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-submit-cycle")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_cycle(i915, e->flags,
+ I915_EXEC_FENCE_WAIT_SUBMIT);
+ }
+}
+
igt_main
{
const struct intel_execution_engine2 *e;
@@ -1786,6 +2454,9 @@ igt_main
igt_subtest("syncobj-invalid-wait")
test_syncobj_invalid_wait(i915);
+ igt_subtest("syncobj-incomplete-wait-submit")
+ test_syncobj_incomplete_wait_submit(i915);
+
igt_subtest("syncobj-invalid-flags")
test_syncobj_invalid_flags(i915);
@@ -1795,6 +2466,9 @@ igt_main
igt_subtest("syncobj-wait")
test_syncobj_wait(i915);
+ igt_subtest_group
+ syncobj_futures(i915);
+
igt_subtest("syncobj-export")
test_syncobj_export(i915);
--
2.26.2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [igt-dev] [PATCH i-g-t 2/2] i915/gem_exec_fence: Teach invalid-wait about invalid future fences
@ 2020-05-13 17:02 ` Chris Wilson
0 siblings, 0 replies; 10+ messages in thread
From: Chris Wilson @ 2020-05-13 17:02 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev, Chris Wilson
When we allow a wait on a future fence, it must autoexpire if the
fence is never signaled by userspace. Also put future fences to work, as
the intention is to use them, along with WAIT_SUBMIT and semaphores, for
userspace to perform its own fine-grained scheduling. Or simply run
concurrent clients without having to flush batches between context
switches.
v2: Verify deadlock detection
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
tests/i915/gem_exec_fence.c | 680 +++++++++++++++++++++++++++++++++++-
1 file changed, 677 insertions(+), 3 deletions(-)
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index 4140bff24..eb1165080 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -1123,11 +1123,12 @@ static void test_syncobj_invalid_wait(int fd)
struct drm_i915_gem_exec_fence fence = {
.handle = syncobj_create(fd, 0),
};
+ int out;
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.flags = I915_EXEC_FENCE_ARRAY;
+ execbuf.flags = I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
@@ -1135,14 +1136,59 @@ static void test_syncobj_invalid_wait(int fd)
obj.handle = gem_create(fd, 4096);
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
- /* waiting before the fence is set is invalid */
+ /* waiting before the fence is set is^W may be invalid */
fence.flags = I915_EXEC_FENCE_WAIT;
- igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+ if (__gem_execbuf_wr(fd, &execbuf)) {
+ igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
+ return;
+ }
+
+ /* If we do allow the wait on a future fence, it should autoexpire */
+ gem_sync(fd, obj.handle);
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -ETIMEDOUT);
+ close(out);
gem_close(fd, obj.handle);
syncobj_destroy(fd, fence.handle);
}
+static void test_syncobj_incomplete_wait_submit(int i915)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = batch_create(i915),
+ };
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ .flags = I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT,
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+
+ .flags = I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT,
+ };
+ int out;
+
+ /* waiting before the fence is set is^W may be invalid */
+ if (__gem_execbuf_wr(i915, &execbuf)) {
+ igt_assert_eq(__gem_execbuf(i915, &execbuf), -EINVAL);
+ return;
+ }
+
+ /* If we do allow the wait on a future fence, it should autoexpire */
+ gem_sync(i915, obj.handle);
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -ETIMEDOUT);
+ close(out);
+
+ gem_close(i915, obj.handle);
+ syncobj_destroy(i915, fence.handle);
+}
+
static void test_syncobj_invalid_flags(int fd)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -1289,6 +1335,517 @@ static void test_syncobj_wait(int fd)
}
}
+static uint32_t future_batch(int i915, uint32_t offset)
+{
+ uint32_t handle = gem_create(i915, 4096);
+ const int gen = intel_gen(intel_get_drm_devid(i915));
+ uint32_t cs[16];
+ int i = 0;
+
+ cs[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ cs[++i] = offset + 4000;
+ cs[++i] = 0;
+ } else if (gen >= 4) {
+ cs[++i] = 0;
+ cs[++i] = offset + 4000;
+ } else {
+ cs[i]--;
+ cs[++i] = offset + 4000;
+ }
+ cs[++i] = 1;
+ cs[i + 1] = MI_BATCH_BUFFER_END;
+ gem_write(i915, handle, 0, cs, sizeof(cs));
+
+ cs[i] = 2;
+ gem_write(i915, handle, 64, cs, sizeof(cs));
+
+ return handle;
+}
+
+static void test_syncobj_future(int i915, unsigned int engine)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence = {};
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ };
+ const struct intel_execution_engine2 *e;
+
+ /*
+ * Client A is waiting on a future fence from B. So even though its
+ * execbuf is called first, we need to hold it in a queue waiting on
+ * B.
+ */
+ igt_require(gem_scheduler_enabled(i915));
+
+ __for_each_physical_engine(i915, e) {
+ uint32_t result;
+
+ igt_debug("waiting on future %s\n", e->name);
+ fence.handle = syncobj_create(i915, 0);
+
+ fence.flags = I915_EXEC_FENCE_WAIT;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags = engine | I915_EXEC_FENCE_ARRAY;
+ execbuf.rsvd1 = 0;
+ gem_execbuf(i915, &execbuf); /* writes 1 */
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 64;
+ execbuf.flags = e->flags | I915_EXEC_FENCE_ARRAY;
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ gem_execbuf(i915, &execbuf); /* writes 2 */
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence.handle);
+ gem_sync(i915, obj.handle); /* write hazard lies */
+ gem_read(i915, obj.handle, 4000, &result, sizeof(result));
+ igt_assert_eq(result, 1);
+ }
+
+ gem_close(i915, obj.handle);
+}
+
+static uint32_t future_submit_batch(int i915, uint32_t offset)
+{
+ uint32_t handle = gem_create(i915, 4096);
+ const int gen = intel_gen(intel_get_drm_devid(i915));
+ uint32_t cs[16];
+ int i = 0;
+
+ cs[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ cs[++i] = offset + 4000;
+ cs[++i] = 0;
+ } else if (gen >= 4) {
+ cs[++i] = 0;
+ cs[++i] = offset + 4000;
+ } else {
+ cs[i]--;
+ cs[++i] = offset + 4000;
+ }
+ cs[++i] = 1;
+ cs[i + 1] = MI_BATCH_BUFFER_END;
+ igt_assert(i + 1 < ARRAY_SIZE(cs));
+ gem_write(i915, handle, 0, cs, sizeof(cs));
+
+ i = 0;
+ cs[i++] =
+ MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_EQ_SDD |
+ (4 - 2);
+ cs[i++] = 1;
+ cs[i++] = offset + 4000;
+ cs[i++] = 0;
+
+ cs[i] = MI_STORE_DWORD_IMM | (gen < 6 ? 1 << 22 : 0);
+ if (gen >= 8) {
+ cs[++i] = offset + 4000;
+ cs[++i] = 0;
+ } else if (gen >= 4) {
+ cs[++i] = 0;
+ cs[++i] = offset + 4000;
+ } else {
+ cs[i]--;
+ cs[++i] = offset + 4000;
+ }
+ cs[++i] = 2;
+ cs[++i] = MI_BATCH_BUFFER_END;
+ igt_assert(i < ARRAY_SIZE(cs));
+
+ gem_write(i915, handle, 64, cs, sizeof(cs));
+
+ return handle;
+}
+
+static void test_syncobj_future_submit(int i915, unsigned int engine)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_submit_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ const struct intel_execution_engine2 *e;
+
+ /*
+ * Here we submit client A waiting on client B, but internally client
+ * B has a semaphore that waits on client A. This relies on timeslicing
+ * to reorder B before A, even though userspace has asked to submit
+ * A & B simultaneously (and due to the sequence we will submit B
+ * then A).
+ */
+ igt_require(gem_scheduler_has_timeslicing(i915));
+
+ __for_each_physical_engine(i915, e) {
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = engine | I915_EXEC_FENCE_ARRAY,
+ };
+ uint32_t result;
+ int out;
+
+ igt_debug("waiting on future %s\n", e->name);
+
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ fence.flags = I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0); /* writes 1 */
+ execbuf.flags &= ~I915_EXEC_FENCE_OUT;
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 64;
+ execbuf.flags &= ~I915_EXEC_RING_MASK;
+ execbuf.flags |= e->flags;
+ gem_execbuf(i915, &execbuf); /* writes 2 */
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence.handle);
+ gem_sync(i915, obj.handle); /* write hazard lies */
+ gem_read(i915, obj.handle, 4000, &result, sizeof(result));
+ igt_assert_eq(result, 2);
+
+ /* check we didn't autotimeout */
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), 1);
+ close(out);
+ }
+
+ gem_close(i915, obj.handle);
+}
+
+static void test_syncobj_future_past(int i915, unsigned int engine, int flags)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = engine | I915_EXEC_FENCE_ARRAY,
+ };
+ uint32_t result;
+ int out;
+
+ fence.flags = I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_SIGNAL | flags;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0); /* writes 1 */
+ execbuf.flags &= ~I915_EXEC_FENCE_OUT;
+
+ gem_sync(i915, obj.handle); /* write hazard lies */
+ gem_read(i915, obj.handle, 4000, &result, sizeof(result));
+ igt_assert_eq(result, 1);
+
+ /* check we didn't autotimeout */
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), 1);
+ close(out);
+
+ gem_close(i915, obj.handle);
+ syncobj_destroy(i915, fence.handle);
+}
+
+static void test_syncobj_future_self(int i915, unsigned int engine, int flags)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = engine | I915_EXEC_FENCE_ARRAY,
+ };
+ int out;
+
+ fence.flags = I915_EXEC_FENCE_WAIT | flags;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0); /* writes 1 */
+ execbuf.flags &= ~I915_EXEC_FENCE_OUT;
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 64;
+ gem_execbuf(i915, &execbuf); /* writes 2 */
+
+ gem_sync(i915, obj.handle); /* write hazard lies */
+
+ gem_close(i915, obj.handle);
+ syncobj_destroy(i915, fence.handle);
+
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+}
+
+static void test_syncobj_future_pair(int i915, unsigned int engine)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = submitN_batches(i915, 24 << 20, 2),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence[2] = {
+ { .handle = syncobj_create(i915, 0) },
+ { .handle = syncobj_create(i915, 0) }
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(fence),
+ .num_cliprects = 2,
+ };
+ const struct intel_execution_engine2 *e;
+
+ __for_each_physical_engine(i915, e) {
+ int out = 0;
+
+ gem_write(i915, obj.handle, 0, &out, sizeof(out));
+ fence[0].handle = syncobj_create(i915, 0);
+ fence[1].handle = syncobj_create(i915, 0);
+
+ fence[0].flags = I915_EXEC_FENCE_SIGNAL;
+ fence[1].flags =
+ I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ execbuf.batch_start_offset = 1024;
+ execbuf.flags =
+ engine | I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = gem_context_create(i915);
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0);
+ gem_context_destroy(i915, execbuf.rsvd1);
+ execbuf.rsvd2 >>= 32;
+
+ fence[0].flags =
+ I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ fence[1].flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 2048;
+ execbuf.flags =
+ e->flags | I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = gem_context_create(i915);
+ gem_execbuf_wr(i915, &execbuf);
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence[0].handle);
+ syncobj_destroy(i915, fence[1].handle);
+
+ gem_sync(i915, obj.handle); /* write hazard lies */
+
+ out = execbuf.rsvd2;
+ igt_assert_eq(sync_fence_status(out), 1);
+ close(out);
+
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), 1);
+ close(out);
+
+ gem_read(i915, obj.handle, 0, &out, sizeof(out));
+ igt_assert_eq(out, 16);
+ }
+
+ gem_close(i915, obj.handle);
+}
+
+static void test_syncobj_future_group(int i915, unsigned int engine, int count)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = submitN_batches(i915, 24 << 20, count),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence[count];
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(fence),
+ .num_cliprects = count,
+ .flags = engine | I915_EXEC_FENCE_ARRAY | I915_EXEC_FENCE_OUT,
+ };
+ int out[count];
+ uint32_t result;
+
+ for (int i = 0; i < count; i++) {
+ fence[i].handle = syncobj_create(i915, 0);
+ fence[i].flags =
+ I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ }
+
+ for (int i = 0; i < count; i++) {
+ fence[i].flags = I915_EXEC_FENCE_SIGNAL;
+
+ execbuf.batch_start_offset = 1024 * (i + 1);
+ execbuf.rsvd1 = gem_context_create(i915);
+ gem_execbuf_wr(i915, &execbuf);
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ out[i] = execbuf.rsvd2 >> 32;
+ fence[i].flags =
+ I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_WAIT_SUBMIT;
+ }
+ gem_sync(i915, obj.handle); /* write hazard lies */
+
+ /* As both batches were waiting for the other to start -- deadlock? */
+ for (int i = 0; i < count; i++) {
+ syncobj_destroy(i915, fence[i].handle);
+ igt_assert_eq(sync_fence_status(out[i]), 1);
+ close(out[i]);
+ }
+
+ /* Nevertheless, we ignored^Wresolved the deadlock and let them run */
+ gem_read(i915, obj.handle, 0, &result, sizeof(result));
+ igt_assert_eq(result, 8 * count);
+ gem_close(i915, obj.handle);
+}
+
+
+static void
+test_syncobj_future_deadlock(int i915, unsigned int engine, int flags)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ struct drm_i915_gem_exec_fence fence = {};
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ };
+ const struct intel_execution_engine2 *e;
+
+ __for_each_physical_engine(i915, e) {
+ int out;
+
+ fence.handle = syncobj_create(i915, 0),
+
+ fence.flags = I915_EXEC_FENCE_WAIT | flags;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags = engine | I915_EXEC_FENCE_ARRAY,
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = 0;
+ gem_execbuf_wr(i915, &execbuf); /* writes 1 */
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.batch_start_offset = 64;
+ execbuf.flags = e->flags | I915_EXEC_FENCE_ARRAY,
+ execbuf.flags |= I915_EXEC_FENCE_OUT | I915_EXEC_FENCE_IN;
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ execbuf.rsvd2 >>= 32;
+ gem_execbuf_wr(i915, &execbuf); /* writes 2 */
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence.handle);
+ gem_sync(i915, obj.handle);
+
+ /* How should this deadlock be resolved? */
+ out = execbuf.rsvd2;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+ }
+
+ gem_close(i915, obj.handle);
+}
+
+static void
+test_syncobj_future_cycle(int i915, unsigned int engine, int flags)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .offset = 24 << 20,
+ .handle = future_batch(i915, 24 << 20),
+ .flags = EXEC_OBJECT_PINNED,
+ };
+ const struct intel_execution_engine2 *e1, *e2;
+
+ __for_each_physical_engine(i915, e1) {
+ __for_each_physical_engine(i915, e2) {
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = engine | I915_EXEC_FENCE_ARRAY,
+ };
+ int out;
+
+ fence.flags = I915_EXEC_FENCE_WAIT | flags;
+ execbuf.batch_start_offset = 0;
+ execbuf.flags |= I915_EXEC_FENCE_OUT;
+ igt_require(__gem_execbuf_wr(i915, &execbuf) == 0);
+
+ fence.flags = 0;
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ execbuf.rsvd2 >>= 32;
+ execbuf.flags &= ~I915_EXEC_RING_MASK;
+ execbuf.flags |= e1->flags | I915_EXEC_FENCE_IN;
+ gem_execbuf_wr(i915, &execbuf);
+ gem_context_destroy(i915, execbuf.rsvd1);
+ close(execbuf.rsvd2);
+
+ fence.flags = I915_EXEC_FENCE_SIGNAL;
+ execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ execbuf.rsvd2 >>= 32;
+ execbuf.flags &= ~I915_EXEC_RING_MASK;
+ execbuf.flags |= e2->flags;
+ execbuf.batch_start_offset = 64;
+ gem_execbuf_wr(i915, &execbuf); /* writes 2 */
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ syncobj_destroy(i915, fence.handle);
+ gem_sync(i915, obj.handle);
+
+
+ /* How should this deadlock be resolved? */
+ out = execbuf.rsvd2 >> 32;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+
+ out = execbuf.rsvd2;
+ igt_assert_eq(sync_fence_status(out), -EDEADLK);
+ close(out);
+ }}
+
+ gem_close(i915, obj.handle);
+}
+
static void test_syncobj_export(int fd)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -1574,6 +2131,117 @@ static void test_syncobj_channel(int fd)
syncobj_destroy(fd, syncobj[i]);
}
+static bool has_future_syncobj(int i915)
+{
+ struct drm_i915_gem_exec_object2 obj = {
+ .handle = batch_create(i915),
+ };
+ struct drm_i915_gem_exec_fence fence = {
+ .handle = syncobj_create(i915, 0),
+ .flags = I915_EXEC_FENCE_WAIT | I915_EXEC_FENCE_SIGNAL,
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&obj),
+ .buffer_count = 1,
+ .cliprects_ptr = to_user_pointer(&fence),
+ .num_cliprects = 1,
+ .flags = I915_EXEC_FENCE_ARRAY,
+ };
+ bool result;
+
+ result = __gem_execbuf(i915, &execbuf) == 0;
+ gem_close(i915, obj.handle);
+
+ return result;
+}
+
+static void syncobj_futures(int i915)
+{
+ const struct intel_execution_engine2 *e;
+
+ igt_fixture {
+ igt_require(gem_scheduler_enabled(i915));
+ igt_require(has_future_syncobj(i915));
+ }
+
+ igt_subtest_with_dynamic("syncobj-future")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future(i915, e->flags);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-past")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_past(i915, e->flags, 0);
+ }
+
+
+ igt_subtest_with_dynamic("syncobj-future-submit")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_submit(i915, e->flags);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-submit-past")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_past(i915, e->flags,
+ I915_EXEC_FENCE_WAIT_SUBMIT);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-self")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_self(i915, e->flags, 0);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-self-submit")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_self(i915, e->flags,
+ I915_EXEC_FENCE_WAIT_SUBMIT);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-pair")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_pair(i915, e->flags);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-group")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_group(i915, e->flags, 67);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-deadlock")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_deadlock(i915, e->flags, 0);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-submit-deadlock")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_deadlock(i915, e->flags,
+ I915_EXEC_FENCE_WAIT_SUBMIT);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-cycle")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_cycle(i915, e->flags, 0);
+ }
+
+ igt_subtest_with_dynamic("syncobj-future-submit-cycle")
+ __for_each_physical_engine(i915, e) {
+ igt_dynamic_f("%s", e->name)
+ test_syncobj_future_cycle(i915, e->flags,
+ I915_EXEC_FENCE_WAIT_SUBMIT);
+ }
+}
+
igt_main
{
const struct intel_execution_engine2 *e;
@@ -1786,6 +2454,9 @@ igt_main
igt_subtest("syncobj-invalid-wait")
test_syncobj_invalid_wait(i915);
+ igt_subtest("syncobj-incomplete-wait-submit")
+ test_syncobj_incomplete_wait_submit(i915);
+
igt_subtest("syncobj-invalid-flags")
test_syncobj_invalid_flags(i915);
@@ -1795,6 +2466,9 @@ igt_main
igt_subtest("syncobj-wait")
test_syncobj_wait(i915);
+ igt_subtest_group
+ syncobj_futures(i915);
+
igt_subtest("syncobj-export")
test_syncobj_export(i915);
--
2.26.2
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [igt-dev] ✓ Fi.CI.BAT: success for series starting with [i-g-t,1/2] lib/i915: Report scheduler caps for timeslicing
2020-05-13 17:02 ` [igt-dev] " Chris Wilson
(?)
(?)
@ 2020-05-13 18:10 ` Patchwork
-1 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2020-05-13 18:10 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
== Series Details ==
Series: series starting with [i-g-t,1/2] lib/i915: Report scheduler caps for timeslicing
URL : https://patchwork.freedesktop.org/series/77236/
State : success
== Summary ==
CI Bug Log - changes from IGT_5652 -> IGTPW_4567
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/index.html
Known issues
------------
Here are the changes found in IGTPW_4567 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@i915_selftest@live@execlists:
- fi-skl-6600u: [PASS][1] -> [INCOMPLETE][2] ([i915#1874])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/fi-skl-6600u/igt@i915_selftest@live@execlists.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/fi-skl-6600u/igt@i915_selftest@live@execlists.html
[i915#1874]: https://gitlab.freedesktop.org/drm/intel/issues/1874
Participating hosts (50 -> 44)
------------------------------
Missing (6): fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-kbl-7560u fi-byt-clapper fi-bdw-samus
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_5652 -> IGTPW_4567
CI-20190529: 20190529
CI_DRM_8475: fdb67b76a2d3b315585813539269dac22e2305f4 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_4567: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/index.html
IGT_5652: 60e8be7ccc72086a88d2eff3bcd02495fad5fa46 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
== Testlist changes ==
+igt@gem_exec_fence@syncobj-future
+igt@gem_exec_fence@syncobj-future-cycle
+igt@gem_exec_fence@syncobj-future-deadlock
+igt@gem_exec_fence@syncobj-future-group
+igt@gem_exec_fence@syncobj-future-pair
+igt@gem_exec_fence@syncobj-future-past
+igt@gem_exec_fence@syncobj-future-self
+igt@gem_exec_fence@syncobj-future-self-submit
+igt@gem_exec_fence@syncobj-future-submit
+igt@gem_exec_fence@syncobj-future-submit-cycle
+igt@gem_exec_fence@syncobj-future-submit-deadlock
+igt@gem_exec_fence@syncobj-future-submit-past
+igt@gem_exec_fence@syncobj-incomplete-wait-submit
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/index.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 10+ messages in thread
* [igt-dev] ✓ Fi.CI.IGT: success for series starting with [i-g-t,1/2] lib/i915: Report scheduler caps for timeslicing
2020-05-13 17:02 ` [igt-dev] " Chris Wilson
` (2 preceding siblings ...)
(?)
@ 2020-05-13 21:47 ` Patchwork
-1 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2020-05-13 21:47 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
== Series Details ==
Series: series starting with [i-g-t,1/2] lib/i915: Report scheduler caps for timeslicing
URL : https://patchwork.freedesktop.org/series/77236/
State : success
== Summary ==
CI Bug Log - changes from IGT_5652_full -> IGTPW_4567_full
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/index.html
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in IGTPW_4567_full:
### IGT changes ###
#### Possible regressions ####
* {igt@gem_exec_fence@syncobj-future} (NEW):
- shard-iclb: NOTRUN -> [SKIP][1] +11 similar issues
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-iclb8/igt@gem_exec_fence@syncobj-future.html
* {igt@gem_exec_fence@syncobj-future-group} (NEW):
- shard-tglb: NOTRUN -> [SKIP][2] +11 similar issues
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-tglb1/igt@gem_exec_fence@syncobj-future-group.html
New tests
---------
New tests have been introduced between IGT_5652_full and IGTPW_4567_full:
### New IGT tests (13) ###
* igt@gem_exec_fence@syncobj-future:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-cycle:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-deadlock:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-group:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-pair:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-past:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-self:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-self-submit:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit-cycle:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit-deadlock:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit-past:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-incomplete-wait-submit:
- Statuses : 7 pass(s)
- Exec time: [0.0, 0.00] s
Known issues
------------
Here are the changes found in IGTPW_4567_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gen9_exec_parse@allowed-all:
- shard-glk: [PASS][3] -> [DMESG-WARN][4] ([i915#1436] / [i915#716])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-glk8/igt@gen9_exec_parse@allowed-all.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-glk2/igt@gen9_exec_parse@allowed-all.html
* igt@i915_suspend@debugfs-reader:
- shard-kbl: [PASS][5] -> [INCOMPLETE][6] ([i915#155])
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl2/igt@i915_suspend@debugfs-reader.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl1/igt@i915_suspend@debugfs-reader.html
* igt@i915_suspend@sysfs-reader:
- shard-kbl: [PASS][7] -> [INCOMPLETE][8] ([CI#80] / [i915#155])
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl4/igt@i915_suspend@sysfs-reader.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl4/igt@i915_suspend@sysfs-reader.html
* igt@kms_cursor_crc@pipe-a-cursor-64x64-onscreen:
- shard-kbl: [PASS][9] -> [FAIL][10] ([i915#54] / [i915#93] / [i915#95]) +2 similar issues
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl2/igt@kms_cursor_crc@pipe-a-cursor-64x64-onscreen.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl2/igt@kms_cursor_crc@pipe-a-cursor-64x64-onscreen.html
* igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled:
- shard-kbl: [PASS][11] -> [FAIL][12] ([i915#177] / [i915#52] / [i915#54] / [i915#93] / [i915#95]) +1 similar issue
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl3/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl1/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
* igt@kms_draw_crc@draw-method-xrgb8888-render-untiled:
- shard-apl: [PASS][13] -> [FAIL][14] ([i915#52] / [i915#54] / [i915#95])
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl6/igt@kms_draw_crc@draw-method-xrgb8888-render-untiled.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl1/igt@kms_draw_crc@draw-method-xrgb8888-render-untiled.html
* igt@kms_flip_tiling@flip-changes-tiling-y:
- shard-apl: [PASS][15] -> [FAIL][16] ([i915#95])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl1/igt@kms_flip_tiling@flip-changes-tiling-y.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl2/igt@kms_flip_tiling@flip-changes-tiling-y.html
* igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes:
- shard-apl: [PASS][17] -> [DMESG-WARN][18] ([i915#180]) +1 similar issue
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl3/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl4/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
* igt@kms_plane_lowres@pipe-a-tiling-x:
- shard-snb: [PASS][19] -> [SKIP][20] ([fdo#109271]) +6 similar issues
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-snb2/igt@kms_plane_lowres@pipe-a-tiling-x.html
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-snb6/igt@kms_plane_lowres@pipe-a-tiling-x.html
* igt@kms_psr@psr2_cursor_plane_onoff:
- shard-iclb: [PASS][21] -> [SKIP][22] ([fdo#109441]) +2 similar issues
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-iclb2/igt@kms_psr@psr2_cursor_plane_onoff.html
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-iclb4/igt@kms_psr@psr2_cursor_plane_onoff.html
* igt@kms_setmode@basic:
- shard-apl: [PASS][23] -> [FAIL][24] ([i915#31])
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl7/igt@kms_setmode@basic.html
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl4/igt@kms_setmode@basic.html
* igt@kms_vblank@pipe-a-ts-continuation-suspend:
- shard-kbl: [PASS][25] -> [DMESG-WARN][26] ([i915#180]) +1 similar issue
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl6/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
#### Possible fixes ####
* igt@gem_softpin@noreloc-s3:
- shard-kbl: [DMESG-WARN][27] ([i915#180]) -> [PASS][28]
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl4/igt@gem_softpin@noreloc-s3.html
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl6/igt@gem_softpin@noreloc-s3.html
* igt@gen9_exec_parse@allowed-all:
- shard-kbl: [DMESG-WARN][29] ([i915#1436] / [i915#716]) -> [PASS][30]
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl6/igt@gen9_exec_parse@allowed-all.html
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl2/igt@gen9_exec_parse@allowed-all.html
* igt@kms_big_fb@linear-32bpp-rotate-180:
- shard-apl: [FAIL][31] ([i915#1119] / [i915#95]) -> [PASS][32]
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl6/igt@kms_big_fb@linear-32bpp-rotate-180.html
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl7/igt@kms_big_fb@linear-32bpp-rotate-180.html
- shard-kbl: [FAIL][33] ([i915#1119] / [i915#93] / [i915#95]) -> [PASS][34]
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl6/igt@kms_big_fb@linear-32bpp-rotate-180.html
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl1/igt@kms_big_fb@linear-32bpp-rotate-180.html
* igt@kms_cursor_crc@pipe-a-cursor-256x256-random:
- shard-kbl: [FAIL][35] ([i915#54] / [i915#93] / [i915#95]) -> [PASS][36] +1 similar issue
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl2/igt@kms_cursor_crc@pipe-a-cursor-256x256-random.html
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl4/igt@kms_cursor_crc@pipe-a-cursor-256x256-random.html
* igt@kms_cursor_crc@pipe-a-cursor-suspend:
- shard-kbl: [DMESG-WARN][37] ([i915#180] / [i915#93] / [i915#95]) -> [PASS][38]
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl1/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl3/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
* igt@kms_frontbuffer_tracking@fbc-modesetfrombusy:
- shard-iclb: [FAIL][39] -> [PASS][40]
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-iclb5/igt@kms_frontbuffer_tracking@fbc-modesetfrombusy.html
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-iclb3/igt@kms_frontbuffer_tracking@fbc-modesetfrombusy.html
* igt@kms_hdmi_inject@inject-audio:
- shard-tglb: [SKIP][41] ([i915#433]) -> [PASS][42]
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-tglb1/igt@kms_hdmi_inject@inject-audio.html
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-tglb1/igt@kms_hdmi_inject@inject-audio.html
* igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence:
- shard-apl: [FAIL][43] ([i915#53] / [i915#95]) -> [PASS][44]
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl2/igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence.html
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl4/igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence.html
- shard-kbl: [FAIL][45] ([i915#53] / [i915#93] / [i915#95]) -> [PASS][46]
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl4/igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence.html
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl7/igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence.html
* igt@kms_plane_cursor@pipe-a-viewport-size-256:
- shard-apl: [FAIL][47] ([i915#1559] / [i915#95]) -> [PASS][48]
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl6/igt@kms_plane_cursor@pipe-a-viewport-size-256.html
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl4/igt@kms_plane_cursor@pipe-a-viewport-size-256.html
- shard-kbl: [FAIL][49] ([i915#1559] / [i915#93] / [i915#95]) -> [PASS][50]
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl2/igt@kms_plane_cursor@pipe-a-viewport-size-256.html
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl3/igt@kms_plane_cursor@pipe-a-viewport-size-256.html
* igt@kms_psr@psr2_suspend:
- shard-iclb: [SKIP][51] ([fdo#109441]) -> [PASS][52] +1 similar issue
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-iclb1/igt@kms_psr@psr2_suspend.html
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-iclb2/igt@kms_psr@psr2_suspend.html
#### Warnings ####
* igt@kms_content_protection@atomic:
- shard-apl: [TIMEOUT][53] ([i915#1319]) -> [FAIL][54] ([fdo#110321] / [fdo#110336])
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl2/igt@kms_content_protection@atomic.html
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl2/igt@kms_content_protection@atomic.html
* igt@kms_content_protection@uevent:
- shard-kbl: [FAIL][55] ([i915#357] / [i915#93] / [i915#95]) -> [FAIL][56] ([i915#357])
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl3/igt@kms_content_protection@uevent.html
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl3/igt@kms_content_protection@uevent.html
- shard-apl: [FAIL][57] ([i915#357] / [i915#95]) -> [FAIL][58] ([i915#357])
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl2/igt@kms_content_protection@uevent.html
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl4/igt@kms_content_protection@uevent.html
* igt@kms_fbcon_fbt@fbc:
- shard-kbl: [FAIL][59] ([i915#1121] / [i915#93] / [i915#95]) -> [FAIL][60] ([i915#64])
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl3/igt@kms_fbcon_fbt@fbc.html
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl4/igt@kms_fbcon_fbt@fbc.html
- shard-apl: [FAIL][61] ([i915#1121] / [i915#95]) -> [FAIL][62] ([i915#1525])
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl1/igt@kms_fbcon_fbt@fbc.html
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl8/igt@kms_fbcon_fbt@fbc.html
* igt@kms_plane_alpha_blend@pipe-a-alpha-opaque-fb:
- shard-apl: [FAIL][63] ([fdo#108145] / [i915#265]) -> [FAIL][64] ([fdo#108145] / [i915#265] / [i915#95]) +1 similar issue
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl6/igt@kms_plane_alpha_blend@pipe-a-alpha-opaque-fb.html
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl3/igt@kms_plane_alpha_blend@pipe-a-alpha-opaque-fb.html
* igt@kms_plane_alpha_blend@pipe-a-constant-alpha-max:
- shard-kbl: [FAIL][65] ([fdo#108145] / [i915#265] / [i915#93] / [i915#95]) -> [FAIL][66] ([fdo#108145] / [i915#265])
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl6/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-max.html
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl2/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-max.html
- shard-apl: [FAIL][67] ([fdo#108145] / [i915#265] / [i915#95]) -> [FAIL][68] ([fdo#108145] / [i915#265])
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-apl6/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-max.html
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-apl3/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-max.html
* igt@kms_plane_alpha_blend@pipe-c-alpha-7efc:
- shard-kbl: [FAIL][69] ([fdo#108145] / [i915#265]) -> [FAIL][70] ([fdo#108145] / [i915#265] / [i915#93] / [i915#95]) +1 similar issue
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-kbl3/igt@kms_plane_alpha_blend@pipe-c-alpha-7efc.html
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-kbl6/igt@kms_plane_alpha_blend@pipe-c-alpha-7efc.html
* igt@kms_psr2_su@page_flip:
- shard-iclb: [SKIP][71] ([fdo#109642] / [fdo#111068]) -> [FAIL][72] ([i915#608])
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5652/shard-iclb8/igt@kms_psr2_su@page_flip.html
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/shard-iclb2/igt@kms_psr2_su@page_flip.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[CI#80]: https://gitlab.freedesktop.org/gfx-ci/i915-infra/issues/80
[fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
[fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
[fdo#110321]: https://bugs.freedesktop.org/show_bug.cgi?id=110321
[fdo#110336]: https://bugs.freedesktop.org/show_bug.cgi?id=110336
[fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
[i915#1119]: https://gitlab.freedesktop.org/drm/intel/issues/1119
[i915#1121]: https://gitlab.freedesktop.org/drm/intel/issues/1121
[i915#1319]: https://gitlab.freedesktop.org/drm/intel/issues/1319
[i915#1436]: https://gitlab.freedesktop.org/drm/intel/issues/1436
[i915#1525]: https://gitlab.freedesktop.org/drm/intel/issues/1525
[i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
[i915#155]: https://gitlab.freedesktop.org/drm/intel/issues/155
[i915#1559]: https://gitlab.freedesktop.org/drm/intel/issues/1559
[i915#177]: https://gitlab.freedesktop.org/drm/intel/issues/177
[i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
[i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
[i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
[i915#357]: https://gitlab.freedesktop.org/drm/intel/issues/357
[i915#433]: https://gitlab.freedesktop.org/drm/intel/issues/433
[i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
[i915#53]: https://gitlab.freedesktop.org/drm/intel/issues/53
[i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
[i915#608]: https://gitlab.freedesktop.org/drm/intel/issues/608
[i915#64]: https://gitlab.freedesktop.org/drm/intel/issues/64
[i915#716]: https://gitlab.freedesktop.org/drm/intel/issues/716
[i915#93]: https://gitlab.freedesktop.org/drm/intel/issues/93
[i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95
Participating hosts (8 -> 8)
------------------------------
No changes in participating hosts
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_5652 -> IGTPW_4567
CI-20190529: 20190529
CI_DRM_8475: fdb67b76a2d3b315585813539269dac22e2305f4 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_4567: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/index.html
IGT_5652: 60e8be7ccc72086a88d2eff3bcd02495fad5fa46 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4567/index.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [Intel-gfx] [igt-dev] [PATCH i-g-t 1/2] lib/i915: Report scheduler caps for timeslicing
2020-05-13 17:02 ` [igt-dev] " Chris Wilson
@ 2020-05-14 3:41 ` Petri Latvala
0 siblings, 0 replies; 10+ messages in thread
From: Petri Latvala @ 2020-05-14 3:41 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev, intel-gfx
On Wed, May 13, 2020 at 06:02:23PM +0100, Chris Wilson wrote:
> diff --git a/include/drm-uapi/i915_drm.h b/include/drm-uapi/i915_drm.h
In a separate commit please, with the commit message stating which
kernel git sha it's from.
--
Petri Latvala
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [igt-dev] [PATCH i-g-t 1/2] lib/i915: Report scheduler caps for timeslicing
@ 2020-05-14 3:41 ` Petri Latvala
0 siblings, 0 replies; 10+ messages in thread
From: Petri Latvala @ 2020-05-14 3:41 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev, intel-gfx
On Wed, May 13, 2020 at 06:02:23PM +0100, Chris Wilson wrote:
> diff --git a/include/drm-uapi/i915_drm.h b/include/drm-uapi/i915_drm.h
In a separate commit please, with the commit message stating which
kernel git sha it's from.
--
Petri Latvala
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 10+ messages in thread
* [igt-dev] ✗ GitLab.Pipeline: failure for series starting with [i-g-t,1/2] lib/i915: Report scheduler caps for timeslicing
2020-05-13 17:02 ` [igt-dev] " Chris Wilson
` (4 preceding siblings ...)
(?)
@ 2020-05-15 11:44 ` Patchwork
0 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2020-05-15 11:44 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
== Series Details ==
Series: series starting with [i-g-t,1/2] lib/i915: Report scheduler caps for timeslicing
URL : https://patchwork.freedesktop.org/series/77236/
State : failure
== Summary ==
ERROR! This series introduces new undocumented tests:
gem_exec_fence@syncobj-future
gem_exec_fence@syncobj-future-cycle
gem_exec_fence@syncobj-future-deadlock
gem_exec_fence@syncobj-future-group
gem_exec_fence@syncobj-future-pair
gem_exec_fence@syncobj-future-past
gem_exec_fence@syncobj-future-self
gem_exec_fence@syncobj-future-self-submit
gem_exec_fence@syncobj-future-submit
gem_exec_fence@syncobj-future-submit-cycle
gem_exec_fence@syncobj-future-submit-deadlock
gem_exec_fence@syncobj-future-submit-past
gem_exec_fence@syncobj-incomplete-wait-submit
Can you document them as per the requirement in the [CONTRIBUTING.md]?
[Documentation] has more details on how to do this.
Here are few examples:
https://gitlab.freedesktop.org/drm/igt-gpu-tools/commit/0316695d03aa46108296b27f3982ec93200c7a6e
https://gitlab.freedesktop.org/drm/igt-gpu-tools/commit/443cc658e1e6b492ee17bf4f4d891029eb7a205d
Thanks in advance!
[CONTRIBUTING.md]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/blob/master/CONTRIBUTING.md#L19
[Documentation]: https://drm.pages.freedesktop.org/igt-gpu-tools/igt-gpu-tools-Core.html#igt-describe
Other than that, pipeline status: SUCCESS.
see https://gitlab.freedesktop.org/gfx-ci/igt-ci-tags/pipelines/146272 for the overview.
== Logs ==
For more details see: https://gitlab.freedesktop.org/gfx-ci/igt-ci-tags/pipelines/146272
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 10+ messages in thread
* [igt-dev] ✓ Fi.CI.IGT: success for series starting with [i-g-t,1/2] lib/i915: Report scheduler caps for timeslicing
2020-05-05 13:38 [Intel-gfx] [PATCH i-g-t 1/2] " Chris Wilson
@ 2020-05-06 2:29 ` Patchwork
0 siblings, 0 replies; 10+ messages in thread
From: Patchwork @ 2020-05-06 2:29 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
== Series Details ==
Series: series starting with [i-g-t,1/2] lib/i915: Report scheduler caps for timeslicing
URL : https://patchwork.freedesktop.org/series/76956/
State : success
== Summary ==
CI Bug Log - changes from IGT_5633_full -> IGTPW_4535_full
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/index.html
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in IGTPW_4535_full:
### IGT changes ###
#### Possible regressions ####
* {igt@gem_exec_fence@syncobj-future-self@vecs0} (NEW):
- shard-iclb: NOTRUN -> [SKIP][1] +31 similar issues
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-iclb3/igt@gem_exec_fence@syncobj-future-self@vecs0.html
* {igt@gem_exec_fence@syncobj-future@vecs0} (NEW):
- shard-tglb: NOTRUN -> [SKIP][2] +39 similar issues
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-tglb3/igt@gem_exec_fence@syncobj-future@vecs0.html
#### Suppressed ####
The following results come from untrusted machines, tests, or statuses.
They do not affect the overall result.
* {igt@gem_exec_fence@flip}:
- shard-iclb: [SKIP][3] ([i915#1837]) -> [SKIP][4]
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-iclb8/igt@gem_exec_fence@flip.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-iclb1/igt@gem_exec_fence@flip.html
- shard-tglb: [SKIP][5] ([i915#1837]) -> [SKIP][6]
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-tglb7/igt@gem_exec_fence@flip.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-tglb8/igt@gem_exec_fence@flip.html
New tests
---------
New tests have been introduced between IGT_5633_full and IGTPW_4535_full:
### New IGT tests (49) ###
* igt@gem_exec_fence@syncobj-future:
- Statuses :
- Exec time: [None] s
* igt@gem_exec_fence@syncobj-future-deadlock:
- Statuses :
- Exec time: [None] s
* igt@gem_exec_fence@syncobj-future-deadlock@bcs0:
- Statuses : 7 skip(s)
- Exec time: [0.00] s
* igt@gem_exec_fence@syncobj-future-deadlock@rcs0:
- Statuses : 7 skip(s)
- Exec time: [0.00] s
* igt@gem_exec_fence@syncobj-future-deadlock@vcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-deadlock@vcs1:
- Statuses : 2 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-deadlock@vecs0:
- Statuses : 6 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-past:
- Statuses :
- Exec time: [None] s
* igt@gem_exec_fence@syncobj-future-past@bcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.01] s
* igt@gem_exec_fence@syncobj-future-past@rcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-past@vcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-past@vcs1:
- Statuses : 2 skip(s)
- Exec time: [0.00] s
* igt@gem_exec_fence@syncobj-future-past@vecs0:
- Statuses : 6 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-self:
- Statuses :
- Exec time: [None] s
* igt@gem_exec_fence@syncobj-future-self-submit:
- Statuses :
- Exec time: [None] s
* igt@gem_exec_fence@syncobj-future-self-submit@bcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-self-submit@rcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-self-submit@vcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-self-submit@vcs1:
- Statuses : 3 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-self-submit@vecs0:
- Statuses : 6 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-self@bcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-self@rcs0:
- Statuses : 7 skip(s)
- Exec time: [0.00] s
* igt@gem_exec_fence@syncobj-future-self@vcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-self@vcs1:
- Statuses : 2 skip(s)
- Exec time: [0.00] s
* igt@gem_exec_fence@syncobj-future-self@vecs0:
- Statuses : 6 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-submit:
- Statuses :
- Exec time: [None] s
* igt@gem_exec_fence@syncobj-future-submit-deadlock:
- Statuses :
- Exec time: [None] s
* igt@gem_exec_fence@syncobj-future-submit-deadlock@bcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-submit-deadlock@rcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-submit-deadlock@vcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit-deadlock@vcs1:
- Statuses : 3 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit-deadlock@vecs0:
- Statuses : 6 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-submit-past:
- Statuses :
- Exec time: [None] s
* igt@gem_exec_fence@syncobj-future-submit-past@bcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit-past@rcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future-submit-past@vcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit-past@vcs1:
- Statuses : 3 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit-past@vecs0:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit@bcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit@rcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit@vcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit@vcs1:
- Statuses : 2 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future-submit@vecs0:
- Statuses : 6 skip(s)
- Exec time: [0.0] s
* igt@gem_exec_fence@syncobj-future@bcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future@rcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future@vcs0:
- Statuses : 7 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-future@vcs1:
- Statuses : 2 skip(s)
- Exec time: [0.00] s
* igt@gem_exec_fence@syncobj-future@vecs0:
- Statuses : 6 skip(s)
- Exec time: [0.0, 0.00] s
* igt@gem_exec_fence@syncobj-incomplete-wait-submit:
- Statuses : 7 pass(s)
- Exec time: [0.0, 0.00] s
Known issues
------------
Here are the changes found in IGTPW_4535_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_exec_params@invalid-bsd-ring:
- shard-iclb: [PASS][7] -> [SKIP][8] ([fdo#109276])
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-iclb4/igt@gem_exec_params@invalid-bsd-ring.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-iclb8/igt@gem_exec_params@invalid-bsd-ring.html
* igt@gen9_exec_parse@allowed-all:
- shard-apl: [PASS][9] -> [DMESG-WARN][10] ([i915#716])
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl7/igt@gen9_exec_parse@allowed-all.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl6/igt@gen9_exec_parse@allowed-all.html
* igt@kms_cursor_crc@pipe-a-cursor-64x64-onscreen:
- shard-kbl: [PASS][11] -> [FAIL][12] ([i915#54] / [i915#93] / [i915#95])
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl4/igt@kms_cursor_crc@pipe-a-cursor-64x64-onscreen.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl1/igt@kms_cursor_crc@pipe-a-cursor-64x64-onscreen.html
* igt@kms_frontbuffer_tracking@fbc-shrfb-scaledprimary:
- shard-kbl: [PASS][13] -> [FAIL][14] ([i915#49])
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl7/igt@kms_frontbuffer_tracking@fbc-shrfb-scaledprimary.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl7/igt@kms_frontbuffer_tracking@fbc-shrfb-scaledprimary.html
- shard-apl: [PASS][15] -> [FAIL][16] ([i915#49] / [i915#95])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl7/igt@kms_frontbuffer_tracking@fbc-shrfb-scaledprimary.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl8/igt@kms_frontbuffer_tracking@fbc-shrfb-scaledprimary.html
* igt@kms_frontbuffer_tracking@fbc-suspend:
- shard-apl: [PASS][17] -> [DMESG-WARN][18] ([i915#180] / [i915#95])
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl3/igt@kms_frontbuffer_tracking@fbc-suspend.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl6/igt@kms_frontbuffer_tracking@fbc-suspend.html
* igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
- shard-kbl: [PASS][19] -> [DMESG-WARN][20] ([i915#180]) +1 similar issue
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl6/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl1/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html
* igt@kms_plane_cursor@pipe-a-overlay-size-64:
- shard-apl: [PASS][21] -> [FAIL][22] ([i915#1559] / [i915#95])
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl4/igt@kms_plane_cursor@pipe-a-overlay-size-64.html
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl1/igt@kms_plane_cursor@pipe-a-overlay-size-64.html
- shard-kbl: [PASS][23] -> [FAIL][24] ([i915#1559] / [i915#93] / [i915#95])
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl3/igt@kms_plane_cursor@pipe-a-overlay-size-64.html
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl7/igt@kms_plane_cursor@pipe-a-overlay-size-64.html
* igt@kms_psr@psr2_primary_mmap_cpu:
- shard-iclb: [PASS][25] -> [SKIP][26] ([fdo#109441]) +2 similar issues
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-iclb2/igt@kms_psr@psr2_primary_mmap_cpu.html
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-iclb3/igt@kms_psr@psr2_primary_mmap_cpu.html
#### Possible fixes ####
* igt@gem_softpin@noreloc-s3:
- shard-kbl: [DMESG-WARN][27] ([i915#180]) -> [PASS][28]
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl4/igt@gem_softpin@noreloc-s3.html
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl2/igt@gem_softpin@noreloc-s3.html
* igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled:
- shard-kbl: [FAIL][29] ([i915#177] / [i915#52] / [i915#54] / [i915#93] / [i915#95]) -> [PASS][30]
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl7/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl2/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
- shard-apl: [FAIL][31] ([i915#52] / [i915#54] / [i915#95]) -> [PASS][32]
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl6/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl4/igt@kms_draw_crc@draw-method-xrgb8888-mmap-cpu-untiled.html
* {igt@kms_flip@flip-vs-suspend@a-dp1}:
- shard-apl: [DMESG-WARN][33] ([i915#180]) -> [PASS][34] +1 similar issue
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl4/igt@kms_flip@flip-vs-suspend@a-dp1.html
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl1/igt@kms_flip@flip-vs-suspend@a-dp1.html
* {igt@kms_flip@flip-vs-suspend@b-vga1}:
- shard-snb: [DMESG-WARN][35] -> [PASS][36]
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-snb6/igt@kms_flip@flip-vs-suspend@b-vga1.html
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-snb1/igt@kms_flip@flip-vs-suspend@b-vga1.html
* igt@kms_flip_tiling@flip-changes-tiling:
- shard-kbl: [FAIL][37] ([i915#699] / [i915#93] / [i915#95]) -> [PASS][38]
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl4/igt@kms_flip_tiling@flip-changes-tiling.html
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl7/igt@kms_flip_tiling@flip-changes-tiling.html
* igt@kms_hdmi_inject@inject-audio:
- shard-tglb: [SKIP][39] ([i915#433]) -> [PASS][40]
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-tglb1/igt@kms_hdmi_inject@inject-audio.html
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-tglb8/igt@kms_hdmi_inject@inject-audio.html
- shard-iclb: [SKIP][41] ([i915#433]) -> [PASS][42]
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-iclb5/igt@kms_hdmi_inject@inject-audio.html
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-iclb7/igt@kms_hdmi_inject@inject-audio.html
* igt@kms_plane_cursor@pipe-a-viewport-size-64:
- shard-kbl: [FAIL][43] ([i915#1559] / [i915#93] / [i915#95]) -> [PASS][44]
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl7/igt@kms_plane_cursor@pipe-a-viewport-size-64.html
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl1/igt@kms_plane_cursor@pipe-a-viewport-size-64.html
- shard-apl: [FAIL][45] ([i915#1559] / [i915#95]) -> [PASS][46]
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl7/igt@kms_plane_cursor@pipe-a-viewport-size-64.html
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl4/igt@kms_plane_cursor@pipe-a-viewport-size-64.html
* {igt@kms_prime@basic-crc@second-to-first}:
- shard-apl: [FAIL][47] ([i915#95]) -> [PASS][48] +1 similar issue
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl6/igt@kms_prime@basic-crc@second-to-first.html
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl6/igt@kms_prime@basic-crc@second-to-first.html
- shard-kbl: [FAIL][49] ([i915#93] / [i915#95]) -> [PASS][50]
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl7/igt@kms_prime@basic-crc@second-to-first.html
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl2/igt@kms_prime@basic-crc@second-to-first.html
* igt@kms_psr@psr2_sprite_plane_move:
- shard-iclb: [SKIP][51] ([fdo#109441]) -> [PASS][52] +3 similar issues
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-iclb8/igt@kms_psr@psr2_sprite_plane_move.html
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-iclb2/igt@kms_psr@psr2_sprite_plane_move.html
* igt@kms_setmode@basic:
- shard-apl: [FAIL][53] ([i915#31]) -> [PASS][54]
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl7/igt@kms_setmode@basic.html
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl2/igt@kms_setmode@basic.html
- shard-hsw: [FAIL][55] ([i915#31]) -> [PASS][56]
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-hsw1/igt@kms_setmode@basic.html
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-hsw1/igt@kms_setmode@basic.html
* {igt@perf@polling-parameterized}:
- shard-hsw: [FAIL][57] ([i915#1542]) -> [PASS][58]
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-hsw6/igt@perf@polling-parameterized.html
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-hsw7/igt@perf@polling-parameterized.html
#### Warnings ####
* igt@i915_pm_dc@dc3co-vpb-simulation:
- shard-iclb: [SKIP][59] ([i915#658]) -> [SKIP][60] ([i915#588])
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-iclb1/igt@i915_pm_dc@dc3co-vpb-simulation.html
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-iclb2/igt@i915_pm_dc@dc3co-vpb-simulation.html
* igt@i915_pm_dc@dc6-dpms:
- shard-tglb: [FAIL][61] ([i915#454]) -> [SKIP][62] ([i915#468])
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-tglb5/igt@i915_pm_dc@dc6-dpms.html
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-tglb2/igt@i915_pm_dc@dc6-dpms.html
* igt@i915_pm_dc@dc6-psr:
- shard-tglb: [SKIP][63] ([i915#468]) -> [FAIL][64] ([i915#454])
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-tglb2/igt@i915_pm_dc@dc6-psr.html
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-tglb8/igt@i915_pm_dc@dc6-psr.html
* igt@kms_content_protection@atomic:
- shard-apl: [TIMEOUT][65] ([i915#1319]) -> [FAIL][66] ([fdo#110321] / [fdo#110336])
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl8/igt@kms_content_protection@atomic.html
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl1/igt@kms_content_protection@atomic.html
* igt@kms_content_protection@atomic-dpms:
- shard-apl: [FAIL][67] ([fdo#110321] / [fdo#110336] / [i915#95]) -> [TIMEOUT][68] ([i915#1319])
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl8/igt@kms_content_protection@atomic-dpms.html
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl2/igt@kms_content_protection@atomic-dpms.html
* igt@kms_content_protection@lic:
- shard-apl: [FAIL][69] ([fdo#110321]) -> [TIMEOUT][70] ([i915#1319])
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-apl6/igt@kms_content_protection@lic.html
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-apl2/igt@kms_content_protection@lic.html
* igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb:
- shard-kbl: [FAIL][71] ([fdo#108145] / [i915#265]) -> [FAIL][72] ([fdo#108145] / [i915#265] / [i915#62])
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-kbl6/igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb.html
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-kbl4/igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb.html
* igt@kms_psr2_su@page_flip:
- shard-iclb: [FAIL][73] ([i915#608]) -> [SKIP][74] ([fdo#109642] / [fdo#111068])
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/IGT_5633/shard-iclb2/igt@kms_psr2_su@page_flip.html
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/shard-iclb5/igt@kms_psr2_su@page_flip.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
[fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
[fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
[fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
[fdo#110321]: https://bugs.freedesktop.org/show_bug.cgi?id=110321
[fdo#110336]: https://bugs.freedesktop.org/show_bug.cgi?id=110336
[fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
[i915#1319]: https://gitlab.freedesktop.org/drm/intel/issues/1319
[i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
[i915#1559]: https://gitlab.freedesktop.org/drm/intel/issues/1559
[i915#177]: https://gitlab.freedesktop.org/drm/intel/issues/177
[i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
[i915#1837]: https://gitlab.freedesktop.org/drm/intel/issues/1837
[i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
[i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
[i915#433]: https://gitlab.freedesktop.org/drm/intel/issues/433
[i915#454]: https://gitlab.freedesktop.org/drm/intel/issues/454
[i915#468]: https://gitlab.freedesktop.org/drm/intel/issues/468
[i915#49]: https://gitlab.freedesktop.org/drm/intel/issues/49
[i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
[i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
[i915#588]: https://gitlab.freedesktop.org/drm/intel/issues/588
[i915#608]: https://gitlab.freedesktop.org/drm/intel/issues/608
[i915#62]: https://gitlab.freedesktop.org/drm/intel/issues/62
[i915#658]: https://gitlab.freedesktop.org/drm/intel/issues/658
[i915#699]: https://gitlab.freedesktop.org/drm/intel/issues/699
[i915#716]: https://gitlab.freedesktop.org/drm/intel/issues/716
[i915#93]: https://gitlab.freedesktop.org/drm/intel/issues/93
[i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95
Participating hosts (8 -> 8)
------------------------------
No changes in participating hosts
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_5633 -> IGTPW_4535
CI-20190529: 20190529
CI_DRM_8430: 2daa6f8cad645f49a898158190a20a893b4aabe3 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_4535: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/index.html
IGT_5633: c8c2e5ed5cd8e4b7a69a903f3f1653612086abcc @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4535/index.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 10+ messages in thread
end of thread, other threads:[~2020-05-15 11:44 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-05-13 17:02 [Intel-gfx] [PATCH i-g-t 1/2] lib/i915: Report scheduler caps for timeslicing Chris Wilson
2020-05-13 17:02 ` [igt-dev] " Chris Wilson
2020-05-13 17:02 ` [Intel-gfx] [PATCH i-g-t 2/2] i915/gem_exec_fence: Teach invalid-wait about invalid future fences Chris Wilson
2020-05-13 17:02 ` [igt-dev] " Chris Wilson
2020-05-13 18:10 ` [igt-dev] ✓ Fi.CI.BAT: success for series starting with [i-g-t,1/2] lib/i915: Report scheduler caps for timeslicing Patchwork
2020-05-13 21:47 ` [igt-dev] ✓ Fi.CI.IGT: " Patchwork
2020-05-14 3:41 ` [Intel-gfx] [igt-dev] [PATCH i-g-t 1/2] " Petri Latvala
2020-05-14 3:41 ` Petri Latvala
2020-05-15 11:44 ` [igt-dev] ✗ GitLab.Pipeline: failure for series starting with [i-g-t,1/2] " Patchwork
-- strict thread matches above, loose matches on Subject: below --
2020-05-05 13:38 [Intel-gfx] [PATCH i-g-t 1/2] " Chris Wilson
2020-05-06 2:29 ` [igt-dev] ✓ Fi.CI.IGT: success for series starting with [i-g-t,1/2] " Patchwork
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.