* [Intel-gfx] [PATCH i-g-t 1/6] lib/i915: Report unknown device as the future @ 2020-07-10 9:32 ` Chris Wilson 0 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson Since we likely know all the old devices, an unknown device is most likely a future device, so use -1u instead of 0 for its generation. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- lib/intel_device_info.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/intel_device_info.c b/lib/intel_device_info.c index 21f7a9570..dfa43f490 100644 --- a/lib/intel_device_info.c +++ b/lib/intel_device_info.c @@ -447,11 +447,11 @@ out: * Computes the Intel GFX generation for the given device id. * * Returns: - * The GFX generation on successful lookup, 0 on failure. + * The GFX generation on successful lookup, -1u on failure. */ unsigned intel_gen(uint16_t devid) { - return ffs(intel_get_device_info(devid)->gen); + return ffs(intel_get_device_info(devid)->gen) ?: -1u; } /** -- 2.27.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [igt-dev] [PATCH i-g-t 1/6] lib/i915: Report unknown device as the future @ 2020-07-10 9:32 ` Chris Wilson 0 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson Since we likely know all the old devices, an unknown device is most likely a future device, so use -1u instead of 0 for its generation. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- lib/intel_device_info.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/intel_device_info.c b/lib/intel_device_info.c index 21f7a9570..dfa43f490 100644 --- a/lib/intel_device_info.c +++ b/lib/intel_device_info.c @@ -447,11 +447,11 @@ out: * Computes the Intel GFX generation for the given device id. * * Returns: - * The GFX generation on successful lookup, 0 on failure. + * The GFX generation on successful lookup, -1u on failure. */ unsigned intel_gen(uint16_t devid) { - return ffs(intel_get_device_info(devid)->gen); + return ffs(intel_get_device_info(devid)->gen) ?: -1u; } /** -- 2.27.0 _______________________________________________ igt-dev mailing list igt-dev@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/igt-dev ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [Intel-gfx] [PATCH i-g-t 2/6] tools: Use the gt number stored in the device info 2020-07-10 9:32 ` [igt-dev] " Chris Wilson @ 2020-07-10 9:32 ` Chris Wilson -1 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson Don't use the encoded information within the PCI-ID for the GT value, as the rules keep changing. Use the device info instead. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- lib/intel_chipset.h | 1 - lib/intel_device_info.c | 23 ----------------------- tools/intel_l3_parity.c | 5 +++-- tools/intel_reg_checker.c | 5 +++++ 4 files changed, 8 insertions(+), 26 deletions(-) diff --git a/lib/intel_chipset.h b/lib/intel_chipset.h index 929fac530..84b259692 100644 --- a/lib/intel_chipset.h +++ b/lib/intel_chipset.h @@ -79,7 +79,6 @@ struct intel_device_info { const struct intel_device_info *intel_get_device_info(uint16_t devid) __attribute__((pure)); unsigned intel_gen(uint16_t devid) __attribute__((pure)); -unsigned intel_gt(uint16_t devid) __attribute__((pure)); extern enum pch_type intel_pch; diff --git a/lib/intel_device_info.c b/lib/intel_device_info.c index dfa43f490..5f3e8c191 100644 --- a/lib/intel_device_info.c +++ b/lib/intel_device_info.c @@ -453,26 +453,3 @@ unsigned intel_gen(uint16_t devid) { return ffs(intel_get_device_info(devid)->gen) ?: -1u; } - -/** - * intel_gt: - * @devid: pci device id - * - * Computes the Intel GFX GT size for the given device id. - * - * Returns: - * The GT size. 
- */ -unsigned intel_gt(uint16_t devid) -{ - unsigned mask = intel_gen(devid); - - if (mask >= 8) - mask = 0xf; - else if (mask >= 6) - mask = 0x3; - else - mask = 0; - - return (devid >> 4) & mask; -} diff --git a/tools/intel_l3_parity.c b/tools/intel_l3_parity.c index 340f94b1a..484dd0281 100644 --- a/tools/intel_l3_parity.c +++ b/tools/intel_l3_parity.c @@ -44,10 +44,11 @@ #include "intel_l3_parity.h" static unsigned int devid; + /* L3 size is always a function of banks. The number of banks cannot be * determined by number of slices however */ static inline int num_banks(void) { - switch (intel_gt(devid)) { + switch (intel_get_device_info(devid)->gt) { case 2: return 8; case 1: return 4; default: return 2; @@ -61,7 +62,7 @@ static inline int num_banks(void) { #define MAX_ROW (1<<12) #define MAX_BANKS_PER_SLICE 4 #define NUM_REGS (MAX_BANKS_PER_SLICE * NUM_SUBBANKS) -#define MAX_SLICES (intel_gt(devid) > 1 ? 2 : 1) +#define MAX_SLICES (intel_get_device_info(devid)->gt > 1 ? 2 : 1) #define REAL_MAX_SLICES 2 /* TODO support SLM config */ #define L3_SIZE ((MAX_ROW * 4) * NUM_SUBBANKS * num_banks()) diff --git a/tools/intel_reg_checker.c b/tools/intel_reg_checker.c index 3f90de824..2aefabc67 100644 --- a/tools/intel_reg_checker.c +++ b/tools/intel_reg_checker.c @@ -143,6 +143,11 @@ check_gfx_mode(void) check_perf_bit(gfx_mode, 13, "Flush TLB Invalidation Mode", true); } +static unsigned intel_gt(uint16_t __devid) +{ + return intel_get_device_info(__devid)->gt; +} + static void check_gt_mode(void) { -- 2.27.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [igt-dev] [PATCH i-g-t 2/6] tools: Use the gt number stored in the device info @ 2020-07-10 9:32 ` Chris Wilson 0 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson Don't use the encoded information within the PCI-ID for the GT value, as the rules keep changing. Use the device info instead. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- lib/intel_chipset.h | 1 - lib/intel_device_info.c | 23 ----------------------- tools/intel_l3_parity.c | 5 +++-- tools/intel_reg_checker.c | 5 +++++ 4 files changed, 8 insertions(+), 26 deletions(-) diff --git a/lib/intel_chipset.h b/lib/intel_chipset.h index 929fac530..84b259692 100644 --- a/lib/intel_chipset.h +++ b/lib/intel_chipset.h @@ -79,7 +79,6 @@ struct intel_device_info { const struct intel_device_info *intel_get_device_info(uint16_t devid) __attribute__((pure)); unsigned intel_gen(uint16_t devid) __attribute__((pure)); -unsigned intel_gt(uint16_t devid) __attribute__((pure)); extern enum pch_type intel_pch; diff --git a/lib/intel_device_info.c b/lib/intel_device_info.c index dfa43f490..5f3e8c191 100644 --- a/lib/intel_device_info.c +++ b/lib/intel_device_info.c @@ -453,26 +453,3 @@ unsigned intel_gen(uint16_t devid) { return ffs(intel_get_device_info(devid)->gen) ?: -1u; } - -/** - * intel_gt: - * @devid: pci device id - * - * Computes the Intel GFX GT size for the given device id. - * - * Returns: - * The GT size. - */ -unsigned intel_gt(uint16_t devid) -{ - unsigned mask = intel_gen(devid); - - if (mask >= 8) - mask = 0xf; - else if (mask >= 6) - mask = 0x3; - else - mask = 0; - - return (devid >> 4) & mask; -} diff --git a/tools/intel_l3_parity.c b/tools/intel_l3_parity.c index 340f94b1a..484dd0281 100644 --- a/tools/intel_l3_parity.c +++ b/tools/intel_l3_parity.c @@ -44,10 +44,11 @@ #include "intel_l3_parity.h" static unsigned int devid; + /* L3 size is always a function of banks. 
The number of banks cannot be * determined by number of slices however */ static inline int num_banks(void) { - switch (intel_gt(devid)) { + switch (intel_get_device_info(devid)->gt) { case 2: return 8; case 1: return 4; default: return 2; @@ -61,7 +62,7 @@ static inline int num_banks(void) { #define MAX_ROW (1<<12) #define MAX_BANKS_PER_SLICE 4 #define NUM_REGS (MAX_BANKS_PER_SLICE * NUM_SUBBANKS) -#define MAX_SLICES (intel_gt(devid) > 1 ? 2 : 1) +#define MAX_SLICES (intel_get_device_info(devid)->gt > 1 ? 2 : 1) #define REAL_MAX_SLICES 2 /* TODO support SLM config */ #define L3_SIZE ((MAX_ROW * 4) * NUM_SUBBANKS * num_banks()) diff --git a/tools/intel_reg_checker.c b/tools/intel_reg_checker.c index 3f90de824..2aefabc67 100644 --- a/tools/intel_reg_checker.c +++ b/tools/intel_reg_checker.c @@ -143,6 +143,11 @@ check_gfx_mode(void) check_perf_bit(gfx_mode, 13, "Flush TLB Invalidation Mode", true); } +static unsigned intel_gt(uint16_t __devid) +{ + return intel_get_device_info(__devid)->gt; +} + static void check_gt_mode(void) { -- 2.27.0 _______________________________________________ igt-dev mailing list igt-dev@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/igt-dev ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [Intel-gfx] [PATCH i-g-t 3/6] lib/i915: Pick a subtest conformant name for an unknown engine 2020-07-10 9:32 ` [igt-dev] " Chris Wilson @ 2020-07-10 9:32 ` Chris Wilson -1 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson IGT disallows ':' in its subtest names, and as we use the engine name for dynamic subtest names, pick a name that doesn't accidentally cause IGT to assert (even when those tests are not being run). Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- lib/i915/gem_engine_topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c index 2c0ae5a25..6c5fbe817 100644 --- a/lib/i915/gem_engine_topology.c +++ b/lib/i915/gem_engine_topology.c @@ -136,7 +136,7 @@ static void init_engine(struct intel_execution_engine2 *e2, } else { igt_debug("found unknown engine (%d, %d)\n", class, instance); e2->flags = -1; - ret = snprintf(e2->name, sizeof(e2->name), "%u:%u", + ret = snprintf(e2->name, sizeof(e2->name), "c%u_%u", class, instance); } -- 2.27.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [igt-dev] [PATCH i-g-t 3/6] lib/i915: Pick a subtest conformant name for an unknown engine @ 2020-07-10 9:32 ` Chris Wilson 0 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson IGT disallows ':' in its subtest names, and as we use the engine name for dynamic subtest names, pick a name that doesn't accidentally cause IGT to assert (even when those tests are not being run). Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- lib/i915/gem_engine_topology.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c index 2c0ae5a25..6c5fbe817 100644 --- a/lib/i915/gem_engine_topology.c +++ b/lib/i915/gem_engine_topology.c @@ -136,7 +136,7 @@ static void init_engine(struct intel_execution_engine2 *e2, } else { igt_debug("found unknown engine (%d, %d)\n", class, instance); e2->flags = -1; - ret = snprintf(e2->name, sizeof(e2->name), "%u:%u", + ret = snprintf(e2->name, sizeof(e2->name), "c%u_%u", class, instance); } -- 2.27.0 _______________________________________________ igt-dev mailing list igt-dev@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/igt-dev ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [Intel-gfx] [PATCH i-g-t 4/6] i915/gem_close: Adapt to allow duplicate handles 2020-07-10 9:32 ` [igt-dev] " Chris Wilson @ 2020-07-10 9:32 ` Chris Wilson -1 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson With an upcoming change, we can relax the rule about handles not being duplicated in the execocbj[]. Duplicate handles must not otherwise conflict in their placements (e.g. two EXEC_OBJECT_PINNED at different offsets), but otherwise if they are able to be resolved to the same GPU address, then the operation is harmless and decreed legal. Since this is a relaxation in the negative ABI, update the test case to allow the permissible duplicate handles. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- tests/i915/gem_close.c | 51 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 7 deletions(-) diff --git a/tests/i915/gem_close.c b/tests/i915/gem_close.c index 4fdc1ad79..a9bf2d2d4 100644 --- a/tests/i915/gem_close.c +++ b/tests/i915/gem_close.c @@ -24,21 +24,57 @@ #include "i915/gem.h" #include "igt.h" -static bool has_duplicate(int err) +static int batch_create(int fd) +{ + const uint32_t bbe = MI_BATCH_BUFFER_END; + uint32_t handle; + + handle = gem_create(fd, 4096); + gem_write(fd, handle, 0, &bbe, sizeof(bbe)); + + return handle; +} + +static int allows_duplicate(int fd) +{ + struct drm_i915_gem_exec_object2 obj[2] = { + { .handle = batch_create(fd), }, + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(obj), + .buffer_count = 1, + }; + int err; + + gem_execbuf(fd, &execbuf); + + obj[1] = obj[0]; + execbuf.buffer_count = 2; + + err = __gem_execbuf(fd, &execbuf); + gem_close(fd, obj[0].handle); + + return err; +} + +static bool is_duplicate(int err) { return err == -EINVAL || err == -EALREADY; } static void test_many_handles(int fd) { - uint32_t bbe = MI_BATCH_BUFFER_END; struct 
drm_i915_gem_execbuffer2 execbuf; struct drm_i915_gem_exec_object2 obj[2]; uint32_t clones[128]; /* XXX try with 1024 */ uint32_t original; + int expected; + + expected = allows_duplicate(fd); + if (expected) + igt_assert(is_duplicate(expected)); - original = gem_create(fd, 4096); - gem_write(fd, original, 0, &bbe, sizeof(bbe)); + original = batch_create(fd); memset(&execbuf, 0, sizeof(execbuf)); execbuf.buffers_ptr = to_user_pointer(obj); @@ -54,7 +90,8 @@ static void test_many_handles(int fd) gem_execbuf(fd, &execbuf); } - /* We do not allow the sam object to be referenced multiple times + /* + * We do not allow the same object to be referenced multiple times * within an execbuf; hence why this practice of cloning a handle * is only found within test cases. */ @@ -62,11 +99,11 @@ static void test_many_handles(int fd) obj[0].handle = original; for (int i = 0; i < ARRAY_SIZE(clones); i++) { obj[1].handle = clones[i]; - igt_assert(has_duplicate(__gem_execbuf(fd, &execbuf))); + igt_assert_eq(__gem_execbuf(fd, &execbuf), expected); } /* Any other clone pair should also be detected */ obj[1].handle = clones[0]; /* (last, first) */ - igt_assert(has_duplicate(__gem_execbuf(fd, &execbuf))); + igt_assert_eq(__gem_execbuf(fd, &execbuf), expected); execbuf.buffer_count = 1; /* Now close the original having used every clone */ -- 2.27.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [igt-dev] [PATCH i-g-t 4/6] i915/gem_close: Adapt to allow duplicate handles @ 2020-07-10 9:32 ` Chris Wilson 0 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson With an upcoming change, we can relax the rule about handles not being duplicated in the execocbj[]. Duplicate handles must not otherwise conflict in their placements (e.g. two EXEC_OBJECT_PINNED at different offsets), but otherwise if they are able to be resolved to the same GPU address, then the operation is harmless and decreed legal. Since this is a relaxation in the negative ABI, update the test case to allow the permissible duplicate handles. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- tests/i915/gem_close.c | 51 ++++++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 7 deletions(-) diff --git a/tests/i915/gem_close.c b/tests/i915/gem_close.c index 4fdc1ad79..a9bf2d2d4 100644 --- a/tests/i915/gem_close.c +++ b/tests/i915/gem_close.c @@ -24,21 +24,57 @@ #include "i915/gem.h" #include "igt.h" -static bool has_duplicate(int err) +static int batch_create(int fd) +{ + const uint32_t bbe = MI_BATCH_BUFFER_END; + uint32_t handle; + + handle = gem_create(fd, 4096); + gem_write(fd, handle, 0, &bbe, sizeof(bbe)); + + return handle; +} + +static int allows_duplicate(int fd) +{ + struct drm_i915_gem_exec_object2 obj[2] = { + { .handle = batch_create(fd), }, + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(obj), + .buffer_count = 1, + }; + int err; + + gem_execbuf(fd, &execbuf); + + obj[1] = obj[0]; + execbuf.buffer_count = 2; + + err = __gem_execbuf(fd, &execbuf); + gem_close(fd, obj[0].handle); + + return err; +} + +static bool is_duplicate(int err) { return err == -EINVAL || err == -EALREADY; } static void test_many_handles(int fd) { - uint32_t bbe = MI_BATCH_BUFFER_END; struct drm_i915_gem_execbuffer2 execbuf; struct drm_i915_gem_exec_object2 
obj[2]; uint32_t clones[128]; /* XXX try with 1024 */ uint32_t original; + int expected; + + expected = allows_duplicate(fd); + if (expected) + igt_assert(is_duplicate(expected)); - original = gem_create(fd, 4096); - gem_write(fd, original, 0, &bbe, sizeof(bbe)); + original = batch_create(fd); memset(&execbuf, 0, sizeof(execbuf)); execbuf.buffers_ptr = to_user_pointer(obj); @@ -54,7 +90,8 @@ static void test_many_handles(int fd) gem_execbuf(fd, &execbuf); } - /* We do not allow the sam object to be referenced multiple times + /* + * We do not allow the same object to be referenced multiple times * within an execbuf; hence why this practice of cloning a handle * is only found within test cases. */ @@ -62,11 +99,11 @@ static void test_many_handles(int fd) obj[0].handle = original; for (int i = 0; i < ARRAY_SIZE(clones); i++) { obj[1].handle = clones[i]; - igt_assert(has_duplicate(__gem_execbuf(fd, &execbuf))); + igt_assert_eq(__gem_execbuf(fd, &execbuf), expected); } /* Any other clone pair should also be detected */ obj[1].handle = clones[0]; /* (last, first) */ - igt_assert(has_duplicate(__gem_execbuf(fd, &execbuf))); + igt_assert_eq(__gem_execbuf(fd, &execbuf), expected); execbuf.buffer_count = 1; /* Now close the original having used every clone */ -- 2.27.0 _______________________________________________ igt-dev mailing list igt-dev@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/igt-dev ^ permalink raw reply related [flat|nested] 16+ messages in thread
* Re: [Intel-gfx] [igt-dev] [PATCH i-g-t 4/6] i915/gem_close: Adapt to allow duplicate handles 2020-07-10 9:32 ` [igt-dev] " Chris Wilson @ 2020-07-10 14:11 ` Tvrtko Ursulin -1 siblings, 0 replies; 16+ messages in thread From: Tvrtko Ursulin @ 2020-07-10 14:11 UTC (permalink / raw) To: Chris Wilson, intel-gfx; +Cc: igt-dev On 10/07/2020 10:32, Chris Wilson wrote: > With an upcoming change, we can relax the rule about handles not being > duplicated in the execocbj[]. Duplicate handles must not otherwise > conflict in their placements (e.g. two EXEC_OBJECT_PINNED at different > offsets), but otherwise if they are able to be resolved to the same GPU > address, then the operation is harmless and decreed legal. > > Since this is a relaxation in the negative ABI, update the test case to > allow the permissible duplicate handles. > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> > --- > tests/i915/gem_close.c | 51 ++++++++++++++++++++++++++++++++++++------ > 1 file changed, 44 insertions(+), 7 deletions(-) > > diff --git a/tests/i915/gem_close.c b/tests/i915/gem_close.c > index 4fdc1ad79..a9bf2d2d4 100644 > --- a/tests/i915/gem_close.c > +++ b/tests/i915/gem_close.c > @@ -24,21 +24,57 @@ > #include "i915/gem.h" > #include "igt.h" > > -static bool has_duplicate(int err) > +static int batch_create(int fd) > +{ > + const uint32_t bbe = MI_BATCH_BUFFER_END; > + uint32_t handle; > + > + handle = gem_create(fd, 4096); > + gem_write(fd, handle, 0, &bbe, sizeof(bbe)); > + > + return handle; > +} > + > +static int allows_duplicate(int fd) > +{ > + struct drm_i915_gem_exec_object2 obj[2] = { > + { .handle = batch_create(fd), }, > + }; > + struct drm_i915_gem_execbuffer2 execbuf = { > + .buffers_ptr = to_user_pointer(obj), > + .buffer_count = 1, > + }; > + int err; > + > + gem_execbuf(fd, &execbuf); > + > + obj[1] = obj[0]; > + execbuf.buffer_count = 2; > + > + err = __gem_execbuf(fd, &execbuf); > + gem_close(fd, obj[0].handle); > + > + return err; > +} > + > +static bool 
is_duplicate(int err) > { > return err == -EINVAL || err == -EALREADY; > } > > static void test_many_handles(int fd) > { > - uint32_t bbe = MI_BATCH_BUFFER_END; > struct drm_i915_gem_execbuffer2 execbuf; > struct drm_i915_gem_exec_object2 obj[2]; > uint32_t clones[128]; /* XXX try with 1024 */ > uint32_t original; > + int expected; > + > + expected = allows_duplicate(fd); > + if (expected) > + igt_assert(is_duplicate(expected)); > > - original = gem_create(fd, 4096); > - gem_write(fd, original, 0, &bbe, sizeof(bbe)); > + original = batch_create(fd); > > memset(&execbuf, 0, sizeof(execbuf)); > execbuf.buffers_ptr = to_user_pointer(obj); > @@ -54,7 +90,8 @@ static void test_many_handles(int fd) > gem_execbuf(fd, &execbuf); > } > > - /* We do not allow the sam object to be referenced multiple times > + /* > + * We do not allow the same object to be referenced multiple times > * within an execbuf; hence why this practice of cloning a handle > * is only found within test cases. > */ > @@ -62,11 +99,11 @@ static void test_many_handles(int fd) > obj[0].handle = original; > for (int i = 0; i < ARRAY_SIZE(clones); i++) { > obj[1].handle = clones[i]; > - igt_assert(has_duplicate(__gem_execbuf(fd, &execbuf))); > + igt_assert_eq(__gem_execbuf(fd, &execbuf), expected); > } > /* Any other clone pair should also be detected */ > obj[1].handle = clones[0]; /* (last, first) */ > - igt_assert(has_duplicate(__gem_execbuf(fd, &execbuf))); > + igt_assert_eq(__gem_execbuf(fd, &execbuf), expected); > execbuf.buffer_count = 1; > > /* Now close the original having used every clone */ > Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Regards, Tvrtko _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [igt-dev] [PATCH i-g-t 4/6] i915/gem_close: Adapt to allow duplicate handles @ 2020-07-10 14:11 ` Tvrtko Ursulin 0 siblings, 0 replies; 16+ messages in thread From: Tvrtko Ursulin @ 2020-07-10 14:11 UTC (permalink / raw) To: Chris Wilson, intel-gfx; +Cc: igt-dev On 10/07/2020 10:32, Chris Wilson wrote: > With an upcoming change, we can relax the rule about handles not being > duplicated in the execocbj[]. Duplicate handles must not otherwise > conflict in their placements (e.g. two EXEC_OBJECT_PINNED at different > offsets), but otherwise if they are able to be resolved to the same GPU > address, then the operation is harmless and decreed legal. > > Since this is a relaxation in the negative ABI, update the test case to > allow the permissible duplicate handles. > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> > --- > tests/i915/gem_close.c | 51 ++++++++++++++++++++++++++++++++++++------ > 1 file changed, 44 insertions(+), 7 deletions(-) > > diff --git a/tests/i915/gem_close.c b/tests/i915/gem_close.c > index 4fdc1ad79..a9bf2d2d4 100644 > --- a/tests/i915/gem_close.c > +++ b/tests/i915/gem_close.c > @@ -24,21 +24,57 @@ > #include "i915/gem.h" > #include "igt.h" > > -static bool has_duplicate(int err) > +static int batch_create(int fd) > +{ > + const uint32_t bbe = MI_BATCH_BUFFER_END; > + uint32_t handle; > + > + handle = gem_create(fd, 4096); > + gem_write(fd, handle, 0, &bbe, sizeof(bbe)); > + > + return handle; > +} > + > +static int allows_duplicate(int fd) > +{ > + struct drm_i915_gem_exec_object2 obj[2] = { > + { .handle = batch_create(fd), }, > + }; > + struct drm_i915_gem_execbuffer2 execbuf = { > + .buffers_ptr = to_user_pointer(obj), > + .buffer_count = 1, > + }; > + int err; > + > + gem_execbuf(fd, &execbuf); > + > + obj[1] = obj[0]; > + execbuf.buffer_count = 2; > + > + err = __gem_execbuf(fd, &execbuf); > + gem_close(fd, obj[0].handle); > + > + return err; > +} > + > +static bool is_duplicate(int err) > { > return err == -EINVAL || err 
== -EALREADY; > } > > static void test_many_handles(int fd) > { > - uint32_t bbe = MI_BATCH_BUFFER_END; > struct drm_i915_gem_execbuffer2 execbuf; > struct drm_i915_gem_exec_object2 obj[2]; > uint32_t clones[128]; /* XXX try with 1024 */ > uint32_t original; > + int expected; > + > + expected = allows_duplicate(fd); > + if (expected) > + igt_assert(is_duplicate(expected)); > > - original = gem_create(fd, 4096); > - gem_write(fd, original, 0, &bbe, sizeof(bbe)); > + original = batch_create(fd); > > memset(&execbuf, 0, sizeof(execbuf)); > execbuf.buffers_ptr = to_user_pointer(obj); > @@ -54,7 +90,8 @@ static void test_many_handles(int fd) > gem_execbuf(fd, &execbuf); > } > > - /* We do not allow the sam object to be referenced multiple times > + /* > + * We do not allow the same object to be referenced multiple times > * within an execbuf; hence why this practice of cloning a handle > * is only found within test cases. > */ > @@ -62,11 +99,11 @@ static void test_many_handles(int fd) > obj[0].handle = original; > for (int i = 0; i < ARRAY_SIZE(clones); i++) { > obj[1].handle = clones[i]; > - igt_assert(has_duplicate(__gem_execbuf(fd, &execbuf))); > + igt_assert_eq(__gem_execbuf(fd, &execbuf), expected); > } > /* Any other clone pair should also be detected */ > obj[1].handle = clones[0]; /* (last, first) */ > - igt_assert(has_duplicate(__gem_execbuf(fd, &execbuf))); > + igt_assert_eq(__gem_execbuf(fd, &execbuf), expected); > execbuf.buffer_count = 1; > > /* Now close the original having used every clone */ > Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Regards, Tvrtko _______________________________________________ igt-dev mailing list igt-dev@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/igt-dev ^ permalink raw reply [flat|nested] 16+ messages in thread
* [Intel-gfx] [PATCH i-g-t 5/6] i915/gem_exec_schedule: Try to spot unfairness 2020-07-10 9:32 ` [igt-dev] " Chris Wilson @ 2020-07-10 9:32 ` Chris Wilson -1 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson An important property for multi-client systems is that each client gets a 'fair' allotment of system time. (Where fairness is at the whim of the context properties, such as priorities.) This test forks N independent clients (albeit they happen to share a single vm), and does an equal amount of work in client and asserts that they take an equal amount of time. Though we have never claimed to have a completely fair scheduler, that is what is expected. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Ramalingam C <ramalingam.c@intel.com> --- tests/i915/gem_exec_schedule.c | 805 +++++++++++++++++++++++++++++++++ 1 file changed, 805 insertions(+) diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c index 0e0362502..1b3cbfd74 100644 --- a/tests/i915/gem_exec_schedule.c +++ b/tests/i915/gem_exec_schedule.c @@ -29,6 +29,7 @@ #include <sys/poll.h> #include <sys/ioctl.h> #include <sys/mman.h> +#include <sys/resource.h> #include <sys/syscall.h> #include <sched.h> #include <signal.h> @@ -2505,6 +2506,789 @@ static void measure_semaphore_power(int i915) rapl_close(&pkg); } +static int read_timestamp_frequency(int i915) +{ + int value = 0; + drm_i915_getparam_t gp = { + .value = &value, + .param = I915_PARAM_CS_TIMESTAMP_FREQUENCY, + }; + ioctl(i915, DRM_IOCTL_I915_GETPARAM, &gp); + return value; +} + +static uint64_t div64_u64_round_up(uint64_t x, uint64_t y) +{ + return (x + y - 1) / y; +} + +static uint64_t ns_to_ticks(int i915, uint64_t ns) +{ + return div64_u64_round_up(ns * read_timestamp_frequency(i915), + NSEC_PER_SEC); +} + +static uint64_t ticks_to_ns(int i915, uint64_t ticks) +{ + return 
div64_u64_round_up(ticks * NSEC_PER_SEC, + read_timestamp_frequency(i915)); +} + +#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) + +#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1) +#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2)) +/* Opcodes for MI_MATH_INSTR */ +#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0) +#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2) +#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2) +#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1) +#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1) +#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0) +#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0) +#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0) +#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0) +#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0) +#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2) +#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2) +/* Registers used as operands in MI_MATH_INSTR */ +#define MI_MATH_REG(x) (x) +#define MI_MATH_REG_SRCA 0x20 +#define MI_MATH_REG_SRCB 0x21 +#define MI_MATH_REG_ACCU 0x31 +#define MI_MATH_REG_ZF 0x32 +#define MI_MATH_REG_CF 0x33 + +#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1) + +static void delay(int i915, + const struct intel_execution_engine2 *e, + uint32_t handle, + uint64_t addr, + uint64_t ns) +{ + const int use_64b = intel_gen(intel_get_drm_devid(i915)) >= 8; + const uint32_t base = gem_engine_mmio_base(i915, e->name); +#define CS_GPR(x) (base + 0x600 + 8 * (x)) +#define RUNTIME (base + 0x3a8) + enum { START_TS, NOW_TS }; + uint32_t *map, *cs, *jmp; + + igt_require(base); + + /* Loop until CTX_TIMESTAMP - initial > @ns */ + + cs = map = gem_mmap__device_coherent(i915, handle, 0, 4096, PROT_WRITE); + + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(START_TS) + 4; + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_REG; + *cs++ = RUNTIME; + *cs++ = CS_GPR(START_TS); + + while (offset_in_page(cs) & 63) + 
*cs++ = 0; + jmp = cs; + + *cs++ = 0x5 << 23; /* MI_ARB_CHECK */ + + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(NOW_TS) + 4; + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_REG; + *cs++ = RUNTIME; + *cs++ = CS_GPR(NOW_TS); + + /* delta = now - start; inverted to match COND_BBE */ + *cs++ = MI_MATH(4); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS)); + *cs++ = MI_MATH_SUB; + *cs++ = MI_MATH_STOREINV(MI_MATH_REG(NOW_TS), MI_MATH_REG_ACCU); + + /* Save delta for reading by COND_BBE */ + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = CS_GPR(NOW_TS); + *cs++ = addr + 4000; + *cs++ = addr >> 32; + + /* Delay between SRM and COND_BBE to post the writes */ + for (int n = 0; n < 8; n++) { + *cs++ = MI_STORE_DWORD_IMM; + if (use_64b) { + *cs++ = addr + 4064; + *cs++ = addr >> 32; + } else { + *cs++ = 0; + *cs++ = addr + 4064; + } + *cs++ = 0; + } + + /* Break if delta > ns */ + *cs++ = MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE | (1 + use_64b); + *cs++ = ~ns_to_ticks(i915, ns); + *cs++ = addr + 4000; + *cs++ = addr >> 32; + + /* Otherwise back to recalculating delta */ + *cs++ = MI_BATCH_BUFFER_START | 1 << 8 | use_64b; + *cs++ = addr + offset_in_page(jmp); + *cs++ = addr >> 32; + + munmap(map, 4096); +} + +static struct drm_i915_gem_exec_object2 +delay_create(int i915, uint32_t ctx, + const struct intel_execution_engine2 *e, + uint64_t target_ns) +{ + struct drm_i915_gem_exec_object2 obj = { + .handle = batch_create(i915), + .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS, + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(&obj), + .buffer_count = 1, + .rsvd1 = ctx, + .flags = e->flags, + }; + + obj.offset = obj.handle << 12; + gem_execbuf(i915, &execbuf); + gem_sync(i915, obj.handle); + + delay(i915, e, obj.handle, obj.offset, target_ns); + + obj.flags |= EXEC_OBJECT_PINNED; + return obj; +} + +static void tslog(int i915, + const struct intel_execution_engine2 *e, + 
uint32_t handle, + uint64_t addr) +{ + const int use_64b = intel_gen(intel_get_drm_devid(i915)) >= 8; + const uint32_t base = gem_engine_mmio_base(i915, e->name); +#define CS_GPR(x) (base + 0x600 + 8 * (x)) +#define CS_TIMESTAMP (base + 0x358) + enum { INC, MASK, ADDR }; + uint32_t *timestamp_lo, *addr_lo; + uint32_t *map, *cs; + + igt_require(base); + + map = gem_mmap__device_coherent(i915, handle, 0, 4096, PROT_WRITE); + cs = map + 512; + + /* Record the current CS_TIMESTAMP into a journal [a 512 slot ring]. */ + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = CS_TIMESTAMP; + timestamp_lo = cs; + *cs++ = addr; + *cs++ = addr >> 32; + + /* Load the address + inc & mask variables */ + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(ADDR); + addr_lo = cs; + *cs++ = addr; + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(ADDR) + 4; + *cs++ = addr >> 32; + + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(INC); + *cs++ = 4; + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(INC) + 4; + *cs++ = 0; + + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(MASK); + *cs++ = 0xfffff7ff; + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(MASK) + 4; + *cs++ = 0xffffffff; + + /* Increment the [ring] address for saving CS_TIMESTAMP */ + *cs++ = MI_MATH(8); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(INC)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(ADDR)); + *cs++ = MI_MATH_ADD; + *cs++ = MI_MATH_STORE(MI_MATH_REG(ADDR), MI_MATH_REG_ACCU); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(ADDR)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(MASK)); + *cs++ = MI_MATH_AND; + *cs++ = MI_MATH_STORE(MI_MATH_REG(ADDR), MI_MATH_REG_ACCU); + + /* Rewrite the batch buffer for the next execution */ + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = CS_GPR(ADDR); + *cs++ = addr + offset_in_page(timestamp_lo); + *cs++ = addr >> 32; + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = CS_GPR(ADDR); + *cs++ = addr + offset_in_page(addr_lo); + *cs++ = addr 
>> 32; + + *cs++ = MI_BATCH_BUFFER_END; + + munmap(map, 4096); +} + +static struct drm_i915_gem_exec_object2 +tslog_create(int i915, uint32_t ctx, const struct intel_execution_engine2 *e) +{ + struct drm_i915_gem_exec_object2 obj = { + .handle = batch_create(i915), + .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS, + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(&obj), + .buffer_count = 1, + .rsvd1 = ctx, + .flags = e->flags, + }; + + obj.offset = obj.handle << 12; + gem_execbuf(i915, &execbuf); + gem_sync(i915, obj.handle); + + tslog(i915, e, obj.handle, obj.offset); + + obj.flags |= EXEC_OBJECT_PINNED; + return obj; +} + +static int cmp_u32(const void *A, const void *B) +{ + const uint32_t *a = A, *b = B; + + if (*a < *b) + return -1; + else if (*a > *b) + return 1; + else + return 0; +} + +static struct intel_execution_engine2 +pick_random_engine(int i915, const struct intel_execution_engine2 *not) +{ + const struct intel_execution_engine2 *e; + unsigned int count = 0; + + __for_each_physical_engine(i915, e) { + if (e->flags == not->flags) + continue; + if (!gem_class_has_mutable_submission(i915, e->class)) + continue; + count++; + } + if (!count) + return *not; + + count = rand() % count; + __for_each_physical_engine(i915, e) { + if (e->flags == not->flags) + continue; + if (!gem_class_has_mutable_submission(i915, e->class)) + continue; + if (!count--) + break; + } + + return *e; +} + +static void fair_child(int i915, uint32_t ctx, + const struct intel_execution_engine2 *e, + uint64_t frame_ns, + int timeline, + uint32_t common, + unsigned int flags, + unsigned long *ctl, + unsigned long *out) +#define F_SYNC (1 << 0) +#define F_PACE (1 << 1) +#define F_FLOW (1 << 2) +#define F_HALF (1 << 3) +#define F_SOLO (1 << 4) +#define F_SPARE (1 << 5) +#define F_NEXT (1 << 6) +#define F_VIP (1 << 7) +#define F_RRUL (1 << 8) +#define F_SHARE (1 << 9) +#define F_PING (1 << 10) +#define F_THROTTLE (1 << 11) +#define F_ISOLATE (1 << 12) +{ + 
const int batches_per_frame = flags & F_SOLO ? 1 : 3; + struct drm_i915_gem_exec_object2 obj[4] = { + {}, + { + .handle = common ?: gem_create(i915, 4096), + }, + delay_create(i915, ctx, e, frame_ns / batches_per_frame), + delay_create(i915, ctx, e, frame_ns / batches_per_frame), + }; + struct intel_execution_engine2 ping = *e; + int p_fence = -1, n_fence = -1; + unsigned long count = 0; + int n; + + srandom(getpid()); + if (flags & F_PING) + ping = pick_random_engine(i915, e); + obj[0] = tslog_create(i915, ctx, &ping); + + while (!READ_ONCE(*ctl)) { + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(obj), + .buffer_count = 4, + .rsvd1 = ctx, + .rsvd2 = -1, + .flags = e->flags, + }; + + if (flags & F_FLOW) { + unsigned int seq; + + seq = count; + if (flags & F_NEXT) + seq++; + + execbuf.rsvd2 = + sw_sync_timeline_create_fence(timeline, seq); + execbuf.flags |= I915_EXEC_FENCE_IN; + } + + execbuf.flags |= I915_EXEC_FENCE_OUT; + gem_execbuf_wr(i915, &execbuf); + n_fence = execbuf.rsvd2 >> 32; + execbuf.flags &= ~(I915_EXEC_FENCE_OUT | I915_EXEC_FENCE_IN); + for (n = 1; n < batches_per_frame; n++) + gem_execbuf(i915, &execbuf); + close(execbuf.rsvd2); + + execbuf.buffer_count = 1; + execbuf.batch_start_offset = 2048; + execbuf.flags = ping.flags | I915_EXEC_FENCE_IN; + execbuf.rsvd2 = n_fence; + gem_execbuf(i915, &execbuf); + + if (flags & F_PACE && p_fence != -1) { + struct pollfd pfd = { + .fd = p_fence, + .events = POLLIN, + }; + poll(&pfd, 1, -1); + } + close(p_fence); + + if (flags & F_SYNC) { + struct pollfd pfd = { + .fd = n_fence, + .events = POLLIN, + }; + poll(&pfd, 1, -1); + } + + if (flags & F_THROTTLE) + igt_ioctl(i915, DRM_IOCTL_I915_GEM_THROTTLE, 0); + + igt_swap(obj[2], obj[3]); + igt_swap(p_fence, n_fence); + count++; + } + close(p_fence); + + gem_close(i915, obj[3].handle); + gem_close(i915, obj[2].handle); + if (obj[1].handle != common) + gem_close(i915, obj[1].handle); + + gem_sync(i915, obj[0].handle); + if (out) { + 
uint32_t *map; + + map = gem_mmap__device_coherent(i915, obj[0].handle, + 0, 4096, PROT_WRITE); + for (n = 1; n < min(count, 512); n++) { + igt_assert(map[n]); + map[n - 1] = map[n] - map[n - 1]; + } + qsort(map, --n, sizeof(*map), cmp_u32); + *out = ticks_to_ns(i915, map[n / 2]); + munmap(map, 4096); + } + gem_close(i915, obj[0].handle); +} + +static int cmp_ul(const void *A, const void *B) +{ + const unsigned long *a = A, *b = B; + + if (*a < *b) + return -1; + else if (*a > *b) + return 1; + else + return 0; +} + +static uint64_t d_cpu_time(const struct rusage *a, const struct rusage *b) +{ + uint64_t cpu_time = 0; + + cpu_time += (a->ru_utime.tv_sec - b->ru_utime.tv_sec) * NSEC_PER_SEC; + cpu_time += (a->ru_utime.tv_usec - b->ru_utime.tv_usec) * 1000; + + cpu_time += (a->ru_stime.tv_sec - b->ru_stime.tv_sec) * NSEC_PER_SEC; + cpu_time += (a->ru_stime.tv_usec - b->ru_stime.tv_usec) * 1000; + + return cpu_time; +} + +static void timeline_advance(int timeline, int delay_ns) +{ + struct timespec tv = { .tv_nsec = delay_ns }; + nanosleep(&tv, NULL); + sw_sync_timeline_inc(timeline, 1); +} + +static void fairness(int i915, + const struct intel_execution_engine2 *e, + int timeout, unsigned int flags) +{ + const int frame_ns = 16666 * 1000; + const int fence_ns = flags & F_HALF ? 
2 * frame_ns : frame_ns; + unsigned long *result; + uint32_t common = 0; + + igt_require(gem_class_has_mutable_submission(i915, e->class)); + + if (flags & F_SHARE) + common = gem_create(i915, 4095); + + result = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0); + + for (int n = 2; n <= 64; n <<= 1) { /* 32 == 500us per client */ + int timeline = sw_sync_timeline_create(); + int nfences = timeout * NSEC_PER_SEC / fence_ns + 1; + const int nchild = n - 1; /* odd for easy medians */ + const int child_ns = frame_ns / (nchild + !!(flags & F_SPARE)); + const int lo = nchild / 4; + const int hi = (3 * nchild + 3) / 4 - 1; + struct rusage old_usage, usage; + uint64_t cpu_time, d_time; + unsigned long vip = -1; + struct timespec tv; + struct igt_mean m; + + if (flags & F_PING) { + struct intel_execution_engine2 *ping; + + __for_each_physical_engine(i915, ping) { + if (ping->flags == e->flags) + continue; + + igt_fork(child, 1) { + uint32_t ctx = gem_context_clone_with_engines(i915, 0); + + fair_child(i915, ctx, ping, + child_ns / 8, + -1, common, + F_SOLO | F_PACE | F_SHARE, + &result[nchild], + NULL); + + gem_context_destroy(i915, ctx); + } + } + } + + memset(result, 0, (nchild + 1) * sizeof(result[0])); + getrusage(RUSAGE_CHILDREN, &old_usage); + igt_nsec_elapsed(memset(&tv, 0, sizeof(tv))); + igt_fork(child, nchild) { + uint32_t ctx; + + if (flags & F_ISOLATE) { + int clone, dmabuf = -1; + + if (common) + dmabuf = prime_handle_to_fd(i915, common); + + clone = gem_reopen_driver(i915); + gem_context_copy_engines(i915, 0, clone, 0); + i915 = clone; + + if (dmabuf != -1) + common = prime_fd_to_handle(i915, dmabuf); + } + + ctx = gem_context_clone_with_engines(i915, 0); + + if (flags & F_VIP && child == 0) { + gem_context_set_priority(i915, ctx, MAX_PRIO); + flags |= F_FLOW; + } + if (flags & F_RRUL && child == 0) + flags |= F_SOLO | F_FLOW | F_SYNC; + + fair_child(i915, ctx, e, child_ns, + timeline, common, flags, + &result[nchild], + &result[child]); + + 
gem_context_destroy(i915, ctx); + } + + while (nfences--) + timeline_advance(timeline, fence_ns); + + result[nchild] = 1; + for (int child = 0; child < nchild; child++) { + while (!READ_ONCE(result[child])) + timeline_advance(timeline, fence_ns); + } + + igt_waitchildren(); + close(timeline); + + /* Are we running out of CPU time, and fail to submit frames? */ + d_time = igt_nsec_elapsed(&tv); + getrusage(RUSAGE_CHILDREN, &usage); + cpu_time = d_cpu_time(&usage, &old_usage); + if (10 * cpu_time > 9 * d_time) { + if (nchild > 7) + break; + + igt_skip_on_f(10 * cpu_time > 9 * d_time, + "%.0f%% CPU usage, presuming capacity exceeded\n", + 100. * cpu_time / d_time); + } + + igt_mean_init(&m); + for (int child = 0; child < nchild; child++) + igt_mean_add(&m, result[child]); + + if (flags & (F_VIP | F_RRUL)) + vip = result[0]; + + qsort(result, nchild, sizeof(*result), cmp_ul); + igt_info("%2d clients, range: [%.1f, %.1f], iqr: [%.1f, %.1f], median: %.1f, mean: %.1f ± %.2f ms\n", + nchild, + 1e-6 * result[0], 1e-6 * result[nchild - 1], + 1e-6 * result[lo], 1e-6 * result[hi], + 1e-6 * result[nchild / 2], + 1e-6 * igt_mean_get(&m), + 1e-6 * sqrt(igt_mean_get_variance(&m))); + + if (vip != -1) { + igt_info("VIP interval %.2f ms\n", 1e-6 * vip); + igt_assert(4 * vip > 3 * fence_ns && + 3 * vip < 4 * fence_ns); + } + + /* May be slowed due to sheer volume of context switches */ + igt_assert(4 * igt_mean_get(&m) > 3 * fence_ns && + igt_mean_get(&m) < 3 * fence_ns); + + igt_assert(4 * igt_mean_get(&m) > 3 * result[nchild / 2] && + 3 * igt_mean_get(&m) < 4 * result[nchild / 2]); + + igt_assert(2 * (result[hi] - result[lo]) < result[nchild / 2]); + } + + munmap(result, 4096); + if (common) + gem_close(i915, common); +} + +static void test_fairness(int i915, int timeout) +{ + static const struct { + const char *name; + unsigned int flags; + } fair[] = { + /* + * none - maximal greed in each client + * + * Push as many frames from each client as fast as possible + */ + { "none", 0 
}, + { "none-vip", F_VIP }, /* one vip client must meet deadlines */ + { "none-solo", F_SOLO }, /* 1 batch per frame per client */ + { "none-share", F_SHARE }, /* read from a common buffer */ + { "none-rrul", F_RRUL }, /* "realtime-response under load" */ + { "none-ping", F_PING }, /* measure inter-engine fairness */ + + /* + * throttle - original per client throttling + * + * Used for front buffering rendering where there is no + * external frame marker. Each client tries to only keep + * 20ms of work submitted, though that measurement is + * flawed... + * + * This is used by Xorg to try and maintain some resemblance + * of input/output consistency when being fed a continuous + * stream of X11 draw requests straight into scanout, where + * the clients may submit the work faster than can be drawn. + * + * Throttling tracks requests per-file (and assumes that + * all requests are in submission order across the whole file), + * so we split each child to its own fd. + */ + { "throttle", F_THROTTLE | F_ISOLATE }, + { "throttle-vip", F_THROTTLE | F_ISOLATE | F_VIP }, + { "throttle-solo", F_THROTTLE | F_ISOLATE | F_SOLO }, + { "throttle-share", F_THROTTLE | F_ISOLATE | F_SHARE }, + { "throttle-rrul", F_THROTTLE | F_ISOLATE | F_RRUL }, + + /* + * pace - mesa "submit double buffering" + * + * Submit a frame, wait for previous frame to start. This + * prevents each client from getting too far ahead of its + * rendering, maintaining a consistent input/output latency. 
+ */ + { "pace", F_PACE }, + { "pace-solo", F_PACE | F_SOLO}, + { "pace-share", F_PACE | F_SHARE}, + { "pace-ping", F_PACE | F_SHARE | F_PING}, + + /* sync - only submit a frame at a time */ + { "sync", F_SYNC }, + { "sync-vip", F_SYNC | F_VIP }, + { "sync-solo", F_SYNC | F_SOLO }, + + /* flow - synchronise execution against the clock (vblank) */ + { "flow", F_PACE | F_FLOW }, + { "flow-share", F_PACE | F_FLOW | F_SHARE }, + { "flow-ping", F_PACE | F_FLOW | F_SHARE | F_PING }, + + /* next - submit ahead of the clock (vblank double buffering) */ + { "next", F_PACE | F_FLOW | F_NEXT }, + { "next-share", F_PACE | F_FLOW | F_NEXT | F_SHARE }, + { "next-ping", F_PACE | F_FLOW | F_NEXT | F_SHARE | F_PING }, + + /* spare - underutilise by a single client timeslice */ + { "spare", F_PACE | F_FLOW | F_SPARE }, + + /* half - run at half pace (submit 16ms of work every 32ms) */ + { "half", F_PACE | F_FLOW | F_HALF }, + + {} + }; + + for (typeof(*fair) *f = fair; f->name; f++) { + igt_subtest_with_dynamic_f("fair-%s", f->name) { + const struct intel_execution_engine2 *e; + + igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8); + + __for_each_physical_engine(i915, e) { + if (!gem_class_can_store_dword(i915, e->class)) + continue; + + igt_dynamic_f("%s", e->name) + fairness(i915, e, timeout, f->flags); + } + } + } +} + +static uint32_t read_ctx_timestamp(int i915, + uint32_t ctx, + const struct intel_execution_engine2 *e) +{ + const int use_64b = intel_gen(intel_get_drm_devid(i915)) >= 8; + const uint32_t base = gem_engine_mmio_base(i915, e->name); + struct drm_i915_gem_relocation_entry reloc; + struct drm_i915_gem_exec_object2 obj = { + .handle = gem_create(i915, 4096), + .offset = 32 << 20, + .relocs_ptr = to_user_pointer(&reloc), + .relocation_count = 1, + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(&obj), + .buffer_count = 1, + .flags = e->flags, + .rsvd1 = ctx, + }; +#define RUNTIME (base + 0x3a8) + uint32_t *map, *cs; + uint32_t ts; 
+ + igt_require(base); + + cs = map = gem_mmap__device_coherent(i915, obj.handle, + 0, 4096, PROT_WRITE); + + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = RUNTIME; + memset(&reloc, 0, sizeof(reloc)); + reloc.target_handle = obj.handle; + reloc.presumed_offset = obj.offset; + reloc.offset = offset_in_page(cs); + reloc.delta = 4000; + *cs++ = obj.offset + 4000; + *cs++ = obj.offset >> 32; + + *cs++ = MI_BATCH_BUFFER_END; + + gem_execbuf(i915, &execbuf); + gem_sync(i915, obj.handle); + gem_close(i915, obj.handle); + + ts = map[1000]; + munmap(map, 4096); + + return ts; +} + +static void fairslice(int i915, const struct intel_execution_engine2 *e) +{ + igt_spin_t *spin[3]; + uint32_t ctx[3]; + uint32_t ts[3]; + + for (int i = 0; i < ARRAY_SIZE(ctx); i++) { + ctx[i] = gem_context_clone_with_engines(i915, 0); + spin[i] = igt_spin_new(i915, .ctx = ctx[i], .engine = e->flags); + } + + sleep(2); /* over the course of many timeslices */ + + for (int i = 0; i < ARRAY_SIZE(ctx); i++) { + igt_assert(gem_bo_busy(i915, spin[i]->handle)); + igt_spin_end(spin[i]); + + ts[i] = read_ctx_timestamp(i915, ctx[i], e); + } + + for (int i = 0; i < ARRAY_SIZE(ctx); i++) { + igt_spin_free(i915, spin[i]); + gem_context_destroy(i915, ctx[i]); + } + + qsort(ts, 3, sizeof(*ts), cmp_u32); + igt_info("%s: [%.1f, %.1f] ms\n", e->name, + 1e-6 * ticks_to_ns(i915, ts[0]), + 1e-6 * ticks_to_ns(i915, ts[2])); + + igt_assert(ts[0] && ts[2] > ts[0]); + igt_assert(4 * ts[0] > 3 * ts[2]); +} + #define test_each_engine(T, i915, e) \ igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \ igt_dynamic_f("%s", e->name) @@ -2571,6 +3355,25 @@ igt_main test_each_engine("lateslice", fd, e) lateslice(fd, e->flags); + igt_subtest_group { + igt_fixture { + igt_require(gem_scheduler_has_semaphores(fd)); + igt_require(gem_scheduler_has_preemption(fd)); + igt_require(intel_gen(intel_get_drm_devid(fd)) >= 8); + } + + test_each_engine("fairslice", fd, e) + fairslice(fd, e); + + 
igt_subtest("fairslice-all") { + __for_each_physical_engine(fd, e) { + igt_fork(child, 1) + fairslice(fd, e); + } + igt_waitchildren(); + } + } + test_each_engine("submit-early-slice", fd, e) submit_slice(fd, e, EARLY_SUBMIT); test_each_engine("submit-golden-slice", fd, e) @@ -2599,6 +3402,8 @@ igt_main test_each_engine_store("promotion", fd, e) promotion(fd, e->flags); + test_fairness(fd, 2); + igt_subtest_group { igt_fixture { igt_require(gem_scheduler_has_preemption(fd)); -- 2.27.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [igt-dev] [PATCH i-g-t 5/6] i915/gem_exec_schedule: Try to spot unfairness @ 2020-07-10 9:32 ` Chris Wilson 0 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Tvrtko Ursulin, Chris Wilson An important property for multi-client systems is that each client gets a 'fair' allotment of system time. (Where fairness is at the whim of the context properties, such as priorities.) This test forks N independent clients (albeit they happen to share a single vm), and does an equal amount of work in client and asserts that they take an equal amount of time. Though we have never claimed to have a completely fair scheduler, that is what is expected. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Cc: Ramalingam C <ramalingam.c@intel.com> --- tests/i915/gem_exec_schedule.c | 805 +++++++++++++++++++++++++++++++++ 1 file changed, 805 insertions(+) diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c index 0e0362502..1b3cbfd74 100644 --- a/tests/i915/gem_exec_schedule.c +++ b/tests/i915/gem_exec_schedule.c @@ -29,6 +29,7 @@ #include <sys/poll.h> #include <sys/ioctl.h> #include <sys/mman.h> +#include <sys/resource.h> #include <sys/syscall.h> #include <sched.h> #include <signal.h> @@ -2505,6 +2506,789 @@ static void measure_semaphore_power(int i915) rapl_close(&pkg); } +static int read_timestamp_frequency(int i915) +{ + int value = 0; + drm_i915_getparam_t gp = { + .value = &value, + .param = I915_PARAM_CS_TIMESTAMP_FREQUENCY, + }; + ioctl(i915, DRM_IOCTL_I915_GETPARAM, &gp); + return value; +} + +static uint64_t div64_u64_round_up(uint64_t x, uint64_t y) +{ + return (x + y - 1) / y; +} + +static uint64_t ns_to_ticks(int i915, uint64_t ns) +{ + return div64_u64_round_up(ns * read_timestamp_frequency(i915), + NSEC_PER_SEC); +} + +static uint64_t ticks_to_ns(int i915, uint64_t ticks) +{ + return div64_u64_round_up(ticks * 
NSEC_PER_SEC, + read_timestamp_frequency(i915)); +} + +#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) + +#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1) +#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2)) +/* Opcodes for MI_MATH_INSTR */ +#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0) +#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2) +#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2) +#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1) +#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1) +#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0) +#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0) +#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0) +#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0) +#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0) +#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2) +#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2) +/* Registers used as operands in MI_MATH_INSTR */ +#define MI_MATH_REG(x) (x) +#define MI_MATH_REG_SRCA 0x20 +#define MI_MATH_REG_SRCB 0x21 +#define MI_MATH_REG_ACCU 0x31 +#define MI_MATH_REG_ZF 0x32 +#define MI_MATH_REG_CF 0x33 + +#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1) + +static void delay(int i915, + const struct intel_execution_engine2 *e, + uint32_t handle, + uint64_t addr, + uint64_t ns) +{ + const int use_64b = intel_gen(intel_get_drm_devid(i915)) >= 8; + const uint32_t base = gem_engine_mmio_base(i915, e->name); +#define CS_GPR(x) (base + 0x600 + 8 * (x)) +#define RUNTIME (base + 0x3a8) + enum { START_TS, NOW_TS }; + uint32_t *map, *cs, *jmp; + + igt_require(base); + + /* Loop until CTX_TIMESTAMP - initial > @ns */ + + cs = map = gem_mmap__device_coherent(i915, handle, 0, 4096, PROT_WRITE); + + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(START_TS) + 4; + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_REG; + *cs++ = RUNTIME; + *cs++ = CS_GPR(START_TS); + + while (offset_in_page(cs) & 63) + *cs++ = 0; + jmp = cs; + + 
*cs++ = 0x5 << 23; /* MI_ARB_CHECK */ + + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(NOW_TS) + 4; + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_REG; + *cs++ = RUNTIME; + *cs++ = CS_GPR(NOW_TS); + + /* delta = now - start; inverted to match COND_BBE */ + *cs++ = MI_MATH(4); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS)); + *cs++ = MI_MATH_SUB; + *cs++ = MI_MATH_STOREINV(MI_MATH_REG(NOW_TS), MI_MATH_REG_ACCU); + + /* Save delta for reading by COND_BBE */ + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = CS_GPR(NOW_TS); + *cs++ = addr + 4000; + *cs++ = addr >> 32; + + /* Delay between SRM and COND_BBE to post the writes */ + for (int n = 0; n < 8; n++) { + *cs++ = MI_STORE_DWORD_IMM; + if (use_64b) { + *cs++ = addr + 4064; + *cs++ = addr >> 32; + } else { + *cs++ = 0; + *cs++ = addr + 4064; + } + *cs++ = 0; + } + + /* Break if delta > ns */ + *cs++ = MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE | (1 + use_64b); + *cs++ = ~ns_to_ticks(i915, ns); + *cs++ = addr + 4000; + *cs++ = addr >> 32; + + /* Otherwise back to recalculating delta */ + *cs++ = MI_BATCH_BUFFER_START | 1 << 8 | use_64b; + *cs++ = addr + offset_in_page(jmp); + *cs++ = addr >> 32; + + munmap(map, 4096); +} + +static struct drm_i915_gem_exec_object2 +delay_create(int i915, uint32_t ctx, + const struct intel_execution_engine2 *e, + uint64_t target_ns) +{ + struct drm_i915_gem_exec_object2 obj = { + .handle = batch_create(i915), + .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS, + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(&obj), + .buffer_count = 1, + .rsvd1 = ctx, + .flags = e->flags, + }; + + obj.offset = obj.handle << 12; + gem_execbuf(i915, &execbuf); + gem_sync(i915, obj.handle); + + delay(i915, e, obj.handle, obj.offset, target_ns); + + obj.flags |= EXEC_OBJECT_PINNED; + return obj; +} + +static void tslog(int i915, + const struct intel_execution_engine2 *e, + uint32_t handle, + 
uint64_t addr) +{ + const int use_64b = intel_gen(intel_get_drm_devid(i915)) >= 8; + const uint32_t base = gem_engine_mmio_base(i915, e->name); +#define CS_GPR(x) (base + 0x600 + 8 * (x)) +#define CS_TIMESTAMP (base + 0x358) + enum { INC, MASK, ADDR }; + uint32_t *timestamp_lo, *addr_lo; + uint32_t *map, *cs; + + igt_require(base); + + map = gem_mmap__device_coherent(i915, handle, 0, 4096, PROT_WRITE); + cs = map + 512; + + /* Record the current CS_TIMESTAMP into a journal [a 512 slot ring]. */ + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = CS_TIMESTAMP; + timestamp_lo = cs; + *cs++ = addr; + *cs++ = addr >> 32; + + /* Load the address + inc & mask variables */ + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(ADDR); + addr_lo = cs; + *cs++ = addr; + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(ADDR) + 4; + *cs++ = addr >> 32; + + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(INC); + *cs++ = 4; + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(INC) + 4; + *cs++ = 0; + + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(MASK); + *cs++ = 0xfffff7ff; + *cs++ = MI_LOAD_REGISTER_IMM; + *cs++ = CS_GPR(MASK) + 4; + *cs++ = 0xffffffff; + + /* Increment the [ring] address for saving CS_TIMESTAMP */ + *cs++ = MI_MATH(8); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(INC)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(ADDR)); + *cs++ = MI_MATH_ADD; + *cs++ = MI_MATH_STORE(MI_MATH_REG(ADDR), MI_MATH_REG_ACCU); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(ADDR)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(MASK)); + *cs++ = MI_MATH_AND; + *cs++ = MI_MATH_STORE(MI_MATH_REG(ADDR), MI_MATH_REG_ACCU); + + /* Rewrite the batch buffer for the next execution */ + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = CS_GPR(ADDR); + *cs++ = addr + offset_in_page(timestamp_lo); + *cs++ = addr >> 32; + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = CS_GPR(ADDR); + *cs++ = addr + offset_in_page(addr_lo); + *cs++ = addr >> 32; + + *cs++ = 
MI_BATCH_BUFFER_END; + + munmap(map, 4096); +} + +static struct drm_i915_gem_exec_object2 +tslog_create(int i915, uint32_t ctx, const struct intel_execution_engine2 *e) +{ + struct drm_i915_gem_exec_object2 obj = { + .handle = batch_create(i915), + .flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS, + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(&obj), + .buffer_count = 1, + .rsvd1 = ctx, + .flags = e->flags, + }; + + obj.offset = obj.handle << 12; + gem_execbuf(i915, &execbuf); + gem_sync(i915, obj.handle); + + tslog(i915, e, obj.handle, obj.offset); + + obj.flags |= EXEC_OBJECT_PINNED; + return obj; +} + +static int cmp_u32(const void *A, const void *B) +{ + const uint32_t *a = A, *b = B; + + if (*a < *b) + return -1; + else if (*a > *b) + return 1; + else + return 0; +} + +static struct intel_execution_engine2 +pick_random_engine(int i915, const struct intel_execution_engine2 *not) +{ + const struct intel_execution_engine2 *e; + unsigned int count = 0; + + __for_each_physical_engine(i915, e) { + if (e->flags == not->flags) + continue; + if (!gem_class_has_mutable_submission(i915, e->class)) + continue; + count++; + } + if (!count) + return *not; + + count = rand() % count; + __for_each_physical_engine(i915, e) { + if (e->flags == not->flags) + continue; + if (!gem_class_has_mutable_submission(i915, e->class)) + continue; + if (!count--) + break; + } + + return *e; +} + +static void fair_child(int i915, uint32_t ctx, + const struct intel_execution_engine2 *e, + uint64_t frame_ns, + int timeline, + uint32_t common, + unsigned int flags, + unsigned long *ctl, + unsigned long *out) +#define F_SYNC (1 << 0) +#define F_PACE (1 << 1) +#define F_FLOW (1 << 2) +#define F_HALF (1 << 3) +#define F_SOLO (1 << 4) +#define F_SPARE (1 << 5) +#define F_NEXT (1 << 6) +#define F_VIP (1 << 7) +#define F_RRUL (1 << 8) +#define F_SHARE (1 << 9) +#define F_PING (1 << 10) +#define F_THROTTLE (1 << 11) +#define F_ISOLATE (1 << 12) +{ + const int 
batches_per_frame = flags & F_SOLO ? 1 : 3; + struct drm_i915_gem_exec_object2 obj[4] = { + {}, + { + .handle = common ?: gem_create(i915, 4096), + }, + delay_create(i915, ctx, e, frame_ns / batches_per_frame), + delay_create(i915, ctx, e, frame_ns / batches_per_frame), + }; + struct intel_execution_engine2 ping = *e; + int p_fence = -1, n_fence = -1; + unsigned long count = 0; + int n; + + srandom(getpid()); + if (flags & F_PING) + ping = pick_random_engine(i915, e); + obj[0] = tslog_create(i915, ctx, &ping); + + while (!READ_ONCE(*ctl)) { + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(obj), + .buffer_count = 4, + .rsvd1 = ctx, + .rsvd2 = -1, + .flags = e->flags, + }; + + if (flags & F_FLOW) { + unsigned int seq; + + seq = count; + if (flags & F_NEXT) + seq++; + + execbuf.rsvd2 = + sw_sync_timeline_create_fence(timeline, seq); + execbuf.flags |= I915_EXEC_FENCE_IN; + } + + execbuf.flags |= I915_EXEC_FENCE_OUT; + gem_execbuf_wr(i915, &execbuf); + n_fence = execbuf.rsvd2 >> 32; + execbuf.flags &= ~(I915_EXEC_FENCE_OUT | I915_EXEC_FENCE_IN); + for (n = 1; n < batches_per_frame; n++) + gem_execbuf(i915, &execbuf); + close(execbuf.rsvd2); + + execbuf.buffer_count = 1; + execbuf.batch_start_offset = 2048; + execbuf.flags = ping.flags | I915_EXEC_FENCE_IN; + execbuf.rsvd2 = n_fence; + gem_execbuf(i915, &execbuf); + + if (flags & F_PACE && p_fence != -1) { + struct pollfd pfd = { + .fd = p_fence, + .events = POLLIN, + }; + poll(&pfd, 1, -1); + } + close(p_fence); + + if (flags & F_SYNC) { + struct pollfd pfd = { + .fd = n_fence, + .events = POLLIN, + }; + poll(&pfd, 1, -1); + } + + if (flags & F_THROTTLE) + igt_ioctl(i915, DRM_IOCTL_I915_GEM_THROTTLE, 0); + + igt_swap(obj[2], obj[3]); + igt_swap(p_fence, n_fence); + count++; + } + close(p_fence); + + gem_close(i915, obj[3].handle); + gem_close(i915, obj[2].handle); + if (obj[1].handle != common) + gem_close(i915, obj[1].handle); + + gem_sync(i915, obj[0].handle); + if (out) { + uint32_t 
*map; + + map = gem_mmap__device_coherent(i915, obj[0].handle, + 0, 4096, PROT_WRITE); + for (n = 1; n < min(count, 512); n++) { + igt_assert(map[n]); + map[n - 1] = map[n] - map[n - 1]; + } + qsort(map, --n, sizeof(*map), cmp_u32); + *out = ticks_to_ns(i915, map[n / 2]); + munmap(map, 4096); + } + gem_close(i915, obj[0].handle); +} + +static int cmp_ul(const void *A, const void *B) +{ + const unsigned long *a = A, *b = B; + + if (*a < *b) + return -1; + else if (*a > *b) + return 1; + else + return 0; +} + +static uint64_t d_cpu_time(const struct rusage *a, const struct rusage *b) +{ + uint64_t cpu_time = 0; + + cpu_time += (a->ru_utime.tv_sec - b->ru_utime.tv_sec) * NSEC_PER_SEC; + cpu_time += (a->ru_utime.tv_usec - b->ru_utime.tv_usec) * 1000; + + cpu_time += (a->ru_stime.tv_sec - b->ru_stime.tv_sec) * NSEC_PER_SEC; + cpu_time += (a->ru_stime.tv_usec - b->ru_stime.tv_usec) * 1000; + + return cpu_time; +} + +static void timeline_advance(int timeline, int delay_ns) +{ + struct timespec tv = { .tv_nsec = delay_ns }; + nanosleep(&tv, NULL); + sw_sync_timeline_inc(timeline, 1); +} + +static void fairness(int i915, + const struct intel_execution_engine2 *e, + int timeout, unsigned int flags) +{ + const int frame_ns = 16666 * 1000; + const int fence_ns = flags & F_HALF ? 
2 * frame_ns : frame_ns; + unsigned long *result; + uint32_t common = 0; + + igt_require(gem_class_has_mutable_submission(i915, e->class)); + + if (flags & F_SHARE) + common = gem_create(i915, 4095); + + result = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0); + + for (int n = 2; n <= 64; n <<= 1) { /* 32 == 500us per client */ + int timeline = sw_sync_timeline_create(); + int nfences = timeout * NSEC_PER_SEC / fence_ns + 1; + const int nchild = n - 1; /* odd for easy medians */ + const int child_ns = frame_ns / (nchild + !!(flags & F_SPARE)); + const int lo = nchild / 4; + const int hi = (3 * nchild + 3) / 4 - 1; + struct rusage old_usage, usage; + uint64_t cpu_time, d_time; + unsigned long vip = -1; + struct timespec tv; + struct igt_mean m; + + if (flags & F_PING) { + struct intel_execution_engine2 *ping; + + __for_each_physical_engine(i915, ping) { + if (ping->flags == e->flags) + continue; + + igt_fork(child, 1) { + uint32_t ctx = gem_context_clone_with_engines(i915, 0); + + fair_child(i915, ctx, ping, + child_ns / 8, + -1, common, + F_SOLO | F_PACE | F_SHARE, + &result[nchild], + NULL); + + gem_context_destroy(i915, ctx); + } + } + } + + memset(result, 0, (nchild + 1) * sizeof(result[0])); + getrusage(RUSAGE_CHILDREN, &old_usage); + igt_nsec_elapsed(memset(&tv, 0, sizeof(tv))); + igt_fork(child, nchild) { + uint32_t ctx; + + if (flags & F_ISOLATE) { + int clone, dmabuf = -1; + + if (common) + dmabuf = prime_handle_to_fd(i915, common); + + clone = gem_reopen_driver(i915); + gem_context_copy_engines(i915, 0, clone, 0); + i915 = clone; + + if (dmabuf != -1) + common = prime_fd_to_handle(i915, dmabuf); + } + + ctx = gem_context_clone_with_engines(i915, 0); + + if (flags & F_VIP && child == 0) { + gem_context_set_priority(i915, ctx, MAX_PRIO); + flags |= F_FLOW; + } + if (flags & F_RRUL && child == 0) + flags |= F_SOLO | F_FLOW | F_SYNC; + + fair_child(i915, ctx, e, child_ns, + timeline, common, flags, + &result[nchild], + &result[child]); + + 
gem_context_destroy(i915, ctx); + } + + while (nfences--) + timeline_advance(timeline, fence_ns); + + result[nchild] = 1; + for (int child = 0; child < nchild; child++) { + while (!READ_ONCE(result[child])) + timeline_advance(timeline, fence_ns); + } + + igt_waitchildren(); + close(timeline); + + /* Are we running out of CPU time, and fail to submit frames? */ + d_time = igt_nsec_elapsed(&tv); + getrusage(RUSAGE_CHILDREN, &usage); + cpu_time = d_cpu_time(&usage, &old_usage); + if (10 * cpu_time > 9 * d_time) { + if (nchild > 7) + break; + + igt_skip_on_f(10 * cpu_time > 9 * d_time, + "%.0f%% CPU usage, presuming capacity exceeded\n", + 100. * cpu_time / d_time); + } + + igt_mean_init(&m); + for (int child = 0; child < nchild; child++) + igt_mean_add(&m, result[child]); + + if (flags & (F_VIP | F_RRUL)) + vip = result[0]; + + qsort(result, nchild, sizeof(*result), cmp_ul); + igt_info("%2d clients, range: [%.1f, %.1f], iqr: [%.1f, %.1f], median: %.1f, mean: %.1f ± %.2f ms\n", + nchild, + 1e-6 * result[0], 1e-6 * result[nchild - 1], + 1e-6 * result[lo], 1e-6 * result[hi], + 1e-6 * result[nchild / 2], + 1e-6 * igt_mean_get(&m), + 1e-6 * sqrt(igt_mean_get_variance(&m))); + + if (vip != -1) { + igt_info("VIP interval %.2f ms\n", 1e-6 * vip); + igt_assert(4 * vip > 3 * fence_ns && + 3 * vip < 4 * fence_ns); + } + + /* May be slowed due to sheer volume of context switches */ + igt_assert(4 * igt_mean_get(&m) > 3 * fence_ns && + igt_mean_get(&m) < 3 * fence_ns); + + igt_assert(4 * igt_mean_get(&m) > 3 * result[nchild / 2] && + 3 * igt_mean_get(&m) < 4 * result[nchild / 2]); + + igt_assert(2 * (result[hi] - result[lo]) < result[nchild / 2]); + } + + munmap(result, 4096); + if (common) + gem_close(i915, common); +} + +static void test_fairness(int i915, int timeout) +{ + static const struct { + const char *name; + unsigned int flags; + } fair[] = { + /* + * none - maximal greed in each client + * + * Push as many frames from each client as fast as possible + */ + { "none", 0 
}, + { "none-vip", F_VIP }, /* one vip client must meet deadlines */ + { "none-solo", F_SOLO }, /* 1 batch per frame per client */ + { "none-share", F_SHARE }, /* read from a common buffer */ + { "none-rrul", F_RRUL }, /* "realtime-response under load" */ + { "none-ping", F_PING }, /* measure inter-engine fairness */ + + /* + * throttle - original per client throttling + * + * Used for front buffering rendering where there is no + * extenal frame marker. Each client tries to only keep + * 20ms of work submitted, though that measurement is + * flawed... + * + * This is used by Xory to try and maintain some resembalance + * of input/output consistency when being feed a continuous + * stream of X11 draw requests straight into scanout, where + * the clients may submit the work faster than can be drawn. + * + * Throttling tracks requests per-file (and assumes that + * all requests are in submission order across the whole file), + * so we split each child to its own fd. + */ + { "throttle", F_THROTTLE | F_ISOLATE }, + { "throttle-vip", F_THROTTLE | F_ISOLATE | F_VIP }, + { "throttle-solo", F_THROTTLE | F_ISOLATE | F_SOLO }, + { "throttle-share", F_THROTTLE | F_ISOLATE | F_SHARE }, + { "throttle-rrul", F_THROTTLE | F_ISOLATE | F_RRUL }, + + /* + * pace - mesa "submit double buffering" + * + * Submit a frame, wait for previous frame to start. This + * prevents each client from getting too far ahead of its + * rendering, maintaining a consistent input/output latency. 
+ */ + { "pace", F_PACE }, + { "pace-solo", F_PACE | F_SOLO}, + { "pace-share", F_PACE | F_SHARE}, + { "pace-ping", F_PACE | F_SHARE | F_PING}, + + /* sync - only submit a frame at a time */ + { "sync", F_SYNC }, + { "sync-vip", F_SYNC | F_VIP }, + { "sync-solo", F_SYNC | F_SOLO }, + + /* flow - synchronise execution against the clock (vblank) */ + { "flow", F_PACE | F_FLOW }, + { "flow-share", F_PACE | F_FLOW | F_SHARE }, + { "flow-ping", F_PACE | F_FLOW | F_SHARE | F_PING }, + + /* next - submit ahead of the clock (vblank double buffering) */ + { "next", F_PACE | F_FLOW | F_NEXT }, + { "next-share", F_PACE | F_FLOW | F_NEXT | F_SHARE }, + { "next-ping", F_PACE | F_FLOW | F_NEXT | F_SHARE | F_PING }, + + /* spare - underutilise by a single client timeslice */ + { "spare", F_PACE | F_FLOW | F_SPARE }, + + /* half - run at half pace (submit 16ms of work every 32ms) */ + { "half", F_PACE | F_FLOW | F_HALF }, + + {} + }; + + for (typeof(*fair) *f = fair; f->name; f++) { + igt_subtest_with_dynamic_f("fair-%s", f->name) { + const struct intel_execution_engine2 *e; + + igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8); + + __for_each_physical_engine(i915, e) { + if (!gem_class_can_store_dword(i915, e->class)) + continue; + + igt_dynamic_f("%s", e->name) + fairness(i915, e, timeout, f->flags); + } + } + } +} + +static uint32_t read_ctx_timestamp(int i915, + uint32_t ctx, + const struct intel_execution_engine2 *e) +{ + const int use_64b = intel_gen(intel_get_drm_devid(i915)) >= 8; + const uint32_t base = gem_engine_mmio_base(i915, e->name); + struct drm_i915_gem_relocation_entry reloc; + struct drm_i915_gem_exec_object2 obj = { + .handle = gem_create(i915, 4096), + .offset = 32 << 20, + .relocs_ptr = to_user_pointer(&reloc), + .relocation_count = 1, + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(&obj), + .buffer_count = 1, + .flags = e->flags, + .rsvd1 = ctx, + }; +#define RUNTIME (base + 0x3a8) + uint32_t *map, *cs; + uint32_t ts; 
+ + igt_require(base); + + cs = map = gem_mmap__device_coherent(i915, obj.handle, + 0, 4096, PROT_WRITE); + + *cs++ = 0x24 << 23 | (1 + use_64b); /* SRM */ + *cs++ = RUNTIME; + memset(&reloc, 0, sizeof(reloc)); + reloc.target_handle = obj.handle; + reloc.presumed_offset = obj.offset; + reloc.offset = offset_in_page(cs); + reloc.delta = 4000; + *cs++ = obj.offset + 4000; + *cs++ = obj.offset >> 32; + + *cs++ = MI_BATCH_BUFFER_END; + + gem_execbuf(i915, &execbuf); + gem_sync(i915, obj.handle); + gem_close(i915, obj.handle); + + ts = map[1000]; + munmap(map, 4096); + + return ts; +} + +static void fairslice(int i915, const struct intel_execution_engine2 *e) +{ + igt_spin_t *spin[3]; + uint32_t ctx[3]; + uint32_t ts[3]; + + for (int i = 0; i < ARRAY_SIZE(ctx); i++) { + ctx[i] = gem_context_clone_with_engines(i915, 0); + spin[i] = igt_spin_new(i915, .ctx = ctx[i], .engine = e->flags); + } + + sleep(2); /* over the course of many timeslices */ + + for (int i = 0; i < ARRAY_SIZE(ctx); i++) { + igt_assert(gem_bo_busy(i915, spin[i]->handle)); + igt_spin_end(spin[i]); + + ts[i] = read_ctx_timestamp(i915, ctx[i], e); + } + + for (int i = 0; i < ARRAY_SIZE(ctx); i++) { + igt_spin_free(i915, spin[i]); + gem_context_destroy(i915, ctx[i]); + } + + qsort(ts, 3, sizeof(*ts), cmp_u32); + igt_info("%s: [%.1f, %.1f] ms\n", e->name, + 1e-6 * ticks_to_ns(i915, ts[0]), + 1e-6 * ticks_to_ns(i915, ts[2])); + + igt_assert(ts[0] && ts[2] > ts[0]); + igt_assert(4 * ts[0] > 3 * ts[2]); +} + #define test_each_engine(T, i915, e) \ igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \ igt_dynamic_f("%s", e->name) @@ -2571,6 +3355,25 @@ igt_main test_each_engine("lateslice", fd, e) lateslice(fd, e->flags); + igt_subtest_group { + igt_fixture { + igt_require(gem_scheduler_has_semaphores(fd)); + igt_require(gem_scheduler_has_preemption(fd)); + igt_require(intel_gen(intel_get_drm_devid(fd)) >= 8); + } + + test_each_engine("fairslice", fd, e) + fairslice(fd, e); + + 
igt_subtest("fairslice-all") { + __for_each_physical_engine(fd, e) { + igt_fork(child, 1) + fairslice(fd, e); + } + igt_waitchildren(); + } + } + test_each_engine("submit-early-slice", fd, e) submit_slice(fd, e, EARLY_SUBMIT); test_each_engine("submit-golden-slice", fd, e) @@ -2599,6 +3402,8 @@ igt_main test_each_engine_store("promotion", fd, e) promotion(fd, e->flags); + test_fairness(fd, 2); + igt_subtest_group { igt_fixture { igt_require(gem_scheduler_has_preemption(fd)); -- 2.27.0 _______________________________________________ igt-dev mailing list igt-dev@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/igt-dev ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [Intel-gfx] [PATCH i-g-t 6/6] i915/gem_softpin: Active rebinds 2020-07-10 9:32 ` [igt-dev] " Chris Wilson @ 2020-07-10 9:32 ` Chris Wilson -1 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson Verify that we do not block userspace [controlling fence progress] if it requires vma recycling. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- tests/i915/gem_softpin.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/i915/gem_softpin.c b/tests/i915/gem_softpin.c index 202abdd88..55ceb1bf2 100644 --- a/tests/i915/gem_softpin.c +++ b/tests/i915/gem_softpin.c @@ -263,6 +263,33 @@ static void test_reverse(int i915) gem_close(i915, handle); } +static void test_active(int i915) +{ + const uint32_t bbe = MI_BATCH_BUFFER_END; + struct drm_i915_gem_exec_object2 obj = { + .handle = gem_create(i915, 4096), + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(&obj), + .buffer_count = 1, + }; + igt_spin_t *spin; + + gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe)); + + /* Make a busy spot */ + spin = igt_spin_new(i915); + + /* Reuse it for ourselves */ + obj.offset = spin->obj[IGT_SPIN_BATCH].offset; + obj.flags = EXEC_OBJECT_PINNED; + gem_execbuf(i915, &execbuf); + igt_assert_eq_u64(obj.offset, spin->obj[IGT_SPIN_BATCH].offset); + + gem_close(i915, obj.handle); + igt_spin_free(i915, spin); +} + static uint64_t busy_batch(int fd) { const int gen = intel_gen(intel_get_drm_devid(fd)); @@ -565,6 +592,8 @@ igt_main test_overlap(fd); igt_subtest("reverse") test_reverse(fd); + igt_subtest("active") + test_active(fd); igt_subtest("noreloc") test_noreloc(fd, NOSLEEP, 0); -- 2.27.0 _______________________________________________ Intel-gfx mailing list Intel-gfx@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/intel-gfx ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [igt-dev] [PATCH i-g-t 6/6] i915/gem_softpin: Active rebinds @ 2020-07-10 9:32 ` Chris Wilson 0 siblings, 0 replies; 16+ messages in thread From: Chris Wilson @ 2020-07-10 9:32 UTC (permalink / raw) To: intel-gfx; +Cc: igt-dev, Chris Wilson Verify that we do not block userspace [controlling fence progress] if it requires vma recycling. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> --- tests/i915/gem_softpin.c | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/tests/i915/gem_softpin.c b/tests/i915/gem_softpin.c index 202abdd88..55ceb1bf2 100644 --- a/tests/i915/gem_softpin.c +++ b/tests/i915/gem_softpin.c @@ -263,6 +263,33 @@ static void test_reverse(int i915) gem_close(i915, handle); } +static void test_active(int i915) +{ + const uint32_t bbe = MI_BATCH_BUFFER_END; + struct drm_i915_gem_exec_object2 obj = { + .handle = gem_create(i915, 4096), + }; + struct drm_i915_gem_execbuffer2 execbuf = { + .buffers_ptr = to_user_pointer(&obj), + .buffer_count = 1, + }; + igt_spin_t *spin; + + gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe)); + + /* Make a busy spot */ + spin = igt_spin_new(i915); + + /* Reuse it for ourselves */ + obj.offset = spin->obj[IGT_SPIN_BATCH].offset; + obj.flags = EXEC_OBJECT_PINNED; + gem_execbuf(i915, &execbuf); + igt_assert_eq_u64(obj.offset, spin->obj[IGT_SPIN_BATCH].offset); + + gem_close(i915, obj.handle); + igt_spin_free(i915, spin); +} + static uint64_t busy_batch(int fd) { const int gen = intel_gen(intel_get_drm_devid(fd)); @@ -565,6 +592,8 @@ igt_main test_overlap(fd); igt_subtest("reverse") test_reverse(fd); + igt_subtest("active") + test_active(fd); igt_subtest("noreloc") test_noreloc(fd, NOSLEEP, 0); -- 2.27.0 _______________________________________________ igt-dev mailing list igt-dev@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/igt-dev ^ permalink raw reply related [flat|nested] 16+ messages in thread
* [igt-dev] ✓ Fi.CI.BAT: success for series starting with [i-g-t,1/6] lib/i915: Report unknown device as the future 2020-07-10 9:32 ` [igt-dev] " Chris Wilson ` (5 preceding siblings ...) (?) @ 2020-07-10 10:17 ` Patchwork -1 siblings, 0 replies; 16+ messages in thread From: Patchwork @ 2020-07-10 10:17 UTC (permalink / raw) To: Chris Wilson; +Cc: igt-dev == Series Details == Series: series starting with [i-g-t,1/6] lib/i915: Report unknown device as the future URL : https://patchwork.freedesktop.org/series/79336/ State : success == Summary == CI Bug Log - changes from CI_DRM_8725 -> IGTPW_4753 ==================================================== Summary ------- **SUCCESS** No regressions found. External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/index.html Known issues ------------ Here are the changes found in IGTPW_4753 that come from known issues: ### IGT changes ### #### Issues hit #### * igt@i915_pm_backlight@basic-brightness: - fi-whl-u: [PASS][1] -> [DMESG-WARN][2] ([i915#95]) [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-whl-u/igt@i915_pm_backlight@basic-brightness.html [2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-whl-u/igt@i915_pm_backlight@basic-brightness.html * igt@i915_pm_rpm@basic-pci-d3-state: - fi-bsw-kefka: [PASS][3] -> [DMESG-WARN][4] ([i915#1982]) [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-bsw-kefka/igt@i915_pm_rpm@basic-pci-d3-state.html [4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-bsw-kefka/igt@i915_pm_rpm@basic-pci-d3-state.html * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic: - fi-bsw-n3050: [PASS][5] -> [DMESG-WARN][6] ([i915#1982]) [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-bsw-n3050/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html [6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-bsw-n3050/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html * igt@kms_flip@basic-flip-vs-wf_vblank@c-edp1: - 
fi-icl-u2: [PASS][7] -> [DMESG-WARN][8] ([i915#1982]) +2 similar issues [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-icl-u2/igt@kms_flip@basic-flip-vs-wf_vblank@c-edp1.html [8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-icl-u2/igt@kms_flip@basic-flip-vs-wf_vblank@c-edp1.html * igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence: - fi-tgl-u2: [PASS][9] -> [DMESG-WARN][10] ([i915#402]) [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-tgl-u2/igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence.html [10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-tgl-u2/igt@kms_pipe_crc_basic@read-crc-pipe-a-frame-sequence.html #### Possible fixes #### * igt@core_auth@basic-auth: - fi-byt-j1900: [DMESG-WARN][11] ([i915#1982]) -> [PASS][12] [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-byt-j1900/igt@core_auth@basic-auth.html [12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-byt-j1900/igt@core_auth@basic-auth.html * igt@i915_selftest@live@gem_contexts: - fi-tgl-u2: [INCOMPLETE][13] ([i915#1932] / [i915#2045]) -> [PASS][14] [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-tgl-u2/igt@i915_selftest@live@gem_contexts.html [14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-tgl-u2/igt@i915_selftest@live@gem_contexts.html * igt@kms_busy@basic@flip: - fi-kbl-x1275: [DMESG-WARN][15] ([i915#62] / [i915#92] / [i915#95]) -> [PASS][16] [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-kbl-x1275/igt@kms_busy@basic@flip.html [16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-kbl-x1275/igt@kms_busy@basic@flip.html * igt@kms_cursor_legacy@basic-flip-after-cursor-atomic: - fi-icl-u2: [DMESG-WARN][17] ([i915#1982]) -> [PASS][18] [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-icl-u2/igt@kms_cursor_legacy@basic-flip-after-cursor-atomic.html [18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-icl-u2/igt@kms_cursor_legacy@basic-flip-after-cursor-atomic.html * 
igt@kms_flip@basic-plain-flip@c-dsi1: - {fi-tgl-dsi}: [DMESG-WARN][19] ([i915#1982]) -> [PASS][20] [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-tgl-dsi/igt@kms_flip@basic-plain-flip@c-dsi1.html [20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-tgl-dsi/igt@kms_flip@basic-plain-flip@c-dsi1.html #### Warnings #### * igt@gem_exec_suspend@basic-s3: - fi-kbl-x1275: [DMESG-WARN][21] ([i915#1982] / [i915#62] / [i915#92] / [i915#95]) -> [DMESG-WARN][22] ([i915#62] / [i915#92]) [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-kbl-x1275/igt@gem_exec_suspend@basic-s3.html [22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-kbl-x1275/igt@gem_exec_suspend@basic-s3.html * igt@kms_force_connector_basic@force-connector-state: - fi-kbl-x1275: [DMESG-WARN][23] ([i915#62] / [i915#92]) -> [DMESG-WARN][24] ([i915#62] / [i915#92] / [i915#95]) +3 similar issues [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-kbl-x1275/igt@kms_force_connector_basic@force-connector-state.html [24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-kbl-x1275/igt@kms_force_connector_basic@force-connector-state.html * igt@kms_force_connector_basic@force-edid: - fi-kbl-x1275: [DMESG-WARN][25] ([i915#62] / [i915#92] / [i915#95]) -> [DMESG-WARN][26] ([i915#62] / [i915#92]) +6 similar issues [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/fi-kbl-x1275/igt@kms_force_connector_basic@force-edid.html [26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/fi-kbl-x1275/igt@kms_force_connector_basic@force-edid.html {name}: This element is suppressed. This means it is ignored when computing the status of the difference (SUCCESS, WARNING, or FAILURE). 
[i915#1932]: https://gitlab.freedesktop.org/drm/intel/issues/1932 [i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982 [i915#2045]: https://gitlab.freedesktop.org/drm/intel/issues/2045 [i915#402]: https://gitlab.freedesktop.org/drm/intel/issues/402 [i915#62]: https://gitlab.freedesktop.org/drm/intel/issues/62 [i915#92]: https://gitlab.freedesktop.org/drm/intel/issues/92 [i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95 Participating hosts (40 -> 33) ------------------------------ Missing (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-byt-clapper fi-bdw-samus Build changes ------------- * CI: CI-20190529 -> None * IGT: IGT_5729 -> IGTPW_4753 CI-20190529: 20190529 CI_DRM_8725: 9f7e9d663d77cf702a3510d9ae9fbf37c813b759 @ git://anongit.freedesktop.org/gfx-ci/linux IGTPW_4753: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/index.html IGT_5729: a048d54f58dd70b07dbeb4541b273ec230ddb586 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools == Testlist changes == +igt@gem_exec_schedule@fairslice +igt@gem_exec_schedule@fairslice-all +igt@gem_exec_schedule@fair-flow +igt@gem_exec_schedule@fair-flow-ping +igt@gem_exec_schedule@fair-flow-share +igt@gem_exec_schedule@fair-half +igt@gem_exec_schedule@fair-next +igt@gem_exec_schedule@fair-next-ping +igt@gem_exec_schedule@fair-next-share +igt@gem_exec_schedule@fair-none +igt@gem_exec_schedule@fair-none-ping +igt@gem_exec_schedule@fair-none-rrul +igt@gem_exec_schedule@fair-none-share +igt@gem_exec_schedule@fair-none-solo +igt@gem_exec_schedule@fair-none-vip +igt@gem_exec_schedule@fair-pace +igt@gem_exec_schedule@fair-pace-ping +igt@gem_exec_schedule@fair-pace-share +igt@gem_exec_schedule@fair-pace-solo +igt@gem_exec_schedule@fair-spare +igt@gem_exec_schedule@fair-sync +igt@gem_exec_schedule@fair-sync-solo +igt@gem_exec_schedule@fair-sync-vip +igt@gem_exec_schedule@fair-throttle +igt@gem_exec_schedule@fair-throttle-rrul +igt@gem_exec_schedule@fair-throttle-share 
+igt@gem_exec_schedule@fair-throttle-solo +igt@gem_exec_schedule@fair-throttle-vip +igt@gem_softpin@active == Logs == For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/index.html _______________________________________________ igt-dev mailing list igt-dev@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/igt-dev ^ permalink raw reply [flat|nested] 16+ messages in thread
* [igt-dev] ✗ Fi.CI.IGT: failure for series starting with [i-g-t,1/6] lib/i915: Report unknown device as the future 2020-07-10 9:32 ` [igt-dev] " Chris Wilson ` (6 preceding siblings ...) (?) @ 2020-07-10 12:06 ` Patchwork -1 siblings, 0 replies; 16+ messages in thread From: Patchwork @ 2020-07-10 12:06 UTC (permalink / raw) To: Chris Wilson; +Cc: igt-dev == Series Details == Series: series starting with [i-g-t,1/6] lib/i915: Report unknown device as the future URL : https://patchwork.freedesktop.org/series/79336/ State : failure == Summary == CI Bug Log - changes from CI_DRM_8725_full -> IGTPW_4753_full ==================================================== Summary ------- **FAILURE** Serious unknown changes coming with IGTPW_4753_full absolutely need to be verified manually. If you think the reported changes have nothing to do with the changes introduced in IGTPW_4753_full, please notify your bug team to allow them to document this new failure mode, which will reduce false positives in CI. 
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/index.html Possible new issues ------------------- Here are the unknown changes that may have been introduced in IGTPW_4753_full: ### IGT changes ### #### Possible regressions #### * {igt@gem_exec_schedule@fair-next-ping@vecs0} (NEW): - shard-iclb: NOTRUN -> [SKIP][1] +15 similar issues [1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-iclb1/igt@gem_exec_schedule@fair-next-ping@vecs0.html * {igt@gem_exec_schedule@fair-none-ping@rcs0} (NEW): - shard-tglb: NOTRUN -> [INCOMPLETE][2] +4 similar issues [2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-tglb5/igt@gem_exec_schedule@fair-none-ping@rcs0.html * {igt@gem_exec_schedule@fair-none-solo@rcs0} (NEW): - shard-kbl: NOTRUN -> [FAIL][3] +8 similar issues [3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-kbl6/igt@gem_exec_schedule@fair-none-solo@rcs0.html - shard-tglb: NOTRUN -> [FAIL][4] +32 similar issues [4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-tglb3/igt@gem_exec_schedule@fair-none-solo@rcs0.html * {igt@gem_exec_schedule@fair-pace-ping@rcs0} (NEW): - shard-tglb: NOTRUN -> [SKIP][5] +1 similar issue [5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-tglb1/igt@gem_exec_schedule@fair-pace-ping@rcs0.html * {igt@gem_exec_schedule@fair-pace-solo@vecs0} (NEW): - shard-apl: NOTRUN -> [FAIL][6] +4 similar issues [6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-apl1/igt@gem_exec_schedule@fair-pace-solo@vecs0.html * {igt@gem_exec_schedule@fair-throttle-solo@vcs0} (NEW): - shard-glk: NOTRUN -> [FAIL][7] +7 similar issues [7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-glk4/igt@gem_exec_schedule@fair-throttle-solo@vcs0.html * {igt@gem_exec_schedule@fair-throttle@rcs0} (NEW): - shard-iclb: NOTRUN -> [FAIL][8] +34 similar issues [8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-iclb3/igt@gem_exec_schedule@fair-throttle@rcs0.html * 
igt@perf@enable-disable: - shard-hsw: [PASS][9] -> [INCOMPLETE][10] +1 similar issue [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-hsw5/igt@perf@enable-disable.html [10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-hsw8/igt@perf@enable-disable.html New tests --------- New tests have been introduced between CI_DRM_8725_full and IGTPW_4753_full: ### New IGT tests (157) ### * igt@gem_exec_schedule@fair-flow: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-flow-ping: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-flow-ping@bcs0: - Statuses : 1 incomplete(s) 4 skip(s) - Exec time: [0.0, 6.38] s * igt@gem_exec_schedule@fair-flow-ping@rcs0: - Statuses : 5 skip(s) - Exec time: [2.12, 6.36] s * igt@gem_exec_schedule@fair-flow-ping@vcs0: - Statuses : 4 skip(s) - Exec time: [2.15, 6.37] s * igt@gem_exec_schedule@fair-flow-ping@vcs1: - Statuses : 1 skip(s) - Exec time: [2.16] s * igt@gem_exec_schedule@fair-flow-ping@vecs0: - Statuses : 4 skip(s) - Exec time: [2.15, 6.36] s * igt@gem_exec_schedule@fair-flow-share: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-flow-share@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 12.99] s * igt@gem_exec_schedule@fair-flow-share@rcs0: - Statuses : 5 pass(s) - Exec time: [8.80, 13.42] s * igt@gem_exec_schedule@fair-flow-share@vcs0: - Statuses : 5 pass(s) - Exec time: [8.90, 13.60] s * igt@gem_exec_schedule@fair-flow-share@vcs1: - Statuses : 2 pass(s) - Exec time: [12.91, 13.67] s * igt@gem_exec_schedule@fair-flow-share@vecs0: - Statuses : 5 pass(s) - Exec time: [8.89, 13.61] s * igt@gem_exec_schedule@fair-flow@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 12.96] s * igt@gem_exec_schedule@fair-flow@rcs0: - Statuses : 5 pass(s) - Exec time: [8.83, 13.44] s * igt@gem_exec_schedule@fair-flow@vcs0: - Statuses : 5 pass(s) - Exec time: [8.90, 13.59] s * igt@gem_exec_schedule@fair-flow@vcs1: - Statuses : 2 pass(s) - Exec 
time: [12.91, 13.59] s * igt@gem_exec_schedule@fair-flow@vecs0: - Statuses : 5 pass(s) - Exec time: [8.91, 13.63] s * igt@gem_exec_schedule@fair-half: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-half@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 13.31] s * igt@gem_exec_schedule@fair-half@rcs0: - Statuses : 5 pass(s) - Exec time: [11.47, 14.97] s * igt@gem_exec_schedule@fair-half@vcs0: - Statuses : 5 pass(s) - Exec time: [11.51, 14.95] s * igt@gem_exec_schedule@fair-half@vcs1: - Statuses : 3 pass(s) - Exec time: [11.50, 13.32] s * igt@gem_exec_schedule@fair-half@vecs0: - Statuses : 5 pass(s) - Exec time: [11.49, 14.93] s * igt@gem_exec_schedule@fair-next: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-next-ping: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-next-ping@bcs0: - Statuses : 4 skip(s) - Exec time: [0.0, 6.37] s * igt@gem_exec_schedule@fair-next-ping@rcs0: - Statuses : 1 incomplete(s) 4 skip(s) - Exec time: [0.0, 6.37] s * igt@gem_exec_schedule@fair-next-ping@vcs0: - Statuses : 4 skip(s) - Exec time: [2.15, 6.35] s * igt@gem_exec_schedule@fair-next-ping@vcs1: - Statuses : 2 skip(s) - Exec time: [2.14, 6.36] s * igt@gem_exec_schedule@fair-next-ping@vecs0: - Statuses : 4 skip(s) - Exec time: [2.15, 6.36] s * igt@gem_exec_schedule@fair-next-share: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-next-share@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 13.02] s * igt@gem_exec_schedule@fair-next-share@rcs0: - Statuses : 5 pass(s) - Exec time: [8.78, 13.45] s * igt@gem_exec_schedule@fair-next-share@vcs0: - Statuses : 5 pass(s) - Exec time: [8.82, 13.60] s * igt@gem_exec_schedule@fair-next-share@vcs1: - Statuses : 2 pass(s) - Exec time: [12.93, 13.62] s * igt@gem_exec_schedule@fair-next-share@vecs0: - Statuses : 5 pass(s) - Exec time: [8.91, 13.59] s * igt@gem_exec_schedule@fair-next@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: 
[0.0, 12.96] s * igt@gem_exec_schedule@fair-next@rcs0: - Statuses : 5 pass(s) - Exec time: [8.81, 13.34] s * igt@gem_exec_schedule@fair-next@vcs0: - Statuses : 5 pass(s) - Exec time: [8.91, 13.49] s * igt@gem_exec_schedule@fair-next@vcs1: - Statuses : 2 pass(s) - Exec time: [12.93, 13.52] s * igt@gem_exec_schedule@fair-next@vecs0: - Statuses : 5 pass(s) - Exec time: [8.91, 13.51] s * igt@gem_exec_schedule@fair-none: - Statuses : - Exec time: [None] s * igt@gem_exec_schedule@fair-none-ping: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-none-ping@bcs0: - Statuses : 4 skip(s) - Exec time: [0.0, 8.50] s * igt@gem_exec_schedule@fair-none-ping@rcs0: - Statuses : 1 incomplete(s) 4 skip(s) - Exec time: [0.0, 8.24] s * igt@gem_exec_schedule@fair-none-ping@vcs0: - Statuses : 4 skip(s) - Exec time: [2.62, 8.30] s * igt@gem_exec_schedule@fair-none-ping@vcs1: - Statuses : 1 skip(s) - Exec time: [2.62] s * igt@gem_exec_schedule@fair-none-ping@vecs0: - Statuses : 4 skip(s) - Exec time: [2.59, 8.43] s * igt@gem_exec_schedule@fair-none-rrul: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-none-rrul@bcs0: - Statuses : 1 fail(s) 1 pass(s) 3 skip(s) - Exec time: [0.0, 11.96] s * igt@gem_exec_schedule@fair-none-rrul@rcs0: - Statuses : 3 fail(s) 2 pass(s) - Exec time: [2.19, 11.64] s * igt@gem_exec_schedule@fair-none-rrul@vcs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [2.06, 15.15] s * igt@gem_exec_schedule@fair-none-rrul@vcs1: - Statuses : 1 fail(s) 1 pass(s) - Exec time: [9.15, 12.43] s * igt@gem_exec_schedule@fair-none-rrul@vecs0: - Statuses : 1 fail(s) 1 incomplete(s) 3 pass(s) - Exec time: [0.0, 15.48] s * igt@gem_exec_schedule@fair-none-share: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-none-share@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 13.27] s * igt@gem_exec_schedule@fair-none-share@rcs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [9.56, 13.04] s * 
igt@gem_exec_schedule@fair-none-share@vcs0: - Statuses : 4 pass(s) 1 skip(s) - Exec time: [7.53, 13.32] s * igt@gem_exec_schedule@fair-none-share@vcs1: - Statuses : 1 fail(s) 2 pass(s) - Exec time: [10.04, 13.34] s * igt@gem_exec_schedule@fair-none-share@vecs0: - Statuses : 1 fail(s) 3 pass(s) 1 skip(s) - Exec time: [7.55, 13.34] s * igt@gem_exec_schedule@fair-none-solo: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-none-solo@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 19.32] s * igt@gem_exec_schedule@fair-none-solo@rcs0: - Statuses : 4 fail(s) 1 pass(s) - Exec time: [13.13, 19.05] s * igt@gem_exec_schedule@fair-none-solo@vcs0: - Statuses : 5 pass(s) - Exec time: [14.99, 19.18] s * igt@gem_exec_schedule@fair-none-solo@vcs1: - Statuses : 3 pass(s) - Exec time: [16.72, 19.39] s * igt@gem_exec_schedule@fair-none-solo@vecs0: - Statuses : 5 pass(s) - Exec time: [15.77, 19.40] s * igt@gem_exec_schedule@fair-none-vip: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-none-vip@bcs0: - Statuses : 1 fail(s) 1 pass(s) 3 skip(s) - Exec time: [0.0, 12.64] s * igt@gem_exec_schedule@fair-none-vip@rcs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [2.55, 13.78] s * igt@gem_exec_schedule@fair-none-vip@vcs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [2.58, 13.71] s * igt@gem_exec_schedule@fair-none-vip@vcs1: - Statuses : 1 fail(s) 1 pass(s) - Exec time: [12.57, 13.88] s * igt@gem_exec_schedule@fair-none-vip@vecs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [2.58, 16.32] s * igt@gem_exec_schedule@fair-none@bcs0: - Statuses : 1 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-none@rcs0: - Statuses : 1 pass(s) - Exec time: [13.21] s * igt@gem_exec_schedule@fair-none@vcs0: - Statuses : 1 pass(s) - Exec time: [10.90] s * igt@gem_exec_schedule@fair-none@vecs0: - Statuses : 1 pass(s) - Exec time: [10.64] s * igt@gem_exec_schedule@fair-pace: - Statuses : 2 skip(s) - Exec time: [0.0] s * 
igt@gem_exec_schedule@fair-pace-ping: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-pace-ping@bcs0: - Statuses : 1 incomplete(s) 4 skip(s) - Exec time: [0.0, 6.35] s * igt@gem_exec_schedule@fair-pace-ping@rcs0: - Statuses : 5 skip(s) - Exec time: [2.11, 6.37] s * igt@gem_exec_schedule@fair-pace-ping@vcs0: - Statuses : 4 skip(s) - Exec time: [2.11, 6.35] s * igt@gem_exec_schedule@fair-pace-ping@vcs1: - Statuses : 2 skip(s) - Exec time: [2.15, 6.37] s * igt@gem_exec_schedule@fair-pace-ping@vecs0: - Statuses : 4 skip(s) - Exec time: [2.11, 6.37] s * igt@gem_exec_schedule@fair-pace-share: - Statuses : - Exec time: [None] s * igt@gem_exec_schedule@fair-pace-solo: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-pace-solo@bcs0: - Statuses : 2 fail(s) 3 skip(s) - Exec time: [0.0, 10.57] s * igt@gem_exec_schedule@fair-pace-solo@rcs0: - Statuses : 5 fail(s) - Exec time: [8.51, 10.66] s * igt@gem_exec_schedule@fair-pace-solo@vcs0: - Statuses : 5 fail(s) - Exec time: [8.49, 10.59] s * igt@gem_exec_schedule@fair-pace-solo@vcs1: - Statuses : 2 fail(s) - Exec time: [8.49, 10.50] s * igt@gem_exec_schedule@fair-pace-solo@vecs0: - Statuses : 5 fail(s) - Exec time: [8.50, 10.60] s * igt@gem_exec_schedule@fair-pace@bcs0: - Statuses : 2 fail(s) 3 skip(s) - Exec time: [0.0, 10.63] s * igt@gem_exec_schedule@fair-pace@rcs0: - Statuses : 3 fail(s) 2 pass(s) - Exec time: [8.68, 12.09] s * igt@gem_exec_schedule@fair-pace@vcs0: - Statuses : 4 fail(s) 1 pass(s) - Exec time: [8.72, 11.49] s * igt@gem_exec_schedule@fair-pace@vcs1: - Statuses : 2 fail(s) - Exec time: [8.53, 10.56] s * igt@gem_exec_schedule@fair-pace@vecs0: - Statuses : 4 fail(s) 1 pass(s) - Exec time: [8.65, 11.52] s * igt@gem_exec_schedule@fair-spare: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-spare@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 13.02] s * igt@gem_exec_schedule@fair-spare@rcs0: - Statuses : 5 pass(s) - Exec time: [8.92, 
13.54] s * igt@gem_exec_schedule@fair-spare@vcs0: - Statuses : 5 pass(s) - Exec time: [8.87, 13.68] s * igt@gem_exec_schedule@fair-spare@vcs1: - Statuses : 2 pass(s) - Exec time: [12.93, 13.68] s * igt@gem_exec_schedule@fair-spare@vecs0: - Statuses : 5 pass(s) - Exec time: [8.94, 13.66] s * igt@gem_exec_schedule@fair-sync: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-sync-solo: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-sync-solo@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 12.81] s * igt@gem_exec_schedule@fair-sync-solo@rcs0: - Statuses : 5 pass(s) - Exec time: [11.10, 15.35] s * igt@gem_exec_schedule@fair-sync-solo@vcs0: - Statuses : 5 pass(s) - Exec time: [11.03, 15.98] s * igt@gem_exec_schedule@fair-sync-solo@vcs1: - Statuses : 2 pass(s) - Exec time: [10.67, 12.72] s * igt@gem_exec_schedule@fair-sync-solo@vecs0: - Statuses : 5 pass(s) - Exec time: [11.01, 15.77] s * igt@gem_exec_schedule@fair-sync-vip: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-sync-vip@bcs0: - Statuses : 1 fail(s) 1 pass(s) 3 skip(s) - Exec time: [0.0, 12.82] s * igt@gem_exec_schedule@fair-sync-vip@rcs0: - Statuses : 1 fail(s) 4 pass(s) - Exec time: [2.19, 13.48] s * igt@gem_exec_schedule@fair-sync-vip@vcs0: - Statuses : 1 fail(s) 4 pass(s) - Exec time: [2.08, 13.46] s * igt@gem_exec_schedule@fair-sync-vip@vcs1: - Statuses : 2 pass(s) - Exec time: [12.81, 13.42] s * igt@gem_exec_schedule@fair-sync-vip@vecs0: - Statuses : 1 fail(s) 4 pass(s) - Exec time: [2.08, 13.46] s * igt@gem_exec_schedule@fair-sync@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 12.85] s * igt@gem_exec_schedule@fair-sync@rcs0: - Statuses : 5 pass(s) - Exec time: [8.59, 13.25] s * igt@gem_exec_schedule@fair-sync@vcs0: - Statuses : 5 pass(s) - Exec time: [8.54, 13.28] s * igt@gem_exec_schedule@fair-sync@vcs1: - Statuses : 2 pass(s) - Exec time: [8.47, 12.77] s * igt@gem_exec_schedule@fair-sync@vecs0: - Statuses : 5 
pass(s) - Exec time: [8.54, 13.27] s * igt@gem_exec_schedule@fair-throttle: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-throttle-rrul: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-throttle-rrul@bcs0: - Statuses : 1 fail(s) 3 skip(s) - Exec time: [0.0, 2.06] s * igt@gem_exec_schedule@fair-throttle-rrul@rcs0: - Statuses : 1 fail(s) 3 pass(s) - Exec time: [2.09, 11.88] s * igt@gem_exec_schedule@fair-throttle-rrul@vcs0: - Statuses : 1 fail(s) 3 pass(s) - Exec time: [2.07, 12.19] s * igt@gem_exec_schedule@fair-throttle-rrul@vcs1: - Statuses : 1 fail(s) 1 pass(s) - Exec time: [2.07, 11.29] s * igt@gem_exec_schedule@fair-throttle-rrul@vecs0: - Statuses : 1 fail(s) 3 pass(s) - Exec time: [2.07, 12.13] s * igt@gem_exec_schedule@fair-throttle-share: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-throttle-share@bcs0: - Statuses : 1 fail(s) 1 pass(s) 3 skip(s) - Exec time: [0.0, 11.76] s * igt@gem_exec_schedule@fair-throttle-share@rcs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [8.78, 11.86] s * igt@gem_exec_schedule@fair-throttle-share@vcs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [8.92, 12.36] s * igt@gem_exec_schedule@fair-throttle-share@vcs1: - Statuses : 1 fail(s) 2 pass(s) - Exec time: [11.46, 14.31] s * igt@gem_exec_schedule@fair-throttle-share@vecs0: - Statuses : 1 fail(s) 4 pass(s) - Exec time: [8.80, 14.77] s * igt@gem_exec_schedule@fair-throttle-solo: - Statuses : - Exec time: [None] s * igt@gem_exec_schedule@fair-throttle-solo@bcs0: - Statuses : 1 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fair-throttle-solo@rcs0: - Statuses : 1 fail(s) - Exec time: [12.70] s * igt@gem_exec_schedule@fair-throttle-solo@vcs0: - Statuses : 1 fail(s) - Exec time: [13.39] s * igt@gem_exec_schedule@fair-throttle-solo@vecs0: - Statuses : 1 fail(s) - Exec time: [13.96] s * igt@gem_exec_schedule@fair-throttle-vip: - Statuses : 2 skip(s) - Exec time: [0.0] s * 
igt@gem_exec_schedule@fair-throttle-vip@bcs0: - Statuses : 1 fail(s) 1 pass(s) 3 skip(s) - Exec time: [0.0, 14.34] s * igt@gem_exec_schedule@fair-throttle-vip@rcs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [2.19, 12.20] s * igt@gem_exec_schedule@fair-throttle-vip@vcs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [2.10, 13.09] s * igt@gem_exec_schedule@fair-throttle-vip@vcs1: - Statuses : 1 fail(s) 1 pass(s) - Exec time: [12.03, 12.59] s * igt@gem_exec_schedule@fair-throttle-vip@vecs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [2.10, 13.18] s * igt@gem_exec_schedule@fair-throttle@bcs0: - Statuses : 2 pass(s) 3 skip(s) - Exec time: [0.0, 14.80] s * igt@gem_exec_schedule@fair-throttle@rcs0: - Statuses : 2 fail(s) 3 pass(s) - Exec time: [9.06, 12.00] s * igt@gem_exec_schedule@fair-throttle@vcs0: - Statuses : 1 fail(s) 4 pass(s) - Exec time: [8.92, 14.48] s * igt@gem_exec_schedule@fair-throttle@vcs1: - Statuses : 1 fail(s) 1 pass(s) - Exec time: [11.54, 11.64] s * igt@gem_exec_schedule@fair-throttle@vecs0: - Statuses : 1 fail(s) 4 pass(s) - Exec time: [9.01, 14.77] s * igt@gem_exec_schedule@fairslice: - Statuses : 2 skip(s) - Exec time: [0.0] s * igt@gem_exec_schedule@fairslice-all: - Statuses : 5 pass(s) 2 skip(s) - Exec time: [0.0, 2.08] s * igt@gem_exec_schedule@fairslice@bcs0: - Statuses : 5 pass(s) - Exec time: [2.01, 2.02] s * igt@gem_exec_schedule@fairslice@rcs0: - Statuses : 5 pass(s) - Exec time: [2.01, 2.05] s * igt@gem_exec_schedule@fairslice@vcs0: - Statuses : 5 pass(s) - Exec time: [2.01, 2.02] s * igt@gem_exec_schedule@fairslice@vcs1: - Statuses : 3 pass(s) - Exec time: [2.01, 2.02] s * igt@gem_exec_schedule@fairslice@vecs0: - Statuses : 5 pass(s) - Exec time: [2.01, 2.02] s * igt@gem_softpin@active: - Statuses : 2 pass(s) 5 timeout(s) - Exec time: [5.73, 120.48] s Known issues ------------ Here are the changes found in IGTPW_4753_full that come from known issues: ### IGT changes ### #### Issues hit #### * igt@gem_render_copy@x-tiled: - 
shard-tglb: [PASS][11] -> [DMESG-WARN][12] ([i915#402]) [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-tglb5/igt@gem_render_copy@x-tiled.html [12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-tglb6/igt@gem_render_copy@x-tiled.html * igt@gen9_exec_parse@allowed-all: - shard-apl: [PASS][13] -> [DMESG-WARN][14] ([i915#1436] / [i915#716]) [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-apl7/igt@gen9_exec_parse@allowed-all.html [14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-apl6/igt@gen9_exec_parse@allowed-all.html * igt@i915_selftest@live@execlists: - shard-kbl: [PASS][15] -> [INCOMPLETE][16] ([i915#794]) [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-kbl6/igt@i915_selftest@live@execlists.html [16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-kbl2/igt@i915_selftest@live@execlists.html * igt@kms_atomic_interruptible@legacy-pageflip: - shard-snb: [PASS][17] -> [SKIP][18] ([fdo#109271]) +1 similar issue [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-snb2/igt@kms_atomic_interruptible@legacy-pageflip.html [18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-snb6/igt@kms_atomic_interruptible@legacy-pageflip.html * igt@kms_big_fb@x-tiled-64bpp-rotate-0: - shard-glk: [PASS][19] -> [DMESG-FAIL][20] ([i915#118] / [i915#95]) [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-glk7/igt@kms_big_fb@x-tiled-64bpp-rotate-0.html [20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-glk8/igt@kms_big_fb@x-tiled-64bpp-rotate-0.html * igt@kms_cursor_crc@pipe-a-cursor-64x21-offscreen: - shard-kbl: [PASS][21] -> [DMESG-FAIL][22] ([i915#54] / [i915#95]) +2 similar issues [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-kbl3/igt@kms_cursor_crc@pipe-a-cursor-64x21-offscreen.html [22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-kbl7/igt@kms_cursor_crc@pipe-a-cursor-64x21-offscreen.html * 
igt@kms_cursor_edge_walk@pipe-b-128x128-left-edge: - shard-glk: [PASS][23] -> [DMESG-WARN][24] ([i915#1982]) [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-glk8/igt@kms_cursor_edge_walk@pipe-b-128x128-left-edge.html [24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-glk6/igt@kms_cursor_edge_walk@pipe-b-128x128-left-edge.html * igt@kms_cursor_legacy@cursor-vs-flip-varying-size: - shard-apl: [PASS][25] -> [DMESG-WARN][26] ([i915#1635] / [i915#95]) +37 similar issues [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-apl2/igt@kms_cursor_legacy@cursor-vs-flip-varying-size.html [26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-apl1/igt@kms_cursor_legacy@cursor-vs-flip-varying-size.html * igt@kms_fbcon_fbt@fbc: - shard-kbl: [PASS][27] -> [FAIL][28] ([i915#64]) [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-kbl7/igt@kms_fbcon_fbt@fbc.html [28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-kbl1/igt@kms_fbcon_fbt@fbc.html - shard-apl: [PASS][29] -> [FAIL][30] ([i915#1525]) [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-apl4/igt@kms_fbcon_fbt@fbc.html [30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-apl1/igt@kms_fbcon_fbt@fbc.html * igt@kms_flip@flip-vs-suspend@c-dp1: - shard-kbl: [PASS][31] -> [DMESG-WARN][32] ([i915#180]) +7 similar issues [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-kbl4/igt@kms_flip@flip-vs-suspend@c-dp1.html [32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-kbl3/igt@kms_flip@flip-vs-suspend@c-dp1.html * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-pwrite: - shard-tglb: [PASS][33] -> [DMESG-WARN][34] ([i915#1982]) +4 similar issues [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-tglb2/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-pwrite.html [34]: 
https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-tglb2/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-pri-indfb-draw-pwrite.html * igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-onoff: - shard-glk: [PASS][35] -> [FAIL][36] ([i915#49]) [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-glk6/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-onoff.html [36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-glk1/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-onoff.html * igt@kms_pipe_crc_basic@read-crc-pipe-a: - shard-apl: [PASS][37] -> [DMESG-FAIL][38] ([i915#1635] / [i915#95]) [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-apl3/igt@kms_pipe_crc_basic@read-crc-pipe-a.html [38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-apl6/igt@kms_pipe_crc_basic@read-crc-pipe-a.html - shard-kbl: [PASS][39] -> [DMESG-FAIL][40] ([i915#95]) [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-kbl4/igt@kms_pipe_crc_basic@read-crc-pipe-a.html [40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-kbl3/igt@kms_pipe_crc_basic@read-crc-pipe-a.html * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b: - shard-kbl: [PASS][41] -> [INCOMPLETE][42] ([CI#80] / [i915#155]) [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-kbl1/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b.html [42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-kbl2/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b.html * igt@kms_plane_alpha_blend@pipe-a-coverage-7efc: - shard-apl: [PASS][43] -> [DMESG-FAIL][44] ([fdo#108145] / [i915#1635] / [i915#95]) [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-apl6/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html [44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-apl4/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html * igt@kms_plane_scaling@pipe-a-scaler-with-clipping-clamping: - shard-iclb: [PASS][45] -> [DMESG-WARN][46] 
([i915#1982]) [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-iclb2/igt@kms_plane_scaling@pipe-a-scaler-with-clipping-clamping.html [46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-iclb3/igt@kms_plane_scaling@pipe-a-scaler-with-clipping-clamping.html * igt@kms_psr@no_drrs: - shard-iclb: [PASS][47] -> [FAIL][48] ([i915#173]) [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-iclb4/igt@kms_psr@no_drrs.html [48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-iclb1/igt@kms_psr@no_drrs.html * igt@kms_psr@psr2_cursor_render: - shard-iclb: [PASS][49] -> [SKIP][50] ([fdo#109441]) +3 similar issues [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-iclb2/igt@kms_psr@psr2_cursor_render.html [50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-iclb5/igt@kms_psr@psr2_cursor_render.html * igt@kms_rmfb@rmfb-ioctl: - shard-kbl: [PASS][51] -> [DMESG-WARN][52] ([i915#93] / [i915#95]) +37 similar issues [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-kbl2/igt@kms_rmfb@rmfb-ioctl.html [52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-kbl7/igt@kms_rmfb@rmfb-ioctl.html * igt@kms_setmode@basic: - shard-kbl: [PASS][53] -> [FAIL][54] ([i915#31]) [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-kbl1/igt@kms_setmode@basic.html [54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-kbl3/igt@kms_setmode@basic.html * igt@perf_pmu@frequency-idle: - shard-snb: [PASS][55] -> [TIMEOUT][56] ([i915#1958] / [i915#2119]) [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-snb2/igt@perf_pmu@frequency-idle.html [56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-snb2/igt@perf_pmu@frequency-idle.html - shard-hsw: [PASS][57] -> [TIMEOUT][58] ([i915#1958] / [i915#2119]) [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-hsw8/igt@perf_pmu@frequency-idle.html [58]: 
https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-hsw8/igt@perf_pmu@frequency-idle.html #### Possible fixes #### * igt@gem_eio@reset-stress: - shard-glk: [INCOMPLETE][59] ([i915#58] / [k.org#198133]) -> [PASS][60] [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-glk4/igt@gem_eio@reset-stress.html [60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-glk5/igt@gem_eio@reset-stress.html * igt@gem_exec_gttfill@all: - shard-glk: [DMESG-WARN][61] ([i915#118] / [i915#95]) -> [PASS][62] +1 similar issue [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-glk5/igt@gem_exec_gttfill@all.html [62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-glk9/igt@gem_exec_gttfill@all.html * igt@gem_userptr_blits@create-destroy-sync: - shard-hsw: [TIMEOUT][63] ([i915#1958] / [i915#2119]) -> [PASS][64] +1 similar issue [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-hsw4/igt@gem_userptr_blits@create-destroy-sync.html [64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/shard-hsw1/igt@gem_userptr_blits@create-destroy-sync.html * igt@i915_module_load@reload: - shard-tglb: [DMESG-WARN][65] ([i915#402]) -> [PASS][66] +2 similar issues [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8725/shard-tglb1/igt@i915_module_load@reload.html [66]: https://intel-gfx-ci.01.org/ == Logs == For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4753/index.html _______________________________________________ igt-dev mailing list igt-dev@lists.freedesktop.org https://lists.freedesktop.org/mailman/listinfo/igt-dev ^ permalink raw reply [flat|nested] 16+ messages in thread
end of thread, other threads:[~2020-07-10 14:11 UTC | newest] Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2020-07-10 9:32 [Intel-gfx] [PATCH i-g-t 1/6] lib/i915: Report unknown device as the future Chris Wilson 2020-07-10 9:32 ` [igt-dev] " Chris Wilson 2020-07-10 9:32 ` [Intel-gfx] [PATCH i-g-t 2/6] tools: Use the gt number stored in the device info Chris Wilson 2020-07-10 9:32 ` [igt-dev] " Chris Wilson 2020-07-10 9:32 ` [Intel-gfx] [PATCH i-g-t 3/6] lib/i915: Pick a subtest conformant name for an unknown engine Chris Wilson 2020-07-10 9:32 ` [igt-dev] " Chris Wilson 2020-07-10 9:32 ` [Intel-gfx] [PATCH i-g-t 4/6] i915/gem_close: Adapt to allow duplicate handles Chris Wilson 2020-07-10 9:32 ` [igt-dev] " Chris Wilson 2020-07-10 14:11 ` [Intel-gfx] " Tvrtko Ursulin 2020-07-10 14:11 ` Tvrtko Ursulin 2020-07-10 9:32 ` [Intel-gfx] [PATCH i-g-t 5/6] i915/gem_exec_schedule: Try to spot unfairness Chris Wilson 2020-07-10 9:32 ` [igt-dev] " Chris Wilson 2020-07-10 9:32 ` [Intel-gfx] [PATCH i-g-t 6/6] i915/gem_softpin: Active rebinds Chris Wilson 2020-07-10 9:32 ` [igt-dev] " Chris Wilson 2020-07-10 10:17 ` [igt-dev] ✓ Fi.CI.BAT: success for series starting with [i-g-t,1/6] lib/i915: Report unknown device as the future Patchwork 2020-07-10 12:06 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.