* [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
@ 2020-07-16 20:44 ` Chris Wilson
0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-07-16 20:44 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev, Chris Wilson
This is an attempt to chase down some preempt-to-busy races with
breadcrumb signaling on the virtual engines. By using more semaphore
spinners than available engines, we encourage very short timeslices, and
we give each batch a random duration to try to make the end of a
batch coincide with the context being scheduled out.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
tests/i915/gem_exec_balancer.c | 109 +++++++++++++++++++++++++++++++++
1 file changed, 109 insertions(+)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index c5c0055fc..e4d9e0464 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -2240,6 +2240,112 @@ static void hog(int i915)
gem_quiescent_gpu(i915);
}
+static uint32_t sema_create(int i915, uint64_t addr, uint32_t **x)
+{
+ uint32_t handle = gem_create(i915, 4096);
+
+ *x = gem_mmap__device_coherent(i915, handle, 0, 4096, PROT_WRITE);
+ for (int n = 1; n <= 32; n++) {
+ uint32_t *cs = *x + n * 16;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD |
+ (4 - 2);
+ *cs++ = n;
+ *cs++ = addr;
+ *cs++ = addr >> 32;
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ }
+
+ return handle;
+}
+
+static uint32_t *sema(int i915, uint32_t ctx)
+{
+ uint32_t *ctl;
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = sema_create(i915, 64 << 20, &ctl),
+ .offset = 64 << 20,
+ .flags = EXEC_OBJECT_PINNED
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .rsvd1 = gem_context_clone_with_engines(i915, ctx),
+ };
+ int64_t poll = 1;
+
+ for (int n = 1; n <= 32; n++) {
+ execbuf.batch_start_offset = 64 * n,
+ gem_execbuf(i915, &execbuf);
+ /* Force a breadcrumb to be installed on each request */
+ gem_wait(i915, batch.handle, &poll);
+ }
+
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ igt_assert(gem_bo_busy(i915, batch.handle));
+ gem_close(i915, batch.handle);
+
+ return ctl;
+}
+
+static void __waits(int i915, int timeout, uint32_t ctx, unsigned int count)
+{
+ uint32_t *semaphores[count + 1];
+
+ for (int i = 0; i <= count; i++)
+ semaphores[i] = sema(i915, ctx);
+
+ igt_until_timeout(timeout) {
+ int i = rand() % (count + 1);
+
+ if ((*semaphores[i] += rand() % 32) >= 32) {
+ munmap(semaphores[i], 4096);
+ semaphores[i] = sema(i915, ctx);
+ }
+ }
+
+ for (int i = 0; i <= count; i++) {
+ *semaphores[i] = 0xffffffff;
+ munmap(semaphores[i], 4096);
+ }
+}
+
+static void waits(int i915, int timeout)
+{
+ igt_require(gem_scheduler_has_preemption(i915));
+ igt_require(gem_scheduler_has_semaphores(i915));
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ unsigned int count;
+ uint32_t ctx;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ if (count < 2) {
+ free(ci);
+ continue;
+ }
+
+ ctx = load_balancer_create(i915, ci, count);
+
+ __waits(i915, timeout, ctx, count);
+
+ gem_context_destroy(i915, ctx);
+ igt_waitchildren();
+
+ free(ci);
+ }
+
+ gem_quiescent_gpu(i915);
+}
+
static void nop(int i915)
{
struct drm_i915_gem_exec_object2 batch = {
@@ -2729,6 +2835,9 @@ igt_main
igt_subtest("hog")
hog(i915);
+ igt_subtest("waits")
+ waits(i915, 5);
+
igt_subtest("smoke")
smoketest(i915, 20);
--
2.28.0.rc0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [igt-dev] [PATCH i-g-t] i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
@ 2020-07-16 20:44 ` Chris Wilson
0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-07-16 20:44 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev, Tvrtko Ursulin, Chris Wilson
This is an attempt to chase down some preempt-to-busy races with
breadcrumb signaling on the virtual engines. By using more semaphore
spinners than available engines, we encourage very short timeslices, and
we give each batch a random duration to try to make the end of a
batch coincide with the context being scheduled out.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
tests/i915/gem_exec_balancer.c | 109 +++++++++++++++++++++++++++++++++
1 file changed, 109 insertions(+)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index c5c0055fc..e4d9e0464 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -2240,6 +2240,112 @@ static void hog(int i915)
gem_quiescent_gpu(i915);
}
+static uint32_t sema_create(int i915, uint64_t addr, uint32_t **x)
+{
+ uint32_t handle = gem_create(i915, 4096);
+
+ *x = gem_mmap__device_coherent(i915, handle, 0, 4096, PROT_WRITE);
+ for (int n = 1; n <= 32; n++) {
+ uint32_t *cs = *x + n * 16;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_GTE_SDD |
+ (4 - 2);
+ *cs++ = n;
+ *cs++ = addr;
+ *cs++ = addr >> 32;
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ }
+
+ return handle;
+}
+
+static uint32_t *sema(int i915, uint32_t ctx)
+{
+ uint32_t *ctl;
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = sema_create(i915, 64 << 20, &ctl),
+ .offset = 64 << 20,
+ .flags = EXEC_OBJECT_PINNED
+ };
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .rsvd1 = gem_context_clone_with_engines(i915, ctx),
+ };
+ int64_t poll = 1;
+
+ for (int n = 1; n <= 32; n++) {
+ execbuf.batch_start_offset = 64 * n,
+ gem_execbuf(i915, &execbuf);
+ /* Force a breadcrumb to be installed on each request */
+ gem_wait(i915, batch.handle, &poll);
+ }
+
+ gem_context_destroy(i915, execbuf.rsvd1);
+
+ igt_assert(gem_bo_busy(i915, batch.handle));
+ gem_close(i915, batch.handle);
+
+ return ctl;
+}
+
+static void __waits(int i915, int timeout, uint32_t ctx, unsigned int count)
+{
+ uint32_t *semaphores[count + 1];
+
+ for (int i = 0; i <= count; i++)
+ semaphores[i] = sema(i915, ctx);
+
+ igt_until_timeout(timeout) {
+ int i = rand() % (count + 1);
+
+ if ((*semaphores[i] += rand() % 32) >= 32) {
+ munmap(semaphores[i], 4096);
+ semaphores[i] = sema(i915, ctx);
+ }
+ }
+
+ for (int i = 0; i <= count; i++) {
+ *semaphores[i] = 0xffffffff;
+ munmap(semaphores[i], 4096);
+ }
+}
+
+static void waits(int i915, int timeout)
+{
+ igt_require(gem_scheduler_has_preemption(i915));
+ igt_require(gem_scheduler_has_semaphores(i915));
+
+ for (int class = 0; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ unsigned int count;
+ uint32_t ctx;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ if (count < 2) {
+ free(ci);
+ continue;
+ }
+
+ ctx = load_balancer_create(i915, ci, count);
+
+ __waits(i915, timeout, ctx, count);
+
+ gem_context_destroy(i915, ctx);
+ igt_waitchildren();
+
+ free(ci);
+ }
+
+ gem_quiescent_gpu(i915);
+}
+
static void nop(int i915)
{
struct drm_i915_gem_exec_object2 batch = {
@@ -2729,6 +2835,9 @@ igt_main
igt_subtest("hog")
hog(i915);
+ igt_subtest("waits")
+ waits(i915, 5);
+
igt_subtest("smoke")
smoketest(i915, 20);
--
2.28.0.rc0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [igt-dev] ✓ Fi.CI.BAT: success for i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
2020-07-16 20:44 ` [igt-dev] " Chris Wilson
(?)
@ 2020-07-16 21:48 ` Patchwork
-1 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-07-16 21:48 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
[-- Attachment #1.1: Type: text/plain, Size: 6650 bytes --]
== Series Details ==
Series: i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
URL : https://patchwork.freedesktop.org/series/79566/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_8758 -> IGTPW_4772
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/index.html
Known issues
------------
Here are the changes found in IGTPW_4772 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_flink_basic@bad-open:
- fi-tgl-y: [PASS][1] -> [DMESG-WARN][2] ([i915#402])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-tgl-y/igt@gem_flink_basic@bad-open.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-tgl-y/igt@gem_flink_basic@bad-open.html
* igt@i915_selftest@live@active:
- fi-apl-guc: [PASS][3] -> [DMESG-FAIL][4] ([i915#1635] / [i915#666])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-apl-guc/igt@i915_selftest@live@active.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-apl-guc/igt@i915_selftest@live@active.html
* igt@i915_selftest@live@gem_contexts:
- fi-tgl-u2: [PASS][5] -> [INCOMPLETE][6] ([i915#2045])
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-tgl-u2/igt@i915_selftest@live@gem_contexts.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-tgl-u2/igt@i915_selftest@live@gem_contexts.html
* igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic:
- fi-tgl-y: [PASS][7] -> [DMESG-WARN][8] ([i915#1982]) +1 similar issue
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-tgl-y/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-tgl-y/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html
* igt@kms_flip@basic-flip-vs-wf_vblank@c-edp1:
- fi-icl-u2: [PASS][9] -> [DMESG-WARN][10] ([i915#1982])
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-icl-u2/igt@kms_flip@basic-flip-vs-wf_vblank@c-edp1.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-icl-u2/igt@kms_flip@basic-flip-vs-wf_vblank@c-edp1.html
#### Possible fixes ####
* igt@i915_module_load@reload:
- {fi-tgl-dsi}: [DMESG-WARN][11] ([i915#1982]) -> [PASS][12]
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-tgl-dsi/igt@i915_module_load@reload.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-tgl-dsi/igt@i915_module_load@reload.html
* igt@i915_pm_rpm@module-reload:
- fi-skl-6700k2: [INCOMPLETE][13] ([i915#151]) -> [PASS][14]
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-skl-6700k2/igt@i915_pm_rpm@module-reload.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-skl-6700k2/igt@i915_pm_rpm@module-reload.html
* igt@i915_selftest@live@coherency:
- fi-gdg-551: [DMESG-FAIL][15] ([i915#1748]) -> [PASS][16]
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-gdg-551/igt@i915_selftest@live@coherency.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-gdg-551/igt@i915_selftest@live@coherency.html
* igt@vgem_basic@setversion:
- fi-tgl-y: [DMESG-WARN][17] ([i915#402]) -> [PASS][18] +1 similar issue
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-tgl-y/igt@vgem_basic@setversion.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-tgl-y/igt@vgem_basic@setversion.html
* igt@vgem_basic@unload:
- fi-blb-e6850: [INCOMPLETE][19] -> [PASS][20]
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-blb-e6850/igt@vgem_basic@unload.html
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-blb-e6850/igt@vgem_basic@unload.html
#### Warnings ####
* igt@i915_module_load@reload:
- fi-tgl-u2: [DMESG-WARN][21] ([i915#402]) -> [DMESG-WARN][22] ([i915#1982])
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-tgl-u2/igt@i915_module_load@reload.html
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-tgl-u2/igt@i915_module_load@reload.html
* igt@kms_force_connector_basic@force-edid:
- fi-kbl-x1275: [DMESG-WARN][23] ([i915#62] / [i915#92]) -> [DMESG-WARN][24] ([i915#62] / [i915#92] / [i915#95]) +2 similar issues
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-kbl-x1275/igt@kms_force_connector_basic@force-edid.html
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-kbl-x1275/igt@kms_force_connector_basic@force-edid.html
* igt@prime_vgem@basic-fence-flip:
- fi-kbl-x1275: [DMESG-WARN][25] ([i915#62] / [i915#92] / [i915#95]) -> [DMESG-WARN][26] ([i915#62] / [i915#92]) +1 similar issue
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/fi-kbl-x1275/igt@prime_vgem@basic-fence-flip.html
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/fi-kbl-x1275/igt@prime_vgem@basic-fence-flip.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[i915#151]: https://gitlab.freedesktop.org/drm/intel/issues/151
[i915#1635]: https://gitlab.freedesktop.org/drm/intel/issues/1635
[i915#1748]: https://gitlab.freedesktop.org/drm/intel/issues/1748
[i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
[i915#2045]: https://gitlab.freedesktop.org/drm/intel/issues/2045
[i915#402]: https://gitlab.freedesktop.org/drm/intel/issues/402
[i915#62]: https://gitlab.freedesktop.org/drm/intel/issues/62
[i915#666]: https://gitlab.freedesktop.org/drm/intel/issues/666
[i915#92]: https://gitlab.freedesktop.org/drm/intel/issues/92
[i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95
Participating hosts (45 -> 40)
------------------------------
Missing (5): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-byt-clapper
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_5738 -> IGTPW_4772
CI-20190529: 20190529
CI_DRM_8758: b6738761bde03de00a1c84c6a85f9379f53f585c @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_4772: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/index.html
IGT_5738: bc8b56fe177af34fbde7b96f1f66614a0014c6ef @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
== Testlist changes ==
+igt@gem_exec_balancer@waits
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/index.html
[-- Attachment #1.2: Type: text/html, Size: 8371 bytes --]
[-- Attachment #2: Type: text/plain, Size: 154 bytes --]
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
* [igt-dev] ✓ Fi.CI.IGT: success for i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
2020-07-16 20:44 ` [igt-dev] " Chris Wilson
(?)
(?)
@ 2020-07-17 0:56 ` Patchwork
-1 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-07-17 0:56 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
[-- Attachment #1.1: Type: text/plain, Size: 18146 bytes --]
== Series Details ==
Series: i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
URL : https://patchwork.freedesktop.org/series/79566/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_8758_full -> IGTPW_4772_full
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/index.html
New tests
---------
New tests have been introduced between CI_DRM_8758_full and IGTPW_4772_full:
### New IGT tests (1) ###
* igt@gem_exec_balancer@waits:
- Statuses : 5 pass(s) 2 skip(s)
- Exec time: [0.0, 6.56] s
Known issues
------------
Here are the changes found in IGTPW_4772_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@core_setmaster@master-drop-set-root:
- shard-iclb: [PASS][1] -> [DMESG-WARN][2] ([i915#1982]) +1 similar issue
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-iclb5/igt@core_setmaster@master-drop-set-root.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-iclb2/igt@core_setmaster@master-drop-set-root.html
* igt@gem_exec_create@madvise:
- shard-glk: [PASS][3] -> [DMESG-WARN][4] ([i915#118] / [i915#95])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-glk9/igt@gem_exec_create@madvise.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-glk5/igt@gem_exec_create@madvise.html
* igt@i915_module_load@reload-with-fault-injection:
- shard-tglb: [PASS][5] -> [DMESG-WARN][6] ([i915#402]) +2 similar issues
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-tglb6/igt@i915_module_load@reload-with-fault-injection.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-tglb7/igt@i915_module_load@reload-with-fault-injection.html
* igt@i915_selftest@live@execlists:
- shard-iclb: [PASS][7] -> [INCOMPLETE][8] ([i915#2089])
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-iclb1/igt@i915_selftest@live@execlists.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-iclb1/igt@i915_selftest@live@execlists.html
* igt@i915_selftest@mock@requests:
- shard-hsw: [PASS][9] -> [INCOMPLETE][10] ([i915#2110])
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-hsw7/igt@i915_selftest@mock@requests.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-hsw1/igt@i915_selftest@mock@requests.html
* igt@kms_big_fb@y-tiled-64bpp-rotate-0:
- shard-glk: [PASS][11] -> [DMESG-FAIL][12] ([i915#118] / [i915#95])
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-glk1/igt@kms_big_fb@y-tiled-64bpp-rotate-0.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-glk8/igt@kms_big_fb@y-tiled-64bpp-rotate-0.html
* igt@kms_cursor_crc@pipe-a-cursor-128x128-random:
- shard-kbl: [PASS][13] -> [FAIL][14] ([i915#54])
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-kbl4/igt@kms_cursor_crc@pipe-a-cursor-128x128-random.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-kbl3/igt@kms_cursor_crc@pipe-a-cursor-128x128-random.html
- shard-apl: [PASS][15] -> [FAIL][16] ([i915#1635] / [i915#54])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-apl6/igt@kms_cursor_crc@pipe-a-cursor-128x128-random.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-apl1/igt@kms_cursor_crc@pipe-a-cursor-128x128-random.html
* igt@kms_cursor_crc@pipe-a-cursor-suspend:
- shard-kbl: [PASS][17] -> [DMESG-WARN][18] ([i915#180]) +6 similar issues
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-kbl7/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-kbl4/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
* igt@kms_flip@2x-modeset-vs-vblank-race@ab-vga1-hdmi-a1:
- shard-hsw: [PASS][19] -> [DMESG-WARN][20] ([i915#1982])
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-hsw1/igt@kms_flip@2x-modeset-vs-vblank-race@ab-vga1-hdmi-a1.html
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-hsw6/igt@kms_flip@2x-modeset-vs-vblank-race@ab-vga1-hdmi-a1.html
* igt@kms_flip@modeset-vs-vblank-race-interruptible@b-hdmi-a1:
- shard-glk: [PASS][21] -> [FAIL][22] ([i915#407])
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-glk1/igt@kms_flip@modeset-vs-vblank-race-interruptible@b-hdmi-a1.html
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-glk5/igt@kms_flip@modeset-vs-vblank-race-interruptible@b-hdmi-a1.html
* igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt:
- shard-tglb: [PASS][23] -> [DMESG-WARN][24] ([i915#1982]) +1 similar issue
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-tglb8/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt.html
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-tglb7/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt.html
* igt@kms_psr@psr2_sprite_render:
- shard-iclb: [PASS][25] -> [SKIP][26] ([fdo#109441])
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-iclb2/igt@kms_psr@psr2_sprite_render.html
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-iclb8/igt@kms_psr@psr2_sprite_render.html
* igt@perf@create-destroy-userspace-config:
- shard-glk: [PASS][27] -> [SKIP][28] ([fdo#109271])
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-glk3/igt@perf@create-destroy-userspace-config.html
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-glk2/igt@perf@create-destroy-userspace-config.html
#### Possible fixes ####
* igt@dumb_buffer@map-invalid-size:
- shard-snb: [TIMEOUT][29] ([i915#1958] / [i915#2119]) -> [PASS][30] +1 similar issue
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-snb5/igt@dumb_buffer@map-invalid-size.html
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-snb2/igt@dumb_buffer@map-invalid-size.html
- shard-hsw: [TIMEOUT][31] ([i915#1958] / [i915#2119]) -> [PASS][32] +2 similar issues
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-hsw6/igt@dumb_buffer@map-invalid-size.html
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-hsw2/igt@dumb_buffer@map-invalid-size.html
* igt@gem_exec_params@invalid-bsd1-flag-on-blt:
- shard-tglb: [DMESG-WARN][33] ([i915#402]) -> [PASS][34] +1 similar issue
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-tglb8/igt@gem_exec_params@invalid-bsd1-flag-on-blt.html
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-tglb7/igt@gem_exec_params@invalid-bsd1-flag-on-blt.html
* igt@gem_exec_reloc@basic-concurrent0:
- shard-glk: [FAIL][35] ([i915#1930]) -> [PASS][36]
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-glk2/igt@gem_exec_reloc@basic-concurrent0.html
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-glk2/igt@gem_exec_reloc@basic-concurrent0.html
* igt@gem_exec_whisper@basic-queues-all:
- shard-glk: [DMESG-WARN][37] ([i915#118] / [i915#95]) -> [PASS][38] +1 similar issue
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-glk7/igt@gem_exec_whisper@basic-queues-all.html
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-glk3/igt@gem_exec_whisper@basic-queues-all.html
* igt@gem_userptr_blits@invalid-mmap-offset-unsync@gtt:
- shard-tglb: [INCOMPLETE][39] ([i915#2119] / [i915#2149]) -> [PASS][40]
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-tglb7/igt@gem_userptr_blits@invalid-mmap-offset-unsync@gtt.html
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-tglb7/igt@gem_userptr_blits@invalid-mmap-offset-unsync@gtt.html
* igt@kms_big_fb@x-tiled-64bpp-rotate-0:
- shard-glk: [DMESG-FAIL][41] ([i915#118] / [i915#95]) -> [PASS][42] +1 similar issue
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-glk8/igt@kms_big_fb@x-tiled-64bpp-rotate-0.html
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-glk1/igt@kms_big_fb@x-tiled-64bpp-rotate-0.html
* igt@kms_cursor_legacy@short-flip-after-cursor-atomic-transitions:
- shard-kbl: [DMESG-WARN][43] ([i915#1982]) -> [PASS][44]
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-kbl4/igt@kms_cursor_legacy@short-flip-after-cursor-atomic-transitions.html
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-kbl1/igt@kms_cursor_legacy@short-flip-after-cursor-atomic-transitions.html
* igt@kms_draw_crc@draw-method-xrgb2101010-mmap-wc-untiled:
- shard-apl: [DMESG-WARN][45] ([i915#1635] / [i915#1982]) -> [PASS][46]
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-apl8/igt@kms_draw_crc@draw-method-xrgb2101010-mmap-wc-untiled.html
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-apl8/igt@kms_draw_crc@draw-method-xrgb2101010-mmap-wc-untiled.html
* igt@kms_frontbuffer_tracking@psr-slowdraw:
- shard-tglb: [DMESG-WARN][47] ([i915#1982]) -> [PASS][48] +1 similar issue
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-tglb5/igt@kms_frontbuffer_tracking@psr-slowdraw.html
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-tglb3/igt@kms_frontbuffer_tracking@psr-slowdraw.html
* igt@kms_psr@psr2_sprite_plane_move:
- shard-iclb: [SKIP][49] ([fdo#109441]) -> [PASS][50] +4 similar issues
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-iclb4/igt@kms_psr@psr2_sprite_plane_move.html
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-iclb2/igt@kms_psr@psr2_sprite_plane_move.html
* igt@kms_vblank@pipe-a-ts-continuation-suspend:
- shard-kbl: [DMESG-WARN][51] ([i915#180]) -> [PASS][52] +5 similar issues
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-kbl1/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-kbl3/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
* igt@perf_pmu@busy-idle@rcs0:
- shard-snb: [FAIL][53] ([i915#1958]) -> [PASS][54] +1 similar issue
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-snb5/igt@perf_pmu@busy-idle@rcs0.html
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-snb6/igt@perf_pmu@busy-idle@rcs0.html
* igt@perf_pmu@busy-idle@vcs0:
- shard-hsw: [FAIL][55] ([i915#1958]) -> [PASS][56] +2 similar issues
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-hsw6/igt@perf_pmu@busy-idle@vcs0.html
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-hsw6/igt@perf_pmu@busy-idle@vcs0.html
- shard-snb: [INCOMPLETE][57] ([i915#2119] / [i915#82]) -> [PASS][58]
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-snb5/igt@perf_pmu@busy-idle@vcs0.html
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-snb6/igt@perf_pmu@busy-idle@vcs0.html
* igt@perf_pmu@busy-idle@vecs0:
- shard-hsw: [DMESG-FAIL][59] ([i915#2119]) -> [PASS][60]
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-hsw6/igt@perf_pmu@busy-idle@vecs0.html
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-hsw6/igt@perf_pmu@busy-idle@vecs0.html
#### Warnings ####
* igt@kms_big_fb@linear-32bpp-rotate-270:
- shard-snb: [SKIP][61] ([fdo#109271]) -> [TIMEOUT][62] ([i915#1958] / [i915#2119]) +1 similar issue
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-snb6/igt@kms_big_fb@linear-32bpp-rotate-270.html
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-snb6/igt@kms_big_fb@linear-32bpp-rotate-270.html
* igt@kms_ccs@pipe-c-ccs-on-another-bo:
- shard-hsw: [TIMEOUT][63] ([i915#1958] / [i915#2119]) -> [SKIP][64] ([fdo#109271])
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-hsw6/igt@kms_ccs@pipe-c-ccs-on-another-bo.html
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-hsw1/igt@kms_ccs@pipe-c-ccs-on-another-bo.html
- shard-snb: [TIMEOUT][65] ([i915#1958] / [i915#2119]) -> [SKIP][66] ([fdo#109271]) +1 similar issue
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-snb5/igt@kms_ccs@pipe-c-ccs-on-another-bo.html
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-snb2/igt@kms_ccs@pipe-c-ccs-on-another-bo.html
* igt@kms_dp_dsc@basic-dsc-enable-edp:
- shard-iclb: [DMESG-WARN][67] ([i915#1226]) -> [SKIP][68] ([fdo#109349])
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-iclb8/igt@kms_dp_dsc@basic-dsc-enable-edp.html
* igt@kms_psr@psr2_dpms:
- shard-snb: [SKIP][69] ([fdo#109271]) -> [INCOMPLETE][70] ([i915#82])
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-snb1/igt@kms_psr@psr2_dpms.html
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-snb6/igt@kms_psr@psr2_dpms.html
* igt@perf@polling-parameterized:
- shard-hsw: [FAIL][71] ([i915#1542]) -> [INCOMPLETE][72] ([i915#1958])
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-hsw2/igt@perf@polling-parameterized.html
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-hsw6/igt@perf@polling-parameterized.html
* igt@runner@aborted:
- shard-hsw: [FAIL][73] ([i915#2110]) -> [FAIL][74] ([i915#1436] / [i915#2110])
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-hsw1/igt@runner@aborted.html
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-hsw1/igt@runner@aborted.html
- shard-iclb: ([FAIL][75], [FAIL][76]) ([i915#2110]) -> ([FAIL][77], [FAIL][78]) ([i915#1580] / [i915#2110])
[75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-iclb4/igt@runner@aborted.html
[76]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-iclb2/igt@runner@aborted.html
[77]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-iclb1/igt@runner@aborted.html
[78]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-iclb5/igt@runner@aborted.html
- shard-apl: ([FAIL][79], [FAIL][80], [FAIL][81]) ([i915#1610] / [i915#1635] / [i915#2110] / [i915#637]) -> [FAIL][82] ([i915#1635] / [i915#2110])
[79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-apl1/igt@runner@aborted.html
[80]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-apl8/igt@runner@aborted.html
[81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-apl8/igt@runner@aborted.html
[82]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-apl2/igt@runner@aborted.html
- shard-tglb: ([FAIL][83], [FAIL][84], [FAIL][85]) ([i915#2110]) -> [FAIL][86] ([i915#1764] / [i915#2110])
[83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-tglb5/igt@runner@aborted.html
[84]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-tglb7/igt@runner@aborted.html
[85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8758/shard-tglb1/igt@runner@aborted.html
[86]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/shard-tglb3/igt@runner@aborted.html
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109349]: https://bugs.freedesktop.org/show_bug.cgi?id=109349
[fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
[i915#118]: https://gitlab.freedesktop.org/drm/intel/issues/118
[i915#1226]: https://gitlab.freedesktop.org/drm/intel/issues/1226
[i915#1436]: https://gitlab.freedesktop.org/drm/intel/issues/1436
[i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
[i915#1580]: https://gitlab.freedesktop.org/drm/intel/issues/1580
[i915#1610]: https://gitlab.freedesktop.org/drm/intel/issues/1610
[i915#1635]: https://gitlab.freedesktop.org/drm/intel/issues/1635
[i915#1764]: https://gitlab.freedesktop.org/drm/intel/issues/1764
[i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
[i915#1930]: https://gitlab.freedesktop.org/drm/intel/issues/1930
[i915#1958]: https://gitlab.freedesktop.org/drm/intel/issues/1958
[i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
[i915#2089]: https://gitlab.freedesktop.org/drm/intel/issues/2089
[i915#2110]: https://gitlab.freedesktop.org/drm/intel/issues/2110
[i915#2119]: https://gitlab.freedesktop.org/drm/intel/issues/2119
[i915#2149]: https://gitlab.freedesktop.org/drm/intel/issues/2149
[i915#402]: https://gitlab.freedesktop.org/drm/intel/issues/402
[i915#407]: https://gitlab.freedesktop.org/drm/intel/issues/407
[i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
[i915#637]: https://gitlab.freedesktop.org/drm/intel/issues/637
[i915#82]: https://gitlab.freedesktop.org/drm/intel/issues/82
[i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95
Participating hosts (10 -> 8)
------------------------------
Missing (2): pig-skl-6260u pig-glk-j5005
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_5738 -> IGTPW_4772
* Piglit: piglit_4509 -> None
CI-20190529: 20190529
CI_DRM_8758: b6738761bde03de00a1c84c6a85f9379f53f585c @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_4772: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/index.html
IGT_5738: bc8b56fe177af34fbde7b96f1f66614a0014c6ef @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_4772/index.html
[-- Attachment #1.2: Type: text/html, Size: 22676 bytes --]
[-- Attachment #2: Type: text/plain, Size: 154 bytes --]
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
2020-07-16 20:44 ` [igt-dev] " Chris Wilson
@ 2020-07-17 8:34 ` Tvrtko Ursulin
0 siblings, 0 replies; 8+ messages in thread
From: Tvrtko Ursulin @ 2020-07-17 8:34 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 16/07/2020 21:44, Chris Wilson wrote:
> This is an attempt to chase down some preempt-to-busy races with
> breadcrumb signaling on the virtual engines. By using more semaphore
> spinners than available engines, we encourage very short timeslices, and
> we make each batch of random duration to try and coincide the end of a
> batch with the context being scheduled out.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
> tests/i915/gem_exec_balancer.c | 109 +++++++++++++++++++++++++++++++++
> 1 file changed, 109 insertions(+)
>
> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> index c5c0055fc..e4d9e0464 100644
> --- a/tests/i915/gem_exec_balancer.c
> +++ b/tests/i915/gem_exec_balancer.c
> @@ -2240,6 +2240,112 @@ static void hog(int i915)
> gem_quiescent_gpu(i915);
> }
>
> +static uint32_t sema_create(int i915, uint64_t addr, uint32_t **x)
> +{
> + uint32_t handle = gem_create(i915, 4096);
> +
> + *x = gem_mmap__device_coherent(i915, handle, 0, 4096, PROT_WRITE);
> + for (int n = 1; n <= 32; n++) {
> + uint32_t *cs = *x + n * 16;
Okay so semaphore target is in the batch itself, that's why first 16
dwords are nops for convenience.
> +
> + *cs++ = MI_SEMAPHORE_WAIT |
> + MI_SEMAPHORE_POLL |
> + MI_SEMAPHORE_SAD_GTE_SDD |
> + (4 - 2);
> + *cs++ = n;
> + *cs++ = addr;
> + *cs++ = addr >> 32;
> +
> + *cs++ = MI_BATCH_BUFFER_END;
> + }
> +
> + return handle;
> +}
> +
> +static uint32_t *sema(int i915, uint32_t ctx)
> +{
> + uint32_t *ctl;
> + struct drm_i915_gem_exec_object2 batch = {
> + .handle = sema_create(i915, 64 << 20, &ctl),
> + .offset = 64 << 20,
> + .flags = EXEC_OBJECT_PINNED
> + };
> + struct drm_i915_gem_execbuffer2 execbuf = {
> + .buffers_ptr = to_user_pointer(&batch),
> + .buffer_count = 1,
> + .rsvd1 = gem_context_clone_with_engines(i915, ctx),
> + };
> + int64_t poll = 1;
> +
> + for (int n = 1; n <= 32; n++) {
> + execbuf.batch_start_offset = 64 * n,
> + gem_execbuf(i915, &execbuf);
> + /* Force a breadcrumb to be installed on each request */
> + gem_wait(i915, batch.handle, &poll);
> + }
> +
> + gem_context_destroy(i915, execbuf.rsvd1);
> +
> + igt_assert(gem_bo_busy(i915, batch.handle));
> + gem_close(i915, batch.handle);
> +
> + return ctl;
> +}
> +
> +static void __waits(int i915, int timeout, uint32_t ctx, unsigned int count)
> +{
> + uint32_t *semaphores[count + 1];
> +
> + for (int i = 0; i <= count; i++)
> + semaphores[i] = sema(i915, ctx);
> +
> + igt_until_timeout(timeout) {
> + int i = rand() % (count + 1);
> +
> + if ((*semaphores[i] += rand() % 32) >= 32) {
Write releases some batch buffers, until it knows it released all of
them when it creates a new set.
> + munmap(semaphores[i], 4096);
> + semaphores[i] = sema(i915, ctx);
> + }
> + }
> +
> + for (int i = 0; i <= count; i++) {
> + *semaphores[i] = 0xffffffff;
> + munmap(semaphores[i], 4096);
> + }
> +}
> +
> +static void waits(int i915, int timeout)
> +{
> + igt_require(gem_scheduler_has_preemption(i915));
> + igt_require(gem_scheduler_has_semaphores(i915));
> +
> + for (int class = 0; class < 32; class++) {
> + struct i915_engine_class_instance *ci;
> + unsigned int count;
> + uint32_t ctx;
> +
> + ci = list_engines(i915, 1u << class, &count);
> + if (!ci)
> + continue;
> +
> + if (count < 2) {
> + free(ci);
> + continue;
> + }
> +
> + ctx = load_balancer_create(i915, ci, count);
> +
> + __waits(i915, timeout, ctx, count);
> +
> + gem_context_destroy(i915, ctx);
> + igt_waitchildren();
Don't see any forking in the test.
> +
> + free(ci);
> + }
> +
> + gem_quiescent_gpu(i915);
> +}
> +
> static void nop(int i915)
> {
> struct drm_i915_gem_exec_object2 batch = {
> @@ -2729,6 +2835,9 @@ igt_main
> igt_subtest("hog")
> hog(i915);
>
> + igt_subtest("waits")
> + waits(i915, 5);
> +
> igt_subtest("smoke")
> smoketest(i915, 20);
>
>
Looks okay in principle.
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
I am not sure if the batch duration is not too short in practice, the
add loop will really rapidly end all, just needs 64 iterations on
average to end all 32 I think. So 64 WC writes from the CPU compared to
CSB processing and breadcrumb signaling latencies might be too short.
Maybe some small random udelays in the loop would be more realistic.
Maybe as a 2nd flavour of the test just in case.. more coverage the better.
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [igt-dev] [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
@ 2020-07-17 8:34 ` Tvrtko Ursulin
0 siblings, 0 replies; 8+ messages in thread
From: Tvrtko Ursulin @ 2020-07-17 8:34 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 16/07/2020 21:44, Chris Wilson wrote:
> This is an attempt to chase down some preempt-to-busy races with
> breadcrumb signaling on the virtual engines. By using more semaphore
> spinners than available engines, we encourage very short timeslices, and
> we make each batch of random duration to try and coincide the end of a
> batch with the context being scheduled out.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
> tests/i915/gem_exec_balancer.c | 109 +++++++++++++++++++++++++++++++++
> 1 file changed, 109 insertions(+)
>
> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> index c5c0055fc..e4d9e0464 100644
> --- a/tests/i915/gem_exec_balancer.c
> +++ b/tests/i915/gem_exec_balancer.c
> @@ -2240,6 +2240,112 @@ static void hog(int i915)
> gem_quiescent_gpu(i915);
> }
>
> +static uint32_t sema_create(int i915, uint64_t addr, uint32_t **x)
> +{
> + uint32_t handle = gem_create(i915, 4096);
> +
> + *x = gem_mmap__device_coherent(i915, handle, 0, 4096, PROT_WRITE);
> + for (int n = 1; n <= 32; n++) {
> + uint32_t *cs = *x + n * 16;
Okay so semaphore target is in the batch itself, that's why first 16
dwords are nops for convenience.
> +
> + *cs++ = MI_SEMAPHORE_WAIT |
> + MI_SEMAPHORE_POLL |
> + MI_SEMAPHORE_SAD_GTE_SDD |
> + (4 - 2);
> + *cs++ = n;
> + *cs++ = addr;
> + *cs++ = addr >> 32;
> +
> + *cs++ = MI_BATCH_BUFFER_END;
> + }
> +
> + return handle;
> +}
> +
> +static uint32_t *sema(int i915, uint32_t ctx)
> +{
> + uint32_t *ctl;
> + struct drm_i915_gem_exec_object2 batch = {
> + .handle = sema_create(i915, 64 << 20, &ctl),
> + .offset = 64 << 20,
> + .flags = EXEC_OBJECT_PINNED
> + };
> + struct drm_i915_gem_execbuffer2 execbuf = {
> + .buffers_ptr = to_user_pointer(&batch),
> + .buffer_count = 1,
> + .rsvd1 = gem_context_clone_with_engines(i915, ctx),
> + };
> + int64_t poll = 1;
> +
> + for (int n = 1; n <= 32; n++) {
> + execbuf.batch_start_offset = 64 * n,
> + gem_execbuf(i915, &execbuf);
> + /* Force a breadcrumb to be installed on each request */
> + gem_wait(i915, batch.handle, &poll);
> + }
> +
> + gem_context_destroy(i915, execbuf.rsvd1);
> +
> + igt_assert(gem_bo_busy(i915, batch.handle));
> + gem_close(i915, batch.handle);
> +
> + return ctl;
> +}
> +
> +static void __waits(int i915, int timeout, uint32_t ctx, unsigned int count)
> +{
> + uint32_t *semaphores[count + 1];
> +
> + for (int i = 0; i <= count; i++)
> + semaphores[i] = sema(i915, ctx);
> +
> + igt_until_timeout(timeout) {
> + int i = rand() % (count + 1);
> +
> + if ((*semaphores[i] += rand() % 32) >= 32) {
Write releases some batch buffers, until it knows it released all of
them when it creates a new set.
> + munmap(semaphores[i], 4096);
> + semaphores[i] = sema(i915, ctx);
> + }
> + }
> +
> + for (int i = 0; i <= count; i++) {
> + *semaphores[i] = 0xffffffff;
> + munmap(semaphores[i], 4096);
> + }
> +}
> +
> +static void waits(int i915, int timeout)
> +{
> + igt_require(gem_scheduler_has_preemption(i915));
> + igt_require(gem_scheduler_has_semaphores(i915));
> +
> + for (int class = 0; class < 32; class++) {
> + struct i915_engine_class_instance *ci;
> + unsigned int count;
> + uint32_t ctx;
> +
> + ci = list_engines(i915, 1u << class, &count);
> + if (!ci)
> + continue;
> +
> + if (count < 2) {
> + free(ci);
> + continue;
> + }
> +
> + ctx = load_balancer_create(i915, ci, count);
> +
> + __waits(i915, timeout, ctx, count);
> +
> + gem_context_destroy(i915, ctx);
> + igt_waitchildren();
Don't see any forking in the test.
> +
> + free(ci);
> + }
> +
> + gem_quiescent_gpu(i915);
> +}
> +
> static void nop(int i915)
> {
> struct drm_i915_gem_exec_object2 batch = {
> @@ -2729,6 +2835,9 @@ igt_main
> igt_subtest("hog")
> hog(i915);
>
> + igt_subtest("waits")
> + waits(i915, 5);
> +
> igt_subtest("smoke")
> smoketest(i915, 20);
>
>
Looks okay in principle.
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
I am not sure if the batch duration is not too short in practice, the
add loop will really rapidly end all, just needs 64 iterations on
average to end all 32 I think. So 64 WC writes from the CPU compared to
CSB processing and breadcrumb signaling latencies might be too short.
Maybe some small random udelays in the loop would be more realistic.
Maybe as a 2nd flavour of the test just in case.. more coverage the better.
Regards,
Tvrtko
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
2020-07-17 8:34 ` [igt-dev] " Tvrtko Ursulin
@ 2020-07-17 10:19 ` Chris Wilson
0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-07-17 10:19 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Tvrtko Ursulin (2020-07-17 09:34:07)
>
> On 16/07/2020 21:44, Chris Wilson wrote:
> I am not sure if the batch duration is not too short in practice, the
> add loop will really rapidly end all, just needs 64 iterations on
> average to end all 32 I think. So 64 WC writes from the CPU compared to
> CSB processing and breadcrumb signaling latencies might be too short.
> Maybe some small random udelays in the loop would be more realistic.
> Maybe as a 2nd flavour of the test just in case.. more coverage the better.
GPU kernel IGT
semaphore wait
-> raise interrupt
handle interrupt
-> kick tasklet
begin preempt-to-busy semaphore signal
semaphore completes
request completes
submit new ELSP[]
-> stale unwound request
Duration of the batch/semaphore itself doesn't really factor into it,
it's that we have to let batch complete after we begin the process of
scheduling it out for an expired timeslice. It's such a small window and
I don't see a good way of hitting it reliably from userspace.
With some printk, I was able to confirm that we were timeslicing virtual
requests and moving them between engines with active breadcrumbs. But I
never once saw any of the bugs with the stale requests, using this test.
Somehow we want to lengthen the preempt-to-busy window and coincide the
request completion at the same time. So far all I have is yucky (too
single purpose, we would be better off writing unit tests for each of
the steps involved).
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [igt-dev] [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing
@ 2020-07-17 10:19 ` Chris Wilson
0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-07-17 10:19 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Tvrtko Ursulin (2020-07-17 09:34:07)
>
> On 16/07/2020 21:44, Chris Wilson wrote:
> I am not sure if the batch duration is not too short in practice, the
> add loop will really rapidly end all, just needs 64 iterations on
> average to end all 32 I think. So 64 WC writes from the CPU compared to
> CSB processing and breadcrumb signaling latencies might be too short.
> Maybe some small random udelays in the loop would be more realistic.
> Maybe as a 2nd flavour of the test just in case.. more coverage the better.
GPU kernel IGT
semaphore wait
-> raise interrupt
handle interrupt
-> kick tasklet
begin preempt-to-busy semaphore signal
semaphore completes
request completes
submit new ELSP[]
-> stale unwound request
Duration of the batch/semaphore itself doesn't really factor into it,
it's that we have to let batch complete after we begin the process of
scheduling it out for an expired timeslice. It's such a small window and
I don't see a good way of hitting it reliably from userspace.
With some printk, I was able to confirm that we were timeslicing virtual
requests and moving them between engines with active breadcrumbs. But I
never once saw any of the bugs with the stale requests, using this test.
Somehow we want to lengthen the preempt-to-busy window and coincide the
request completion at the same time. So far all I have is yucky (too
single purpose, we would be better off writing unit tests for each of
the steps involved).
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 8+ messages in thread
end of thread, other threads:[~2020-07-17 10:19 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-16 20:44 [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Race breadcrumb signaling against timeslicing Chris Wilson
2020-07-16 20:44 ` [igt-dev] " Chris Wilson
2020-07-16 21:48 ` [igt-dev] ✓ Fi.CI.BAT: success for " Patchwork
2020-07-17 0:56 ` [igt-dev] ✓ Fi.CI.IGT: " Patchwork
2020-07-17 8:34 ` [Intel-gfx] [PATCH i-g-t] " Tvrtko Ursulin
2020-07-17 8:34 ` [igt-dev] " Tvrtko Ursulin
2020-07-17 10:19 ` Chris Wilson
2020-07-17 10:19 ` [igt-dev] " Chris Wilson
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.