* [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-14 19:15 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-14 19:15 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev
Although a virtual engine itself has no hang detection -- that is on the
underlying physical engines -- it does provide a unique means for clients
to try and break the system. Try and break it before they do.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
1 file changed, 105 insertions(+)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 70c4529b4..86028cfdd 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -24,6 +24,7 @@
#include <sched.h>
#include "igt.h"
+#include "igt_gt.h"
#include "igt_perf.h"
#include "i915/gem_ring.h"
#include "sw_sync.h"
@@ -1314,6 +1315,102 @@ static void semaphore(int i915)
gem_quiescent_gpu(i915);
}
+static void set_unbannable(int i915, uint32_t ctx)
+{
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_BANNABLE,
+ };
+
+ igt_assert_eq(__gem_context_set_param(i915, &p), 0);
+}
+
+static void hangme(int i915)
+{
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+
+ /*
+ * Fill the available engines with hanging virtual engines and verify
+ * that execution continues onto the second batch.
+ */
+
+ for (int class = 1; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ struct client {
+ igt_spin_t *spin[2];
+ } *client;
+ unsigned int count;
+ uint32_t bg;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ if (count < 2) {
+ free(ci);
+ continue;
+ }
+
+ client = malloc(sizeof(*client) * count);
+ igt_assert(client);
+
+ for (int i = 0; i < count; i++) {
+ uint32_t ctx = gem_context_create(i915);
+ struct client *c = &client[i];
+ unsigned int flags;
+
+ set_unbannable(i915, ctx);
+ set_load_balancer(i915, ctx, ci, count, NULL);
+
+ flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
+ for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
+ c->spin[j] = igt_spin_new(i915, ctx,
+ .flags = flags);
+ flags = IGT_SPIN_FENCE_OUT;
+ }
+
+ gem_context_destroy(i915, ctx);
+ }
+
+ /* Apply some background context to speed up hang detection */
+ bg = gem_context_create(i915);
+ set_engines(i915, bg, ci, count);
+ for (int i = 0; i < count; i++) {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .flags = i,
+ .rsvd1 = bg,
+ };
+ gem_execbuf(i915, &execbuf);
+ }
+ gem_context_destroy(i915, bg);
+
+ for (int i = 0; i < count; i++) {
+ struct client *c = &client[i];
+
+ igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
+ gem_sync(i915, c->spin[0]->handle);
+ igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
+ -EIO);
+
+ igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
+ gem_sync(i915, c->spin[1]->handle);
+ igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
+ -EIO);
+
+ igt_spin_free(i915, c->spin[0]);
+ igt_spin_free(i915, c->spin[1]);
+ }
+ free(client);
+ }
+
+ gem_close(i915, batch.handle);
+ gem_quiescent_gpu(i915);
+}
+
static void smoketest(int i915, int timeout)
{
struct drm_i915_gem_exec_object2 batch[2] = {
@@ -1486,4 +1583,12 @@ igt_main
igt_fixture {
igt_stop_hang_detector();
}
+
+ igt_subtest("hang") {
+ igt_hang_t hang = igt_allow_hang(i915, 0, 0);
+
+ hangme(i915);
+
+ igt_disallow_hang(i915, hang);
+ }
}
--
2.24.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-14 19:15 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-14 19:15 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev
Although a virtual engine itself has no hang detection -- that is on the
underlying physical engines -- it does provide a unique means for clients
to try and break the system. Try and break it before they do.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
1 file changed, 105 insertions(+)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 70c4529b4..86028cfdd 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -24,6 +24,7 @@
#include <sched.h>
#include "igt.h"
+#include "igt_gt.h"
#include "igt_perf.h"
#include "i915/gem_ring.h"
#include "sw_sync.h"
@@ -1314,6 +1315,102 @@ static void semaphore(int i915)
gem_quiescent_gpu(i915);
}
+static void set_unbannable(int i915, uint32_t ctx)
+{
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_BANNABLE,
+ };
+
+ igt_assert_eq(__gem_context_set_param(i915, &p), 0);
+}
+
+static void hangme(int i915)
+{
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+
+ /*
+ * Fill the available engines with hanging virtual engines and verify
+ * that execution continues onto the second batch.
+ */
+
+ for (int class = 1; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ struct client {
+ igt_spin_t *spin[2];
+ } *client;
+ unsigned int count;
+ uint32_t bg;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ if (count < 2) {
+ free(ci);
+ continue;
+ }
+
+ client = malloc(sizeof(*client) * count);
+ igt_assert(client);
+
+ for (int i = 0; i < count; i++) {
+ uint32_t ctx = gem_context_create(i915);
+ struct client *c = &client[i];
+ unsigned int flags;
+
+ set_unbannable(i915, ctx);
+ set_load_balancer(i915, ctx, ci, count, NULL);
+
+ flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
+ for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
+ c->spin[j] = igt_spin_new(i915, ctx,
+ .flags = flags);
+ flags = IGT_SPIN_FENCE_OUT;
+ }
+
+ gem_context_destroy(i915, ctx);
+ }
+
+ /* Apply some background context to speed up hang detection */
+ bg = gem_context_create(i915);
+ set_engines(i915, bg, ci, count);
+ for (int i = 0; i < count; i++) {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .flags = i,
+ .rsvd1 = bg,
+ };
+ gem_execbuf(i915, &execbuf);
+ }
+ gem_context_destroy(i915, bg);
+
+ for (int i = 0; i < count; i++) {
+ struct client *c = &client[i];
+
+ igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
+ gem_sync(i915, c->spin[0]->handle);
+ igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
+ -EIO);
+
+ igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
+ gem_sync(i915, c->spin[1]->handle);
+ igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
+ -EIO);
+
+ igt_spin_free(i915, c->spin[0]);
+ igt_spin_free(i915, c->spin[1]);
+ }
+ free(client);
+ }
+
+ gem_close(i915, batch.handle);
+ gem_quiescent_gpu(i915);
+}
+
static void smoketest(int i915, int timeout)
{
struct drm_i915_gem_exec_object2 batch[2] = {
@@ -1486,4 +1583,12 @@ igt_main
igt_fixture {
igt_stop_hang_detector();
}
+
+ igt_subtest("hang") {
+ igt_hang_t hang = igt_allow_hang(i915, 0, 0);
+
+ hangme(i915);
+
+ igt_disallow_hang(i915, hang);
+ }
}
--
2.24.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [igt-dev] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-14 19:15 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-14 19:15 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev, Tvrtko Ursulin
Although a virtual engine itself has no hang detection -- that is on the
underlying physical engines -- it does provide a unique means for clients
to try and break the system. Try and break it before they do.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
1 file changed, 105 insertions(+)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 70c4529b4..86028cfdd 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -24,6 +24,7 @@
#include <sched.h>
#include "igt.h"
+#include "igt_gt.h"
#include "igt_perf.h"
#include "i915/gem_ring.h"
#include "sw_sync.h"
@@ -1314,6 +1315,102 @@ static void semaphore(int i915)
gem_quiescent_gpu(i915);
}
+static void set_unbannable(int i915, uint32_t ctx)
+{
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_BANNABLE,
+ };
+
+ igt_assert_eq(__gem_context_set_param(i915, &p), 0);
+}
+
+static void hangme(int i915)
+{
+ struct drm_i915_gem_exec_object2 batch = {
+ .handle = batch_create(i915),
+ };
+
+ /*
+ * Fill the available engines with hanging virtual engines and verify
+ * that execution continues onto the second batch.
+ */
+
+ for (int class = 1; class < 32; class++) {
+ struct i915_engine_class_instance *ci;
+ struct client {
+ igt_spin_t *spin[2];
+ } *client;
+ unsigned int count;
+ uint32_t bg;
+
+ ci = list_engines(i915, 1u << class, &count);
+ if (!ci)
+ continue;
+
+ if (count < 2) {
+ free(ci);
+ continue;
+ }
+
+ client = malloc(sizeof(*client) * count);
+ igt_assert(client);
+
+ for (int i = 0; i < count; i++) {
+ uint32_t ctx = gem_context_create(i915);
+ struct client *c = &client[i];
+ unsigned int flags;
+
+ set_unbannable(i915, ctx);
+ set_load_balancer(i915, ctx, ci, count, NULL);
+
+ flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
+ for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
+ c->spin[j] = igt_spin_new(i915, ctx,
+ .flags = flags);
+ flags = IGT_SPIN_FENCE_OUT;
+ }
+
+ gem_context_destroy(i915, ctx);
+ }
+
+ /* Apply some background context to speed up hang detection */
+ bg = gem_context_create(i915);
+ set_engines(i915, bg, ci, count);
+ for (int i = 0; i < count; i++) {
+ struct drm_i915_gem_execbuffer2 execbuf = {
+ .buffers_ptr = to_user_pointer(&batch),
+ .buffer_count = 1,
+ .flags = i,
+ .rsvd1 = bg,
+ };
+ gem_execbuf(i915, &execbuf);
+ }
+ gem_context_destroy(i915, bg);
+
+ for (int i = 0; i < count; i++) {
+ struct client *c = &client[i];
+
+ igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
+ gem_sync(i915, c->spin[0]->handle);
+ igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
+ -EIO);
+
+ igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
+ gem_sync(i915, c->spin[1]->handle);
+ igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
+ -EIO);
+
+ igt_spin_free(i915, c->spin[0]);
+ igt_spin_free(i915, c->spin[1]);
+ }
+ free(client);
+ }
+
+ gem_close(i915, batch.handle);
+ gem_quiescent_gpu(i915);
+}
+
static void smoketest(int i915, int timeout)
{
struct drm_i915_gem_exec_object2 batch[2] = {
@@ -1486,4 +1583,12 @@ igt_main
igt_fixture {
igt_stop_hang_detector();
}
+
+ igt_subtest("hang") {
+ igt_hang_t hang = igt_allow_hang(i915, 0, 0);
+
+ hangme(i915);
+
+ igt_disallow_hang(i915, hang);
+ }
}
--
2.24.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [igt-dev] ✓ Fi.CI.BAT: success for i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
2019-11-14 19:15 ` [Intel-gfx] " Chris Wilson
(?)
(?)
@ 2019-11-14 20:07 ` Patchwork
-1 siblings, 0 replies; 23+ messages in thread
From: Patchwork @ 2019-11-14 20:07 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
== Series Details ==
Series: i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
URL : https://patchwork.freedesktop.org/series/69490/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_7346 -> IGTPW_3707
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/index.html
Known issues
------------
Here are the changes found in IGTPW_3707 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@kms_setmode@basic-clone-single-crtc:
- fi-skl-6770hq: [PASS][1] -> [WARN][2] ([fdo#112252])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/fi-skl-6770hq/igt@kms_setmode@basic-clone-single-crtc.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/fi-skl-6770hq/igt@kms_setmode@basic-clone-single-crtc.html
#### Possible fixes ####
* igt@i915_pm_rpm@module-reload:
- fi-skl-lmem: [DMESG-WARN][3] ([fdo#112261]) -> [PASS][4]
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/fi-skl-lmem/igt@i915_pm_rpm@module-reload.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/fi-skl-lmem/igt@i915_pm_rpm@module-reload.html
* igt@i915_selftest@live_gem_contexts:
- fi-bsw-n3050: [INCOMPLETE][5] ([fdo# 111542]) -> [PASS][6]
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/fi-bsw-n3050/igt@i915_selftest@live_gem_contexts.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/fi-bsw-n3050/igt@i915_selftest@live_gem_contexts.html
* igt@i915_selftest@live_hangcheck:
- fi-hsw-4770r: [DMESG-FAIL][7] ([fdo#111991]) -> [PASS][8]
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/fi-hsw-4770r/igt@i915_selftest@live_hangcheck.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/fi-hsw-4770r/igt@i915_selftest@live_hangcheck.html
* igt@kms_busy@basic-flip-pipe-b:
- fi-skl-6770hq: [DMESG-WARN][9] ([fdo#105541]) -> [PASS][10]
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/fi-skl-6770hq/igt@kms_busy@basic-flip-pipe-b.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/fi-skl-6770hq/igt@kms_busy@basic-flip-pipe-b.html
* igt@kms_chamelium@hdmi-edid-read:
- fi-kbl-7500u: [FAIL][11] ([fdo#109483]) -> [PASS][12]
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/fi-kbl-7500u/igt@kms_chamelium@hdmi-edid-read.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/fi-kbl-7500u/igt@kms_chamelium@hdmi-edid-read.html
* igt@kms_chamelium@hdmi-hpd-fast:
- fi-kbl-7500u: [FAIL][13] ([fdo#111045] / [fdo#111096]) -> [PASS][14]
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[fdo# 111542]: https://bugs.freedesktop.org/show_bug.cgi?id=111542
[fdo#105541]: https://bugs.freedesktop.org/show_bug.cgi?id=105541
[fdo#109483]: https://bugs.freedesktop.org/show_bug.cgi?id=109483
[fdo#109964]: https://bugs.freedesktop.org/show_bug.cgi?id=109964
[fdo#110343]: https://bugs.freedesktop.org/show_bug.cgi?id=110343
[fdo#111045]: https://bugs.freedesktop.org/show_bug.cgi?id=111045
[fdo#111096]: https://bugs.freedesktop.org/show_bug.cgi?id=111096
[fdo#111991]: https://bugs.freedesktop.org/show_bug.cgi?id=111991
[fdo#112252]: https://bugs.freedesktop.org/show_bug.cgi?id=112252
[fdo#112261]: https://bugs.freedesktop.org/show_bug.cgi?id=112261
Participating hosts (49 -> 45)
------------------------------
Missing (4): fi-byt-clapper fi-byt-squawks fi-bsw-cyan fi-hsw-4200u
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_5284 -> IGTPW_3707
CI-20190529: 20190529
CI_DRM_7346: 78fd2497a0a373d6a68c047d759851e0a895f112 @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_3707: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/index.html
IGT_5284: b28d801e4c010942d05821fada6dd9d2d67d6e4d @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
== Testlist changes ==
+igt@gem_exec_balancer@hang
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/index.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 23+ messages in thread
* [igt-dev] ✗ GitLab.Pipeline: warning for i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
2019-11-14 19:15 ` [Intel-gfx] " Chris Wilson
` (2 preceding siblings ...)
(?)
@ 2019-11-14 20:11 ` Patchwork
-1 siblings, 0 replies; 23+ messages in thread
From: Patchwork @ 2019-11-14 20:11 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
== Series Details ==
Series: i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
URL : https://patchwork.freedesktop.org/series/69490/
State : warning
== Summary ==
Did not get list of undocumented tests for this run, something is wrong!
Other than that, pipeline status: FAILED.
see https://gitlab.freedesktop.org/gfx-ci/igt-ci-tags/pipelines/79196 for the overview.
== Logs ==
For more details see: https://gitlab.freedesktop.org/gfx-ci/igt-ci-tags/pipelines/79196
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 13:02 ` Tvrtko Ursulin
0 siblings, 0 replies; 23+ messages in thread
From: Tvrtko Ursulin @ 2019-11-15 13:02 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 14/11/2019 19:15, Chris Wilson wrote:
> Although a virtual engine itself has no hang detection; that is on the
> underlying physical engines, it does provide a unique means for clients
> to try and break the system. Try and break it before they do.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> 1 file changed, 105 insertions(+)
>
> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> index 70c4529b4..86028cfdd 100644
> --- a/tests/i915/gem_exec_balancer.c
> +++ b/tests/i915/gem_exec_balancer.c
> @@ -24,6 +24,7 @@
> #include <sched.h>
>
> #include "igt.h"
> +#include "igt_gt.h"
> #include "igt_perf.h"
> #include "i915/gem_ring.h"
> #include "sw_sync.h"
> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> gem_quiescent_gpu(i915);
> }
>
> +static void set_unbannable(int i915, uint32_t ctx)
> +{
> + struct drm_i915_gem_context_param p = {
> + .ctx_id = ctx,
> + .param = I915_CONTEXT_PARAM_BANNABLE,
> + };
> +
> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> +}
> +
> +static void hangme(int i915)
> +{
> + struct drm_i915_gem_exec_object2 batch = {
> + .handle = batch_create(i915),
> + };
> +
> + /*
> + * Fill the available engines with hanging virtual engines and verify
> + * that execution continues onto the second batch.
> + */
> +
> + for (int class = 1; class < 32; class++) {
> + struct i915_engine_class_instance *ci;
> + struct client {
> + igt_spin_t *spin[2];
> + } *client;
> + unsigned int count;
> + uint32_t bg;
> +
> + ci = list_engines(i915, 1u << class, &count);
> + if (!ci)
> + continue;
> +
> + if (count < 2) {
> + free(ci);
> + continue;
> + }
> +
> + client = malloc(sizeof(*client) * count);
> + igt_assert(client);
> +
> + for (int i = 0; i < count; i++) {
> + uint32_t ctx = gem_context_create(i915);
> + struct client *c = &client[i];
> + unsigned int flags;
> +
> + set_unbannable(i915, ctx);
> + set_load_balancer(i915, ctx, ci, count, NULL);
> +
> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> + c->spin[j] = igt_spin_new(i915, ctx,
> + .flags = flags);
> + flags = IGT_SPIN_FENCE_OUT;
> + }
> +
> + gem_context_destroy(i915, ctx);
> + }
> +
> + /* Apply some background context to speed up hang detection */
> + bg = gem_context_create(i915);
> + set_engines(i915, bg, ci, count);
> + for (int i = 0; i < count; i++) {
> + struct drm_i915_gem_execbuffer2 execbuf = {
> + .buffers_ptr = to_user_pointer(&batch),
> + .buffer_count = 1,
> + .flags = i,
> + .rsvd1 = bg,
> + };
> + gem_execbuf(i915, &execbuf);
> + }
> + gem_context_destroy(i915, bg);
> +
> + for (int i = 0; i < count; i++) {
> + struct client *c = &client[i];
> +
> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> + gem_sync(i915, c->spin[0]->handle);
> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> + -EIO);
> +
> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> + gem_sync(i915, c->spin[1]->handle);
> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> + -EIO);
> +
> + igt_spin_free(i915, c->spin[0]);
> + igt_spin_free(i915, c->spin[1]);
> + }
> + free(client);
> + }
> +
> + gem_close(i915, batch.handle);
> + gem_quiescent_gpu(i915);
> +}
> +
> static void smoketest(int i915, int timeout)
> {
> struct drm_i915_gem_exec_object2 batch[2] = {
> @@ -1486,4 +1583,12 @@ igt_main
> igt_fixture {
> igt_stop_hang_detector();
> }
> +
> + igt_subtest("hang") {
> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> +
> + hangme(i915);
> +
> + igt_disallow_hang(i915, hang);
> + }
> }
>
Looks good. But do we need some core helpers to figure out when preempt
timeout is compiled out?
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 13:02 ` Tvrtko Ursulin
0 siblings, 0 replies; 23+ messages in thread
From: Tvrtko Ursulin @ 2019-11-15 13:02 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 14/11/2019 19:15, Chris Wilson wrote:
> Although a virtual engine itself has no hang detection; that is on the
> underlying physical engines, it does provide a unique means for clients
> to try and break the system. Try and break it before they do.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> 1 file changed, 105 insertions(+)
>
> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> index 70c4529b4..86028cfdd 100644
> --- a/tests/i915/gem_exec_balancer.c
> +++ b/tests/i915/gem_exec_balancer.c
> @@ -24,6 +24,7 @@
> #include <sched.h>
>
> #include "igt.h"
> +#include "igt_gt.h"
> #include "igt_perf.h"
> #include "i915/gem_ring.h"
> #include "sw_sync.h"
> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> gem_quiescent_gpu(i915);
> }
>
> +static void set_unbannable(int i915, uint32_t ctx)
> +{
> + struct drm_i915_gem_context_param p = {
> + .ctx_id = ctx,
> + .param = I915_CONTEXT_PARAM_BANNABLE,
> + };
> +
> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> +}
> +
> +static void hangme(int i915)
> +{
> + struct drm_i915_gem_exec_object2 batch = {
> + .handle = batch_create(i915),
> + };
> +
> + /*
> + * Fill the available engines with hanging virtual engines and verify
> + * that execution continues onto the second batch.
> + */
> +
> + for (int class = 1; class < 32; class++) {
> + struct i915_engine_class_instance *ci;
> + struct client {
> + igt_spin_t *spin[2];
> + } *client;
> + unsigned int count;
> + uint32_t bg;
> +
> + ci = list_engines(i915, 1u << class, &count);
> + if (!ci)
> + continue;
> +
> + if (count < 2) {
> + free(ci);
> + continue;
> + }
> +
> + client = malloc(sizeof(*client) * count);
> + igt_assert(client);
> +
> + for (int i = 0; i < count; i++) {
> + uint32_t ctx = gem_context_create(i915);
> + struct client *c = &client[i];
> + unsigned int flags;
> +
> + set_unbannable(i915, ctx);
> + set_load_balancer(i915, ctx, ci, count, NULL);
> +
> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> + c->spin[j] = igt_spin_new(i915, ctx,
> + .flags = flags);
> + flags = IGT_SPIN_FENCE_OUT;
> + }
> +
> + gem_context_destroy(i915, ctx);
> + }
> +
> + /* Apply some background context to speed up hang detection */
> + bg = gem_context_create(i915);
> + set_engines(i915, bg, ci, count);
> + for (int i = 0; i < count; i++) {
> + struct drm_i915_gem_execbuffer2 execbuf = {
> + .buffers_ptr = to_user_pointer(&batch),
> + .buffer_count = 1,
> + .flags = i,
> + .rsvd1 = bg,
> + };
> + gem_execbuf(i915, &execbuf);
> + }
> + gem_context_destroy(i915, bg);
> +
> + for (int i = 0; i < count; i++) {
> + struct client *c = &client[i];
> +
> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> + gem_sync(i915, c->spin[0]->handle);
> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> + -EIO);
> +
> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> + gem_sync(i915, c->spin[1]->handle);
> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> + -EIO);
> +
> + igt_spin_free(i915, c->spin[0]);
> + igt_spin_free(i915, c->spin[1]);
> + }
> + free(client);
> + }
> +
> + gem_close(i915, batch.handle);
> + gem_quiescent_gpu(i915);
> +}
> +
> static void smoketest(int i915, int timeout)
> {
> struct drm_i915_gem_exec_object2 batch[2] = {
> @@ -1486,4 +1583,12 @@ igt_main
> igt_fixture {
> igt_stop_hang_detector();
> }
> +
> + igt_subtest("hang") {
> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> +
> + hangme(i915);
> +
> + igt_disallow_hang(i915, hang);
> + }
> }
>
Looks good. But do we need some core helpers to figure out when preempt
timeout is compiled out?
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [igt-dev] [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 13:02 ` Tvrtko Ursulin
0 siblings, 0 replies; 23+ messages in thread
From: Tvrtko Ursulin @ 2019-11-15 13:02 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 14/11/2019 19:15, Chris Wilson wrote:
> Although a virtual engine itself has no hang detection; that is on the
> underlying physical engines, it does provide a unique means for clients
> to try and break the system. Try and break it before they do.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> 1 file changed, 105 insertions(+)
>
> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> index 70c4529b4..86028cfdd 100644
> --- a/tests/i915/gem_exec_balancer.c
> +++ b/tests/i915/gem_exec_balancer.c
> @@ -24,6 +24,7 @@
> #include <sched.h>
>
> #include "igt.h"
> +#include "igt_gt.h"
> #include "igt_perf.h"
> #include "i915/gem_ring.h"
> #include "sw_sync.h"
> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> gem_quiescent_gpu(i915);
> }
>
> +static void set_unbannable(int i915, uint32_t ctx)
> +{
> + struct drm_i915_gem_context_param p = {
> + .ctx_id = ctx,
> + .param = I915_CONTEXT_PARAM_BANNABLE,
> + };
> +
> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> +}
> +
> +static void hangme(int i915)
> +{
> + struct drm_i915_gem_exec_object2 batch = {
> + .handle = batch_create(i915),
> + };
> +
> + /*
> + * Fill the available engines with hanging virtual engines and verify
> + * that execution continues onto the second batch.
> + */
> +
> + for (int class = 1; class < 32; class++) {
> + struct i915_engine_class_instance *ci;
> + struct client {
> + igt_spin_t *spin[2];
> + } *client;
> + unsigned int count;
> + uint32_t bg;
> +
> + ci = list_engines(i915, 1u << class, &count);
> + if (!ci)
> + continue;
> +
> + if (count < 2) {
> + free(ci);
> + continue;
> + }
> +
> + client = malloc(sizeof(*client) * count);
> + igt_assert(client);
> +
> + for (int i = 0; i < count; i++) {
> + uint32_t ctx = gem_context_create(i915);
> + struct client *c = &client[i];
> + unsigned int flags;
> +
> + set_unbannable(i915, ctx);
> + set_load_balancer(i915, ctx, ci, count, NULL);
> +
> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> + c->spin[j] = igt_spin_new(i915, ctx,
> + .flags = flags);
> + flags = IGT_SPIN_FENCE_OUT;
> + }
> +
> + gem_context_destroy(i915, ctx);
> + }
> +
> + /* Apply some background context to speed up hang detection */
> + bg = gem_context_create(i915);
> + set_engines(i915, bg, ci, count);
> + for (int i = 0; i < count; i++) {
> + struct drm_i915_gem_execbuffer2 execbuf = {
> + .buffers_ptr = to_user_pointer(&batch),
> + .buffer_count = 1,
> + .flags = i,
> + .rsvd1 = bg,
> + };
> + gem_execbuf(i915, &execbuf);
> + }
> + gem_context_destroy(i915, bg);
> +
> + for (int i = 0; i < count; i++) {
> + struct client *c = &client[i];
> +
> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> + gem_sync(i915, c->spin[0]->handle);
> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> + -EIO);
> +
> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> + gem_sync(i915, c->spin[1]->handle);
> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> + -EIO);
> +
> + igt_spin_free(i915, c->spin[0]);
> + igt_spin_free(i915, c->spin[1]);
> + }
> + free(client);
> + }
> +
> + gem_close(i915, batch.handle);
> + gem_quiescent_gpu(i915);
> +}
> +
> static void smoketest(int i915, int timeout)
> {
> struct drm_i915_gem_exec_object2 batch[2] = {
> @@ -1486,4 +1583,12 @@ igt_main
> igt_fixture {
> igt_stop_hang_detector();
> }
> +
> + igt_subtest("hang") {
> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> +
> + hangme(i915);
> +
> + igt_disallow_hang(i915, hang);
> + }
> }
>
Looks good. But do we need some core helpers to figure out when preempt
timeout is compiled out?
Regards,
Tvrtko
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 13:09 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-15 13:09 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
>
> On 14/11/2019 19:15, Chris Wilson wrote:
> > Although a virtual engine itself has no hang detection; that is on the
> > underlying physical engines, it does provide a unique means for clients
> > to try and break the system. Try and break it before they do.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> > tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> > 1 file changed, 105 insertions(+)
> >
> > diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> > index 70c4529b4..86028cfdd 100644
> > --- a/tests/i915/gem_exec_balancer.c
> > +++ b/tests/i915/gem_exec_balancer.c
> > @@ -24,6 +24,7 @@
> > #include <sched.h>
> >
> > #include "igt.h"
> > +#include "igt_gt.h"
> > #include "igt_perf.h"
> > #include "i915/gem_ring.h"
> > #include "sw_sync.h"
> > @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> > gem_quiescent_gpu(i915);
> > }
> >
> > +static void set_unbannable(int i915, uint32_t ctx)
> > +{
> > + struct drm_i915_gem_context_param p = {
> > + .ctx_id = ctx,
> > + .param = I915_CONTEXT_PARAM_BANNABLE,
> > + };
> > +
> > + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> > +}
> > +
> > +static void hangme(int i915)
> > +{
> > + struct drm_i915_gem_exec_object2 batch = {
> > + .handle = batch_create(i915),
> > + };
> > +
> > + /*
> > + * Fill the available engines with hanging virtual engines and verify
> > + * that execution continues onto the second batch.
> > + */
> > +
> > + for (int class = 1; class < 32; class++) {
> > + struct i915_engine_class_instance *ci;
> > + struct client {
> > + igt_spin_t *spin[2];
> > + } *client;
> > + unsigned int count;
> > + uint32_t bg;
> > +
> > + ci = list_engines(i915, 1u << class, &count);
> > + if (!ci)
> > + continue;
> > +
> > + if (count < 2) {
> > + free(ci);
> > + continue;
> > + }
> > +
> > + client = malloc(sizeof(*client) * count);
> > + igt_assert(client);
> > +
> > + for (int i = 0; i < count; i++) {
> > + uint32_t ctx = gem_context_create(i915);
> > + struct client *c = &client[i];
> > + unsigned int flags;
> > +
> > + set_unbannable(i915, ctx);
> > + set_load_balancer(i915, ctx, ci, count, NULL);
> > +
> > + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> > + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> > + c->spin[j] = igt_spin_new(i915, ctx,
> > + .flags = flags);
> > + flags = IGT_SPIN_FENCE_OUT;
> > + }
> > +
> > + gem_context_destroy(i915, ctx);
> > + }
> > +
> > + /* Apply some background context to speed up hang detection */
> > + bg = gem_context_create(i915);
> > + set_engines(i915, bg, ci, count);
> > + for (int i = 0; i < count; i++) {
> > + struct drm_i915_gem_execbuffer2 execbuf = {
> > + .buffers_ptr = to_user_pointer(&batch),
> > + .buffer_count = 1,
> > + .flags = i,
> > + .rsvd1 = bg,
> > + };
> > + gem_execbuf(i915, &execbuf);
> > + }
> > + gem_context_destroy(i915, bg);
> > +
> > + for (int i = 0; i < count; i++) {
> > + struct client *c = &client[i];
> > +
> > + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> > + gem_sync(i915, c->spin[0]->handle);
> > + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> > + -EIO);
> > +
> > + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> > + gem_sync(i915, c->spin[1]->handle);
> > + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> > + -EIO);
> > +
> > + igt_spin_free(i915, c->spin[0]);
> > + igt_spin_free(i915, c->spin[1]);
> > + }
> > + free(client);
> > + }
> > +
> > + gem_close(i915, batch.handle);
> > + gem_quiescent_gpu(i915);
> > +}
> > +
> > static void smoketest(int i915, int timeout)
> > {
> > struct drm_i915_gem_exec_object2 batch[2] = {
> > @@ -1486,4 +1583,12 @@ igt_main
> > igt_fixture {
> > igt_stop_hang_detector();
> > }
> > +
> > + igt_subtest("hang") {
> > + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> > +
> > + hangme(i915);
> > +
> > + igt_disallow_hang(i915, hang);
> > + }
> > }
> >
>
> Looks good. But do we need some core helpers to figure out when preempt
> timeout is compiled out?
It should still work the same, but slower; 10s hang detection rather
than ~200ms.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 13:09 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-15 13:09 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
>
> On 14/11/2019 19:15, Chris Wilson wrote:
> > Although a virtual engine itself has no hang detection; that is on the
> > underlying physical engines, it does provide a unique means for clients
> > to try and break the system. Try and break it before they do.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> > tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> > 1 file changed, 105 insertions(+)
> >
> > diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> > index 70c4529b4..86028cfdd 100644
> > --- a/tests/i915/gem_exec_balancer.c
> > +++ b/tests/i915/gem_exec_balancer.c
> > @@ -24,6 +24,7 @@
> > #include <sched.h>
> >
> > #include "igt.h"
> > +#include "igt_gt.h"
> > #include "igt_perf.h"
> > #include "i915/gem_ring.h"
> > #include "sw_sync.h"
> > @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> > gem_quiescent_gpu(i915);
> > }
> >
> > +static void set_unbannable(int i915, uint32_t ctx)
> > +{
> > + struct drm_i915_gem_context_param p = {
> > + .ctx_id = ctx,
> > + .param = I915_CONTEXT_PARAM_BANNABLE,
> > + };
> > +
> > + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> > +}
> > +
> > +static void hangme(int i915)
> > +{
> > + struct drm_i915_gem_exec_object2 batch = {
> > + .handle = batch_create(i915),
> > + };
> > +
> > + /*
> > + * Fill the available engines with hanging virtual engines and verify
> > + * that execution continues onto the second batch.
> > + */
> > +
> > + for (int class = 1; class < 32; class++) {
> > + struct i915_engine_class_instance *ci;
> > + struct client {
> > + igt_spin_t *spin[2];
> > + } *client;
> > + unsigned int count;
> > + uint32_t bg;
> > +
> > + ci = list_engines(i915, 1u << class, &count);
> > + if (!ci)
> > + continue;
> > +
> > + if (count < 2) {
> > + free(ci);
> > + continue;
> > + }
> > +
> > + client = malloc(sizeof(*client) * count);
> > + igt_assert(client);
> > +
> > + for (int i = 0; i < count; i++) {
> > + uint32_t ctx = gem_context_create(i915);
> > + struct client *c = &client[i];
> > + unsigned int flags;
> > +
> > + set_unbannable(i915, ctx);
> > + set_load_balancer(i915, ctx, ci, count, NULL);
> > +
> > + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> > + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> > + c->spin[j] = igt_spin_new(i915, ctx,
> > + .flags = flags);
> > + flags = IGT_SPIN_FENCE_OUT;
> > + }
> > +
> > + gem_context_destroy(i915, ctx);
> > + }
> > +
> > + /* Apply some background context to speed up hang detection */
> > + bg = gem_context_create(i915);
> > + set_engines(i915, bg, ci, count);
> > + for (int i = 0; i < count; i++) {
> > + struct drm_i915_gem_execbuffer2 execbuf = {
> > + .buffers_ptr = to_user_pointer(&batch),
> > + .buffer_count = 1,
> > + .flags = i,
> > + .rsvd1 = bg,
> > + };
> > + gem_execbuf(i915, &execbuf);
> > + }
> > + gem_context_destroy(i915, bg);
> > +
> > + for (int i = 0; i < count; i++) {
> > + struct client *c = &client[i];
> > +
> > + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> > + gem_sync(i915, c->spin[0]->handle);
> > + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> > + -EIO);
> > +
> > + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> > + gem_sync(i915, c->spin[1]->handle);
> > + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> > + -EIO);
> > +
> > + igt_spin_free(i915, c->spin[0]);
> > + igt_spin_free(i915, c->spin[1]);
> > + }
> > + free(client);
> > + }
> > +
> > + gem_close(i915, batch.handle);
> > + gem_quiescent_gpu(i915);
> > +}
> > +
> > static void smoketest(int i915, int timeout)
> > {
> > struct drm_i915_gem_exec_object2 batch[2] = {
> > @@ -1486,4 +1583,12 @@ igt_main
> > igt_fixture {
> > igt_stop_hang_detector();
> > }
> > +
> > + igt_subtest("hang") {
> > + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> > +
> > + hangme(i915);
> > +
> > + igt_disallow_hang(i915, hang);
> > + }
> > }
> >
>
> Looks good. But do we need some core helpers to figure out when preempt
> timeout is compiled out?
It should still work the same, but slower; 10s hang detection rather
than ~200ms.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [igt-dev] [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 13:09 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-15 13:09 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
>
> On 14/11/2019 19:15, Chris Wilson wrote:
> > Although a virtual engine itself has no hang detection; that is on the
> > underlying physical engines, it does provide a unique means for clients
> > to try and break the system. Try and break it before they do.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> > tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> > 1 file changed, 105 insertions(+)
> >
> > diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> > index 70c4529b4..86028cfdd 100644
> > --- a/tests/i915/gem_exec_balancer.c
> > +++ b/tests/i915/gem_exec_balancer.c
> > @@ -24,6 +24,7 @@
> > #include <sched.h>
> >
> > #include "igt.h"
> > +#include "igt_gt.h"
> > #include "igt_perf.h"
> > #include "i915/gem_ring.h"
> > #include "sw_sync.h"
> > @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> > gem_quiescent_gpu(i915);
> > }
> >
> > +static void set_unbannable(int i915, uint32_t ctx)
> > +{
> > + struct drm_i915_gem_context_param p = {
> > + .ctx_id = ctx,
> > + .param = I915_CONTEXT_PARAM_BANNABLE,
> > + };
> > +
> > + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> > +}
> > +
> > +static void hangme(int i915)
> > +{
> > + struct drm_i915_gem_exec_object2 batch = {
> > + .handle = batch_create(i915),
> > + };
> > +
> > + /*
> > + * Fill the available engines with hanging virtual engines and verify
> > + * that execution continues onto the second batch.
> > + */
> > +
> > + for (int class = 1; class < 32; class++) {
> > + struct i915_engine_class_instance *ci;
> > + struct client {
> > + igt_spin_t *spin[2];
> > + } *client;
> > + unsigned int count;
> > + uint32_t bg;
> > +
> > + ci = list_engines(i915, 1u << class, &count);
> > + if (!ci)
> > + continue;
> > +
> > + if (count < 2) {
> > + free(ci);
> > + continue;
> > + }
> > +
> > + client = malloc(sizeof(*client) * count);
> > + igt_assert(client);
> > +
> > + for (int i = 0; i < count; i++) {
> > + uint32_t ctx = gem_context_create(i915);
> > + struct client *c = &client[i];
> > + unsigned int flags;
> > +
> > + set_unbannable(i915, ctx);
> > + set_load_balancer(i915, ctx, ci, count, NULL);
> > +
> > + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> > + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> > + c->spin[j] = igt_spin_new(i915, ctx,
> > + .flags = flags);
> > + flags = IGT_SPIN_FENCE_OUT;
> > + }
> > +
> > + gem_context_destroy(i915, ctx);
> > + }
> > +
> > + /* Apply some background context to speed up hang detection */
> > + bg = gem_context_create(i915);
> > + set_engines(i915, bg, ci, count);
> > + for (int i = 0; i < count; i++) {
> > + struct drm_i915_gem_execbuffer2 execbuf = {
> > + .buffers_ptr = to_user_pointer(&batch),
> > + .buffer_count = 1,
> > + .flags = i,
> > + .rsvd1 = bg,
> > + };
> > + gem_execbuf(i915, &execbuf);
> > + }
> > + gem_context_destroy(i915, bg);
> > +
> > + for (int i = 0; i < count; i++) {
> > + struct client *c = &client[i];
> > +
> > + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> > + gem_sync(i915, c->spin[0]->handle);
> > + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> > + -EIO);
> > +
> > + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> > + gem_sync(i915, c->spin[1]->handle);
> > + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> > + -EIO);
> > +
> > + igt_spin_free(i915, c->spin[0]);
> > + igt_spin_free(i915, c->spin[1]);
> > + }
> > + free(client);
> > + }
> > +
> > + gem_close(i915, batch.handle);
> > + gem_quiescent_gpu(i915);
> > +}
> > +
> > static void smoketest(int i915, int timeout)
> > {
> > struct drm_i915_gem_exec_object2 batch[2] = {
> > @@ -1486,4 +1583,12 @@ igt_main
> > igt_fixture {
> > igt_stop_hang_detector();
> > }
> > +
> > + igt_subtest("hang") {
> > + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> > +
> > + hangme(i915);
> > +
> > + igt_disallow_hang(i915, hang);
> > + }
> > }
> >
>
> Looks good. But do we need some core helpers to figure out when preempt
> timeout is compiled out?
It should still work the same, but slower; 10s hang detection rather
than ~200ms.
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 14:52 ` Tvrtko Ursulin
0 siblings, 0 replies; 23+ messages in thread
From: Tvrtko Ursulin @ 2019-11-15 14:52 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 15/11/2019 13:09, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
>>
>> On 14/11/2019 19:15, Chris Wilson wrote:
>>> Although a virtual engine itself has no hang detection; that is on the
>>> underlying physical engines, it does provide a unique means for clients
>>> to try and break the system. Try and break it before they do.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>> ---
>>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
>>> 1 file changed, 105 insertions(+)
>>>
>>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
>>> index 70c4529b4..86028cfdd 100644
>>> --- a/tests/i915/gem_exec_balancer.c
>>> +++ b/tests/i915/gem_exec_balancer.c
>>> @@ -24,6 +24,7 @@
>>> #include <sched.h>
>>>
>>> #include "igt.h"
>>> +#include "igt_gt.h"
>>> #include "igt_perf.h"
>>> #include "i915/gem_ring.h"
>>> #include "sw_sync.h"
>>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
>>> gem_quiescent_gpu(i915);
>>> }
>>>
>>> +static void set_unbannable(int i915, uint32_t ctx)
>>> +{
>>> + struct drm_i915_gem_context_param p = {
>>> + .ctx_id = ctx,
>>> + .param = I915_CONTEXT_PARAM_BANNABLE,
>>> + };
>>> +
>>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
>>> +}
>>> +
>>> +static void hangme(int i915)
>>> +{
>>> + struct drm_i915_gem_exec_object2 batch = {
>>> + .handle = batch_create(i915),
>>> + };
>>> +
>>> + /*
>>> + * Fill the available engines with hanging virtual engines and verify
>>> + * that execution continues onto the second batch.
>>> + */
>>> +
>>> + for (int class = 1; class < 32; class++) {
>>> + struct i915_engine_class_instance *ci;
>>> + struct client {
>>> + igt_spin_t *spin[2];
>>> + } *client;
>>> + unsigned int count;
>>> + uint32_t bg;
>>> +
>>> + ci = list_engines(i915, 1u << class, &count);
>>> + if (!ci)
>>> + continue;
>>> +
>>> + if (count < 2) {
>>> + free(ci);
>>> + continue;
>>> + }
>>> +
>>> + client = malloc(sizeof(*client) * count);
>>> + igt_assert(client);
>>> +
>>> + for (int i = 0; i < count; i++) {
>>> + uint32_t ctx = gem_context_create(i915);
>>> + struct client *c = &client[i];
>>> + unsigned int flags;
>>> +
>>> + set_unbannable(i915, ctx);
>>> + set_load_balancer(i915, ctx, ci, count, NULL);
>>> +
>>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
>>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
>>> + c->spin[j] = igt_spin_new(i915, ctx,
>>> + .flags = flags);
>>> + flags = IGT_SPIN_FENCE_OUT;
>>> + }
>>> +
>>> + gem_context_destroy(i915, ctx);
>>> + }
>>> +
>>> + /* Apply some background context to speed up hang detection */
>>> + bg = gem_context_create(i915);
>>> + set_engines(i915, bg, ci, count);
>>> + for (int i = 0; i < count; i++) {
>>> + struct drm_i915_gem_execbuffer2 execbuf = {
>>> + .buffers_ptr = to_user_pointer(&batch),
>>> + .buffer_count = 1,
>>> + .flags = i,
>>> + .rsvd1 = bg,
>>> + };
>>> + gem_execbuf(i915, &execbuf);
>>> + }
>>> + gem_context_destroy(i915, bg);
>>> +
>>> + for (int i = 0; i < count; i++) {
>>> + struct client *c = &client[i];
>>> +
>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
>>> + gem_sync(i915, c->spin[0]->handle);
>>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
>>> + -EIO);
>>> +
>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
>>> + gem_sync(i915, c->spin[1]->handle);
>>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
>>> + -EIO);
>>> +
>>> + igt_spin_free(i915, c->spin[0]);
>>> + igt_spin_free(i915, c->spin[1]);
>>> + }
>>> + free(client);
>>> + }
>>> +
>>> + gem_close(i915, batch.handle);
>>> + gem_quiescent_gpu(i915);
>>> +}
>>> +
>>> static void smoketest(int i915, int timeout)
>>> {
>>> struct drm_i915_gem_exec_object2 batch[2] = {
>>> @@ -1486,4 +1583,12 @@ igt_main
>>> igt_fixture {
>>> igt_stop_hang_detector();
>>> }
>>> +
>>> + igt_subtest("hang") {
>>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
>>> +
>>> + hangme(i915);
>>> +
>>> + igt_disallow_hang(i915, hang);
>>> + }
>>> }
>>>
>>
>> Looks good. But do we need some core helpers to figure out when preempt
>> timeout is compiled out?
>
> It should still work the same, but slower; 10s hang detection rather
> than ~200ms.
You are talking about old hangcheck? I was thinking about all the new
Kconfigs compiled out. No heartbeats, no preemption timeout. Still works?
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 14:52 ` Tvrtko Ursulin
0 siblings, 0 replies; 23+ messages in thread
From: Tvrtko Ursulin @ 2019-11-15 14:52 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 15/11/2019 13:09, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
>>
>> On 14/11/2019 19:15, Chris Wilson wrote:
>>> Although a virtual engine itself has no hang detection; that is on the
>>> underlying physical engines, it does provide a unique means for clients
>>> to try and break the system. Try and break it before they do.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>> ---
>>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
>>> 1 file changed, 105 insertions(+)
>>>
>>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
>>> index 70c4529b4..86028cfdd 100644
>>> --- a/tests/i915/gem_exec_balancer.c
>>> +++ b/tests/i915/gem_exec_balancer.c
>>> @@ -24,6 +24,7 @@
>>> #include <sched.h>
>>>
>>> #include "igt.h"
>>> +#include "igt_gt.h"
>>> #include "igt_perf.h"
>>> #include "i915/gem_ring.h"
>>> #include "sw_sync.h"
>>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
>>> gem_quiescent_gpu(i915);
>>> }
>>>
>>> +static void set_unbannable(int i915, uint32_t ctx)
>>> +{
>>> + struct drm_i915_gem_context_param p = {
>>> + .ctx_id = ctx,
>>> + .param = I915_CONTEXT_PARAM_BANNABLE,
>>> + };
>>> +
>>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
>>> +}
>>> +
>>> +static void hangme(int i915)
>>> +{
>>> + struct drm_i915_gem_exec_object2 batch = {
>>> + .handle = batch_create(i915),
>>> + };
>>> +
>>> + /*
>>> + * Fill the available engines with hanging virtual engines and verify
>>> + * that execution continues onto the second batch.
>>> + */
>>> +
>>> + for (int class = 1; class < 32; class++) {
>>> + struct i915_engine_class_instance *ci;
>>> + struct client {
>>> + igt_spin_t *spin[2];
>>> + } *client;
>>> + unsigned int count;
>>> + uint32_t bg;
>>> +
>>> + ci = list_engines(i915, 1u << class, &count);
>>> + if (!ci)
>>> + continue;
>>> +
>>> + if (count < 2) {
>>> + free(ci);
>>> + continue;
>>> + }
>>> +
>>> + client = malloc(sizeof(*client) * count);
>>> + igt_assert(client);
>>> +
>>> + for (int i = 0; i < count; i++) {
>>> + uint32_t ctx = gem_context_create(i915);
>>> + struct client *c = &client[i];
>>> + unsigned int flags;
>>> +
>>> + set_unbannable(i915, ctx);
>>> + set_load_balancer(i915, ctx, ci, count, NULL);
>>> +
>>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
>>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
>>> + c->spin[j] = igt_spin_new(i915, ctx,
>>> + .flags = flags);
>>> + flags = IGT_SPIN_FENCE_OUT;
>>> + }
>>> +
>>> + gem_context_destroy(i915, ctx);
>>> + }
>>> +
>>> + /* Apply some background context to speed up hang detection */
>>> + bg = gem_context_create(i915);
>>> + set_engines(i915, bg, ci, count);
>>> + for (int i = 0; i < count; i++) {
>>> + struct drm_i915_gem_execbuffer2 execbuf = {
>>> + .buffers_ptr = to_user_pointer(&batch),
>>> + .buffer_count = 1,
>>> + .flags = i,
>>> + .rsvd1 = bg,
>>> + };
>>> + gem_execbuf(i915, &execbuf);
>>> + }
>>> + gem_context_destroy(i915, bg);
>>> +
>>> + for (int i = 0; i < count; i++) {
>>> + struct client *c = &client[i];
>>> +
>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
>>> + gem_sync(i915, c->spin[0]->handle);
>>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
>>> + -EIO);
>>> +
>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
>>> + gem_sync(i915, c->spin[1]->handle);
>>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
>>> + -EIO);
>>> +
>>> + igt_spin_free(i915, c->spin[0]);
>>> + igt_spin_free(i915, c->spin[1]);
>>> + }
>>> + free(client);
>>> + }
>>> +
>>> + gem_close(i915, batch.handle);
>>> + gem_quiescent_gpu(i915);
>>> +}
>>> +
>>> static void smoketest(int i915, int timeout)
>>> {
>>> struct drm_i915_gem_exec_object2 batch[2] = {
>>> @@ -1486,4 +1583,12 @@ igt_main
>>> igt_fixture {
>>> igt_stop_hang_detector();
>>> }
>>> +
>>> + igt_subtest("hang") {
>>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
>>> +
>>> + hangme(i915);
>>> +
>>> + igt_disallow_hang(i915, hang);
>>> + }
>>> }
>>>
>>
>> Looks good. But do we need some core helpers to figure out when preempt
>> timeout is compiled out?
>
> It should still work the same, but slower; 10s hang detection rather
> than ~200ms.
You are talking about old hangcheck? I was thinking about all the new
Kconfigs compiled out. No heartbeats, no preemption timeout. Still works?
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 14:58 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-15 14:58 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Tvrtko Ursulin (2019-11-15 14:52:16)
>
> On 15/11/2019 13:09, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
> >>
> >> On 14/11/2019 19:15, Chris Wilson wrote:
> >>> Although a virtual engine itself has no hang detection; that is on the
> >>> underlying physical engines, it does provide a unique means for clients
> >>> to try and break the system. Try and break it before they do.
> >>>
> >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> >>> ---
> >>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> >>> 1 file changed, 105 insertions(+)
> >>>
> >>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> >>> index 70c4529b4..86028cfdd 100644
> >>> --- a/tests/i915/gem_exec_balancer.c
> >>> +++ b/tests/i915/gem_exec_balancer.c
> >>> @@ -24,6 +24,7 @@
> >>> #include <sched.h>
> >>>
> >>> #include "igt.h"
> >>> +#include "igt_gt.h"
> >>> #include "igt_perf.h"
> >>> #include "i915/gem_ring.h"
> >>> #include "sw_sync.h"
> >>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> >>> gem_quiescent_gpu(i915);
> >>> }
> >>>
> >>> +static void set_unbannable(int i915, uint32_t ctx)
> >>> +{
> >>> + struct drm_i915_gem_context_param p = {
> >>> + .ctx_id = ctx,
> >>> + .param = I915_CONTEXT_PARAM_BANNABLE,
> >>> + };
> >>> +
> >>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> >>> +}
> >>> +
> >>> +static void hangme(int i915)
> >>> +{
> >>> + struct drm_i915_gem_exec_object2 batch = {
> >>> + .handle = batch_create(i915),
> >>> + };
> >>> +
> >>> + /*
> >>> + * Fill the available engines with hanging virtual engines and verify
> >>> + * that execution continues onto the second batch.
> >>> + */
> >>> +
> >>> + for (int class = 1; class < 32; class++) {
> >>> + struct i915_engine_class_instance *ci;
> >>> + struct client {
> >>> + igt_spin_t *spin[2];
> >>> + } *client;
> >>> + unsigned int count;
> >>> + uint32_t bg;
> >>> +
> >>> + ci = list_engines(i915, 1u << class, &count);
> >>> + if (!ci)
> >>> + continue;
> >>> +
> >>> + if (count < 2) {
> >>> + free(ci);
> >>> + continue;
> >>> + }
> >>> +
> >>> + client = malloc(sizeof(*client) * count);
> >>> + igt_assert(client);
> >>> +
> >>> + for (int i = 0; i < count; i++) {
> >>> + uint32_t ctx = gem_context_create(i915);
> >>> + struct client *c = &client[i];
> >>> + unsigned int flags;
> >>> +
> >>> + set_unbannable(i915, ctx);
> >>> + set_load_balancer(i915, ctx, ci, count, NULL);
> >>> +
> >>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> >>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> >>> + c->spin[j] = igt_spin_new(i915, ctx,
> >>> + .flags = flags);
> >>> + flags = IGT_SPIN_FENCE_OUT;
> >>> + }
> >>> +
> >>> + gem_context_destroy(i915, ctx);
> >>> + }
> >>> +
> >>> + /* Apply some background context to speed up hang detection */
> >>> + bg = gem_context_create(i915);
> >>> + set_engines(i915, bg, ci, count);
> >>> + for (int i = 0; i < count; i++) {
> >>> + struct drm_i915_gem_execbuffer2 execbuf = {
> >>> + .buffers_ptr = to_user_pointer(&batch),
> >>> + .buffer_count = 1,
> >>> + .flags = i,
> >>> + .rsvd1 = bg,
> >>> + };
> >>> + gem_execbuf(i915, &execbuf);
> >>> + }
> >>> + gem_context_destroy(i915, bg);
> >>> +
> >>> + for (int i = 0; i < count; i++) {
> >>> + struct client *c = &client[i];
> >>> +
> >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> >>> + gem_sync(i915, c->spin[0]->handle);
> >>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> >>> + -EIO);
> >>> +
> >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> >>> + gem_sync(i915, c->spin[1]->handle);
> >>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> >>> + -EIO);
> >>> +
> >>> + igt_spin_free(i915, c->spin[0]);
> >>> + igt_spin_free(i915, c->spin[1]);
> >>> + }
> >>> + free(client);
> >>> + }
> >>> +
> >>> + gem_close(i915, batch.handle);
> >>> + gem_quiescent_gpu(i915);
> >>> +}
> >>> +
> >>> static void smoketest(int i915, int timeout)
> >>> {
> >>> struct drm_i915_gem_exec_object2 batch[2] = {
> >>> @@ -1486,4 +1583,12 @@ igt_main
> >>> igt_fixture {
> >>> igt_stop_hang_detector();
> >>> }
> >>> +
> >>> + igt_subtest("hang") {
> >>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> >>> +
> >>> + hangme(i915);
> >>> +
> >>> + igt_disallow_hang(i915, hang);
> >>> + }
> >>> }
> >>>
> >>
> >> Looks good. But do we need some core helpers to figure out when preempt
> >> timeout is compiled out?
> >
> > It should still work the same, but slower; 10s hang detection rather
> > than ~200ms.
>
> You are talking about old hangcheck? I was thinking about all new
> Kconfig's compiled out. No heartbeats, no preemption timeout. Still works?
Works even faster. :)
The spinners then get killed when the contexts are closed (default is
non-persistent contexts if you disable heartbeats entirely). The
challenge is really on the per-engine heartbeat controls to make sure we
kick off the dead contexts, but that's for the future.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 14:58 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-15 14:58 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Tvrtko Ursulin (2019-11-15 14:52:16)
>
> On 15/11/2019 13:09, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
> >>
> >> On 14/11/2019 19:15, Chris Wilson wrote:
> >>> Although a virtual engine itself has no hang detection; that is on the
> >>> underlying physical engines, it does provide a unique means for clients
> >>> to try and break the system. Try and break it before they do.
> >>>
> >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> >>> ---
> >>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> >>> 1 file changed, 105 insertions(+)
> >>>
> >>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> >>> index 70c4529b4..86028cfdd 100644
> >>> --- a/tests/i915/gem_exec_balancer.c
> >>> +++ b/tests/i915/gem_exec_balancer.c
> >>> @@ -24,6 +24,7 @@
> >>> #include <sched.h>
> >>>
> >>> #include "igt.h"
> >>> +#include "igt_gt.h"
> >>> #include "igt_perf.h"
> >>> #include "i915/gem_ring.h"
> >>> #include "sw_sync.h"
> >>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> >>> gem_quiescent_gpu(i915);
> >>> }
> >>>
> >>> +static void set_unbannable(int i915, uint32_t ctx)
> >>> +{
> >>> + struct drm_i915_gem_context_param p = {
> >>> + .ctx_id = ctx,
> >>> + .param = I915_CONTEXT_PARAM_BANNABLE,
> >>> + };
> >>> +
> >>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> >>> +}
> >>> +
> >>> +static void hangme(int i915)
> >>> +{
> >>> + struct drm_i915_gem_exec_object2 batch = {
> >>> + .handle = batch_create(i915),
> >>> + };
> >>> +
> >>> + /*
> >>> + * Fill the available engines with hanging virtual engines and verify
> >>> + * that execution continues onto the second batch.
> >>> + */
> >>> +
> >>> + for (int class = 1; class < 32; class++) {
> >>> + struct i915_engine_class_instance *ci;
> >>> + struct client {
> >>> + igt_spin_t *spin[2];
> >>> + } *client;
> >>> + unsigned int count;
> >>> + uint32_t bg;
> >>> +
> >>> + ci = list_engines(i915, 1u << class, &count);
> >>> + if (!ci)
> >>> + continue;
> >>> +
> >>> + if (count < 2) {
> >>> + free(ci);
> >>> + continue;
> >>> + }
> >>> +
> >>> + client = malloc(sizeof(*client) * count);
> >>> + igt_assert(client);
> >>> +
> >>> + for (int i = 0; i < count; i++) {
> >>> + uint32_t ctx = gem_context_create(i915);
> >>> + struct client *c = &client[i];
> >>> + unsigned int flags;
> >>> +
> >>> + set_unbannable(i915, ctx);
> >>> + set_load_balancer(i915, ctx, ci, count, NULL);
> >>> +
> >>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> >>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> >>> + c->spin[j] = igt_spin_new(i915, ctx,
> >>> + .flags = flags);
> >>> + flags = IGT_SPIN_FENCE_OUT;
> >>> + }
> >>> +
> >>> + gem_context_destroy(i915, ctx);
> >>> + }
> >>> +
> >>> + /* Apply some background context to speed up hang detection */
> >>> + bg = gem_context_create(i915);
> >>> + set_engines(i915, bg, ci, count);
> >>> + for (int i = 0; i < count; i++) {
> >>> + struct drm_i915_gem_execbuffer2 execbuf = {
> >>> + .buffers_ptr = to_user_pointer(&batch),
> >>> + .buffer_count = 1,
> >>> + .flags = i,
> >>> + .rsvd1 = bg,
> >>> + };
> >>> + gem_execbuf(i915, &execbuf);
> >>> + }
> >>> + gem_context_destroy(i915, bg);
> >>> +
> >>> + for (int i = 0; i < count; i++) {
> >>> + struct client *c = &client[i];
> >>> +
> >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> >>> + gem_sync(i915, c->spin[0]->handle);
> >>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> >>> + -EIO);
> >>> +
> >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> >>> + gem_sync(i915, c->spin[1]->handle);
> >>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> >>> + -EIO);
> >>> +
> >>> + igt_spin_free(i915, c->spin[0]);
> >>> + igt_spin_free(i915, c->spin[1]);
> >>> + }
> >>> + free(client);
> >>> + }
> >>> +
> >>> + gem_close(i915, batch.handle);
> >>> + gem_quiescent_gpu(i915);
> >>> +}
> >>> +
> >>> static void smoketest(int i915, int timeout)
> >>> {
> >>> struct drm_i915_gem_exec_object2 batch[2] = {
> >>> @@ -1486,4 +1583,12 @@ igt_main
> >>> igt_fixture {
> >>> igt_stop_hang_detector();
> >>> }
> >>> +
> >>> + igt_subtest("hang") {
> >>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> >>> +
> >>> + hangme(i915);
> >>> +
> >>> + igt_disallow_hang(i915, hang);
> >>> + }
> >>> }
> >>>
> >>
> >> Looks good. But do we need some core helpers to figure out when preempt
> >> timeout is compiled out?
> >
> > It should still work the same, but slower; 10s hang detection rather
> > than ~200ms.
>
> You are talking about old hangcheck? I was thinking about all new
> Kconfig's compiled out. No heartbeats, no preemption timeout. Still works?
Works even faster. :)
The spinners then get killed when the contexts are closed (default is
non-persistent contexts if you disable heartbeats entirely). The
challenge is really on the per-engine heartbeat controls to make sure we
kick off the dead contexts, but that's for the future.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [igt-dev] [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 14:58 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-15 14:58 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Tvrtko Ursulin (2019-11-15 14:52:16)
>
> On 15/11/2019 13:09, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
> >>
> >> On 14/11/2019 19:15, Chris Wilson wrote:
> >>> Although a virtual engine itself has no hang detection; that is on the
> >>> underlying physical engines, it does provide a unique means for clients
> >>> to try and break the system. Try and break it before they do.
> >>>
> >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> >>> ---
> >>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> >>> 1 file changed, 105 insertions(+)
> >>>
> >>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> >>> index 70c4529b4..86028cfdd 100644
> >>> --- a/tests/i915/gem_exec_balancer.c
> >>> +++ b/tests/i915/gem_exec_balancer.c
> >>> @@ -24,6 +24,7 @@
> >>> #include <sched.h>
> >>>
> >>> #include "igt.h"
> >>> +#include "igt_gt.h"
> >>> #include "igt_perf.h"
> >>> #include "i915/gem_ring.h"
> >>> #include "sw_sync.h"
> >>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> >>> gem_quiescent_gpu(i915);
> >>> }
> >>>
> >>> +static void set_unbannable(int i915, uint32_t ctx)
> >>> +{
> >>> + struct drm_i915_gem_context_param p = {
> >>> + .ctx_id = ctx,
> >>> + .param = I915_CONTEXT_PARAM_BANNABLE,
> >>> + };
> >>> +
> >>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> >>> +}
> >>> +
> >>> +static void hangme(int i915)
> >>> +{
> >>> + struct drm_i915_gem_exec_object2 batch = {
> >>> + .handle = batch_create(i915),
> >>> + };
> >>> +
> >>> + /*
> >>> + * Fill the available engines with hanging virtual engines and verify
> >>> + * that execution continues onto the second batch.
> >>> + */
> >>> +
> >>> + for (int class = 1; class < 32; class++) {
> >>> + struct i915_engine_class_instance *ci;
> >>> + struct client {
> >>> + igt_spin_t *spin[2];
> >>> + } *client;
> >>> + unsigned int count;
> >>> + uint32_t bg;
> >>> +
> >>> + ci = list_engines(i915, 1u << class, &count);
> >>> + if (!ci)
> >>> + continue;
> >>> +
> >>> + if (count < 2) {
> >>> + free(ci);
> >>> + continue;
> >>> + }
> >>> +
> >>> + client = malloc(sizeof(*client) * count);
> >>> + igt_assert(client);
> >>> +
> >>> + for (int i = 0; i < count; i++) {
> >>> + uint32_t ctx = gem_context_create(i915);
> >>> + struct client *c = &client[i];
> >>> + unsigned int flags;
> >>> +
> >>> + set_unbannable(i915, ctx);
> >>> + set_load_balancer(i915, ctx, ci, count, NULL);
> >>> +
> >>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> >>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> >>> + c->spin[j] = igt_spin_new(i915, ctx,
> >>> + .flags = flags);
> >>> + flags = IGT_SPIN_FENCE_OUT;
> >>> + }
> >>> +
> >>> + gem_context_destroy(i915, ctx);
> >>> + }
> >>> +
> >>> + /* Apply some background context to speed up hang detection */
> >>> + bg = gem_context_create(i915);
> >>> + set_engines(i915, bg, ci, count);
> >>> + for (int i = 0; i < count; i++) {
> >>> + struct drm_i915_gem_execbuffer2 execbuf = {
> >>> + .buffers_ptr = to_user_pointer(&batch),
> >>> + .buffer_count = 1,
> >>> + .flags = i,
> >>> + .rsvd1 = bg,
> >>> + };
> >>> + gem_execbuf(i915, &execbuf);
> >>> + }
> >>> + gem_context_destroy(i915, bg);
> >>> +
> >>> + for (int i = 0; i < count; i++) {
> >>> + struct client *c = &client[i];
> >>> +
> >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> >>> + gem_sync(i915, c->spin[0]->handle);
> >>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> >>> + -EIO);
> >>> +
> >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> >>> + gem_sync(i915, c->spin[1]->handle);
> >>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> >>> + -EIO);
> >>> +
> >>> + igt_spin_free(i915, c->spin[0]);
> >>> + igt_spin_free(i915, c->spin[1]);
> >>> + }
> >>> + free(client);
> >>> + }
> >>> +
> >>> + gem_close(i915, batch.handle);
> >>> + gem_quiescent_gpu(i915);
> >>> +}
> >>> +
> >>> static void smoketest(int i915, int timeout)
> >>> {
> >>> struct drm_i915_gem_exec_object2 batch[2] = {
> >>> @@ -1486,4 +1583,12 @@ igt_main
> >>> igt_fixture {
> >>> igt_stop_hang_detector();
> >>> }
> >>> +
> >>> + igt_subtest("hang") {
> >>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> >>> +
> >>> + hangme(i915);
> >>> +
> >>> + igt_disallow_hang(i915, hang);
> >>> + }
> >>> }
> >>>
> >>
> >> Looks good. But do we need some core helpers to figure out when preempt
> >> timeout is compiled out?
> >
> > It should still work the same, but slower; 10s hang detection rather
> > than ~200ms.
>
> You are talking about old hangcheck? I was thinking about all new
> Kconfig's compiled out. No heartbeats, no preemption timeout. Still works?
Works even faster. :)
The spinners then get killed when the contexts are closed (default is
non-persistent contexts if you disable heartbeats entirely). The
challenge is really on the per-engine heartbeat controls to make sure we
kick off the dead contexts, but that's for the future.
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 14:59 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-15 14:59 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Chris Wilson (2019-11-15 14:58:00)
> Quoting Tvrtko Ursulin (2019-11-15 14:52:16)
> >
> > On 15/11/2019 13:09, Chris Wilson wrote:
> > > Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
> > >>
> > >> On 14/11/2019 19:15, Chris Wilson wrote:
> > >>> Although a virtual engine itself has no hang detection; that is on the
> > >>> underlying physical engines, it does provide a unique means for clients
> > >>> to try and break the system. Try and break it before they do.
> > >>>
> > >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > >>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > >>> ---
> > >>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> > >>> 1 file changed, 105 insertions(+)
> > >>>
> > >>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> > >>> index 70c4529b4..86028cfdd 100644
> > >>> --- a/tests/i915/gem_exec_balancer.c
> > >>> +++ b/tests/i915/gem_exec_balancer.c
> > >>> @@ -24,6 +24,7 @@
> > >>> #include <sched.h>
> > >>>
> > >>> #include "igt.h"
> > >>> +#include "igt_gt.h"
> > >>> #include "igt_perf.h"
> > >>> #include "i915/gem_ring.h"
> > >>> #include "sw_sync.h"
> > >>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> > >>> gem_quiescent_gpu(i915);
> > >>> }
> > >>>
> > >>> +static void set_unbannable(int i915, uint32_t ctx)
> > >>> +{
> > >>> + struct drm_i915_gem_context_param p = {
> > >>> + .ctx_id = ctx,
> > >>> + .param = I915_CONTEXT_PARAM_BANNABLE,
> > >>> + };
> > >>> +
> > >>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> > >>> +}
> > >>> +
> > >>> +static void hangme(int i915)
> > >>> +{
> > >>> + struct drm_i915_gem_exec_object2 batch = {
> > >>> + .handle = batch_create(i915),
> > >>> + };
> > >>> +
> > >>> + /*
> > >>> + * Fill the available engines with hanging virtual engines and verify
> > >>> + * that execution continues onto the second batch.
> > >>> + */
> > >>> +
> > >>> + for (int class = 1; class < 32; class++) {
> > >>> + struct i915_engine_class_instance *ci;
> > >>> + struct client {
> > >>> + igt_spin_t *spin[2];
> > >>> + } *client;
> > >>> + unsigned int count;
> > >>> + uint32_t bg;
> > >>> +
> > >>> + ci = list_engines(i915, 1u << class, &count);
> > >>> + if (!ci)
> > >>> + continue;
> > >>> +
> > >>> + if (count < 2) {
> > >>> + free(ci);
> > >>> + continue;
> > >>> + }
> > >>> +
> > >>> + client = malloc(sizeof(*client) * count);
> > >>> + igt_assert(client);
> > >>> +
> > >>> + for (int i = 0; i < count; i++) {
> > >>> + uint32_t ctx = gem_context_create(i915);
> > >>> + struct client *c = &client[i];
> > >>> + unsigned int flags;
> > >>> +
> > >>> + set_unbannable(i915, ctx);
> > >>> + set_load_balancer(i915, ctx, ci, count, NULL);
> > >>> +
> > >>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> > >>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> > >>> + c->spin[j] = igt_spin_new(i915, ctx,
> > >>> + .flags = flags);
> > >>> + flags = IGT_SPIN_FENCE_OUT;
> > >>> + }
> > >>> +
> > >>> + gem_context_destroy(i915, ctx);
> > >>> + }
> > >>> +
> > >>> + /* Apply some background context to speed up hang detection */
> > >>> + bg = gem_context_create(i915);
> > >>> + set_engines(i915, bg, ci, count);
> > >>> + for (int i = 0; i < count; i++) {
> > >>> + struct drm_i915_gem_execbuffer2 execbuf = {
> > >>> + .buffers_ptr = to_user_pointer(&batch),
> > >>> + .buffer_count = 1,
> > >>> + .flags = i,
> > >>> + .rsvd1 = bg,
> > >>> + };
> > >>> + gem_execbuf(i915, &execbuf);
> > >>> + }
> > >>> + gem_context_destroy(i915, bg);
> > >>> +
> > >>> + for (int i = 0; i < count; i++) {
> > >>> + struct client *c = &client[i];
> > >>> +
> > >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> > >>> + gem_sync(i915, c->spin[0]->handle);
> > >>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> > >>> + -EIO);
> > >>> +
> > >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> > >>> + gem_sync(i915, c->spin[1]->handle);
> > >>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> > >>> + -EIO);
> > >>> +
> > >>> + igt_spin_free(i915, c->spin[0]);
> > >>> + igt_spin_free(i915, c->spin[1]);
> > >>> + }
> > >>> + free(client);
> > >>> + }
> > >>> +
> > >>> + gem_close(i915, batch.handle);
> > >>> + gem_quiescent_gpu(i915);
> > >>> +}
> > >>> +
> > >>> static void smoketest(int i915, int timeout)
> > >>> {
> > >>> struct drm_i915_gem_exec_object2 batch[2] = {
> > >>> @@ -1486,4 +1583,12 @@ igt_main
> > >>> igt_fixture {
> > >>> igt_stop_hang_detector();
> > >>> }
> > >>> +
> > >>> + igt_subtest("hang") {
> > >>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> > >>> +
> > >>> + hangme(i915);
> > >>> +
> > >>> + igt_disallow_hang(i915, hang);
> > >>> + }
> > >>> }
> > >>>
> > >>
> > >> Looks good. But do we need some core helpers to figure out when preempt
> > >> timeout is compiled out?
> > >
> > > It should still work the same, but slower; 10s hang detection rather
> > > than ~200ms.
> >
> > You are talking about old hangcheck? I was thinking about all new
> > Kconfig's compiled out. No heartbeats, no preemption timeout. Still works?
>
> Works even faster. :)
>
> The spinners then get killed when the contexts are closed (default is
> non-persistent contexts if you disable heartbeats entirely). The
> challenge is really on the per-engine heartbeat controls to make sure we
> kick off the dead contexts, but that's for the future.
And for the other kconfig, with no preemption timeout, you just get
regular heartbeats, so roughly the 10s hangcheck timeout.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 14:59 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-15 14:59 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Chris Wilson (2019-11-15 14:58:00)
> Quoting Tvrtko Ursulin (2019-11-15 14:52:16)
> >
> > On 15/11/2019 13:09, Chris Wilson wrote:
> > > Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
> > >>
> > >> On 14/11/2019 19:15, Chris Wilson wrote:
> > >>> Although a virtual engine itself has no hang detection; that is on the
> > >>> underlying physical engines, it does provide a unique means for clients
> > >>> to try and break the system. Try and break it before they do.
> > >>>
> > >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > >>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > >>> ---
> > >>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> > >>> 1 file changed, 105 insertions(+)
> > >>>
> > >>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> > >>> index 70c4529b4..86028cfdd 100644
> > >>> --- a/tests/i915/gem_exec_balancer.c
> > >>> +++ b/tests/i915/gem_exec_balancer.c
> > >>> @@ -24,6 +24,7 @@
> > >>> #include <sched.h>
> > >>>
> > >>> #include "igt.h"
> > >>> +#include "igt_gt.h"
> > >>> #include "igt_perf.h"
> > >>> #include "i915/gem_ring.h"
> > >>> #include "sw_sync.h"
> > >>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> > >>> gem_quiescent_gpu(i915);
> > >>> }
> > >>>
> > >>> +static void set_unbannable(int i915, uint32_t ctx)
> > >>> +{
> > >>> + struct drm_i915_gem_context_param p = {
> > >>> + .ctx_id = ctx,
> > >>> + .param = I915_CONTEXT_PARAM_BANNABLE,
> > >>> + };
> > >>> +
> > >>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> > >>> +}
> > >>> +
> > >>> +static void hangme(int i915)
> > >>> +{
> > >>> + struct drm_i915_gem_exec_object2 batch = {
> > >>> + .handle = batch_create(i915),
> > >>> + };
> > >>> +
> > >>> + /*
> > >>> + * Fill the available engines with hanging virtual engines and verify
> > >>> + * that execution continues onto the second batch.
> > >>> + */
> > >>> +
> > >>> + for (int class = 1; class < 32; class++) {
> > >>> + struct i915_engine_class_instance *ci;
> > >>> + struct client {
> > >>> + igt_spin_t *spin[2];
> > >>> + } *client;
> > >>> + unsigned int count;
> > >>> + uint32_t bg;
> > >>> +
> > >>> + ci = list_engines(i915, 1u << class, &count);
> > >>> + if (!ci)
> > >>> + continue;
> > >>> +
> > >>> + if (count < 2) {
> > >>> + free(ci);
> > >>> + continue;
> > >>> + }
> > >>> +
> > >>> + client = malloc(sizeof(*client) * count);
> > >>> + igt_assert(client);
> > >>> +
> > >>> + for (int i = 0; i < count; i++) {
> > >>> + uint32_t ctx = gem_context_create(i915);
> > >>> + struct client *c = &client[i];
> > >>> + unsigned int flags;
> > >>> +
> > >>> + set_unbannable(i915, ctx);
> > >>> + set_load_balancer(i915, ctx, ci, count, NULL);
> > >>> +
> > >>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> > >>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> > >>> + c->spin[j] = igt_spin_new(i915, ctx,
> > >>> + .flags = flags);
> > >>> + flags = IGT_SPIN_FENCE_OUT;
> > >>> + }
> > >>> +
> > >>> + gem_context_destroy(i915, ctx);
> > >>> + }
> > >>> +
> > >>> + /* Apply some background context to speed up hang detection */
> > >>> + bg = gem_context_create(i915);
> > >>> + set_engines(i915, bg, ci, count);
> > >>> + for (int i = 0; i < count; i++) {
> > >>> + struct drm_i915_gem_execbuffer2 execbuf = {
> > >>> + .buffers_ptr = to_user_pointer(&batch),
> > >>> + .buffer_count = 1,
> > >>> + .flags = i,
> > >>> + .rsvd1 = bg,
> > >>> + };
> > >>> + gem_execbuf(i915, &execbuf);
> > >>> + }
> > >>> + gem_context_destroy(i915, bg);
> > >>> +
> > >>> + for (int i = 0; i < count; i++) {
> > >>> + struct client *c = &client[i];
> > >>> +
> > >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> > >>> + gem_sync(i915, c->spin[0]->handle);
> > >>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> > >>> + -EIO);
> > >>> +
> > >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> > >>> + gem_sync(i915, c->spin[1]->handle);
> > >>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> > >>> + -EIO);
> > >>> +
> > >>> + igt_spin_free(i915, c->spin[0]);
> > >>> + igt_spin_free(i915, c->spin[1]);
> > >>> + }
> > >>> + free(client);
> > >>> + }
> > >>> +
> > >>> + gem_close(i915, batch.handle);
> > >>> + gem_quiescent_gpu(i915);
> > >>> +}
> > >>> +
> > >>> static void smoketest(int i915, int timeout)
> > >>> {
> > >>> struct drm_i915_gem_exec_object2 batch[2] = {
> > >>> @@ -1486,4 +1583,12 @@ igt_main
> > >>> igt_fixture {
> > >>> igt_stop_hang_detector();
> > >>> }
> > >>> +
> > >>> + igt_subtest("hang") {
> > >>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> > >>> +
> > >>> + hangme(i915);
> > >>> +
> > >>> + igt_disallow_hang(i915, hang);
> > >>> + }
> > >>> }
> > >>>
> > >>
> > >> Looks good. But do we need some core helpers to figure out when preempt
> > >> timeout is compiled out?
> > >
> > > It should still work the same, but slower; 10s hang detection rather
> > > than ~200ms.
> >
> > You are talking about old hangcheck? I was thinking about all new
> > Kconfig's compiled out. No heartbeats, no preemption timeout. Still works?
>
> Works even faster. :)
>
> The spinners then get killed when the contexts are closed (default is
> non-persistent contexts if you disable heartbeats entirely). The
> challenge is really on the per-engine heartbeat controls to make sure we
> kick off the dead contexts, but that's for the future.
And for the other kconfig, with no preemption timeout, you just get
regular heartbeats, so roughly the 10s hangcheck timeout.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [igt-dev] [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 14:59 ` Chris Wilson
0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2019-11-15 14:59 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx; +Cc: igt-dev
Quoting Chris Wilson (2019-11-15 14:58:00)
> Quoting Tvrtko Ursulin (2019-11-15 14:52:16)
> >
> > On 15/11/2019 13:09, Chris Wilson wrote:
> > > Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
> > >>
> > >> On 14/11/2019 19:15, Chris Wilson wrote:
> > >>> Although a virtual engine itself has no hang detection; that is on the
> > >>> underlying physical engines, it does provide a unique means for clients
> > >>> to try and break the system. Try and break it before they do.
> > >>>
> > >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > >>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > >>> ---
> > >>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
> > >>> 1 file changed, 105 insertions(+)
> > >>>
> > >>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
> > >>> index 70c4529b4..86028cfdd 100644
> > >>> --- a/tests/i915/gem_exec_balancer.c
> > >>> +++ b/tests/i915/gem_exec_balancer.c
> > >>> @@ -24,6 +24,7 @@
> > >>> #include <sched.h>
> > >>>
> > >>> #include "igt.h"
> > >>> +#include "igt_gt.h"
> > >>> #include "igt_perf.h"
> > >>> #include "i915/gem_ring.h"
> > >>> #include "sw_sync.h"
> > >>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
> > >>> gem_quiescent_gpu(i915);
> > >>> }
> > >>>
> > >>> +static void set_unbannable(int i915, uint32_t ctx)
> > >>> +{
> > >>> + struct drm_i915_gem_context_param p = {
> > >>> + .ctx_id = ctx,
> > >>> + .param = I915_CONTEXT_PARAM_BANNABLE,
> > >>> + };
> > >>> +
> > >>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
> > >>> +}
> > >>> +
> > >>> +static void hangme(int i915)
> > >>> +{
> > >>> + struct drm_i915_gem_exec_object2 batch = {
> > >>> + .handle = batch_create(i915),
> > >>> + };
> > >>> +
> > >>> + /*
> > >>> + * Fill the available engines with hanging virtual engines and verify
> > >>> + * that execution continues onto the second batch.
> > >>> + */
> > >>> +
> > >>> + for (int class = 1; class < 32; class++) {
> > >>> + struct i915_engine_class_instance *ci;
> > >>> + struct client {
> > >>> + igt_spin_t *spin[2];
> > >>> + } *client;
> > >>> + unsigned int count;
> > >>> + uint32_t bg;
> > >>> +
> > >>> + ci = list_engines(i915, 1u << class, &count);
> > >>> + if (!ci)
> > >>> + continue;
> > >>> +
> > >>> + if (count < 2) {
> > >>> + free(ci);
> > >>> + continue;
> > >>> + }
> > >>> +
> > >>> + client = malloc(sizeof(*client) * count);
> > >>> + igt_assert(client);
> > >>> +
> > >>> + for (int i = 0; i < count; i++) {
> > >>> + uint32_t ctx = gem_context_create(i915);
> > >>> + struct client *c = &client[i];
> > >>> + unsigned int flags;
> > >>> +
> > >>> + set_unbannable(i915, ctx);
> > >>> + set_load_balancer(i915, ctx, ci, count, NULL);
> > >>> +
> > >>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
> > >>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
> > >>> + c->spin[j] = igt_spin_new(i915, ctx,
> > >>> + .flags = flags);
> > >>> + flags = IGT_SPIN_FENCE_OUT;
> > >>> + }
> > >>> +
> > >>> + gem_context_destroy(i915, ctx);
> > >>> + }
> > >>> +
> > >>> + /* Apply some background context to speed up hang detection */
> > >>> + bg = gem_context_create(i915);
> > >>> + set_engines(i915, bg, ci, count);
> > >>> + for (int i = 0; i < count; i++) {
> > >>> + struct drm_i915_gem_execbuffer2 execbuf = {
> > >>> + .buffers_ptr = to_user_pointer(&batch),
> > >>> + .buffer_count = 1,
> > >>> + .flags = i,
> > >>> + .rsvd1 = bg,
> > >>> + };
> > >>> + gem_execbuf(i915, &execbuf);
> > >>> + }
> > >>> + gem_context_destroy(i915, bg);
> > >>> +
> > >>> + for (int i = 0; i < count; i++) {
> > >>> + struct client *c = &client[i];
> > >>> +
> > >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
> > >>> + gem_sync(i915, c->spin[0]->handle);
> > >>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
> > >>> + -EIO);
> > >>> +
> > >>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
> > >>> + gem_sync(i915, c->spin[1]->handle);
> > >>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
> > >>> + -EIO);
> > >>> +
> > >>> + igt_spin_free(i915, c->spin[0]);
> > >>> + igt_spin_free(i915, c->spin[1]);
> > >>> + }
> > >>> + free(client);
> > >>> + }
> > >>> +
> > >>> + gem_close(i915, batch.handle);
> > >>> + gem_quiescent_gpu(i915);
> > >>> +}
> > >>> +
> > >>> static void smoketest(int i915, int timeout)
> > >>> {
> > >>> struct drm_i915_gem_exec_object2 batch[2] = {
> > >>> @@ -1486,4 +1583,12 @@ igt_main
> > >>> igt_fixture {
> > >>> igt_stop_hang_detector();
> > >>> }
> > >>> +
> > >>> + igt_subtest("hang") {
> > >>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
> > >>> +
> > >>> + hangme(i915);
> > >>> +
> > >>> + igt_disallow_hang(i915, hang);
> > >>> + }
> > >>> }
> > >>>
> > >>
> > >> Looks good. But do we need some core helpers to figure out when preempt
> > >> timeout is compiled out?
> > >
> > > It should still work the same, but slower; 10s hang detection rather
> > > than ~200ms.
> >
> > You are talking about old hangcheck? I was thinking about all new
> > Kconfig's compiled out. No heartbeats, no preemption timeout. Still works?
>
> Works even faster. :)
>
> The spinners then get killed when the contexts are closed (default is
> non-persistent contexts if you disable heartbeats entirely). The
> challenge is really on the per-engine heartbeat controls to make sure we
> kick off the dead contexts, but that's for the future.
And for the other kconfig, with no preemption timeout, you just get
regular heartbeats, so roughly the 10s hangcheck timeout.
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 15:26 ` Tvrtko Ursulin
0 siblings, 0 replies; 23+ messages in thread
From: Tvrtko Ursulin @ 2019-11-15 15:26 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 15/11/2019 14:59, Chris Wilson wrote:
> Quoting Chris Wilson (2019-11-15 14:58:00)
>> Quoting Tvrtko Ursulin (2019-11-15 14:52:16)
>>>
>>> On 15/11/2019 13:09, Chris Wilson wrote:
>>>> Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
>>>>>
>>>>> On 14/11/2019 19:15, Chris Wilson wrote:
>>>>>> Although a virtual engine itself has no hang detection; that is on the
>>>>>> underlying physical engines, it does provide a unique means for clients
>>>>>> to try and break the system. Try and break it before they do.
>>>>>>
>>>>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>>>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>>>>> ---
>>>>>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
>>>>>> 1 file changed, 105 insertions(+)
>>>>>>
>>>>>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
>>>>>> index 70c4529b4..86028cfdd 100644
>>>>>> --- a/tests/i915/gem_exec_balancer.c
>>>>>> +++ b/tests/i915/gem_exec_balancer.c
>>>>>> @@ -24,6 +24,7 @@
>>>>>> #include <sched.h>
>>>>>>
>>>>>> #include "igt.h"
>>>>>> +#include "igt_gt.h"
>>>>>> #include "igt_perf.h"
>>>>>> #include "i915/gem_ring.h"
>>>>>> #include "sw_sync.h"
>>>>>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
>>>>>> gem_quiescent_gpu(i915);
>>>>>> }
>>>>>>
>>>>>> +static void set_unbannable(int i915, uint32_t ctx)
>>>>>> +{
>>>>>> + struct drm_i915_gem_context_param p = {
>>>>>> + .ctx_id = ctx,
>>>>>> + .param = I915_CONTEXT_PARAM_BANNABLE,
>>>>>> + };
>>>>>> +
>>>>>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
>>>>>> +}
>>>>>> +
>>>>>> +static void hangme(int i915)
>>>>>> +{
>>>>>> + struct drm_i915_gem_exec_object2 batch = {
>>>>>> + .handle = batch_create(i915),
>>>>>> + };
>>>>>> +
>>>>>> + /*
>>>>>> + * Fill the available engines with hanging virtual engines and verify
>>>>>> + * that execution continues onto the second batch.
>>>>>> + */
>>>>>> +
>>>>>> + for (int class = 1; class < 32; class++) {
>>>>>> + struct i915_engine_class_instance *ci;
>>>>>> + struct client {
>>>>>> + igt_spin_t *spin[2];
>>>>>> + } *client;
>>>>>> + unsigned int count;
>>>>>> + uint32_t bg;
>>>>>> +
>>>>>> + ci = list_engines(i915, 1u << class, &count);
>>>>>> + if (!ci)
>>>>>> + continue;
>>>>>> +
>>>>>> + if (count < 2) {
>>>>>> + free(ci);
>>>>>> + continue;
>>>>>> + }
>>>>>> +
>>>>>> + client = malloc(sizeof(*client) * count);
>>>>>> + igt_assert(client);
>>>>>> +
>>>>>> + for (int i = 0; i < count; i++) {
>>>>>> + uint32_t ctx = gem_context_create(i915);
>>>>>> + struct client *c = &client[i];
>>>>>> + unsigned int flags;
>>>>>> +
>>>>>> + set_unbannable(i915, ctx);
>>>>>> + set_load_balancer(i915, ctx, ci, count, NULL);
>>>>>> +
>>>>>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
>>>>>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
>>>>>> + c->spin[j] = igt_spin_new(i915, ctx,
>>>>>> + .flags = flags);
>>>>>> + flags = IGT_SPIN_FENCE_OUT;
>>>>>> + }
>>>>>> +
>>>>>> + gem_context_destroy(i915, ctx);
>>>>>> + }
>>>>>> +
>>>>>> + /* Apply some background context to speed up hang detection */
>>>>>> + bg = gem_context_create(i915);
>>>>>> + set_engines(i915, bg, ci, count);
>>>>>> + for (int i = 0; i < count; i++) {
>>>>>> + struct drm_i915_gem_execbuffer2 execbuf = {
>>>>>> + .buffers_ptr = to_user_pointer(&batch),
>>>>>> + .buffer_count = 1,
>>>>>> + .flags = i,
>>>>>> + .rsvd1 = bg,
>>>>>> + };
>>>>>> + gem_execbuf(i915, &execbuf);
>>>>>> + }
>>>>>> + gem_context_destroy(i915, bg);
>>>>>> +
>>>>>> + for (int i = 0; i < count; i++) {
>>>>>> + struct client *c = &client[i];
>>>>>> +
>>>>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
>>>>>> + gem_sync(i915, c->spin[0]->handle);
>>>>>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
>>>>>> + -EIO);
>>>>>> +
>>>>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
>>>>>> + gem_sync(i915, c->spin[1]->handle);
>>>>>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
>>>>>> + -EIO);
>>>>>> +
>>>>>> + igt_spin_free(i915, c->spin[0]);
>>>>>> + igt_spin_free(i915, c->spin[1]);
>>>>>> + }
>>>>>> + free(client);
>>>>>> + }
>>>>>> +
>>>>>> + gem_close(i915, batch.handle);
>>>>>> + gem_quiescent_gpu(i915);
>>>>>> +}
>>>>>> +
>>>>>> static void smoketest(int i915, int timeout)
>>>>>> {
>>>>>> struct drm_i915_gem_exec_object2 batch[2] = {
>>>>>> @@ -1486,4 +1583,12 @@ igt_main
>>>>>> igt_fixture {
>>>>>> igt_stop_hang_detector();
>>>>>> }
>>>>>> +
>>>>>> + igt_subtest("hang") {
>>>>>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
>>>>>> +
>>>>>> + hangme(i915);
>>>>>> +
>>>>>> + igt_disallow_hang(i915, hang);
>>>>>> + }
>>>>>> }
>>>>>>
>>>>>
>>>>> Looks good. But do we need some core helpers to figure out when preempt
>>>>> timeout is compiled out?
>>>>
>>>> It should still work the same, but slower; 10s hang detection rather
>>>> than ~200ms.
>>>
>>> You are talking about old hangcheck? I was thinking about all new
>>> Kconfig's compiled out. No heartbeats, no preemption timeout. Still works?
>>
>> Works even faster. :)
>>
>> The spinners then get killed when the contexts are closed (default is
>> non-persistent contexts if you disable heartbeats entirely). The
>> challenge is really on the per-engine heartbeat controls to make sure we
>> kick off the dead contexts, but that's for the future.
>
> And for the other kconfig, with no preemption timeout, you just get
> regular heartbeats, so roughly the 10s hangcheck timeout.
Good then. No other opens:
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 15:26 ` Tvrtko Ursulin
0 siblings, 0 replies; 23+ messages in thread
From: Tvrtko Ursulin @ 2019-11-15 15:26 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 15/11/2019 14:59, Chris Wilson wrote:
> Quoting Chris Wilson (2019-11-15 14:58:00)
>> Quoting Tvrtko Ursulin (2019-11-15 14:52:16)
>>>
>>> On 15/11/2019 13:09, Chris Wilson wrote:
>>>> Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
>>>>>
>>>>> On 14/11/2019 19:15, Chris Wilson wrote:
>>>>>> Although a virtual engine itself has no hang detection; that is on the
>>>>>> underlying physical engines, it does provide a unique means for clients
>>>>>> to try and break the system. Try and break it before they do.
>>>>>>
>>>>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>>>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>>>>> ---
>>>>>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
>>>>>> 1 file changed, 105 insertions(+)
>>>>>>
>>>>>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
>>>>>> index 70c4529b4..86028cfdd 100644
>>>>>> --- a/tests/i915/gem_exec_balancer.c
>>>>>> +++ b/tests/i915/gem_exec_balancer.c
>>>>>> @@ -24,6 +24,7 @@
>>>>>> #include <sched.h>
>>>>>>
>>>>>> #include "igt.h"
>>>>>> +#include "igt_gt.h"
>>>>>> #include "igt_perf.h"
>>>>>> #include "i915/gem_ring.h"
>>>>>> #include "sw_sync.h"
>>>>>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
>>>>>> gem_quiescent_gpu(i915);
>>>>>> }
>>>>>>
>>>>>> +static void set_unbannable(int i915, uint32_t ctx)
>>>>>> +{
>>>>>> + struct drm_i915_gem_context_param p = {
>>>>>> + .ctx_id = ctx,
>>>>>> + .param = I915_CONTEXT_PARAM_BANNABLE,
>>>>>> + };
>>>>>> +
>>>>>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
>>>>>> +}
>>>>>> +
>>>>>> +static void hangme(int i915)
>>>>>> +{
>>>>>> + struct drm_i915_gem_exec_object2 batch = {
>>>>>> + .handle = batch_create(i915),
>>>>>> + };
>>>>>> +
>>>>>> + /*
>>>>>> + * Fill the available engines with hanging virtual engines and verify
>>>>>> + * that execution continues onto the second batch.
>>>>>> + */
>>>>>> +
>>>>>> + for (int class = 1; class < 32; class++) {
>>>>>> + struct i915_engine_class_instance *ci;
>>>>>> + struct client {
>>>>>> + igt_spin_t *spin[2];
>>>>>> + } *client;
>>>>>> + unsigned int count;
>>>>>> + uint32_t bg;
>>>>>> +
>>>>>> + ci = list_engines(i915, 1u << class, &count);
>>>>>> + if (!ci)
>>>>>> + continue;
>>>>>> +
>>>>>> + if (count < 2) {
>>>>>> + free(ci);
>>>>>> + continue;
>>>>>> + }
>>>>>> +
>>>>>> + client = malloc(sizeof(*client) * count);
>>>>>> + igt_assert(client);
>>>>>> +
>>>>>> + for (int i = 0; i < count; i++) {
>>>>>> + uint32_t ctx = gem_context_create(i915);
>>>>>> + struct client *c = &client[i];
>>>>>> + unsigned int flags;
>>>>>> +
>>>>>> + set_unbannable(i915, ctx);
>>>>>> + set_load_balancer(i915, ctx, ci, count, NULL);
>>>>>> +
>>>>>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
>>>>>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
>>>>>> + c->spin[j] = igt_spin_new(i915, ctx,
>>>>>> + .flags = flags);
>>>>>> + flags = IGT_SPIN_FENCE_OUT;
>>>>>> + }
>>>>>> +
>>>>>> + gem_context_destroy(i915, ctx);
>>>>>> + }
>>>>>> +
>>>>>> + /* Apply some background context to speed up hang detection */
>>>>>> + bg = gem_context_create(i915);
>>>>>> + set_engines(i915, bg, ci, count);
>>>>>> + for (int i = 0; i < count; i++) {
>>>>>> + struct drm_i915_gem_execbuffer2 execbuf = {
>>>>>> + .buffers_ptr = to_user_pointer(&batch),
>>>>>> + .buffer_count = 1,
>>>>>> + .flags = i,
>>>>>> + .rsvd1 = bg,
>>>>>> + };
>>>>>> + gem_execbuf(i915, &execbuf);
>>>>>> + }
>>>>>> + gem_context_destroy(i915, bg);
>>>>>> +
>>>>>> + for (int i = 0; i < count; i++) {
>>>>>> + struct client *c = &client[i];
>>>>>> +
>>>>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
>>>>>> + gem_sync(i915, c->spin[0]->handle);
>>>>>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
>>>>>> + -EIO);
>>>>>> +
>>>>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
>>>>>> + gem_sync(i915, c->spin[1]->handle);
>>>>>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
>>>>>> + -EIO);
>>>>>> +
>>>>>> + igt_spin_free(i915, c->spin[0]);
>>>>>> + igt_spin_free(i915, c->spin[1]);
>>>>>> + }
>>>>>> + free(client);
>>>>>> + }
>>>>>> +
>>>>>> + gem_close(i915, batch.handle);
>>>>>> + gem_quiescent_gpu(i915);
>>>>>> +}
>>>>>> +
>>>>>> static void smoketest(int i915, int timeout)
>>>>>> {
>>>>>> struct drm_i915_gem_exec_object2 batch[2] = {
>>>>>> @@ -1486,4 +1583,12 @@ igt_main
>>>>>> igt_fixture {
>>>>>> igt_stop_hang_detector();
>>>>>> }
>>>>>> +
>>>>>> + igt_subtest("hang") {
>>>>>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
>>>>>> +
>>>>>> + hangme(i915);
>>>>>> +
>>>>>> + igt_disallow_hang(i915, hang);
>>>>>> + }
>>>>>> }
>>>>>>
>>>>>
>>>>> Looks good. But do we need some core helpers to figure out when preempt
>>>>> timeout is compiled out?
>>>>
>>>> It should still work the same, but slower; 10s hang detection rather
>>>> than ~200ms.
>>>
>>> You are talking about old hangcheck? I was thinking about all new
>>> Kconfig's compiled out. No heartbeats, no preemption timeout. Still works?
>>
>> Works even faster. :)
>>
>> The spinners then get killed when the contexts are closed (default is
>> non-persistent contexts if you disable heartbeats entirely). The
>> challenge is really on the per-engine heartbeat controls to make sure we
>> kick off the dead contexts, but that's for the future.
>
> And for the other kconfig, with no preemption timeout, you just get
> regular heartbeats, so roughly the 10s hangcheck timeout.
Good then. No other opens:
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [igt-dev] [Intel-gfx] [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
@ 2019-11-15 15:26 ` Tvrtko Ursulin
0 siblings, 0 replies; 23+ messages in thread
From: Tvrtko Ursulin @ 2019-11-15 15:26 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 15/11/2019 14:59, Chris Wilson wrote:
> Quoting Chris Wilson (2019-11-15 14:58:00)
>> Quoting Tvrtko Ursulin (2019-11-15 14:52:16)
>>>
>>> On 15/11/2019 13:09, Chris Wilson wrote:
>>>> Quoting Tvrtko Ursulin (2019-11-15 13:02:24)
>>>>>
>>>>> On 14/11/2019 19:15, Chris Wilson wrote:
>>>>>> Although a virtual engine itself has no hang detection; that is on the
>>>>>> underlying physical engines, it does provide a unique means for clients
>>>>>> to try and break the system. Try and break it before they do.
>>>>>>
>>>>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>>>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>>>>> ---
>>>>>> tests/i915/gem_exec_balancer.c | 105 +++++++++++++++++++++++++++++++++
>>>>>> 1 file changed, 105 insertions(+)
>>>>>>
>>>>>> diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
>>>>>> index 70c4529b4..86028cfdd 100644
>>>>>> --- a/tests/i915/gem_exec_balancer.c
>>>>>> +++ b/tests/i915/gem_exec_balancer.c
>>>>>> @@ -24,6 +24,7 @@
>>>>>> #include <sched.h>
>>>>>>
>>>>>> #include "igt.h"
>>>>>> +#include "igt_gt.h"
>>>>>> #include "igt_perf.h"
>>>>>> #include "i915/gem_ring.h"
>>>>>> #include "sw_sync.h"
>>>>>> @@ -1314,6 +1315,102 @@ static void semaphore(int i915)
>>>>>> gem_quiescent_gpu(i915);
>>>>>> }
>>>>>>
>>>>>> +static void set_unbannable(int i915, uint32_t ctx)
>>>>>> +{
>>>>>> + struct drm_i915_gem_context_param p = {
>>>>>> + .ctx_id = ctx,
>>>>>> + .param = I915_CONTEXT_PARAM_BANNABLE,
>>>>>> + };
>>>>>> +
>>>>>> + igt_assert_eq(__gem_context_set_param(i915, &p), 0);
>>>>>> +}
>>>>>> +
>>>>>> +static void hangme(int i915)
>>>>>> +{
>>>>>> + struct drm_i915_gem_exec_object2 batch = {
>>>>>> + .handle = batch_create(i915),
>>>>>> + };
>>>>>> +
>>>>>> + /*
>>>>>> + * Fill the available engines with hanging virtual engines and verify
>>>>>> + * that execution continues onto the second batch.
>>>>>> + */
>>>>>> +
>>>>>> + for (int class = 1; class < 32; class++) {
>>>>>> + struct i915_engine_class_instance *ci;
>>>>>> + struct client {
>>>>>> + igt_spin_t *spin[2];
>>>>>> + } *client;
>>>>>> + unsigned int count;
>>>>>> + uint32_t bg;
>>>>>> +
>>>>>> + ci = list_engines(i915, 1u << class, &count);
>>>>>> + if (!ci)
>>>>>> + continue;
>>>>>> +
>>>>>> + if (count < 2) {
>>>>>> + free(ci);
>>>>>> + continue;
>>>>>> + }
>>>>>> +
>>>>>> + client = malloc(sizeof(*client) * count);
>>>>>> + igt_assert(client);
>>>>>> +
>>>>>> + for (int i = 0; i < count; i++) {
>>>>>> + uint32_t ctx = gem_context_create(i915);
>>>>>> + struct client *c = &client[i];
>>>>>> + unsigned int flags;
>>>>>> +
>>>>>> + set_unbannable(i915, ctx);
>>>>>> + set_load_balancer(i915, ctx, ci, count, NULL);
>>>>>> +
>>>>>> + flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION;
>>>>>> + for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
>>>>>> + c->spin[j] = igt_spin_new(i915, ctx,
>>>>>> + .flags = flags);
>>>>>> + flags = IGT_SPIN_FENCE_OUT;
>>>>>> + }
>>>>>> +
>>>>>> + gem_context_destroy(i915, ctx);
>>>>>> + }
>>>>>> +
>>>>>> + /* Apply some background context to speed up hang detection */
>>>>>> + bg = gem_context_create(i915);
>>>>>> + set_engines(i915, bg, ci, count);
>>>>>> + for (int i = 0; i < count; i++) {
>>>>>> + struct drm_i915_gem_execbuffer2 execbuf = {
>>>>>> + .buffers_ptr = to_user_pointer(&batch),
>>>>>> + .buffer_count = 1,
>>>>>> + .flags = i,
>>>>>> + .rsvd1 = bg,
>>>>>> + };
>>>>>> + gem_execbuf(i915, &execbuf);
>>>>>> + }
>>>>>> + gem_context_destroy(i915, bg);
>>>>>> +
>>>>>> + for (int i = 0; i < count; i++) {
>>>>>> + struct client *c = &client[i];
>>>>>> +
>>>>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 0);
>>>>>> + gem_sync(i915, c->spin[0]->handle);
>>>>>> + igt_assert_eq(sync_fence_status(c->spin[0]->out_fence),
>>>>>> + -EIO);
>>>>>> +
>>>>>> + igt_debug("Waiting for client[%d].spin[%d]\n", i, 1);
>>>>>> + gem_sync(i915, c->spin[1]->handle);
>>>>>> + igt_assert_eq(sync_fence_status(c->spin[1]->out_fence),
>>>>>> + -EIO);
>>>>>> +
>>>>>> + igt_spin_free(i915, c->spin[0]);
>>>>>> + igt_spin_free(i915, c->spin[1]);
>>>>>> + }
>>>>>> + free(client);
>>>>>> + }
>>>>>> +
>>>>>> + gem_close(i915, batch.handle);
>>>>>> + gem_quiescent_gpu(i915);
>>>>>> +}
>>>>>> +
>>>>>> static void smoketest(int i915, int timeout)
>>>>>> {
>>>>>> struct drm_i915_gem_exec_object2 batch[2] = {
>>>>>> @@ -1486,4 +1583,12 @@ igt_main
>>>>>> igt_fixture {
>>>>>> igt_stop_hang_detector();
>>>>>> }
>>>>>> +
>>>>>> + igt_subtest("hang") {
>>>>>> + igt_hang_t hang = igt_allow_hang(i915, 0, 0);
>>>>>> +
>>>>>> + hangme(i915);
>>>>>> +
>>>>>> + igt_disallow_hang(i915, hang);
>>>>>> + }
>>>>>> }
>>>>>>
>>>>>
>>>>> Looks good. But do we need some core helpers to figure out when preempt
>>>>> timeout is compiled out?
>>>>
>>>> It should still work the same, but slower; 10s hang detection rather
>>>> than ~200ms.
>>>
>>> You are talking about old hangcheck? I was thinking about all new
>>> Kconfig's compiled out. No heartbeats, no preemption timeout. Still works?
>>
>> Works even faster. :)
>>
>> The spinners then get killed when the contexts are closed (default is
>> non-persistent contexts if you disable heartbeats entirely). The
>> challenge is really on the per-engine heartbeat controls to make sure we
>> kick off the dead contexts, but that's for the future.
>
> And for the other kconfig, with no preemption timeout, you just get
> regular heartbeats, so roughly the 10s hangcheck timeout.
Good then. No other opens:
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Regards,
Tvrtko
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 23+ messages in thread
* [igt-dev] ✗ Fi.CI.IGT: failure for i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
2019-11-14 19:15 ` [Intel-gfx] " Chris Wilson
` (4 preceding siblings ...)
(?)
@ 2019-11-16 0:20 ` Patchwork
0 siblings, 0 replies; 23+ messages in thread
From: Patchwork @ 2019-11-16 0:20 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
== Series Details ==
Series: i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines
URL : https://patchwork.freedesktop.org/series/69490/
State : failure
== Summary ==
CI Bug Log - changes from CI_DRM_7346_full -> IGTPW_3707_full
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with IGTPW_3707_full absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in IGTPW_3707_full, please notify your bug team to allow them
to document this new failure mode, which will reduce false positives in CI.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/index.html
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in IGTPW_3707_full:
### IGT changes ###
#### Possible regressions ####
* igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive:
- shard-glk: [PASS][1] -> [DMESG-FAIL][2]
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-glk2/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-glk8/igt@gem_persistent_relocs@forked-faulting-reloc-thrash-inactive.html
* igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive:
- shard-apl: [PASS][3] -> [DMESG-FAIL][4]
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-apl4/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-apl3/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrash-inactive.html
New tests
---------
New tests have been introduced between CI_DRM_7346_full and IGTPW_3707_full:
### New IGT tests (1) ###
* igt@gem_exec_balancer@hang:
- Statuses : 2 pass(s)
- Exec time: [0.53, 0.63] s
Known issues
------------
Here are the changes found in IGTPW_3707_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_ctx_isolation@vcs1-dirty-create:
- shard-iclb: [PASS][5] -> [SKIP][6] ([fdo#109276] / [fdo#112080]) +2 similar issues
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb1/igt@gem_ctx_isolation@vcs1-dirty-create.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb3/igt@gem_ctx_isolation@vcs1-dirty-create.html
* igt@gem_ctx_switch@queue-light:
- shard-tglb: [PASS][7] -> [INCOMPLETE][8] ([fdo#111672])
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb3/igt@gem_ctx_switch@queue-light.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb6/igt@gem_ctx_switch@queue-light.html
* igt@gem_eio@in-flight-suspend:
- shard-tglb: [PASS][9] -> [INCOMPLETE][10] ([fdo#111832] / [fdo#111850] / [fdo#112081])
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb9/igt@gem_eio@in-flight-suspend.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb7/igt@gem_eio@in-flight-suspend.html
* igt@gem_eio@unwedge-stress:
- shard-tglb: [PASS][11] -> [INCOMPLETE][12] ([fdo#111866])
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb2/igt@gem_eio@unwedge-stress.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb6/igt@gem_eio@unwedge-stress.html
* igt@gem_exec_parallel@vcs1-fds:
- shard-iclb: [PASS][13] -> [SKIP][14] ([fdo#112080]) +9 similar issues
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb2/igt@gem_exec_parallel@vcs1-fds.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb8/igt@gem_exec_parallel@vcs1-fds.html
* igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd:
- shard-iclb: [PASS][15] -> [SKIP][16] ([fdo#112146]) +4 similar issues
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb6/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb4/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd.html
* igt@gem_userptr_blits@dmabuf-sync:
- shard-snb: [PASS][17] -> [DMESG-WARN][18] ([fdo#111870]) +1 similar issue
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-snb4/igt@gem_userptr_blits@dmabuf-sync.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-snb5/igt@gem_userptr_blits@dmabuf-sync.html
* igt@gem_userptr_blits@dmabuf-unsync:
- shard-hsw: [PASS][19] -> [DMESG-WARN][20] ([fdo#111870]) +1 similar issue
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-hsw5/igt@gem_userptr_blits@dmabuf-unsync.html
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-hsw8/igt@gem_userptr_blits@dmabuf-unsync.html
* igt@i915_pm_dc@dc6-psr:
- shard-iclb: [PASS][21] -> [FAIL][22] ([fdo#111830])
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb6/igt@i915_pm_dc@dc6-psr.html
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb2/igt@i915_pm_dc@dc6-psr.html
* igt@i915_selftest@live_gt_timelines:
- shard-tglb: [PASS][23] -> [INCOMPLETE][24] ([fdo#111831])
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb6/igt@i915_selftest@live_gt_timelines.html
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb8/igt@i915_selftest@live_gt_timelines.html
* igt@kms_flip@flip-vs-expired-vblank-interruptible:
- shard-kbl: [PASS][25] -> [FAIL][26] ([fdo#105363])
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-kbl4/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-kbl6/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
* igt@kms_flip@flip-vs-suspend-interruptible:
- shard-hsw: [PASS][27] -> [INCOMPLETE][28] ([fdo#103540]) +1 similar issue
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-hsw8/igt@kms_flip@flip-vs-suspend-interruptible.html
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-hsw2/igt@kms_flip@flip-vs-suspend-interruptible.html
* igt@kms_frontbuffer_tracking@fbc-suspend:
- shard-kbl: [PASS][29] -> [DMESG-WARN][30] ([fdo#108566]) +3 similar issues
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-kbl4/igt@kms_frontbuffer_tracking@fbc-suspend.html
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-kbl3/igt@kms_frontbuffer_tracking@fbc-suspend.html
* igt@kms_frontbuffer_tracking@fbcpsr-1p-pri-indfb-multidraw:
- shard-tglb: [PASS][31] -> [FAIL][32] ([fdo#103167])
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb3/igt@kms_frontbuffer_tracking@fbcpsr-1p-pri-indfb-multidraw.html
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb5/igt@kms_frontbuffer_tracking@fbcpsr-1p-pri-indfb-multidraw.html
* igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-pwrite:
- shard-iclb: [PASS][33] -> [FAIL][34] ([fdo#103167]) +4 similar issues
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb5/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-pwrite.html
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb2/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-cur-indfb-draw-pwrite.html
* igt@kms_psr@psr2_primary_mmap_gtt:
- shard-iclb: [PASS][35] -> [SKIP][36] ([fdo#109441])
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb2/igt@kms_psr@psr2_primary_mmap_gtt.html
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb5/igt@kms_psr@psr2_primary_mmap_gtt.html
* igt@kms_psr@suspend:
- shard-tglb: [PASS][37] -> [INCOMPLETE][38] ([fdo#111832] / [fdo#111850]) +2 similar issues
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb8/igt@kms_psr@suspend.html
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb1/igt@kms_psr@suspend.html
* igt@kms_vblank@pipe-a-ts-continuation-suspend:
- shard-apl: [PASS][39] -> [DMESG-WARN][40] ([fdo#108566]) +2 similar issues
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-apl3/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-apl1/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
* igt@perf@oa-exponents:
- shard-glk: [PASS][41] -> [FAIL][42] ([fdo#105483])
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-glk3/igt@perf@oa-exponents.html
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-glk7/igt@perf@oa-exponents.html
* igt@prime_vgem@busy-blt:
- shard-kbl: [PASS][43] -> [FAIL][44] ([fdo#112291])
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-kbl4/igt@prime_vgem@busy-blt.html
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-kbl4/igt@prime_vgem@busy-blt.html
- shard-apl: [PASS][45] -> [FAIL][46] ([fdo#112291])
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-apl8/igt@prime_vgem@busy-blt.html
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-apl3/igt@prime_vgem@busy-blt.html
- shard-glk: [PASS][47] -> [FAIL][48] ([fdo#112291])
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-glk5/igt@prime_vgem@busy-blt.html
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-glk6/igt@prime_vgem@busy-blt.html
* igt@prime_vgem@fence-wait-bsd2:
- shard-iclb: [PASS][49] -> [SKIP][50] ([fdo#109276]) +10 similar issues
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb2/igt@prime_vgem@fence-wait-bsd2.html
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb7/igt@prime_vgem@fence-wait-bsd2.html
#### Possible fixes ####
* igt@gem_ctx_isolation@bcs0-s3:
- shard-tglb: [INCOMPLETE][51] ([fdo#111832]) -> [PASS][52]
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb7/igt@gem_ctx_isolation@bcs0-s3.html
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb9/igt@gem_ctx_isolation@bcs0-s3.html
* igt@gem_ctx_isolation@rcs0-s3:
- shard-kbl: [DMESG-WARN][53] ([fdo#108566]) -> [PASS][54] +2 similar issues
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-kbl4/igt@gem_ctx_isolation@rcs0-s3.html
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-kbl6/igt@gem_ctx_isolation@rcs0-s3.html
* igt@gem_ctx_persistence@vcs1-queued:
- shard-iclb: [SKIP][55] ([fdo#109276] / [fdo#112080]) -> [PASS][56] +2 similar issues
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb7/igt@gem_ctx_persistence@vcs1-queued.html
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb1/igt@gem_ctx_persistence@vcs1-queued.html
* igt@gem_ctx_shared@exec-single-timeline-bsd:
- shard-iclb: [SKIP][57] ([fdo#110841]) -> [PASS][58]
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb2/igt@gem_ctx_shared@exec-single-timeline-bsd.html
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb8/igt@gem_ctx_shared@exec-single-timeline-bsd.html
* igt@gem_exec_schedule@preempt-contexts-bsd2:
- shard-iclb: [SKIP][59] ([fdo#109276]) -> [PASS][60] +13 similar issues
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb7/igt@gem_exec_schedule@preempt-contexts-bsd2.html
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb4/igt@gem_exec_schedule@preempt-contexts-bsd2.html
* igt@gem_exec_schedule@reorder-wide-bsd:
- shard-iclb: [SKIP][61] ([fdo#112146]) -> [PASS][62] +7 similar issues
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb2/igt@gem_exec_schedule@reorder-wide-bsd.html
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb3/igt@gem_exec_schedule@reorder-wide-bsd.html
* igt@gem_persistent_relocs@forked-interruptible-thrashing:
- shard-kbl: [FAIL][63] ([fdo#112037]) -> [PASS][64]
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-kbl6/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-kbl3/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
* igt@gem_userptr_blits@map-fixed-invalidate-busy-gup:
- shard-snb: [DMESG-WARN][65] ([fdo#111870]) -> [PASS][66] +2 similar issues
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-snb7/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-snb6/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html
* igt@gem_userptr_blits@sync-unmap-cycles:
- shard-hsw: [DMESG-WARN][67] ([fdo#111870]) -> [PASS][68] +3 similar issues
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-hsw7/igt@gem_userptr_blits@sync-unmap-cycles.html
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-hsw7/igt@gem_userptr_blits@sync-unmap-cycles.html
* igt@i915_suspend@fence-restore-tiled2untiled:
- shard-apl: [DMESG-WARN][69] ([fdo#108566]) -> [PASS][70] +2 similar issues
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-apl1/igt@i915_suspend@fence-restore-tiled2untiled.html
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-apl4/igt@i915_suspend@fence-restore-tiled2untiled.html
* igt@kms_cursor_crc@pipe-d-cursor-suspend:
- shard-tglb: [INCOMPLETE][71] ([fdo#111850]) -> [PASS][72]
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb8/igt@kms_cursor_crc@pipe-d-cursor-suspend.html
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb8/igt@kms_cursor_crc@pipe-d-cursor-suspend.html
* igt@kms_draw_crc@draw-method-rgb565-mmap-cpu-xtiled:
- shard-snb: [SKIP][73] ([fdo#109271]) -> [PASS][74]
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-snb2/igt@kms_draw_crc@draw-method-rgb565-mmap-cpu-xtiled.html
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-snb6/igt@kms_draw_crc@draw-method-rgb565-mmap-cpu-xtiled.html
* igt@kms_flip@flip-vs-absolute-wf_vblank-interruptible:
- shard-hsw: [INCOMPLETE][75] ([fdo#103540]) -> [PASS][76]
[75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-hsw8/igt@kms_flip@flip-vs-absolute-wf_vblank-interruptible.html
[76]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-hsw6/igt@kms_flip@flip-vs-absolute-wf_vblank-interruptible.html
* igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-pwrite:
- shard-iclb: [FAIL][77] ([fdo#103167]) -> [PASS][78] +3 similar issues
[77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb7/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-pwrite.html
[78]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb7/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-shrfb-draw-pwrite.html
* igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-blt:
- shard-tglb: [FAIL][79] ([fdo#103167]) -> [PASS][80] +2 similar issues
[79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb9/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-blt.html
[80]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb6/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-pri-indfb-draw-blt.html
* igt@kms_plane_lowres@pipe-a-tiling-y:
- shard-iclb: [FAIL][81] ([fdo#103166]) -> [PASS][82]
[81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb6/igt@kms_plane_lowres@pipe-a-tiling-y.html
[82]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb2/igt@kms_plane_lowres@pipe-a-tiling-y.html
* igt@kms_psr@no_drrs:
- shard-iclb: [FAIL][83] ([fdo#108341]) -> [PASS][84]
[83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb1/igt@kms_psr@no_drrs.html
[84]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb8/igt@kms_psr@no_drrs.html
* igt@kms_psr@psr2_sprite_plane_move:
- shard-iclb: [SKIP][85] ([fdo#109441]) -> [PASS][86] +2 similar issues
[85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb5/igt@kms_psr@psr2_sprite_plane_move.html
[86]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb2/igt@kms_psr@psr2_sprite_plane_move.html
* igt@perf_pmu@busy-vcs1:
- shard-iclb: [SKIP][87] ([fdo#112080]) -> [PASS][88] +5 similar issues
[87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb8/igt@perf_pmu@busy-vcs1.html
[88]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb2/igt@perf_pmu@busy-vcs1.html
* igt@prime_vgem@wait-blt:
- shard-kbl: [FAIL][89] ([fdo#112292]) -> [PASS][90]
[89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-kbl4/igt@prime_vgem@wait-blt.html
[90]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-kbl3/igt@prime_vgem@wait-blt.html
- shard-glk: [FAIL][91] ([fdo#112292]) -> [PASS][92]
[91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-glk5/igt@prime_vgem@wait-blt.html
[92]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-glk1/igt@prime_vgem@wait-blt.html
#### Warnings ####
* igt@gem_ctx_isolation@vcs1-nonpriv-switch:
- shard-iclb: [FAIL][93] ([fdo#111329]) -> [SKIP][94] ([fdo#109276] / [fdo#112080])
[93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb4/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html
[94]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb8/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html
* igt@gem_ctx_isolation@vcs2-reset:
- shard-tglb: [SKIP][95] ([fdo#111912] / [fdo#112080]) -> [SKIP][96] ([fdo#112080])
[95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb7/igt@gem_ctx_isolation@vcs2-reset.html
[96]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb9/igt@gem_ctx_isolation@vcs2-reset.html
* igt@gem_eio@kms:
- shard-snb: [DMESG-WARN][97] ([fdo#111781]) -> [DMESG-WARN][98] ([fdo#112000] / [fdo#111781])
[97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-snb4/igt@gem_eio@kms.html
[98]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-snb6/igt@gem_eio@kms.html
* igt@gem_exec_schedule@deep-bsd1:
- shard-tglb: [INCOMPLETE][99] ([fdo#111671]) -> [FAIL][100] ([fdo#111646])
[99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb8/igt@gem_exec_schedule@deep-bsd1.html
[100]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb6/igt@gem_exec_schedule@deep-bsd1.html
* igt@gem_exec_schedule@deep-render:
- shard-tglb: [FAIL][101] ([fdo#111646]) -> [INCOMPLETE][102] ([fdo#111671])
[101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb8/igt@gem_exec_schedule@deep-render.html
[102]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-tglb2/igt@gem_exec_schedule@deep-render.html
* igt@kms_dp_dsc@basic-dsc-enable-edp:
- shard-iclb: [DMESG-WARN][103] ([fdo#107724]) -> [SKIP][104] ([fdo#109349])
[103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-iclb2/igt@kms_dp_dsc@basic-dsc-enable-edp.html
[104]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/shard-iclb6/igt@kms_dp_dsc@basic-dsc-enable-edp.html
* igt@kms_psr@psr2_suspend:
- shard-tglb: [DMESG-WARN][105] ([fdo#111600]) -> [INCOMPLETE][106] ([fdo#111832] / [fdo#111850])
[105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7346/shard-tglb9/igt@kms_psr@psr2_suspend.html
[106]: https://intel-gfx-c
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_3707/index.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 23+ messages in thread
end of thread, other threads:[~2019-11-16 0:20 UTC | newest]
Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-11-14 19:15 [PATCH i-g-t] i915/gem_exec_balancer: Throw a few hangs into the virtual pipelines Chris Wilson
2019-11-14 19:15 ` [igt-dev] " Chris Wilson
2019-11-14 19:15 ` [Intel-gfx] " Chris Wilson
2019-11-14 20:07 ` [igt-dev] ✓ Fi.CI.BAT: success for " Patchwork
2019-11-14 20:11 ` [igt-dev] ✗ GitLab.Pipeline: warning " Patchwork
2019-11-15 13:02 ` [PATCH i-g-t] " Tvrtko Ursulin
2019-11-15 13:02 ` [igt-dev] [Intel-gfx] " Tvrtko Ursulin
2019-11-15 13:02 ` Tvrtko Ursulin
2019-11-15 13:09 ` Chris Wilson
2019-11-15 13:09 ` [igt-dev] [Intel-gfx] " Chris Wilson
2019-11-15 13:09 ` Chris Wilson
2019-11-15 14:52 ` Tvrtko Ursulin
2019-11-15 14:52 ` [Intel-gfx] " Tvrtko Ursulin
2019-11-15 14:58 ` Chris Wilson
2019-11-15 14:58 ` [igt-dev] [Intel-gfx] " Chris Wilson
2019-11-15 14:58 ` Chris Wilson
2019-11-15 14:59 ` Chris Wilson
2019-11-15 14:59 ` [igt-dev] [Intel-gfx] " Chris Wilson
2019-11-15 14:59 ` Chris Wilson
2019-11-15 15:26 ` Tvrtko Ursulin
2019-11-15 15:26 ` [igt-dev] [Intel-gfx] " Tvrtko Ursulin
2019-11-15 15:26 ` Tvrtko Ursulin
2019-11-16 0:20 ` [igt-dev] ✗ Fi.CI.IGT: failure for " Patchwork
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.