All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] drm/i915/selftests: Exercise context switching in parallel
@ 2019-09-30 11:09 Chris Wilson
  2019-09-30 11:31 ` [PATCH v2] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
                   ` (8 more replies)
  0 siblings, 9 replies; 16+ messages in thread
From: Chris Wilson @ 2019-09-30 11:09 UTC (permalink / raw)
  To: intel-gfx

We currently test context switching on each engine as a basic stress
test (just verifying that nothing explodes if we execute 2 requests from
different contexts sequentially). What we have not tested is what
happens if we try and do so on all available engines simultaneously,
putting our SW and the HW under the maximal stress.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 .../drm/i915/gem/selftests/i915_gem_context.c | 205 ++++++++++++++++++
 1 file changed, 205 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index dc25bcc3e372..33acc33bc778 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -156,6 +156,210 @@ static int live_nop_switch(void *arg)
 	return err;
 }
 
+struct parallel_switch {
+	struct task_struct *tsk;
+	struct intel_context *ce[2];
+};
+
+static int __live_parallel_switch1(void *data)
+{
+	struct parallel_switch *arg = data;
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_request *rq;
+		int err;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[0]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[1]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		err = 0;
+		if (i915_request_wait(rq, 0, HZ / 5) < 0)
+			err = -ETIME;
+		i915_request_put(rq);
+		if (err)
+			return err;
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+	return 0;
+}
+
+static int __live_parallel_switchN(void *data)
+{
+	struct parallel_switch *arg = data;
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_request *rq;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[0]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[1]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+	return 0;
+}
+
+static int live_parallel_switch(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	static int (* const func[])(void *arg) = {
+		__live_parallel_switch1,
+		__live_parallel_switchN,
+		NULL,
+	};
+	struct i915_gem_context *ctx[2];
+	struct parallel_switch *data;
+	int (* const *fn)(void *arg);
+	struct drm_file *file;
+	int err = 0;
+	int n;
+
+	/*
+	 * Check we can process switches on all engines simultaneously.
+	 */
+
+	if (!DRIVER_CAPS(i915)->has_logical_contexts)
+		return 0;
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	data = kcalloc(I915_NUM_ENGINES, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	for (n = 0; n < ARRAY_SIZE(ctx); n++) {
+		struct i915_gem_engines_iter it;
+		struct intel_context *ce;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		ctx[n] = live_context(i915, file);
+		mutex_unlock(&i915->drm.struct_mutex);
+		if (IS_ERR(ctx[n])) {
+			err = PTR_ERR(ctx[n]);
+			goto out;
+		}
+
+		for_each_gem_engine(ce,
+				    i915_gem_context_lock_engines(ctx[n]), it) {
+			mutex_lock(&i915->drm.struct_mutex);
+			err = intel_context_pin(ce);
+			mutex_unlock(&i915->drm.struct_mutex);
+			if (err) {
+				i915_gem_context_unlock_engines(ctx[n]);
+				goto out;
+			}
+			data[ce->engine->legacy_idx].ce[n] = ce;
+		}
+		i915_gem_context_unlock_engines(ctx[n]);
+	}
+
+	for (fn = func; !err && *fn; fn++) {
+		struct igt_live_test t;
+		int n;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		err = igt_live_test_begin(&t, i915, __func__, "");
+		mutex_unlock(&i915->drm.struct_mutex);
+		if (err)
+			break;
+
+		for (n = 0; n < I915_NUM_ENGINES; n++) {
+			if (data[n].ce[0] == NULL || data[n].ce[1] == NULL)
+				continue;
+
+			data[n].tsk = kthread_run(*fn, &data[n],
+						  "igt/parallel:%s",
+						  data[n].ce[0]->engine->name);
+			if (IS_ERR(data[n].tsk)) {
+				err = PTR_ERR(data[n].tsk);
+				break;
+			}
+			get_task_struct(data[n].tsk);
+		}
+
+		for (n = 0; n < I915_NUM_ENGINES; n++) {
+			int status;
+
+			if (IS_ERR_OR_NULL(data[n].tsk))
+				continue;
+
+			status = kthread_stop(data[n].tsk);
+			if (status && !err)
+				err = status;
+
+			put_task_struct(data[n].tsk);
+			data[n].tsk = NULL;
+		}
+
+		mutex_lock(&i915->drm.struct_mutex);
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		mutex_unlock(&i915->drm.struct_mutex);
+	}
+
+out:
+	mutex_lock(&i915->drm.struct_mutex);
+	for (n = 0; n < I915_NUM_ENGINES; n++) {
+		if (data[n].ce[0])
+			intel_context_unpin(data[n].ce[0]);
+		if (data[n].ce[1])
+			intel_context_unpin(data[n].ce[1]);
+	}
+	mutex_unlock(&i915->drm.struct_mutex);
+	kfree(data);
+	mock_file_free(i915, file);
+	return err;
+}
+
 static unsigned long real_page_count(struct drm_i915_gem_object *obj)
 {
 	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
@@ -1681,6 +1885,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_nop_switch),
+		SUBTEST(live_parallel_switch),
 		SUBTEST(igt_ctx_exec),
 		SUBTEST(igt_ctx_readonly),
 		SUBTEST(igt_ctx_sseu),
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH v2] drm/i915/selftests: Exercise context switching in parallel
  2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
@ 2019-09-30 11:31 ` Chris Wilson
  2019-09-30 13:47   ` Tvrtko Ursulin
                     ` (2 more replies)
  2019-09-30 11:55 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallel (rev2) Patchwork
                   ` (7 subsequent siblings)
  8 siblings, 3 replies; 16+ messages in thread
From: Chris Wilson @ 2019-09-30 11:31 UTC (permalink / raw)
  To: intel-gfx

We currently test context switching on each engine as a basic stress
test (just verifying that nothing explodes if we execute 2 requests from
different contexts sequentially). What we have not tested is what
happens if we try and do so on all available engines simultaneously,
putting our SW and the HW under the maximal stress.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
Keep struct_mutex the outer lock while it still exists
---
 .../drm/i915/gem/selftests/i915_gem_context.c | 203 ++++++++++++++++++
 1 file changed, 203 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index dc25bcc3e372..8325c7329dc7 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -156,6 +156,208 @@ static int live_nop_switch(void *arg)
 	return err;
 }
 
+struct parallel_switch {
+	struct task_struct *tsk;
+	struct intel_context *ce[2];
+};
+
+static int __live_parallel_switch1(void *data)
+{
+	struct parallel_switch *arg = data;
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_request *rq;
+		int err;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[0]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[1]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		err = 0;
+		if (i915_request_wait(rq, 0, HZ / 5) < 0)
+			err = -ETIME;
+		i915_request_put(rq);
+		if (err)
+			return err;
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+	return 0;
+}
+
+static int __live_parallel_switchN(void *data)
+{
+	struct parallel_switch *arg = data;
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_request *rq;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[0]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[1]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+	return 0;
+}
+
+static int live_parallel_switch(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	static int (* const func[])(void *arg) = {
+		__live_parallel_switch1,
+		__live_parallel_switchN,
+		NULL,
+	};
+	struct i915_gem_context *ctx[2];
+	struct parallel_switch *data;
+	int (* const *fn)(void *arg);
+	struct drm_file *file;
+	int err = 0;
+	int n;
+
+	/*
+	 * Check we can process switches on all engines simultaneously.
+	 */
+
+	if (!DRIVER_CAPS(i915)->has_logical_contexts)
+		return 0;
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	data = kcalloc(I915_NUM_ENGINES, sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	for (n = 0; n < ARRAY_SIZE(ctx); n++) {
+		struct i915_gem_engines_iter it;
+		struct intel_context *ce;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		ctx[n] = live_context(i915, file);
+		if (IS_ERR(ctx[n])) {
+			err = PTR_ERR(ctx[n]);
+			goto out_locked;
+		}
+
+		for_each_gem_engine(ce,
+				    i915_gem_context_lock_engines(ctx[n]), it) {
+			err = intel_context_pin(ce);
+			if (err) {
+				i915_gem_context_unlock_engines(ctx[n]);
+				goto out_locked;
+			}
+			data[ce->engine->legacy_idx].ce[n] = ce;
+		}
+		i915_gem_context_unlock_engines(ctx[n]);
+		mutex_unlock(&i915->drm.struct_mutex);
+	}
+
+	for (fn = func; !err && *fn; fn++) {
+		struct igt_live_test t;
+		int n;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		err = igt_live_test_begin(&t, i915, __func__, "");
+		mutex_unlock(&i915->drm.struct_mutex);
+		if (err)
+			break;
+
+		for (n = 0; n < I915_NUM_ENGINES; n++) {
+			if (data[n].ce[0] == NULL || data[n].ce[1] == NULL)
+				continue;
+
+			data[n].tsk = kthread_run(*fn, &data[n],
+						  "igt/parallel:%s",
+						  data[n].ce[0]->engine->name);
+			if (IS_ERR(data[n].tsk)) {
+				err = PTR_ERR(data[n].tsk);
+				break;
+			}
+			get_task_struct(data[n].tsk);
+		}
+
+		for (n = 0; n < I915_NUM_ENGINES; n++) {
+			int status;
+
+			if (IS_ERR_OR_NULL(data[n].tsk))
+				continue;
+
+			status = kthread_stop(data[n].tsk);
+			if (status && !err)
+				err = status;
+
+			put_task_struct(data[n].tsk);
+			data[n].tsk = NULL;
+		}
+
+		mutex_lock(&i915->drm.struct_mutex);
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		mutex_unlock(&i915->drm.struct_mutex);
+	}
+
+	mutex_lock(&i915->drm.struct_mutex);
+out_locked:
+	for (n = 0; n < I915_NUM_ENGINES; n++) {
+		if (data[n].ce[0])
+			intel_context_unpin(data[n].ce[0]);
+		if (data[n].ce[1])
+			intel_context_unpin(data[n].ce[1]);
+	}
+	mutex_unlock(&i915->drm.struct_mutex);
+	kfree(data);
+	mock_file_free(i915, file);
+	return err;
+}
+
 static unsigned long real_page_count(struct drm_i915_gem_object *obj)
 {
 	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
@@ -1681,6 +1883,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_nop_switch),
+		SUBTEST(live_parallel_switch),
 		SUBTEST(igt_ctx_exec),
 		SUBTEST(igt_ctx_readonly),
 		SUBTEST(igt_ctx_sseu),
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallel (rev2)
  2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
  2019-09-30 11:31 ` [PATCH v2] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
@ 2019-09-30 11:55 ` Patchwork
  2019-09-30 12:22 ` ✓ Fi.CI.BAT: success " Patchwork
                   ` (6 subsequent siblings)
  8 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2019-09-30 11:55 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Exercise context switching in parallel (rev2)
URL   : https://patchwork.freedesktop.org/series/67395/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
b214c64b2862 drm/i915/selftests: Exercise context switching in parallel
-:32: WARNING:LINE_SPACING: Missing a blank line after declarations
#32: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:168:
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);

-:79: WARNING:LINE_SPACING: Missing a blank line after declarations
#79: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:215:
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);

-:116: WARNING:LINE_SPACING: Missing a blank line after declarations
#116: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:252:
+	struct drm_i915_private *i915 = arg;
+	static int (* const func[])(void *arg) = {

-:123: WARNING:LINE_SPACING: Missing a blank line after declarations
#123: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:259:
+	struct parallel_switch *data;
+	int (* const *fn)(void *arg);

-:178: CHECK:COMPARISON_TO_NULL: Comparison to NULL could be written "!data[n].ce[0]"
#178: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:314:
+			if (data[n].ce[0] == NULL || data[n].ce[1] == NULL)

-:178: CHECK:COMPARISON_TO_NULL: Comparison to NULL could be written "!data[n].ce[1]"
#178: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:314:
+			if (data[n].ce[0] == NULL || data[n].ce[1] == NULL)

total: 0 errors, 4 warnings, 2 checks, 215 lines checked

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* ✓ Fi.CI.BAT: success for drm/i915/selftests: Exercise context switching in parallel (rev2)
  2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
  2019-09-30 11:31 ` [PATCH v2] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
  2019-09-30 11:55 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallel (rev2) Patchwork
@ 2019-09-30 12:22 ` Patchwork
  2019-09-30 14:22 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallel (rev3) Patchwork
                   ` (5 subsequent siblings)
  8 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2019-09-30 12:22 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Exercise context switching in parallel (rev2)
URL   : https://patchwork.freedesktop.org/series/67395/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6973 -> Patchwork_14582
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/index.html

Known issues
------------

  Here are the changes found in Patchwork_14582 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_create@basic-files:
    - fi-icl-u2:          [PASS][1] -> [INCOMPLETE][2] ([fdo#107713] / [fdo#109100])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-icl-u2/igt@gem_ctx_create@basic-files.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/fi-icl-u2/igt@gem_ctx_create@basic-files.html

  * igt@i915_selftest@live_hangcheck:
    - fi-bsw-n3050:       [PASS][3] -> [INCOMPLETE][4] ([fdo#105876])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-bsw-n3050/igt@i915_selftest@live_hangcheck.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/fi-bsw-n3050/igt@i915_selftest@live_hangcheck.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][5] -> [FAIL][6] ([fdo#111407])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  * igt@kms_pipe_crc_basic@hang-read-crc-pipe-a:
    - fi-icl-u3:          [PASS][7] -> [DMESG-WARN][8] ([fdo#107724]) +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-icl-u3/igt@kms_pipe_crc_basic@hang-read-crc-pipe-a.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/fi-icl-u3/igt@kms_pipe_crc_basic@hang-read-crc-pipe-a.html

  
#### Possible fixes ####

  * igt@gem_mmap@basic-small-bo:
    - fi-icl-u3:          [DMESG-WARN][9] ([fdo#107724]) -> [PASS][10] +1 similar issue
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-icl-u3/igt@gem_mmap@basic-small-bo.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/fi-icl-u3/igt@gem_mmap@basic-small-bo.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#105876]: https://bugs.freedesktop.org/show_bug.cgi?id=105876
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#109100]: https://bugs.freedesktop.org/show_bug.cgi?id=109100
  [fdo#110566]: https://bugs.freedesktop.org/show_bug.cgi?id=110566
  [fdo#111381]: https://bugs.freedesktop.org/show_bug.cgi?id=111381
  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
  [fdo#111867]: https://bugs.freedesktop.org/show_bug.cgi?id=111867


Participating hosts (53 -> 45)
------------------------------

  Additional (1): fi-cml-h 
  Missing    (9): fi-ilk-m540 fi-bxt-dsi fi-hsw-4200u fi-skl-6770hq fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_6973 -> Patchwork_14582

  CI-20190529: 20190529
  CI_DRM_6973: 7462c58bba0fb6e85bd380591c3fd86e298c0f95 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5206: 5a6c68568def840cd720f18fc66f529a89f84675 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14582: b214c64b2862a064deb1795b395fff8aa95557a9 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

b214c64b2862 drm/i915/selftests: Exercise context switching in parallel

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] drm/i915/selftests: Exercise context switching in parallel
  2019-09-30 11:31 ` [PATCH v2] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
@ 2019-09-30 13:47   ` Tvrtko Ursulin
  2019-09-30 13:59     ` Chris Wilson
  2019-09-30 14:15   ` [PATCH v3] " Chris Wilson
  2019-09-30 14:49   ` [PATCH v4] " Chris Wilson
  2 siblings, 1 reply; 16+ messages in thread
From: Tvrtko Ursulin @ 2019-09-30 13:47 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 30/09/2019 12:31, Chris Wilson wrote:
> We currently test context switching on each engine as a basic stress
> test (just verifying that nothing explodes if we execute 2 requests from
> different contexts sequentially). What we have not tested is what
> happens if we try and do so on all available engines simultaneously,
> putting our SW and the HW under the maximal stress.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
> Keep struct_mutex the outer lock while it still exists
> ---
>   .../drm/i915/gem/selftests/i915_gem_context.c | 203 ++++++++++++++++++
>   1 file changed, 203 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> index dc25bcc3e372..8325c7329dc7 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> @@ -156,6 +156,208 @@ static int live_nop_switch(void *arg)
>   	return err;
>   }
>   
> +struct parallel_switch {
> +	struct task_struct *tsk;
> +	struct intel_context *ce[2];
> +};
> +
> +static int __live_parallel_switch1(void *data)
> +{
> +	struct parallel_switch *arg = data;
> +	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
> +	IGT_TIMEOUT(end_time);
> +	unsigned long count;
> +
> +	count = 0;
> +	do {
> +		struct i915_request *rq;
> +		int err;
> +
> +		mutex_lock(&i915->drm.struct_mutex);
> +		rq = i915_request_create(arg->ce[0]);
> +		if (IS_ERR(rq)) {
> +			mutex_unlock(&i915->drm.struct_mutex);
> +			return PTR_ERR(rq);
> +		}
> +
> +		i915_request_add(rq);
> +		mutex_unlock(&i915->drm.struct_mutex);
> +
> +		mutex_lock(&i915->drm.struct_mutex);

unlock-lock! :) I guess in anticipation of removing it all.

> +		rq = i915_request_create(arg->ce[1]);
> +		if (IS_ERR(rq)) {
> +			mutex_unlock(&i915->drm.struct_mutex);
> +			return PTR_ERR(rq);
> +		}
> +
> +		i915_request_get(rq);
> +		i915_request_add(rq);
> +		mutex_unlock(&i915->drm.struct_mutex);
> +
> +		err = 0;
> +		if (i915_request_wait(rq, 0, HZ / 5) < 0)
> +			err = -ETIME;
> +		i915_request_put(rq);
> +		if (err)
> +			return err;
> +
> +		count++;
> +	} while (!__igt_timeout(end_time, NULL));
> +
> +	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
> +	return 0;
> +}
> +
> +static int __live_parallel_switchN(void *data)
> +{
> +	struct parallel_switch *arg = data;
> +	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
> +	IGT_TIMEOUT(end_time);
> +	unsigned long count;
> +
> +	count = 0;
> +	do {
> +		struct i915_request *rq;
> +
> +		mutex_lock(&i915->drm.struct_mutex);
> +		rq = i915_request_create(arg->ce[0]);
> +		if (IS_ERR(rq)) {
> +			mutex_unlock(&i915->drm.struct_mutex);
> +			return PTR_ERR(rq);
> +		}
> +
> +		i915_request_add(rq);
> +		mutex_unlock(&i915->drm.struct_mutex);
> +
> +		mutex_lock(&i915->drm.struct_mutex);
> +		rq = i915_request_create(arg->ce[1]);
> +		if (IS_ERR(rq)) {
> +			mutex_unlock(&i915->drm.struct_mutex);
> +			return PTR_ERR(rq);
> +		}
> +
> +		i915_request_add(rq);
> +		mutex_unlock(&i915->drm.struct_mutex);
> +
> +		count++;
> +	} while (!__igt_timeout(end_time, NULL));
> +
> +	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
> +	return 0;
> +}
> +
> +static int live_parallel_switch(void *arg)
> +{
> +	struct drm_i915_private *i915 = arg;
> +	static int (* const func[])(void *arg) = {
> +		__live_parallel_switch1,
> +		__live_parallel_switchN,
> +		NULL,
> +	};
> +	struct i915_gem_context *ctx[2];
> +	struct parallel_switch *data;
> +	int (* const *fn)(void *arg);
> +	struct drm_file *file;
> +	int err = 0;
> +	int n;
> +
> +	/*
> +	 * Check we can process switches on all engines simultaneously.
> +	 */
> +
> +	if (!DRIVER_CAPS(i915)->has_logical_contexts)
> +		return 0;
> +
> +	file = mock_file(i915);
> +	if (IS_ERR(file))
> +		return PTR_ERR(file);
> +
> +	data = kcalloc(I915_NUM_ENGINES, sizeof(*data), GFP_KERNEL);

There is a little bit of mixing up I915_NUM_ENGINES and gem engines 
(which contains the num_engines field) in this function.

I think it would be better to limit to one - so maybe get the count from 
gem engines? It can't change during selftest so don't have to have them 
locked for the whole time.

> +	if (!data)

mock_file_free

> +		return -ENOMEM;
> +
> +	for (n = 0; n < ARRAY_SIZE(ctx); n++) {
> +		struct i915_gem_engines_iter it;
> +		struct intel_context *ce;
> +
> +		mutex_lock(&i915->drm.struct_mutex);
> +		ctx[n] = live_context(i915, file);
> +		if (IS_ERR(ctx[n])) {
> +			err = PTR_ERR(ctx[n]);
> +			goto out_locked;
> +		}
> +
> +		for_each_gem_engine(ce,
> +				    i915_gem_context_lock_engines(ctx[n]), it) {
> +			err = intel_context_pin(ce);
> +			if (err) {
> +				i915_gem_context_unlock_engines(ctx[n]);
> +				goto out_locked;
> +			}
> +			data[ce->engine->legacy_idx].ce[n] = ce;

IMHO a bit confusing to use legacy_idx - makes it sound like there is 
some significance to the legacy part so why not just use engine->id?

> +		}
> +		i915_gem_context_unlock_engines(ctx[n]);
> +		mutex_unlock(&i915->drm.struct_mutex);
> +	}
> +
> +	for (fn = func; !err && *fn; fn++) {
> +		struct igt_live_test t;
> +		int n;
> +
> +		mutex_lock(&i915->drm.struct_mutex);
> +		err = igt_live_test_begin(&t, i915, __func__, "");
> +		mutex_unlock(&i915->drm.struct_mutex);
> +		if (err)
> +			break;
> +
> +		for (n = 0; n < I915_NUM_ENGINES; n++) {
> +			if (data[n].ce[0] == NULL || data[n].ce[1] == NULL)
> +				continue;
> +
> +			data[n].tsk = kthread_run(*fn, &data[n],
> +						  "igt/parallel:%s",
> +						  data[n].ce[0]->engine->name);
> +			if (IS_ERR(data[n].tsk)) {
> +				err = PTR_ERR(data[n].tsk);
> +				break;
> +			}
> +			get_task_struct(data[n].tsk);
> +		}
> +
> +		for (n = 0; n < I915_NUM_ENGINES; n++) {
> +			int status;
> +
> +			if (IS_ERR_OR_NULL(data[n].tsk))
> +				continue;
> +
> +			status = kthread_stop(data[n].tsk);
> +			if (status && !err)
> +				err = status;
> +
> +			put_task_struct(data[n].tsk);
> +			data[n].tsk = NULL;
> +		}
> +
> +		mutex_lock(&i915->drm.struct_mutex);
> +		if (igt_live_test_end(&t))
> +			err = -EIO;
> +		mutex_unlock(&i915->drm.struct_mutex);
> +	}
> +
> +	mutex_lock(&i915->drm.struct_mutex);
> +out_locked:
> +	for (n = 0; n < I915_NUM_ENGINES; n++) {
> +		if (data[n].ce[0])
> +			intel_context_unpin(data[n].ce[0]);
> +		if (data[n].ce[1])
> +			intel_context_unpin(data[n].ce[1]);
> +	}
> +	mutex_unlock(&i915->drm.struct_mutex);
> +	kfree(data);
> +	mock_file_free(i915, file);
> +	return err;
> +}
> +
>   static unsigned long real_page_count(struct drm_i915_gem_object *obj)
>   {
>   	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
> @@ -1681,6 +1883,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
>   {
>   	static const struct i915_subtest tests[] = {
>   		SUBTEST(live_nop_switch),
> +		SUBTEST(live_parallel_switch),
>   		SUBTEST(igt_ctx_exec),
>   		SUBTEST(igt_ctx_readonly),
>   		SUBTEST(igt_ctx_sseu),
> 

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v2] drm/i915/selftests: Exercise context switching in parallel
  2019-09-30 13:47   ` Tvrtko Ursulin
@ 2019-09-30 13:59     ` Chris Wilson
  0 siblings, 0 replies; 16+ messages in thread
From: Chris Wilson @ 2019-09-30 13:59 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-09-30 14:47:26)
> 
> On 30/09/2019 12:31, Chris Wilson wrote:
> > +static int live_parallel_switch(void *arg)
> > +{
> > +     struct drm_i915_private *i915 = arg;
> > +     static int (* const func[])(void *arg) = {
> > +             __live_parallel_switch1,
> > +             __live_parallel_switchN,
> > +             NULL,
> > +     };
> > +     struct i915_gem_context *ctx[2];
> > +     struct parallel_switch *data;
> > +     int (* const *fn)(void *arg);
> > +     struct drm_file *file;
> > +     int err = 0;
> > +     int n;
> > +
> > +     /*
> > +      * Check we can process switches on all engines simultaneously.
> > +      */
> > +
> > +     if (!DRIVER_CAPS(i915)->has_logical_contexts)
> > +             return 0;
> > +
> > +     file = mock_file(i915);
> > +     if (IS_ERR(file))
> > +             return PTR_ERR(file);
> > +
> > +     data = kcalloc(I915_NUM_ENGINES, sizeof(*data), GFP_KERNEL);
> 
> There is a little bit of mixing up I915_NUM_ENGINES and gem engines 
> (which contains the num_engines field) in this function.
> 
> I think it would be better to limit to one - so maybe get the count from 
> gem engines? It can't change during selftest so don't have to have them 
> locked for the whole time.
> 
> > +     if (!data)
> 
> mock_file_free
> 
> > +             return -ENOMEM;
> > +
> > +     for (n = 0; n < ARRAY_SIZE(ctx); n++) {
> > +             struct i915_gem_engines_iter it;
> > +             struct intel_context *ce;
> > +
> > +             mutex_lock(&i915->drm.struct_mutex);
> > +             ctx[n] = live_context(i915, file);
> > +             if (IS_ERR(ctx[n])) {
> > +                     err = PTR_ERR(ctx[n]);
> > +                     goto out_locked;
> > +             }
> > +
> > +             for_each_gem_engine(ce,
> > +                                 i915_gem_context_lock_engines(ctx[n]), it) {
> > +                     err = intel_context_pin(ce);
> > +                     if (err) {
> > +                             i915_gem_context_unlock_engines(ctx[n]);
> > +                             goto out_locked;
> > +                     }
> > +                     data[ce->engine->legacy_idx].ce[n] = ce;
> 
> IMHO a bit confusing to use legacy_idx - makes it sound like there is 
> some significance to the legacy part so why not just use engine->id?

Default engine list with legacy_idx is nice and linear, with a cap of
I915_NUM_ENGINES.

Ok, I have a weirder plan...
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v3] drm/i915/selftests: Exercise context switching in parallel
  2019-09-30 11:31 ` [PATCH v2] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
  2019-09-30 13:47   ` Tvrtko Ursulin
@ 2019-09-30 14:15   ` Chris Wilson
  2019-09-30 14:18     ` Chris Wilson
  2019-09-30 14:49   ` [PATCH v4] " Chris Wilson
  2 siblings, 1 reply; 16+ messages in thread
From: Chris Wilson @ 2019-09-30 14:15 UTC (permalink / raw)
  To: intel-gfx

We currently test context switching on each engine as a basic stress
test (just verifying that nothing explodes if we execute 2 requests from
different contexts sequentially). What we have not tested is what
happens if we try and do so on all available engines simultaneously,
putting our SW and the HW under the maximal stress.

v2: Clone the set of engines from the first context into the secondary
contexts.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 .../drm/i915/gem/selftests/i915_gem_context.c | 238 ++++++++++++++++++
 1 file changed, 238 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index dc25bcc3e372..c221ed53620f 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -156,6 +156,243 @@ static int live_nop_switch(void *arg)
 	return err;
 }
 
+struct parallel_switch {
+	struct task_struct *tsk;
+	struct intel_context *ce[2];
+};
+
+static int __live_parallel_switch1(void *data)
+{
+	struct parallel_switch *arg = data;
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_request *rq;
+		int err;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[0]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[1]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_get(rq);
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		err = 0;
+		if (i915_request_wait(rq, 0, HZ / 5) < 0)
+			err = -ETIME;
+		i915_request_put(rq);
+		if (err)
+			return err;
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+	return 0;
+}
+
+static int __live_parallel_switchN(void *data)
+{
+	struct parallel_switch *arg = data;
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_request *rq;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[0]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		mutex_lock(&i915->drm.struct_mutex);
+		rq = i915_request_create(arg->ce[1]);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+		mutex_unlock(&i915->drm.struct_mutex);
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+	return 0;
+}
+
+static int live_parallel_switch(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	static int (* const func[])(void *arg) = {
+		__live_parallel_switch1,
+		__live_parallel_switchN,
+		NULL,
+	};
+	struct i915_gem_engines *engines;
+	struct i915_gem_engines_iter it;
+	int (* const *fn)(void *arg);
+	struct parallel_switch *data;
+	struct i915_gem_context *ctx;
+	struct intel_context *ce;
+	struct drm_file *file;
+	int n, m, count;
+	int err = 0;
+
+	/*
+	 * Check we can process switches on all engines simultaneously.
+	 */
+
+	if (!DRIVER_CAPS(i915)->has_logical_contexts)
+		return 0;
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	ctx = live_context(i915, file);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out_locked;
+	}
+
+	engines = i915_gem_context_lock_engines(ctx);
+	count = engines->num_engines;
+
+	data = kcalloc(count, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		i915_gem_context_unlock_engines(ctx);
+		err = -ENOMEM;
+		goto out_locked;
+	}
+
+	m = 0; /* Use the first context as our template for the engines */
+	for_each_gem_engine(ce, engines, it) {
+		err = intel_context_pin(ce);
+		if (err) {
+			i915_gem_context_unlock_engines(ctx);
+			goto out_locked;
+		}
+		data[m++].ce[0] = intel_context_get(ce);
+	}
+	i915_gem_context_unlock_engines(ctx);
+
+	/* Clone the same set of engines in the other contexts */
+	for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
+		ctx = live_context(i915, file);
+		if (IS_ERR(ctx)) {
+			err = PTR_ERR(ctx);
+			goto out_locked;
+		}
+
+		for (m = 0; m < count; m++) {
+			if (!data[m].ce[0])
+				continue;
+
+			ce = intel_context_create(ctx, data[m].ce[0]->engine);
+			if (IS_ERR(ce))
+				goto out_locked;
+
+			err = intel_context_pin(ce);
+			if (err) {
+				intel_context_put(ce);
+				goto out_locked;
+			}
+
+			data[m].ce[n] = ce;
+		}
+	}
+
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	for (fn = func; !err && *fn; fn++) {
+		struct igt_live_test t;
+		int n;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		err = igt_live_test_begin(&t, i915, __func__, "");
+		mutex_unlock(&i915->drm.struct_mutex);
+		if (err)
+			break;
+
+		for (n = 0; n < count; n++) {
+			if (data[n].ce[0] == NULL)
+				continue;
+
+			data[n].tsk = kthread_run(*fn, &data[n],
+						  "igt/parallel:%s",
+						  data[n].ce[0]->engine->name);
+			if (IS_ERR(data[n].tsk)) {
+				err = PTR_ERR(data[n].tsk);
+				break;
+			}
+			get_task_struct(data[n].tsk);
+		}
+
+		for (n = 0; n < count; n++) {
+			int status;
+
+			if (IS_ERR_OR_NULL(data[n].tsk))
+				continue;
+
+			status = kthread_stop(data[n].tsk);
+			if (status && !err)
+				err = status;
+
+			put_task_struct(data[n].tsk);
+			data[n].tsk = NULL;
+		}
+
+		mutex_lock(&i915->drm.struct_mutex);
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		mutex_unlock(&i915->drm.struct_mutex);
+	}
+
+	mutex_lock(&i915->drm.struct_mutex);
+out_locked:
+	for (n = 0; n < count; n++) {
+		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
+			if (!data[n].ce[m])
+				continue;
+
+			intel_context_unpin(data[n].ce[m]);
+			intel_context_put(data[n].ce[m]);
+		}
+	}
+	mutex_unlock(&i915->drm.struct_mutex);
+	kfree(data);
+	mock_file_free(i915, file);
+	return err;
+}
+
 static unsigned long real_page_count(struct drm_i915_gem_object *obj)
 {
 	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
@@ -1681,6 +1918,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_nop_switch),
+		SUBTEST(live_parallel_switch),
 		SUBTEST(igt_ctx_exec),
 		SUBTEST(igt_ctx_readonly),
 		SUBTEST(igt_ctx_sseu),
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH v3] drm/i915/selftests: Exercise context switching in parallel
  2019-09-30 14:15   ` [PATCH v3] " Chris Wilson
@ 2019-09-30 14:18     ` Chris Wilson
  0 siblings, 0 replies; 16+ messages in thread
From: Chris Wilson @ 2019-09-30 14:18 UTC (permalink / raw)
  To: intel-gfx

Quoting Chris Wilson (2019-09-30 15:15:22)
> We currently test context switching on each engine as a basic stress
> test (just verifying that nothing explodes if we execute 2 requests from
> different contexts sequentially). What we have not tested is what
> happens if we try and do so on all available engines simultaneously,
> putting our SW and the HW under the maximal stress.
> 
> v2: Clone the set of engines from the first context into the secondary
> contexts.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---

> +static int live_parallel_switch(void *arg)
> +{
> +       struct drm_i915_private *i915 = arg;
> +       static int (* const func[])(void *arg) = {
> +               __live_parallel_switch1,
> +               __live_parallel_switchN,
> +               NULL,
> +       };
> +       struct i915_gem_engines *engines;
> +       struct i915_gem_engines_iter it;
> +       int (* const *fn)(void *arg);
> +       struct parallel_switch *data;
> +       struct i915_gem_context *ctx;
> +       struct intel_context *ce;
> +       struct drm_file *file;
> +       int n, m, count;
> +       int err = 0;
> +
> +       /*
> +        * Check we can process switches on all engines simultaneously.
> +        */
> +
> +       if (!DRIVER_CAPS(i915)->has_logical_contexts)
> +               return 0;
> +
> +       file = mock_file(i915);
> +       if (IS_ERR(file))
> +               return PTR_ERR(file);
> +
> +       mutex_lock(&i915->drm.struct_mutex);
> +
> +       ctx = live_context(i915, file);
> +       if (IS_ERR(ctx)) {
> +               err = PTR_ERR(ctx);
> +               goto out_locked;

This needs data = NULL to be safe.
...

> +       mutex_lock(&i915->drm.struct_mutex);
> +out_locked:
> +       for (n = 0; n < count; n++) {
> +               for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
> +                       if (!data[n].ce[m])
> +                               continue;
> +
> +                       intel_context_unpin(data[n].ce[m]);
> +                       intel_context_put(data[n].ce[m]);
> +               }
> +       }
> +       mutex_unlock(&i915->drm.struct_mutex);
> +       kfree(data);
> +       mock_file_free(i915, file);
> +       return err;
> +}
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallell (rev3)
  2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallell Chris Wilson
                   ` (2 preceding siblings ...)
  2019-09-30 12:22 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-09-30 14:22 ` Patchwork
  2019-09-30 14:54 ` ✓ Fi.CI.BAT: success " Patchwork
                   ` (4 subsequent siblings)
  8 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2019-09-30 14:22 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Exercise context switching in parallell (rev3)
URL   : https://patchwork.freedesktop.org/series/67395/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
b6e359fa23f7 drm/i915/selftests: Exercise context switching in parallel
-:36: WARNING:LINE_SPACING: Missing a blank line after declarations
#36: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:168:
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);

-:83: WARNING:LINE_SPACING: Missing a blank line after declarations
#83: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:215:
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);

-:120: WARNING:LINE_SPACING: Missing a blank line after declarations
#120: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:252:
+	struct drm_i915_private *i915 = arg;
+	static int (* const func[])(void *arg) = {

-:127: WARNING:LINE_SPACING: Missing a blank line after declarations
#127: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:259:
+	struct i915_gem_engines_iter it;
+	int (* const *fn)(void *arg);

-:214: CHECK:COMPARISON_TO_NULL: Comparison to NULL could be written "!data[n].ce[0]"
#214: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:346:
+			if (data[n].ce[0] == NULL)

total: 0 errors, 4 warnings, 1 checks, 250 lines checked

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH v4] drm/i915/selftests: Exercise context switching in parallel
  2019-09-30 11:31 ` [PATCH v2] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
  2019-09-30 13:47   ` Tvrtko Ursulin
  2019-09-30 14:15   ` [PATCH v3] " Chris Wilson
@ 2019-09-30 14:49   ` Chris Wilson
  2019-09-30 16:18     ` Tvrtko Ursulin
  2 siblings, 1 reply; 16+ messages in thread
From: Chris Wilson @ 2019-09-30 14:49 UTC (permalink / raw)
  To: intel-gfx

We currently test context switching on each engine as a basic stress
test (just verifying that nothing explodes if we execute 2 requests from
different contexts sequentially). What we have not tested is what
happens if we try and do so on all available engines simultaneously,
putting our SW and the HW under the maximal stress.

v2: Clone the set of engines from the first context into the secondary
contexts.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 .../drm/i915/gem/selftests/i915_gem_context.c | 225 ++++++++++++++++++
 1 file changed, 225 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index dc25bcc3e372..81a83c34404c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -156,6 +156,230 @@ static int live_nop_switch(void *arg)
 	return err;
 }
 
+struct parallel_switch {
+	struct task_struct *tsk;
+	struct intel_context *ce[2];
+};
+
+static int __live_parallel_switch1(void *data)
+{
+	struct parallel_switch *arg = data;
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_request *rq = NULL;
+		int err, n;
+
+		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
+			i915_request_put(rq);
+
+			mutex_lock(&i915->drm.struct_mutex);
+			rq = i915_request_create(arg->ce[n]);
+			if (IS_ERR(rq)) {
+				mutex_unlock(&i915->drm.struct_mutex);
+				return PTR_ERR(rq);
+			}
+
+			i915_request_get(rq);
+			i915_request_add(rq);
+			mutex_unlock(&i915->drm.struct_mutex);
+		}
+
+		err = 0;
+		if (i915_request_wait(rq, 0, HZ / 5) < 0)
+			err = -ETIME;
+		i915_request_put(rq);
+		if (err)
+			return err;
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
+	return 0;
+}
+
+static int __live_parallel_switchN(void *data)
+{
+	struct parallel_switch *arg = data;
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+	int n;
+
+	count = 0;
+	do {
+		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
+			struct i915_request *rq;
+
+			mutex_lock(&i915->drm.struct_mutex);
+			rq = i915_request_create(arg->ce[n]);
+			if (IS_ERR(rq)) {
+				mutex_unlock(&i915->drm.struct_mutex);
+				return PTR_ERR(rq);
+			}
+
+			i915_request_add(rq);
+			mutex_unlock(&i915->drm.struct_mutex);
+		}
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
+	return 0;
+}
+
+static int live_parallel_switch(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	static int (* const func[])(void *arg) = {
+		__live_parallel_switch1,
+		__live_parallel_switchN,
+		NULL,
+	};
+	struct parallel_switch *data = NULL;
+	struct i915_gem_engines *engines;
+	struct i915_gem_engines_iter it;
+	int (* const *fn)(void *arg);
+	struct i915_gem_context *ctx;
+	struct intel_context *ce;
+	struct drm_file *file;
+	int n, m, count;
+	int err = 0;
+
+	/*
+	 * Check we can process switches on all engines simultaneously.
+	 */
+
+	if (!DRIVER_CAPS(i915)->has_logical_contexts)
+		return 0;
+
+	file = mock_file(i915);
+	if (IS_ERR(file))
+		return PTR_ERR(file);
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	ctx = live_context(i915, file);
+	if (IS_ERR(ctx)) {
+		err = PTR_ERR(ctx);
+		goto out_locked;
+	}
+
+	engines = i915_gem_context_lock_engines(ctx);
+	count = engines->num_engines;
+
+	data = kcalloc(count, sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		i915_gem_context_unlock_engines(ctx);
+		err = -ENOMEM;
+		goto out_locked;
+	}
+
+	m = 0; /* Use the first context as our template for the engines */
+	for_each_gem_engine(ce, engines, it) {
+		err = intel_context_pin(ce);
+		if (err) {
+			i915_gem_context_unlock_engines(ctx);
+			goto out_locked;
+		}
+		data[m++].ce[0] = intel_context_get(ce);
+	}
+	i915_gem_context_unlock_engines(ctx);
+
+	/* Clone the same set of engines in the other contexts */
+	for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
+		ctx = live_context(i915, file);
+		if (IS_ERR(ctx)) {
+			err = PTR_ERR(ctx);
+			goto out_locked;
+		}
+
+		for (m = 0; m < count; m++) {
+			if (!data[m].ce[0])
+				continue;
+
+			ce = intel_context_create(ctx, data[m].ce[0]->engine);
+			if (IS_ERR(ce))
+				goto out_locked;
+
+			err = intel_context_pin(ce);
+			if (err) {
+				intel_context_put(ce);
+				goto out_locked;
+			}
+
+			data[m].ce[n] = ce;
+		}
+	}
+
+	mutex_unlock(&i915->drm.struct_mutex);
+
+	for (fn = func; !err && *fn; fn++) {
+		struct igt_live_test t;
+		int n;
+
+		mutex_lock(&i915->drm.struct_mutex);
+		err = igt_live_test_begin(&t, i915, __func__, "");
+		mutex_unlock(&i915->drm.struct_mutex);
+		if (err)
+			break;
+
+		for (n = 0; n < count; n++) {
+			if (data[n].ce[0] == NULL)
+				continue;
+
+			data[n].tsk = kthread_run(*fn, &data[n],
+						  "igt/parallel:%s",
+						  data[n].ce[0]->engine->name);
+			if (IS_ERR(data[n].tsk)) {
+				err = PTR_ERR(data[n].tsk);
+				break;
+			}
+			get_task_struct(data[n].tsk);
+		}
+
+		for (n = 0; n < count; n++) {
+			int status;
+
+			if (IS_ERR_OR_NULL(data[n].tsk))
+				continue;
+
+			status = kthread_stop(data[n].tsk);
+			if (status && !err)
+				err = status;
+
+			put_task_struct(data[n].tsk);
+			data[n].tsk = NULL;
+		}
+
+		mutex_lock(&i915->drm.struct_mutex);
+		if (igt_live_test_end(&t))
+			err = -EIO;
+		mutex_unlock(&i915->drm.struct_mutex);
+	}
+
+	mutex_lock(&i915->drm.struct_mutex);
+out_locked:
+	for (n = 0; n < count; n++) {
+		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
+			if (!data[n].ce[m])
+				continue;
+
+			intel_context_unpin(data[n].ce[m]);
+			intel_context_put(data[n].ce[m]);
+		}
+	}
+	mutex_unlock(&i915->drm.struct_mutex);
+	kfree(data);
+	mock_file_free(i915, file);
+	return err;
+}
+
 static unsigned long real_page_count(struct drm_i915_gem_object *obj)
 {
 	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
@@ -1681,6 +1905,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(live_nop_switch),
+		SUBTEST(live_parallel_switch),
 		SUBTEST(igt_ctx_exec),
 		SUBTEST(igt_ctx_readonly),
 		SUBTEST(igt_ctx_sseu),
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* ✓ Fi.CI.BAT: success for drm/i915/selftests: Exercise context switching in parallell (rev3)
  2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallell Chris Wilson
                   ` (3 preceding siblings ...)
  2019-09-30 14:22 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallell (rev3) Patchwork
@ 2019-09-30 14:54 ` Patchwork
  2019-09-30 15:24 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallell (rev4) Patchwork
                   ` (3 subsequent siblings)
  8 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2019-09-30 14:54 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Exercise context switching in parallell (rev3)
URL   : https://patchwork.freedesktop.org/series/67395/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6973 -> Patchwork_14585
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14585/index.html

Known issues
------------

  Here are the changes found in Patchwork_14585 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_module_load@reload:
    - fi-blb-e6850:       [PASS][1] -> [INCOMPLETE][2] ([fdo#107718])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-blb-e6850/igt@i915_module_load@reload.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14585/fi-blb-e6850/igt@i915_module_load@reload.html

  * igt@i915_selftest@live_sanitycheck:
    - fi-icl-u3:          [PASS][3] -> [DMESG-WARN][4] ([fdo#107724]) +1 similar issue
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14585/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][5] -> [FAIL][6] ([fdo#111407])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14585/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  
#### Possible fixes ####

  * igt@gem_mmap@basic-small-bo:
    - fi-icl-u3:          [DMESG-WARN][7] ([fdo#107724]) -> [PASS][8] +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-icl-u3/igt@gem_mmap@basic-small-bo.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14585/fi-icl-u3/igt@gem_mmap@basic-small-bo.html

  * igt@i915_selftest@live_gem_contexts:
    - fi-bxt-dsi:         [INCOMPLETE][9] ([fdo#103927]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-bxt-dsi/igt@i915_selftest@live_gem_contexts.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14585/fi-bxt-dsi/igt@i915_selftest@live_gem_contexts.html

  
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#107718]: https://bugs.freedesktop.org/show_bug.cgi?id=107718
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407


Participating hosts (53 -> 45)
------------------------------

  Additional (1): fi-cml-h 
  Missing    (9): fi-ilk-m540 fi-hsw-4200u fi-tgl-u2 fi-skl-6770hq fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_6973 -> Patchwork_14585

  CI-20190529: 20190529
  CI_DRM_6973: 7462c58bba0fb6e85bd380591c3fd86e298c0f95 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5206: 5a6c68568def840cd720f18fc66f529a89f84675 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14585: b6e359fa23f7dc9c62fac7fc6948d7813ca91232 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

b6e359fa23f7 drm/i915/selftests: Exercise context switching in parallel

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14585/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallell (rev4)
  2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallell Chris Wilson
                   ` (4 preceding siblings ...)
  2019-09-30 14:54 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-09-30 15:24 ` Patchwork
  2019-09-30 15:46 ` ✓ Fi.CI.IGT: success for drm/i915/selftests: Exercise context switching in parallell (rev2) Patchwork
                   ` (2 subsequent siblings)
  8 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2019-09-30 15:24 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Exercise context switching in parallell (rev4)
URL   : https://patchwork.freedesktop.org/series/67395/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
d759ff077352 drm/i915/selftests: Exercise context switching in parallel
-:36: WARNING:LINE_SPACING: Missing a blank line after declarations
#36: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:168:
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);

-:77: WARNING:LINE_SPACING: Missing a blank line after declarations
#77: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:209:
+	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
+	IGT_TIMEOUT(end_time);

-:107: WARNING:LINE_SPACING: Missing a blank line after declarations
#107: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:239:
+	struct drm_i915_private *i915 = arg;
+	static int (* const func[])(void *arg) = {

-:115: WARNING:LINE_SPACING: Missing a blank line after declarations
#115: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:247:
+	struct i915_gem_engines_iter it;
+	int (* const *fn)(void *arg);

-:201: CHECK:COMPARISON_TO_NULL: Comparison to NULL could be written "!data[n].ce[0]"
#201: FILE: drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c:333:
+			if (data[n].ce[0] == NULL)

total: 0 errors, 4 warnings, 1 checks, 237 lines checked

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* ✓ Fi.CI.IGT: success for drm/i915/selftests: Exercise context switching in parallell (rev2)
  2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallell Chris Wilson
                   ` (5 preceding siblings ...)
  2019-09-30 15:24 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallell (rev4) Patchwork
@ 2019-09-30 15:46 ` Patchwork
  2019-09-30 15:47 ` ✓ Fi.CI.BAT: success for drm/i915/selftests: Exercise context switching in parallell (rev4) Patchwork
  2019-09-30 20:00 ` ✓ Fi.CI.IGT: " Patchwork
  8 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2019-09-30 15:46 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Exercise context switching in parallell (rev2)
URL   : https://patchwork.freedesktop.org/series/67395/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6973_full -> Patchwork_14582_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_14582_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_isolation@rcs0-s3:
    - shard-apl:          [PASS][1] -> [DMESG-WARN][2] ([fdo#108566]) +5 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-apl5/igt@gem_ctx_isolation@rcs0-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-apl7/igt@gem_ctx_isolation@rcs0-s3.html

  * igt@gem_exec_schedule@reorder-wide-bsd:
    - shard-iclb:         [PASS][3] -> [SKIP][4] ([fdo#111325]) +5 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb6/igt@gem_exec_schedule@reorder-wide-bsd.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb4/igt@gem_exec_schedule@reorder-wide-bsd.html

  * igt@gem_softpin@noreloc-s3:
    - shard-kbl:          [PASS][5] -> [DMESG-WARN][6] ([fdo#103313])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-kbl4/igt@gem_softpin@noreloc-s3.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-kbl2/igt@gem_softpin@noreloc-s3.html

  * igt@i915_selftest@live_execlists:
    - shard-skl:          [PASS][7] -> [DMESG-FAIL][8] ([fdo#111108])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl9/igt@i915_selftest@live_execlists.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-skl5/igt@i915_selftest@live_execlists.html

  * igt@i915_selftest@live_hangcheck:
    - shard-iclb:         [PASS][9] -> [INCOMPLETE][10] ([fdo#107713] / [fdo#108569])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb8/igt@i915_selftest@live_hangcheck.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb8/igt@i915_selftest@live_hangcheck.html

  * igt@kms_atomic_transition@2x-modeset-transitions:
    - shard-hsw:          [PASS][11] -> [SKIP][12] ([fdo#109271])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-hsw6/igt@kms_atomic_transition@2x-modeset-transitions.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-hsw5/igt@kms_atomic_transition@2x-modeset-transitions.html

  * igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic:
    - shard-hsw:          [PASS][13] -> [FAIL][14] ([fdo#105767])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-hsw6/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-hsw4/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render:
    - shard-iclb:         [PASS][15] -> [FAIL][16] ([fdo#103167]) +6 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html

  * igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min:
    - shard-skl:          [PASS][17] -> [FAIL][18] ([fdo#108145])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl2/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-skl6/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html

  * igt@kms_psr@psr2_cursor_render:
    - shard-iclb:         [PASS][19] -> [SKIP][20] ([fdo#109441]) +1 similar issue
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb2/igt@kms_psr@psr2_cursor_render.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb1/igt@kms_psr@psr2_cursor_render.html

  * igt@kms_setmode@basic:
    - shard-apl:          [PASS][21] -> [FAIL][22] ([fdo#99912])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-apl1/igt@kms_setmode@basic.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-apl7/igt@kms_setmode@basic.html
    - shard-skl:          [PASS][23] -> [FAIL][24] ([fdo#99912])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl2/igt@kms_setmode@basic.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-skl3/igt@kms_setmode@basic.html

  * igt@prime_busy@hang-bsd2:
    - shard-iclb:         [PASS][25] -> [SKIP][26] ([fdo#109276]) +15 similar issues
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb2/igt@prime_busy@hang-bsd2.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb3/igt@prime_busy@hang-bsd2.html

  
#### Possible fixes ####

  * igt@gem_exec_schedule@in-order-bsd:
    - shard-iclb:         [SKIP][27] ([fdo#111325]) -> [PASS][28] +2 similar issues
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb1/igt@gem_exec_schedule@in-order-bsd.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb8/igt@gem_exec_schedule@in-order-bsd.html

  * igt@gem_exec_schedule@preempt-contexts-bsd2:
    - shard-iclb:         [SKIP][29] ([fdo#109276]) -> [PASS][30] +10 similar issues
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb7/igt@gem_exec_schedule@preempt-contexts-bsd2.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb2/igt@gem_exec_schedule@preempt-contexts-bsd2.html

  * igt@i915_pm_rpm@system-suspend-execbuf:
    - shard-iclb:         [DMESG-WARN][31] ([fdo#111764]) -> [PASS][32] +1 similar issue
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb1/igt@i915_pm_rpm@system-suspend-execbuf.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb5/igt@i915_pm_rpm@system-suspend-execbuf.html

  * igt@i915_suspend@sysfs-reader:
    - shard-apl:          [DMESG-WARN][33] ([fdo#108566]) -> [PASS][34] +1 similar issue
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-apl4/igt@i915_suspend@sysfs-reader.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-apl3/igt@i915_suspend@sysfs-reader.html

  * igt@kms_cursor_crc@pipe-a-cursor-64x21-sliding:
    - shard-skl:          [FAIL][35] ([fdo#103232]) -> [PASS][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl8/igt@kms_cursor_crc@pipe-a-cursor-64x21-sliding.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-skl9/igt@kms_cursor_crc@pipe-a-cursor-64x21-sliding.html

  * igt@kms_cursor_legacy@all-pipes-torture-move:
    - shard-kbl:          [DMESG-WARN][37] ([fdo#107122]) -> [PASS][38]
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-kbl3/igt@kms_cursor_legacy@all-pipes-torture-move.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-kbl1/igt@kms_cursor_legacy@all-pipes-torture-move.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic:
    - shard-skl:          [FAIL][39] ([fdo#102670]) -> [PASS][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-skl7/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-skl:          [FAIL][41] ([fdo#105363]) -> [PASS][42]
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl5/igt@kms_flip@flip-vs-expired-vblank.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-skl2/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite:
    - shard-iclb:         [FAIL][43] ([fdo#103167]) -> [PASS][44] +2 similar issues
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb3/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html

  * igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min:
    - shard-skl:          [FAIL][45] ([fdo#108145]) -> [PASS][46]
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl8/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-skl9/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html

  * igt@kms_plane_lowres@pipe-a-tiling-x:
    - shard-iclb:         [FAIL][47] ([fdo#103166]) -> [PASS][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb6/igt@kms_plane_lowres@pipe-a-tiling-x.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb4/igt@kms_plane_lowres@pipe-a-tiling-x.html

  * igt@kms_psr@psr2_sprite_mmap_gtt:
    - shard-iclb:         [SKIP][49] ([fdo#109441]) -> [PASS][50] +1 similar issue
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb4/igt@kms_psr@psr2_sprite_mmap_gtt.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb2/igt@kms_psr@psr2_sprite_mmap_gtt.html

  * igt@kms_vblank@pipe-c-query-idle-hang:
    - shard-apl:          [INCOMPLETE][51] ([fdo#103927]) -> [PASS][52] +1 similar issue
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-apl8/igt@kms_vblank@pipe-c-query-idle-hang.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-apl5/igt@kms_vblank@pipe-c-query-idle-hang.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv:
    - shard-iclb:         [SKIP][53] ([fdo#109276]) -> [FAIL][54] ([fdo#111329])
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb8/igt@gem_ctx_isolation@vcs1-nonpriv.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb4/igt@gem_ctx_isolation@vcs1-nonpriv.html

  * igt@gem_mocs_settings@mocs-isolation-bsd2:
    - shard-iclb:         [SKIP][55] ([fdo#109276]) -> [FAIL][56] ([fdo#111330]) +1 similar issue
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb7/igt@gem_mocs_settings@mocs-isolation-bsd2.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/shard-iclb2/igt@gem_mocs_settings@mocs-isolation-bsd2.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#102670]: https://bugs.freedesktop.org/show_bug.cgi?id=102670
  [fdo#103166]: https://bugs.freedesktop.org/show_bug.cgi?id=103166
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#103313]: https://bugs.freedesktop.org/show_bug.cgi?id=103313
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
  [fdo#105767]: https://bugs.freedesktop.org/show_bug.cgi?id=105767
  [fdo#107122]: https://bugs.freedesktop.org/show_bug.cgi?id=107122
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#108569]: https://bugs.freedesktop.org/show_bug.cgi?id=108569
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#111108]: https://bugs.freedesktop.org/show_bug.cgi?id=111108
  [fdo#111325]: https://bugs.freedesktop.org/show_bug.cgi?id=111325
  [fdo#111329]: https://bugs.freedesktop.org/show_bug.cgi?id=111329
  [fdo#111330]: https://bugs.freedesktop.org/show_bug.cgi?id=111330
  [fdo#111764]: https://bugs.freedesktop.org/show_bug.cgi?id=111764
  [fdo#111781]: https://bugs.freedesktop.org/show_bug.cgi?id=111781
  [fdo#99912]: https://bugs.freedesktop.org/show_bug.cgi?id=99912


Participating hosts (16 -> 10)
------------------------------

  Missing    (6): shard-tglb1 shard-tglb2 shard-tglb3 shard-tglb4 shard-tglb5 shard-tglb6 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_6973 -> Patchwork_14582

  CI-20190529: 20190529
  CI_DRM_6973: 7462c58bba0fb6e85bd380591c3fd86e298c0f95 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5206: 5a6c68568def840cd720f18fc66f529a89f84675 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14582: b214c64b2862a064deb1795b395fff8aa95557a9 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14582/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* ✓ Fi.CI.BAT: success for drm/i915/selftests: Exercise context switching in parallell (rev4)
  2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallell Chris Wilson
                   ` (6 preceding siblings ...)
  2019-09-30 15:46 ` ✓ Fi.CI.IGT: success for drm/i915/selftests: Exercise context switching in parallell (rev2) Patchwork
@ 2019-09-30 15:47 ` Patchwork
  2019-09-30 20:00 ` ✓ Fi.CI.IGT: " Patchwork
  8 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2019-09-30 15:47 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Exercise context switching in parallell (rev4)
URL   : https://patchwork.freedesktop.org/series/67395/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6973 -> Patchwork_14587
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/index.html

Known issues
------------

  Here are the changes found in Patchwork_14587 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_suspend@basic-s3:
    - fi-blb-e6850:       [PASS][1] -> [INCOMPLETE][2] ([fdo#107718])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-blb-e6850/igt@gem_exec_suspend@basic-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/fi-blb-e6850/igt@gem_exec_suspend@basic-s3.html

  * igt@gem_mmap_gtt@basic-read-write-distinct:
    - fi-icl-u3:          [PASS][3] -> [DMESG-WARN][4] ([fdo#107724])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-icl-u3/igt@gem_mmap_gtt@basic-read-write-distinct.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/fi-icl-u3/igt@gem_mmap_gtt@basic-read-write-distinct.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][5] -> [FAIL][6] ([fdo#111407])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  
#### Possible fixes ####

  * igt@gem_mmap@basic-small-bo:
    - fi-icl-u3:          [DMESG-WARN][7] ([fdo#107724]) -> [PASS][8] +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-icl-u3/igt@gem_mmap@basic-small-bo.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/fi-icl-u3/igt@gem_mmap@basic-small-bo.html

  * igt@i915_selftest@live_gem_contexts:
    - fi-bxt-dsi:         [INCOMPLETE][9] ([fdo#103927]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/fi-bxt-dsi/igt@i915_selftest@live_gem_contexts.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/fi-bxt-dsi/igt@i915_selftest@live_gem_contexts.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#107718]: https://bugs.freedesktop.org/show_bug.cgi?id=107718
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
  [fdo#111600]: https://bugs.freedesktop.org/show_bug.cgi?id=111600
  [fdo#111736]: https://bugs.freedesktop.org/show_bug.cgi?id=111736


Participating hosts (53 -> 45)
------------------------------

  Additional (1): fi-cml-h 
  Missing    (9): fi-ilk-m540 fi-hsw-4200u fi-skl-6770hq fi-skl-guc fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_6973 -> Patchwork_14587

  CI-20190529: 20190529
  CI_DRM_6973: 7462c58bba0fb6e85bd380591c3fd86e298c0f95 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5206: 5a6c68568def840cd720f18fc66f529a89f84675 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14587: d759ff0773524d9e4accb50023b488f0a9c63ed4 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

d759ff077352 drm/i915/selftests: Exercise context switching in parallel

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH v4] drm/i915/selftests: Exercise context switching in parallel
  2019-09-30 14:49   ` [PATCH v4] " Chris Wilson
@ 2019-09-30 16:18     ` Tvrtko Ursulin
  0 siblings, 0 replies; 16+ messages in thread
From: Tvrtko Ursulin @ 2019-09-30 16:18 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 30/09/2019 15:49, Chris Wilson wrote:
> We currently test context switching on each engine as a basic stress
> test (just verifying that nothing explodes if we execute 2 requests from
> different contexts sequentially). What we have not tested is what
> happens if we try and do so on all available engines simultaneously,
> putting our SW and the HW under the maximal stress.
> 
> v2: Clone the set of engines from the first context into the secondary
> contexts.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   .../drm/i915/gem/selftests/i915_gem_context.c | 225 ++++++++++++++++++
>   1 file changed, 225 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> index dc25bcc3e372..81a83c34404c 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
> @@ -156,6 +156,230 @@ static int live_nop_switch(void *arg)
>   	return err;
>   }
>   
> +struct parallel_switch {
> +	struct task_struct *tsk;
> +	struct intel_context *ce[2];
> +};
> +
> +static int __live_parallel_switch1(void *data)
> +{
> +	struct parallel_switch *arg = data;
> +	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
> +	IGT_TIMEOUT(end_time);
> +	unsigned long count;
> +
> +	count = 0;
> +	do {
> +		struct i915_request *rq = NULL;
> +		int err, n;
> +
> +		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
> +			i915_request_put(rq);
> +
> +			mutex_lock(&i915->drm.struct_mutex);
> +			rq = i915_request_create(arg->ce[n]);
> +			if (IS_ERR(rq)) {
> +				mutex_unlock(&i915->drm.struct_mutex);
> +				return PTR_ERR(rq);
> +			}
> +
> +			i915_request_get(rq);
> +			i915_request_add(rq);
> +			mutex_unlock(&i915->drm.struct_mutex);
> +		}
> +
> +		err = 0;
> +		if (i915_request_wait(rq, 0, HZ / 5) < 0)
> +			err = -ETIME;
> +		i915_request_put(rq);
> +		if (err)
> +			return err;
> +
> +		count++;
> +	} while (!__igt_timeout(end_time, NULL));
> +
> +	pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count);
> +	return 0;
> +}
> +
> +static int __live_parallel_switchN(void *data)
> +{
> +	struct parallel_switch *arg = data;
> +	struct drm_i915_private *i915 = arg->ce[0]->engine->i915;
> +	IGT_TIMEOUT(end_time);
> +	unsigned long count;
> +	int n;
> +
> +	count = 0;
> +	do {
> +		for (n = 0; n < ARRAY_SIZE(arg->ce); n++) {
> +			struct i915_request *rq;
> +
> +			mutex_lock(&i915->drm.struct_mutex);
> +			rq = i915_request_create(arg->ce[n]);
> +			if (IS_ERR(rq)) {
> +				mutex_unlock(&i915->drm.struct_mutex);
> +				return PTR_ERR(rq);
> +			}
> +
> +			i915_request_add(rq);
> +			mutex_unlock(&i915->drm.struct_mutex);
> +		}
> +
> +		count++;
> +	} while (!__igt_timeout(end_time, NULL));
> +
> +	pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count);
> +	return 0;
> +}
> +
> +static int live_parallel_switch(void *arg)
> +{
> +	struct drm_i915_private *i915 = arg;
> +	static int (* const func[])(void *arg) = {
> +		__live_parallel_switch1,
> +		__live_parallel_switchN,
> +		NULL,
> +	};
> +	struct parallel_switch *data = NULL;
> +	struct i915_gem_engines *engines;
> +	struct i915_gem_engines_iter it;
> +	int (* const *fn)(void *arg);
> +	struct i915_gem_context *ctx;
> +	struct intel_context *ce;
> +	struct drm_file *file;
> +	int n, m, count;
> +	int err = 0;
> +
> +	/*
> +	 * Check we can process switches on all engines simultaneously.
> +	 */
> +
> +	if (!DRIVER_CAPS(i915)->has_logical_contexts)
> +		return 0;
> +
> +	file = mock_file(i915);
> +	if (IS_ERR(file))
> +		return PTR_ERR(file);
> +
> +	mutex_lock(&i915->drm.struct_mutex);
> +
> +	ctx = live_context(i915, file);
> +	if (IS_ERR(ctx)) {
> +		err = PTR_ERR(ctx);
> +		goto out_locked;
> +	}
> +
> +	engines = i915_gem_context_lock_engines(ctx);
> +	count = engines->num_engines;
> +
> +	data = kcalloc(count, sizeof(*data), GFP_KERNEL);
> +	if (!data) {
> +		i915_gem_context_unlock_engines(ctx);
> +		err = -ENOMEM;
> +		goto out_locked;
> +	}
> +
> +	m = 0; /* Use the first context as our template for the engines */
> +	for_each_gem_engine(ce, engines, it) {
> +		err = intel_context_pin(ce);
> +		if (err) {
> +			i915_gem_context_unlock_engines(ctx);
> +			goto out_locked;
> +		}
> +		data[m++].ce[0] = intel_context_get(ce);
> +	}
> +	i915_gem_context_unlock_engines(ctx);
> +
> +	/* Clone the same set of engines in the other contexts */
> +	for (n = 1; n < ARRAY_SIZE(data->ce); n++) {
> +		ctx = live_context(i915, file);
> +		if (IS_ERR(ctx)) {
> +			err = PTR_ERR(ctx);
> +			goto out_locked;
> +		}
> +
> +		for (m = 0; m < count; m++) {
> +			if (!data[m].ce[0])
> +				continue;
> +
> +			ce = intel_context_create(ctx, data[m].ce[0]->engine);
> +			if (IS_ERR(ce))
> +				goto out_locked;
> +
> +			err = intel_context_pin(ce);
> +			if (err) {
> +				intel_context_put(ce);
> +				goto out_locked;
> +			}
> +
> +			data[m].ce[n] = ce;
> +		}
> +	}
> +
> +	mutex_unlock(&i915->drm.struct_mutex);
> +
> +	for (fn = func; !err && *fn; fn++) {
> +		struct igt_live_test t;
> +		int n;
> +
> +		mutex_lock(&i915->drm.struct_mutex);
> +		err = igt_live_test_begin(&t, i915, __func__, "");
> +		mutex_unlock(&i915->drm.struct_mutex);
> +		if (err)
> +			break;
> +
> +		for (n = 0; n < count; n++) {
> +			if (data[n].ce[0] == NULL)
> +				continue;
> +
> +			data[n].tsk = kthread_run(*fn, &data[n],
> +						  "igt/parallel:%s",
> +						  data[n].ce[0]->engine->name);
> +			if (IS_ERR(data[n].tsk)) {
> +				err = PTR_ERR(data[n].tsk);
> +				break;
> +			}
> +			get_task_struct(data[n].tsk);
> +		}
> +
> +		for (n = 0; n < count; n++) {
> +			int status;
> +
> +			if (IS_ERR_OR_NULL(data[n].tsk))
> +				continue;
> +
> +			status = kthread_stop(data[n].tsk);
> +			if (status && !err)
> +				err = status;
> +
> +			put_task_struct(data[n].tsk);
> +			data[n].tsk = NULL;
> +		}
> +
> +		mutex_lock(&i915->drm.struct_mutex);
> +		if (igt_live_test_end(&t))
> +			err = -EIO;
> +		mutex_unlock(&i915->drm.struct_mutex);
> +	}
> +
> +	mutex_lock(&i915->drm.struct_mutex);
> +out_locked:
> +	for (n = 0; n < count; n++) {
> +		for (m = 0; m < ARRAY_SIZE(data->ce); m++) {
> +			if (!data[n].ce[m])
> +				continue;
> +
> +			intel_context_unpin(data[n].ce[m]);
> +			intel_context_put(data[n].ce[m]);
> +		}
> +	}
> +	mutex_unlock(&i915->drm.struct_mutex);
> +	kfree(data);
> +	mock_file_free(i915, file);
> +	return err;
> +}
> +
>   static unsigned long real_page_count(struct drm_i915_gem_object *obj)
>   {
>   	return huge_gem_object_phys_size(obj) >> PAGE_SHIFT;
> @@ -1681,6 +1905,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915)
>   {
>   	static const struct i915_subtest tests[] = {
>   		SUBTEST(live_nop_switch),
> +		SUBTEST(live_parallel_switch),
>   		SUBTEST(igt_ctx_exec),
>   		SUBTEST(igt_ctx_readonly),
>   		SUBTEST(igt_ctx_sseu),
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

* ✓ Fi.CI.IGT: success for drm/i915/selftests: Exercise context switching in parallell (rev4)
  2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallell Chris Wilson
                   ` (7 preceding siblings ...)
  2019-09-30 15:47 ` ✓ Fi.CI.BAT: success for drm/i915/selftests: Exercise context switching in parallell (rev4) Patchwork
@ 2019-09-30 20:00 ` Patchwork
  8 siblings, 0 replies; 16+ messages in thread
From: Patchwork @ 2019-09-30 20:00 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Exercise context switching in parallell (rev4)
URL   : https://patchwork.freedesktop.org/series/67395/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6973_full -> Patchwork_14587_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_14587_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_schedule@reorder-wide-bsd:
    - shard-iclb:         [PASS][1] -> [SKIP][2] ([fdo#111325]) +5 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb6/igt@gem_exec_schedule@reorder-wide-bsd.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb4/igt@gem_exec_schedule@reorder-wide-bsd.html

  * igt@gem_workarounds@suspend-resume-context:
    - shard-apl:          [PASS][3] -> [DMESG-WARN][4] ([fdo#108566]) +7 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-apl6/igt@gem_workarounds@suspend-resume-context.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-apl4/igt@gem_workarounds@suspend-resume-context.html

  * igt@i915_pm_rpm@system-suspend-execbuf:
    - shard-skl:          [PASS][5] -> [INCOMPLETE][6] ([fdo#104108] / [fdo#107807])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl3/igt@i915_pm_rpm@system-suspend-execbuf.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl8/igt@i915_pm_rpm@system-suspend-execbuf.html

  * igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic:
    - shard-hsw:          [PASS][7] -> [FAIL][8] ([fdo#105767])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-hsw6/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-hsw1/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-atomic.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [PASS][9] -> [FAIL][10] ([fdo#105363])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl6/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl7/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
    - shard-apl:          [PASS][11] -> [FAIL][12] ([fdo#105363])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-apl2/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-apl8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_flip@flip-vs-suspend-interruptible:
    - shard-hsw:          [PASS][13] -> [INCOMPLETE][14] ([fdo#103540]) +1 similar issue
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-hsw1/igt@kms_flip@flip-vs-suspend-interruptible.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-hsw5/igt@kms_flip@flip-vs-suspend-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw:
    - shard-iclb:         [PASS][15] -> [FAIL][16] ([fdo#103167]) +5 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb3/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-b-planes:
    - shard-skl:          [PASS][17] -> [INCOMPLETE][18] ([fdo#104108])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl3/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-b-planes.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl2/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-b-planes.html

  * igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min:
    - shard-skl:          [PASS][19] -> [FAIL][20] ([fdo#108145])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl2/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl6/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html

  * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
    - shard-skl:          [PASS][21] -> [FAIL][22] ([fdo#108145] / [fdo#110403])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl6/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl7/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_render:
    - shard-iclb:         [PASS][23] -> [SKIP][24] ([fdo#109441]) +2 similar issues
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb2/igt@kms_psr@psr2_cursor_render.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb3/igt@kms_psr@psr2_cursor_render.html

  * igt@kms_setmode@basic:
    - shard-skl:          [PASS][25] -> [FAIL][26] ([fdo#99912])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl2/igt@kms_setmode@basic.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl9/igt@kms_setmode@basic.html

  * igt@prime_busy@hang-bsd2:
    - shard-iclb:         [PASS][27] -> [SKIP][28] ([fdo#109276]) +17 similar issues
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb2/igt@prime_busy@hang-bsd2.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb8/igt@prime_busy@hang-bsd2.html

  * igt@tools_test@tools_test:
    - shard-apl:          [PASS][29] -> [SKIP][30] ([fdo#109271])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-apl2/igt@tools_test@tools_test.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-apl8/igt@tools_test@tools_test.html

  
#### Possible fixes ####

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [SKIP][31] ([fdo#110841]) -> [PASS][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb4/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb6/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_exec_schedule@preempt-contexts-bsd2:
    - shard-iclb:         [SKIP][33] ([fdo#109276]) -> [PASS][34] +14 similar issues
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb7/igt@gem_exec_schedule@preempt-contexts-bsd2.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb4/igt@gem_exec_schedule@preempt-contexts-bsd2.html

  * igt@gem_exec_schedule@preempt-other-chain-bsd:
    - shard-iclb:         [SKIP][35] ([fdo#111325]) -> [PASS][36] +9 similar issues
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb4/igt@gem_exec_schedule@preempt-other-chain-bsd.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb8/igt@gem_exec_schedule@preempt-other-chain-bsd.html

  * igt@i915_suspend@sysfs-reader:
    - shard-apl:          [DMESG-WARN][37] ([fdo#108566]) -> [PASS][38] +1 similar issue
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-apl4/igt@i915_suspend@sysfs-reader.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-apl4/igt@i915_suspend@sysfs-reader.html

  * igt@kms_cursor_legacy@all-pipes-torture-move:
    - shard-kbl:          [DMESG-WARN][39] ([fdo#107122]) -> [PASS][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-kbl3/igt@kms_cursor_legacy@all-pipes-torture-move.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-kbl2/igt@kms_cursor_legacy@all-pipes-torture-move.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic:
    - shard-skl:          [FAIL][41] ([fdo#102670]) -> [PASS][42]
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl3/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl4/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-skl:          [FAIL][43] ([fdo#105363]) -> [PASS][44]
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl5/igt@kms_flip@flip-vs-expired-vblank.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl3/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite:
    - shard-iclb:         [FAIL][45] ([fdo#103167]) -> [PASS][46] +4 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb8/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html

  * igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
    - shard-skl:          [FAIL][47] ([fdo#108145] / [fdo#110403]) -> [PASS][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl5/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl2/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html

  * igt@kms_plane_lowres@pipe-a-tiling-x:
    - shard-iclb:         [FAIL][49] ([fdo#103166]) -> [PASS][50]
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb6/igt@kms_plane_lowres@pipe-a-tiling-x.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb4/igt@kms_plane_lowres@pipe-a-tiling-x.html

  * igt@kms_psr2_su@page_flip:
    - shard-iclb:         [SKIP][51] ([fdo#109642] / [fdo#111068]) -> [PASS][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb5/igt@kms_psr2_su@page_flip.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb2/igt@kms_psr2_su@page_flip.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         [SKIP][53] ([fdo#109441]) -> [PASS][54] +2 similar issues
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb5/igt@kms_psr@psr2_cursor_plane_move.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@perf@blocking:
    - shard-skl:          [FAIL][55] ([fdo#110728]) -> [PASS][56]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-skl6/igt@perf@blocking.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-skl7/igt@perf@blocking.html

  
#### Warnings ####

  * igt@gem_mocs_settings@mocs-isolation-bsd2:
    - shard-iclb:         [SKIP][57] ([fdo#109276]) -> [FAIL][58] ([fdo#111330])
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb7/igt@gem_mocs_settings@mocs-isolation-bsd2.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb4/igt@gem_mocs_settings@mocs-isolation-bsd2.html

  * igt@gem_mocs_settings@mocs-settings-bsd2:
    - shard-iclb:         [FAIL][59] ([fdo#111330]) -> [SKIP][60] ([fdo#109276])
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6973/shard-iclb2/igt@gem_mocs_settings@mocs-settings-bsd2.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/shard-iclb3/igt@gem_mocs_settings@mocs-settings-bsd2.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#102670]: https://bugs.freedesktop.org/show_bug.cgi?id=102670
  [fdo#103166]: https://bugs.freedesktop.org/show_bug.cgi?id=103166
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103540]: https://bugs.freedesktop.org/show_bug.cgi?id=103540
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
  [fdo#105767]: https://bugs.freedesktop.org/show_bug.cgi?id=105767
  [fdo#107122]: https://bugs.freedesktop.org/show_bug.cgi?id=107122
  [fdo#107807]: https://bugs.freedesktop.org/show_bug.cgi?id=107807
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
  [fdo#110403]: https://bugs.freedesktop.org/show_bug.cgi?id=110403
  [fdo#110728]: https://bugs.freedesktop.org/show_bug.cgi?id=110728
  [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
  [fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
  [fdo#111325]: https://bugs.freedesktop.org/show_bug.cgi?id=111325
  [fdo#111330]: https://bugs.freedesktop.org/show_bug.cgi?id=111330
  [fdo#111781]: https://bugs.freedesktop.org/show_bug.cgi?id=111781
  [fdo#99912]: https://bugs.freedesktop.org/show_bug.cgi?id=99912


Participating hosts (16 -> 10)
------------------------------

  Missing    (6): shard-tglb1 shard-tglb2 shard-tglb3 shard-tglb4 shard-tglb5 shard-tglb6 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_6973 -> Patchwork_14587

  CI-20190529: 20190529
  CI_DRM_6973: 7462c58bba0fb6e85bd380591c3fd86e298c0f95 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5206: 5a6c68568def840cd720f18fc66f529a89f84675 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14587: d759ff0773524d9e4accb50023b488f0a9c63ed4 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14587/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 16+ messages in thread

end of thread, other threads:[~2019-09-30 20:00 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-09-30 11:09 [PATCH] drm/i915/selftests: Exercise context switching in parallell Chris Wilson
2019-09-30 11:31 ` [PATCH v2] drm/i915/selftests: Exercise context switching in parallel Chris Wilson
2019-09-30 13:47   ` Tvrtko Ursulin
2019-09-30 13:59     ` Chris Wilson
2019-09-30 14:15   ` [PATCH v3] " Chris Wilson
2019-09-30 14:18     ` Chris Wilson
2019-09-30 14:49   ` [PATCH v4] " Chris Wilson
2019-09-30 16:18     ` Tvrtko Ursulin
2019-09-30 11:55 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallell (rev2) Patchwork
2019-09-30 12:22 ` ✓ Fi.CI.BAT: success " Patchwork
2019-09-30 14:22 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallell (rev3) Patchwork
2019-09-30 14:54 ` ✓ Fi.CI.BAT: success " Patchwork
2019-09-30 15:24 ` ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Exercise context switching in parallell (rev4) Patchwork
2019-09-30 15:46 ` ✓ Fi.CI.IGT: success for drm/i915/selftests: Exercise context switching in parallell (rev2) Patchwork
2019-09-30 15:47 ` ✓ Fi.CI.BAT: success for drm/i915/selftests: Exercise context switching in parallell (rev4) Patchwork
2019-09-30 20:00 ` ✓ Fi.CI.IGT: " Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.