* [PATCH] drm/i915/selftests: Teach requests to use all available engines
@ 2019-10-16 12:52 Chris Wilson
  2019-10-16 18:11 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
                   ` (3 more replies)
  0 siblings, 4 replies; 6+ messages in thread
From: Chris Wilson @ 2019-10-16 12:52 UTC (permalink / raw)
  To: intel-gfx

The request selftests straddle the boundary between checking the driver
and the hardware. They are subject to the quirks of the underlying HW,
but operate on top of the backend abstractions. The tests focus on the
scheduler elements and so should check for interactions of the scheduler
across all exposed engines.
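
For illustration, the shape of the conversion (a hypothetical sketch, not
part of the patch): replace the fixed I915_NUM_ENGINES stack arrays indexed
by engine->id with heap arrays sized from a walk of the uabi engine list and
indexed manually:

static int example_per_engine(struct drm_i915_private *i915)
{
	const unsigned int nengines = num_uabi_engines(i915);
	struct intel_engine_cs *engine;
	struct i915_request **request;
	unsigned int idx;
	int err = 0;

	/* One request pointer per exposed engine, zeroed for safe unwind. */
	request = kmalloc_array(nengines, sizeof(*request),
				GFP_KERNEL | __GFP_ZERO);
	if (!request)
		return -ENOMEM;

	idx = 0;
	for_each_uabi_engine(engine, i915) {
		request[idx] = i915_request_create(engine->kernel_context);
		if (IS_ERR(request[idx])) {
			err = PTR_ERR(request[idx]);
			break;
		}
		i915_request_get(request[idx]);
		i915_request_add(request[idx]);
		idx++;
	}

	/* Drop the references taken on the requests we did create. */
	while (idx--)
		i915_request_put(request[idx]);

	kfree(request);
	return err;
}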

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/selftests/i915_request.c | 276 +++++++++++-------
 1 file changed, 170 insertions(+), 106 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 0897a7b04944..b95a0e8431ab 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -37,6 +37,18 @@
 #include "mock_drm.h"
 #include "mock_gem_device.h"
 
+static unsigned int num_uabi_engines(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	unsigned int count;
+
+	count = 0;
+	for_each_uabi_engine(engine, i915)
+		count++;
+
+	return count;
+}
+
 static int igt_add_request(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -511,15 +523,15 @@ static int live_nop_request(void *arg)
 	struct drm_i915_private *i915 = arg;
 	struct intel_engine_cs *engine;
 	struct igt_live_test t;
-	unsigned int id;
 	int err = -ENODEV;
 
-	/* Submit various sized batches of empty requests, to each engine
+	/*
+	 * Submit various sized batches of empty requests, to each engine
 	 * (individually), and wait for the batch to complete. We can check
 	 * the overhead of submitting requests to the hardware.
 	 */
 
-	for_each_engine(engine, i915, id) {
+	for_each_uabi_engine(engine, i915) {
 		unsigned long n, prime;
 		IGT_TIMEOUT(end_time);
 		ktime_t times[2] = {};
@@ -539,7 +551,8 @@ static int live_nop_request(void *arg)
 				if (IS_ERR(request))
 					return PTR_ERR(request);
 
-				/* This space is left intentionally blank.
+				/*
+				 * This space is left intentionally blank.
 				 *
 				 * We do not actually want to perform any
 				 * action with this request, we just want
@@ -657,10 +670,10 @@ static int live_empty_request(void *arg)
 	struct intel_engine_cs *engine;
 	struct igt_live_test t;
 	struct i915_vma *batch;
-	unsigned int id;
 	int err = 0;
 
-	/* Submit various sized batches of empty requests, to each engine
+	/*
+	 * Submit various sized batches of empty requests, to each engine
 	 * (individually), and wait for the batch to complete. We can check
 	 * the overhead of submitting requests to the hardware.
 	 */
@@ -669,7 +682,7 @@ static int live_empty_request(void *arg)
 	if (IS_ERR(batch))
 		return PTR_ERR(batch);
 
-	for_each_engine(engine, i915, id) {
+	for_each_uabi_engine(engine, i915) {
 		IGT_TIMEOUT(end_time);
 		struct i915_request *request;
 		unsigned long n, prime;
@@ -801,63 +814,73 @@ static int recursive_batch_resolve(struct i915_vma *batch)
 static int live_all_engines(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
+	const unsigned int nengines = num_uabi_engines(i915);
 	struct intel_engine_cs *engine;
-	struct i915_request *request[I915_NUM_ENGINES];
+	struct i915_request **request;
 	struct igt_live_test t;
 	struct i915_vma *batch;
-	unsigned int id;
+	unsigned int idx;
 	int err;
 
-	/* Check we can submit requests to all engines simultaneously. We
+	/*
+	 * Check we can submit requests to all engines simultaneously. We
 	 * send a recursive batch to each engine - checking that we don't
 	 * block doing so, and that they don't complete too soon.
 	 */
 
+	request = kmalloc_array(nengines, sizeof(*request), GFP_KERNEL);
+	if (!request)
+		return -ENOMEM;
+
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
-		return err;
+		goto out_free;
 
 	batch = recursive_batch(i915);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
-		return err;
+		goto out_free;
 	}
 
-	for_each_engine(engine, i915, id) {
-		request[id] = i915_request_create(engine->kernel_context);
-		if (IS_ERR(request[id])) {
-			err = PTR_ERR(request[id]);
+	idx = 0;
+	for_each_uabi_engine(engine, i915) {
+		request[idx] = i915_request_create(engine->kernel_context);
+		if (IS_ERR(request[idx])) {
+			err = PTR_ERR(request[idx]);
 			pr_err("%s: Request allocation failed with err=%d\n",
 			       __func__, err);
 			goto out_request;
 		}
 
-		err = engine->emit_bb_start(request[id],
+		err = engine->emit_bb_start(request[idx],
 					    batch->node.start,
 					    batch->node.size,
 					    0);
 		GEM_BUG_ON(err);
-		request[id]->batch = batch;
+		request[idx]->batch = batch;
 
 		i915_vma_lock(batch);
-		err = i915_request_await_object(request[id], batch->obj, 0);
+		err = i915_request_await_object(request[idx], batch->obj, 0);
 		if (err == 0)
-			err = i915_vma_move_to_active(batch, request[id], 0);
+			err = i915_vma_move_to_active(batch, request[idx], 0);
 		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
-		i915_request_get(request[id]);
-		i915_request_add(request[id]);
+		i915_request_get(request[idx]);
+		i915_request_add(request[idx]);
+		idx++;
 	}
 
-	for_each_engine(engine, i915, id) {
-		if (i915_request_completed(request[id])) {
+	idx = 0;
+	for_each_uabi_engine(engine, i915) {
+		if (i915_request_completed(request[idx])) {
 			pr_err("%s(%s): request completed too early!\n",
 			       __func__, engine->name);
 			err = -EINVAL;
 			goto out_request;
 		}
+		idx++;
 	}
 
 	err = recursive_batch_resolve(batch);
@@ -866,10 +889,11 @@ static int live_all_engines(void *arg)
 		goto out_request;
 	}
 
-	for_each_engine(engine, i915, id) {
+	idx = 0;
+	for_each_uabi_engine(engine, i915) {
 		long timeout;
 
-		timeout = i915_request_wait(request[id], 0,
+		timeout = i915_request_wait(request[idx], 0,
 					    MAX_SCHEDULE_TIMEOUT);
 		if (timeout < 0) {
 			err = timeout;
@@ -878,43 +902,57 @@ static int live_all_engines(void *arg)
 			goto out_request;
 		}
 
-		GEM_BUG_ON(!i915_request_completed(request[id]));
-		i915_request_put(request[id]);
-		request[id] = NULL;
+		GEM_BUG_ON(!i915_request_completed(request[idx]));
+		i915_request_put(request[idx]);
+		request[idx] = NULL;
+		idx++;
 	}
 
 	err = igt_live_test_end(&t);
 
 out_request:
-	for_each_engine(engine, i915, id)
-		if (request[id])
-			i915_request_put(request[id]);
+	idx = 0;
+	for_each_uabi_engine(engine, i915) {
+		if (request[idx])
+			i915_request_put(request[idx]);
+		idx++;
+	}
 	i915_vma_unpin(batch);
 	i915_vma_put(batch);
+out_free:
+	kfree(request);
 	return err;
 }
 
 static int live_sequential_engines(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct i915_request *request[I915_NUM_ENGINES] = {};
+	const unsigned int nengines = num_uabi_engines(i915);
+	struct i915_request **request;
 	struct i915_request *prev = NULL;
 	struct intel_engine_cs *engine;
 	struct igt_live_test t;
-	unsigned int id;
+	unsigned int idx;
 	int err;
 
-	/* Check we can submit requests to all engines sequentially, such
+	/*
+	 * Check we can submit requests to all engines sequentially, such
 	 * that each successive request waits for the earlier ones. This
 	 * tests that we don't execute requests out of order, even though
 	 * they are running on independent engines.
 	 */
 
+	request = kmalloc_array(nengines, sizeof(*request),
+				GFP_KERNEL | __GFP_ZERO);
+	if (!request)
+		return -ENOMEM;
+
 	err = igt_live_test_begin(&t, i915, __func__, "");
 	if (err)
-		return err;
+		goto out_free;
 
-	for_each_engine(engine, i915, id) {
+	idx = 0;
+	for_each_uabi_engine(engine, i915) {
 		struct i915_vma *batch;
 
 		batch = recursive_batch(i915);
@@ -922,66 +960,69 @@ static int live_sequential_engines(void *arg)
 			err = PTR_ERR(batch);
 			pr_err("%s: Unable to create batch for %s, err=%d\n",
 			       __func__, engine->name, err);
-			return err;
+			goto out_free;
 		}
 
-		request[id] = i915_request_create(engine->kernel_context);
-		if (IS_ERR(request[id])) {
-			err = PTR_ERR(request[id]);
+		request[idx] = i915_request_create(engine->kernel_context);
+		if (IS_ERR(request[idx])) {
+			err = PTR_ERR(request[idx]);
 			pr_err("%s: Request allocation failed for %s with err=%d\n",
 			       __func__, engine->name, err);
 			goto out_request;
 		}
 
 		if (prev) {
-			err = i915_request_await_dma_fence(request[id],
+			err = i915_request_await_dma_fence(request[idx],
 							   &prev->fence);
 			if (err) {
-				i915_request_add(request[id]);
+				i915_request_add(request[idx]);
 				pr_err("%s: Request await failed for %s with err=%d\n",
 				       __func__, engine->name, err);
 				goto out_request;
 			}
 		}
 
-		err = engine->emit_bb_start(request[id],
+		err = engine->emit_bb_start(request[idx],
 					    batch->node.start,
 					    batch->node.size,
 					    0);
 		GEM_BUG_ON(err);
-		request[id]->batch = batch;
+		request[idx]->batch = batch;
 
 		i915_vma_lock(batch);
-		err = i915_request_await_object(request[id], batch->obj, false);
+		err = i915_request_await_object(request[idx],
+						batch->obj, false);
 		if (err == 0)
-			err = i915_vma_move_to_active(batch, request[id], 0);
+			err = i915_vma_move_to_active(batch, request[idx], 0);
 		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
-		i915_request_get(request[id]);
-		i915_request_add(request[id]);
+		i915_request_get(request[idx]);
+		i915_request_add(request[idx]);
 
-		prev = request[id];
+		prev = request[idx];
+		idx++;
 	}
 
-	for_each_engine(engine, i915, id) {
+	idx = 0;
+	for_each_uabi_engine(engine, i915) {
 		long timeout;
 
-		if (i915_request_completed(request[id])) {
+		if (i915_request_completed(request[idx])) {
 			pr_err("%s(%s): request completed too early!\n",
 			       __func__, engine->name);
 			err = -EINVAL;
 			goto out_request;
 		}
 
-		err = recursive_batch_resolve(request[id]->batch);
+		err = recursive_batch_resolve(request[idx]->batch);
 		if (err) {
 			pr_err("%s: failed to resolve batch, err=%d\n",
 			       __func__, err);
 			goto out_request;
 		}
 
-		timeout = i915_request_wait(request[id], 0,
+		timeout = i915_request_wait(request[idx], 0,
 					    MAX_SCHEDULE_TIMEOUT);
 		if (timeout < 0) {
 			err = timeout;
@@ -990,30 +1031,35 @@ static int live_sequential_engines(void *arg)
 			goto out_request;
 		}
 
-		GEM_BUG_ON(!i915_request_completed(request[id]));
+		GEM_BUG_ON(!i915_request_completed(request[idx]));
+		idx++;
 	}
 
 	err = igt_live_test_end(&t);
 
 out_request:
-	for_each_engine(engine, i915, id) {
+	idx = 0;
+	for_each_uabi_engine(engine, i915) {
 		u32 *cmd;
 
-		if (!request[id])
+		if (!request[idx])
 			break;
 
-		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
+		cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
 					      I915_MAP_WC);
 		if (!IS_ERR(cmd)) {
 			*cmd = MI_BATCH_BUFFER_END;
 			intel_gt_chipset_flush(engine->gt);
 
-			i915_gem_object_unpin_map(request[id]->batch->obj);
+			i915_gem_object_unpin_map(request[idx]->batch->obj);
 		}
 
-		i915_vma_put(request[id]->batch);
-		i915_request_put(request[id]);
+		i915_vma_put(request[idx]->batch);
+		i915_request_put(request[idx]);
+		idx++;
 	}
+out_free:
+	kfree(request);
 	return err;
 }
 
@@ -1079,9 +1125,10 @@ static int live_parallel_engines(void *arg)
 		__live_parallel_engineN,
 		NULL,
 	};
+	const unsigned int nengines = num_uabi_engines(i915);
 	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	int (* const *fn)(void *arg);
+	struct task_struct **tsk;
 	int err = 0;
 
 	/*
@@ -1089,42 +1136,49 @@ static int live_parallel_engines(void *arg)
 	 * tests that we load up the system maximally.
 	 */
 
+	tsk = kmalloc_array(nengines, sizeof(*tsk), GFP_KERNEL | __GFP_ZERO);
+	if (!tsk)
+		return -ENOMEM;
+
 	for (fn = func; !err && *fn; fn++) {
-		struct task_struct *tsk[I915_NUM_ENGINES] = {};
 		struct igt_live_test t;
+		unsigned int idx;
 
 		err = igt_live_test_begin(&t, i915, __func__, "");
 		if (err)
 			break;
 
-		for_each_engine(engine, i915, id) {
-			tsk[id] = kthread_run(*fn, engine,
+		idx = 0;
+		for_each_uabi_engine(engine, i915) {
+			tsk[idx] = kthread_run(*fn, engine,
 					      "igt/parallel:%s",
 					      engine->name);
-			if (IS_ERR(tsk[id])) {
-				err = PTR_ERR(tsk[id]);
+			if (IS_ERR(tsk[idx])) {
+				err = PTR_ERR(tsk[idx]);
 				break;
 			}
-			get_task_struct(tsk[id]);
+			get_task_struct(tsk[idx++]);
 		}
 
-		for_each_engine(engine, i915, id) {
+		idx = 0;
+		for_each_uabi_engine(engine, i915) {
 			int status;
 
-			if (IS_ERR_OR_NULL(tsk[id]))
-				continue;
+			if (IS_ERR(tsk[idx]))
+				break;
 
-			status = kthread_stop(tsk[id]);
+			status = kthread_stop(tsk[idx]);
 			if (status && !err)
 				err = status;
 
-			put_task_struct(tsk[id]);
+			put_task_struct(tsk[idx++]);
 		}
 
 		if (igt_live_test_end(&t))
 			err = -EIO;
 	}
 
+	kfree(tsk);
 	return err;
 }
 
@@ -1168,16 +1222,16 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
 static int live_breadcrumbs_smoketest(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
-	struct smoketest t[I915_NUM_ENGINES];
-	unsigned int ncpus = num_online_cpus();
+	const unsigned int nengines = num_uabi_engines(i915);
+	const unsigned int ncpus = num_online_cpus();
 	unsigned long num_waits, num_fences;
 	struct intel_engine_cs *engine;
 	struct task_struct **threads;
 	struct igt_live_test live;
-	enum intel_engine_id id;
 	intel_wakeref_t wakeref;
 	struct drm_file *file;
-	unsigned int n;
+	struct smoketest *smoke;
+	unsigned int n, idx;
 	int ret = 0;
 
 	/*
@@ -1196,28 +1250,31 @@ static int live_breadcrumbs_smoketest(void *arg)
 		goto out_rpm;
 	}
 
-	threads = kcalloc(ncpus * I915_NUM_ENGINES,
-			  sizeof(*threads),
-			  GFP_KERNEL);
-	if (!threads) {
+	smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
+	if (!smoke) {
 		ret = -ENOMEM;
 		goto out_file;
 	}
 
-	memset(&t[0], 0, sizeof(t[0]));
-	t[0].request_alloc = __live_request_alloc;
-	t[0].ncontexts = 64;
-	t[0].contexts = kmalloc_array(t[0].ncontexts,
-				      sizeof(*t[0].contexts),
-				      GFP_KERNEL);
-	if (!t[0].contexts) {
+	threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
+	if (!threads) {
+		ret = -ENOMEM;
+		goto out_smoke;
+	}
+
+	smoke[0].request_alloc = __live_request_alloc;
+	smoke[0].ncontexts = 64;
+	smoke[0].contexts = kmalloc_array(smoke[0].ncontexts,
+					  sizeof(*smoke[0].contexts),
+					  GFP_KERNEL);
+	if (!smoke[0].contexts) {
 		ret = -ENOMEM;
 		goto out_threads;
 	}
 
-	for (n = 0; n < t[0].ncontexts; n++) {
-		t[0].contexts[n] = live_context(i915, file);
-		if (!t[0].contexts[n]) {
+	for (n = 0; n < smoke[0].ncontexts; n++) {
+		smoke[0].contexts[n] = live_context(i915, file);
+		if (!smoke[0].contexts[n]) {
 			ret = -ENOMEM;
 			goto out_contexts;
 		}
@@ -1227,42 +1284,46 @@ static int live_breadcrumbs_smoketest(void *arg)
 	if (ret)
 		goto out_contexts;
 
-	for_each_engine(engine, i915, id) {
-		t[id] = t[0];
-		t[id].engine = engine;
-		t[id].max_batch = max_batches(t[0].contexts[0], engine);
-		if (t[id].max_batch < 0) {
-			ret = t[id].max_batch;
+	idx = 0;
+	for_each_uabi_engine(engine, i915) {
+		smoke[idx] = smoke[0];
+		smoke[idx].engine = engine;
+		smoke[idx].max_batch = max_batches(smoke[0].contexts[0], engine);
+		if (smoke[idx].max_batch < 0) {
+			ret = smoke[idx].max_batch;
 			goto out_flush;
 		}
 		/* One ring interleaved between requests from all cpus */
-		t[id].max_batch /= num_online_cpus() + 1;
+		smoke[idx].max_batch /= num_online_cpus() + 1;
 		pr_debug("Limiting batches to %d requests on %s\n",
-			 t[id].max_batch, engine->name);
+			 smoke[idx].max_batch, engine->name);
 
 		for (n = 0; n < ncpus; n++) {
 			struct task_struct *tsk;
 
 			tsk = kthread_run(__igt_breadcrumbs_smoketest,
-					  &t[id], "igt/%d.%d", id, n);
+					  &smoke[idx], "igt/%d.%d", idx, n);
 			if (IS_ERR(tsk)) {
 				ret = PTR_ERR(tsk);
 				goto out_flush;
 			}
 
 			get_task_struct(tsk);
-			threads[id * ncpus + n] = tsk;
+			threads[idx * ncpus + n] = tsk;
 		}
+
+		idx++;
 	}
 
 	msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
 
 out_flush:
+	idx = 0;
 	num_waits = 0;
 	num_fences = 0;
-	for_each_engine(engine, i915, id) {
+	for_each_uabi_engine(engine, i915) {
 		for (n = 0; n < ncpus; n++) {
-			struct task_struct *tsk = threads[id * ncpus + n];
+			struct task_struct *tsk = threads[idx * ncpus + n];
 			int err;
 
 			if (!tsk)
@@ -1275,17 +1336,20 @@ static int live_breadcrumbs_smoketest(void *arg)
 			put_task_struct(tsk);
 		}
 
-		num_waits += atomic_long_read(&t[id].num_waits);
-		num_fences += atomic_long_read(&t[id].num_fences);
+		num_waits += atomic_long_read(&smoke[idx].num_waits);
+		num_fences += atomic_long_read(&smoke[idx].num_fences);
+		idx++;
 	}
 	pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
 		num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
 
 	ret = igt_live_test_end(&live) ?: ret;
 out_contexts:
-	kfree(t[0].contexts);
+	kfree(smoke[0].contexts);
 out_threads:
 	kfree(threads);
+out_smoke:
+	kfree(smoke);
 out_file:
 	mock_file_free(i915, file);
 out_rpm:
-- 
2.23.0


* ✗ Fi.CI.CHECKPATCH: warning for drm/i915/selftests: Teach requests to use all available engines
  2019-10-16 12:52 [PATCH] drm/i915/selftests: Teach requests to use all available engines Chris Wilson
@ 2019-10-16 18:11 ` Patchwork
  2019-10-16 18:36 ` ✓ Fi.CI.BAT: success " Patchwork
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 6+ messages in thread
From: Patchwork @ 2019-10-16 18:11 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Teach requests to use all available engines
URL   : https://patchwork.freedesktop.org/series/68100/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
0acec5432335 drm/i915/selftests: Teach requests to use all available engines
-:428: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#428: FILE: drivers/gpu/drm/i915/selftests/i915_request.c:1154:
+			tsk[idx] = kthread_run(*fn, engine,
 					      "igt/parallel:%s",

total: 0 errors, 0 warnings, 1 checks, 581 lines checked
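
For reference, the continuation lines kept the alignment that matched the old
"tsk[id] =" and are now one column short of the open parenthesis; a realigned
version (illustrative only, not applied in this series) would read:

			tsk[idx] = kthread_run(*fn, engine,
					       "igt/parallel:%s",
					       engine->name);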


* ✓ Fi.CI.BAT: success for drm/i915/selftests: Teach requests to use all available engines
  2019-10-16 12:52 [PATCH] drm/i915/selftests: Teach requests to use all available engines Chris Wilson
  2019-10-16 18:11 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
@ 2019-10-16 18:36 ` Patchwork
  2019-10-17 10:14 ` ✗ Fi.CI.IGT: failure " Patchwork
  2019-10-17 16:54 ` [PATCH] " Tvrtko Ursulin
  3 siblings, 0 replies; 6+ messages in thread
From: Patchwork @ 2019-10-16 18:36 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Teach requests to use all available engines
URL   : https://patchwork.freedesktop.org/series/68100/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7109 -> Patchwork_14837
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/index.html

Known issues
------------

  Here are the changes found in Patchwork_14837 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_mmap_gtt@basic-write-cpu-read-gtt:
    - fi-icl-u3:          [PASS][1] -> [DMESG-WARN][2] ([fdo#107724]) +2 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/fi-icl-u3/igt@gem_mmap_gtt@basic-write-cpu-read-gtt.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/fi-icl-u3/igt@gem_mmap_gtt@basic-write-cpu-read-gtt.html

  * igt@i915_selftest@live_hangcheck:
    - fi-icl-u2:          [PASS][3] -> [INCOMPLETE][4] ([fdo#107713] / [fdo#108569])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/fi-icl-u2/igt@i915_selftest@live_hangcheck.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/fi-icl-u2/igt@i915_selftest@live_hangcheck.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][5] -> [FAIL][6] ([fdo#111045] / [fdo#111096])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  
#### Possible fixes ####

  * igt@gem_linear_blits@basic:
    - fi-icl-u3:          [DMESG-WARN][7] ([fdo#107724]) -> [PASS][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/fi-icl-u3/igt@gem_linear_blits@basic.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/fi-icl-u3/igt@gem_linear_blits@basic.html

  * igt@gem_sync@basic-all:
    - {fi-tgl-u}:         [INCOMPLETE][9] ([fdo#111880]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/fi-tgl-u/igt@gem_sync@basic-all.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/fi-tgl-u/igt@gem_sync@basic-all.html

  * igt@i915_selftest@live_execlists:
    - fi-cfl-8109u:       [DMESG-FAIL][11] -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/fi-cfl-8109u/igt@i915_selftest@live_execlists.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/fi-cfl-8109u/igt@i915_selftest@live_execlists.html

  * igt@kms_busy@basic-flip-c:
    - {fi-icl-u4}:        [DMESG-WARN][13] ([fdo#105602]) -> [PASS][14] +5 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/fi-icl-u4/igt@kms_busy@basic-flip-c.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/fi-icl-u4/igt@kms_busy@basic-flip-c.html

  * igt@kms_chamelium@hdmi-edid-read:
    - {fi-icl-u4}:        [FAIL][15] ([fdo#111045]) -> [PASS][16] +1 similar issue
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/fi-icl-u4/igt@kms_chamelium@hdmi-edid-read.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/fi-icl-u4/igt@kms_chamelium@hdmi-edid-read.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-hsw-peppy:       [DMESG-WARN][17] ([fdo#102614]) -> [PASS][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/fi-hsw-peppy/igt@kms_frontbuffer_tracking@basic.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/fi-hsw-peppy/igt@kms_frontbuffer_tracking@basic.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#102505]: https://bugs.freedesktop.org/show_bug.cgi?id=102505
  [fdo#102614]: https://bugs.freedesktop.org/show_bug.cgi?id=102614
  [fdo#105602]: https://bugs.freedesktop.org/show_bug.cgi?id=105602
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108569]: https://bugs.freedesktop.org/show_bug.cgi?id=108569
  [fdo#111045]: https://bugs.freedesktop.org/show_bug.cgi?id=111045
  [fdo#111049]: https://bugs.freedesktop.org/show_bug.cgi?id=111049
  [fdo#111096]: https://bugs.freedesktop.org/show_bug.cgi?id=111096
  [fdo#111764]: https://bugs.freedesktop.org/show_bug.cgi?id=111764
  [fdo#111833]: https://bugs.freedesktop.org/show_bug.cgi?id=111833
  [fdo#111880]: https://bugs.freedesktop.org/show_bug.cgi?id=111880


Participating hosts (53 -> 46)
------------------------------

  Missing    (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7109 -> Patchwork_14837

  CI-20190529: 20190529
  CI_DRM_7109: e72058f1225aedff7c5c1ec10f978fad5291814e @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5231: e293051f8f99c72cb01d21e4b73a5928ea351eb3 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14837: 0acec543233522d0ec945057529e0aa6cccbe40e @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

0acec5432335 drm/i915/selftests: Teach requests to use all available engines

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/index.html

* ✗ Fi.CI.IGT: failure for drm/i915/selftests: Teach requests to use all available engines
  2019-10-16 12:52 [PATCH] drm/i915/selftests: Teach requests to use all available engines Chris Wilson
  2019-10-16 18:11 ` ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
  2019-10-16 18:36 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-10-17 10:14 ` Patchwork
  2019-10-17 16:54 ` [PATCH] " Tvrtko Ursulin
  3 siblings, 0 replies; 6+ messages in thread
From: Patchwork @ 2019-10-17 10:14 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: drm/i915/selftests: Teach requests to use all available engines
URL   : https://patchwork.freedesktop.org/series/68100/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7109_full -> Patchwork_14837_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_14837_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_14837_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_14837_full:

### IGT changes ###

#### Possible regressions ####

  * igt@gem_exec_schedule@semaphore-resolve:
    - shard-iclb:         NOTRUN -> [FAIL][1]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb8/igt@gem_exec_schedule@semaphore-resolve.html

  * igt@gem_persistent_relocs@forked-interruptible-thrashing:
    - shard-apl:          [PASS][2] -> [FAIL][3]
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-apl1/igt@gem_persistent_relocs@forked-interruptible-thrashing.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-apl1/igt@gem_persistent_relocs@forked-interruptible-thrashing.html

  * igt@i915_selftest@live_execlists:
    - shard-skl:          [PASS][4] -> [DMESG-FAIL][5]
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-skl3/igt@i915_selftest@live_execlists.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-skl4/igt@i915_selftest@live_execlists.html

  
#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@kms_atomic_transition@4x-modeset-transitions-nonblocking:
    - {shard-tglb}:       NOTRUN -> [SKIP][6] +6 similar issues
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-tglb6/igt@kms_atomic_transition@4x-modeset-transitions-nonblocking.html

  * {igt@kms_cursor_crc@pipe-d-cursor-128x42-random}:
    - {shard-tglb}:       NOTRUN -> [FAIL][7]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-tglb1/igt@kms_cursor_crc@pipe-d-cursor-128x42-random.html

  
Known issues
------------

  Here are the changes found in Patchwork_14837_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_eio@in-flight-contexts-immediate:
    - shard-snb:          [PASS][8] -> [FAIL][9] ([fdo#111925])
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-snb1/igt@gem_eio@in-flight-contexts-immediate.html
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-snb1/igt@gem_eio@in-flight-contexts-immediate.html

  * igt@gem_exec_schedule@independent-bsd2:
    - shard-iclb:         [PASS][10] -> [SKIP][11] ([fdo#109276]) +13 similar issues
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb4/igt@gem_exec_schedule@independent-bsd2.html
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb5/igt@gem_exec_schedule@independent-bsd2.html

  * igt@gem_exec_schedule@preemptive-hang-bsd:
    - shard-iclb:         [PASS][12] -> [SKIP][13] ([fdo#111325]) +5 similar issues
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb7/igt@gem_exec_schedule@preemptive-hang-bsd.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd.html

  * igt@gem_persistent_relocs@forked-thrashing:
    - shard-snb:          [PASS][14] -> [INCOMPLETE][15] ([fdo#105411])
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-snb1/igt@gem_persistent_relocs@forked-thrashing.html
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-snb6/igt@gem_persistent_relocs@forked-thrashing.html

  * igt@gem_userptr_blits@dmabuf-sync:
    - shard-snb:          [PASS][16] -> [DMESG-WARN][17] ([fdo#111870])
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-snb5/igt@gem_userptr_blits@dmabuf-sync.html
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-snb1/igt@gem_userptr_blits@dmabuf-sync.html

  * igt@gem_userptr_blits@sync-unmap-after-close:
    - shard-hsw:          [PASS][18] -> [DMESG-WARN][19] ([fdo#111870])
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-hsw5/igt@gem_userptr_blits@sync-unmap-after-close.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-hsw1/igt@gem_userptr_blits@sync-unmap-after-close.html
    - shard-glk:          [PASS][20] -> [DMESG-WARN][21] ([fdo#111870])
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-glk3/igt@gem_userptr_blits@sync-unmap-after-close.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-glk5/igt@gem_userptr_blits@sync-unmap-after-close.html

  * igt@kms_cursor_crc@pipe-b-cursor-128x42-sliding:
    - shard-apl:          [PASS][22] -> [FAIL][23] ([fdo#103232])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-apl3/igt@kms_cursor_crc@pipe-b-cursor-128x42-sliding.html
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-apl2/igt@kms_cursor_crc@pipe-b-cursor-128x42-sliding.html

  * igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
    - shard-glk:          [PASS][24] -> [FAIL][25] ([fdo#104873])
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-glk1/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-glk3/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [PASS][26] -> [FAIL][27] ([fdo#105363])
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-skl2/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-skl1/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite:
    - shard-iclb:         [PASS][28] -> [FAIL][29] ([fdo#103167]) +3 similar issues
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb3/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-pwrite.html

  * igt@kms_vblank@pipe-a-ts-continuation-suspend:
    - shard-apl:          [PASS][30] -> [DMESG-WARN][31] ([fdo#108566]) +6 similar issues
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-apl4/igt@kms_vblank@pipe-a-ts-continuation-suspend.html
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-apl1/igt@kms_vblank@pipe-a-ts-continuation-suspend.html

  * igt@perf_pmu@cpu-hotplug:
    - shard-glk:          [PASS][32] -> [TIMEOUT][33] ([fdo#111800])
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-glk1/igt@perf_pmu@cpu-hotplug.html
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-glk2/igt@perf_pmu@cpu-hotplug.html

  * igt@vgem_basic@unload:
    - shard-glk:          [PASS][34] -> [DMESG-WARN][35] ([fdo#107732])
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-glk1/igt@vgem_basic@unload.html
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-glk2/igt@vgem_basic@unload.html

  
#### Possible fixes ####

  * igt@gem_ctx_shared@q-smoketest-render:
    - shard-apl:          [INCOMPLETE][36] ([fdo#103927]) -> [PASS][37] +1 similar issue
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-apl6/igt@gem_ctx_shared@q-smoketest-render.html
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-apl6/igt@gem_ctx_shared@q-smoketest-render.html

  * igt@gem_exec_balancer@smoke:
    - shard-iclb:         [SKIP][38] ([fdo#110854]) -> [PASS][39]
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb6/igt@gem_exec_balancer@smoke.html
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb4/igt@gem_exec_balancer@smoke.html

  * igt@gem_exec_parallel@contexts:
    - shard-iclb:         [INCOMPLETE][40] ([fdo#107713]) -> [PASS][41]
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb7/igt@gem_exec_parallel@contexts.html
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb8/igt@gem_exec_parallel@contexts.html

  * igt@gem_exec_schedule@in-order-bsd:
    - shard-iclb:         [SKIP][42] ([fdo#111325]) -> [PASS][43] +4 similar issues
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb4/igt@gem_exec_schedule@in-order-bsd.html
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb8/igt@gem_exec_schedule@in-order-bsd.html

  * igt@gem_exec_schedule@preempt-contexts-bsd2:
    - shard-iclb:         [SKIP][44] ([fdo#109276]) -> [PASS][45] +12 similar issues
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb8/igt@gem_exec_schedule@preempt-contexts-bsd2.html
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb2/igt@gem_exec_schedule@preempt-contexts-bsd2.html

  * igt@gem_userptr_blits@map-fixed-invalidate-busy-gup:
    - shard-hsw:          [DMESG-WARN][46] ([fdo#111870]) -> [PASS][47] +1 similar issue
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-hsw7/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-hsw8/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html

  * igt@gem_userptr_blits@sync-unmap-cycles:
    - shard-snb:          [DMESG-WARN][48] ([fdo#111870]) -> [PASS][49]
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-snb5/igt@gem_userptr_blits@sync-unmap-cycles.html
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-snb4/igt@gem_userptr_blits@sync-unmap-cycles.html

  * igt@gem_workarounds@suspend-resume-context:
    - shard-apl:          [DMESG-WARN][50] ([fdo#108566]) -> [PASS][51] +8 similar issues
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-apl8/igt@gem_workarounds@suspend-resume-context.html
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-apl3/igt@gem_workarounds@suspend-resume-context.html

  * {igt@i915_pm_dc@dc6-psr}:
    - shard-iclb:         [FAIL][52] ([fdo#110548]) -> [PASS][53]
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb4/igt@i915_pm_dc@dc6-psr.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb8/igt@i915_pm_dc@dc6-psr.html

  * igt@kms_color@pipe-b-ctm-0-5:
    - shard-skl:          [DMESG-WARN][54] ([fdo#106107]) -> [PASS][55]
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-skl9/igt@kms_color@pipe-b-ctm-0-5.html
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-skl3/igt@kms_color@pipe-b-ctm-0-5.html

  * igt@kms_cursor_crc@pipe-a-cursor-suspend:
    - shard-skl:          [INCOMPLETE][56] ([fdo#110741]) -> [PASS][57]
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-skl3/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-skl1/igt@kms_cursor_crc@pipe-a-cursor-suspend.html

  * igt@kms_cursor_crc@pipe-b-cursor-128x42-sliding:
    - shard-kbl:          [FAIL][58] ([fdo#103232]) -> [PASS][59]
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-kbl3/igt@kms_cursor_crc@pipe-b-cursor-128x42-sliding.html
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-kbl6/igt@kms_cursor_crc@pipe-b-cursor-128x42-sliding.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-plflip-blt:
    - shard-iclb:         [FAIL][60] ([fdo#103167]) -> [PASS][61] +4 similar issues
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb7/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-plflip-blt.html
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb1/igt@kms_frontbuffer_tracking@fbcpsr-1p-primscrn-shrfb-plflip-blt.html

  * igt@kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-cpu:
    - shard-skl:          [FAIL][62] ([fdo#103167]) -> [PASS][63]
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-skl2/igt@kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-cpu.html
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-skl3/igt@kms_frontbuffer_tracking@psr-rgb101010-draw-mmap-cpu.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes:
    - shard-kbl:          [INCOMPLETE][64] ([fdo#103665]) -> [PASS][65]
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-kbl2/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-kbl6/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-b-planes:
    - shard-skl:          [INCOMPLETE][66] ([fdo#104108]) -> [PASS][67]
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-skl9/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-b-planes.html
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-skl1/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-b-planes.html

  * igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min:
    - shard-skl:          [FAIL][68] ([fdo#108145]) -> [PASS][69] +1 similar issue
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-skl4/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-skl2/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html

  * igt@kms_psr@psr2_sprite_render:
    - shard-iclb:         [SKIP][70] ([fdo#109441]) -> [PASS][71] +1 similar issue
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb8/igt@kms_psr@psr2_sprite_render.html
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb2/igt@kms_psr@psr2_sprite_render.html

  
#### Warnings ####

  * igt@gem_mocs_settings@mocs-settings-bsd2:
    - shard-iclb:         [FAIL][72] ([fdo#111330]) -> [SKIP][73] ([fdo#109276]) +1 similar issue
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb4/igt@gem_mocs_settings@mocs-settings-bsd2.html
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb8/igt@gem_mocs_settings@mocs-settings-bsd2.html

  * igt@kms_psr@psr2_suspend:
    - shard-iclb:         [DMESG-WARN][74] ([fdo#107724]) -> [SKIP][75] ([fdo#109441])
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7109/shard-iclb2/igt@kms_psr@psr2_suspend.html
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/shard-iclb4/igt@kms_psr@psr2_suspend.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#103665]: https://bugs.freedesktop.org/show_bug.cgi?id=103665
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#104873]: https://bugs.freedesktop.org/show_bug.cgi?id=104873
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
  [fdo#106107]: https://bugs.freedesktop.org/show_bug.cgi?id=106107
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#107732]: https://bugs.freedesktop.org/show_bug.cgi?id=107732
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110548]: https://bugs.freedesktop.org/show_bug.cgi?id=110548
  [fdo#110741]: https://bugs.freedesktop.org/show_bug.cgi?id=110741
  [fdo#110854]: https://bugs.freedesktop.org/show_bug.cgi?id=110854
  [fdo#111325]: https://bugs.freedesktop.org/show_bug.cgi?id=111325
  [fdo#111330]: https://bugs.freedesktop.org/show_bug.cgi?id=111330
  [fdo#111800]: https://bugs.freedesktop.org/show_bug.cgi?id=111800
  [fdo#111870]: https://bugs.freedesktop.org/show_bug.cgi?id=111870
  [fdo#111925]: https://bugs.freedesktop.org/show_bug.cgi?id=111925


Participating hosts (11 -> 11)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7109 -> Patchwork_14837

  CI-20190529: 20190529
  CI_DRM_7109: e72058f1225aedff7c5c1ec10f978fad5291814e @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5231: e293051f8f99c72cb01d21e4b73a5928ea351eb3 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14837: 0acec543233522d0ec945057529e0aa6cccbe40e @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14837/index.html

* Re: [PATCH] drm/i915/selftests: Teach requests to use all available engines
  2019-10-16 12:52 [PATCH] drm/i915/selftests: Teach requests to use all available engines Chris Wilson
                   ` (2 preceding siblings ...)
  2019-10-17 10:14 ` ✗ Fi.CI.IGT: failure " Patchwork
@ 2019-10-17 16:54 ` Tvrtko Ursulin
  2019-10-17 20:09   ` Chris Wilson
  3 siblings, 1 reply; 6+ messages in thread
From: Tvrtko Ursulin @ 2019-10-17 16:54 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 16/10/2019 13:52, Chris Wilson wrote:
> The request selftests straddle the boundary between checking the driver
> and the hardware. They are subject to the quirks of the underlying HW,
> but operate on top of the backend abstractions. The tests focus on the
> scheduler elements and so should check for interactions of the scheduler
> across all exposed engines.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/selftests/i915_request.c | 276 +++++++++++-------
>   1 file changed, 170 insertions(+), 106 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
> index 0897a7b04944..b95a0e8431ab 100644
> --- a/drivers/gpu/drm/i915/selftests/i915_request.c
> +++ b/drivers/gpu/drm/i915/selftests/i915_request.c
> @@ -37,6 +37,18 @@
>   #include "mock_drm.h"
>   #include "mock_gem_device.h"
>   
> +static unsigned int num_uabi_engines(struct drm_i915_private *i915)
> +{
> +	struct intel_engine_cs *engine;
> +	unsigned int count;
> +
> +	count = 0;
> +	for_each_uabi_engine(engine, i915)
> +		count++;
> +
> +	return count;
> +}
> +
>   static int igt_add_request(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
> @@ -511,15 +523,15 @@ static int live_nop_request(void *arg)
>   	struct drm_i915_private *i915 = arg;
>   	struct intel_engine_cs *engine;
>   	struct igt_live_test t;
> -	unsigned int id;
>   	int err = -ENODEV;
>   
> -	/* Submit various sized batches of empty requests, to each engine
> +	/*
> +	 * Submit various sized batches of empty requests, to each engine
>   	 * (individually), and wait for the batch to complete. We can check
>   	 * the overhead of submitting requests to the hardware.
>   	 */
>   
> -	for_each_engine(engine, i915, id) {
> +	for_each_uabi_engine(engine, i915) {
>   		unsigned long n, prime;
>   		IGT_TIMEOUT(end_time);
>   		ktime_t times[2] = {};
> @@ -539,7 +551,8 @@ static int live_nop_request(void *arg)
>   				if (IS_ERR(request))
>   					return PTR_ERR(request);
>   
> -				/* This space is left intentionally blank.
> +				/*
> +				 * This space is left intentionally blank.
>   				 *
>   				 * We do not actually want to perform any
>   				 * action with this request, we just want
> @@ -657,10 +670,10 @@ static int live_empty_request(void *arg)
>   	struct intel_engine_cs *engine;
>   	struct igt_live_test t;
>   	struct i915_vma *batch;
> -	unsigned int id;
>   	int err = 0;
>   
> -	/* Submit various sized batches of empty requests, to each engine
> +	/*
> +	 * Submit various sized batches of empty requests, to each engine
>   	 * (individually), and wait for the batch to complete. We can check
>   	 * the overhead of submitting requests to the hardware.
>   	 */
> @@ -669,7 +682,7 @@ static int live_empty_request(void *arg)
>   	if (IS_ERR(batch))
>   		return PTR_ERR(batch);
>   
> -	for_each_engine(engine, i915, id) {
> +	for_each_uabi_engine(engine, i915) {
>   		IGT_TIMEOUT(end_time);
>   		struct i915_request *request;
>   		unsigned long n, prime;
> @@ -801,63 +814,73 @@ static int recursive_batch_resolve(struct i915_vma *batch)
>   static int live_all_engines(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
> +	const unsigned int nengines = num_uabi_engines(i915);
>   	struct intel_engine_cs *engine;
> -	struct i915_request *request[I915_NUM_ENGINES];
> +	struct i915_request **request;
>   	struct igt_live_test t;
>   	struct i915_vma *batch;
> -	unsigned int id;
> +	unsigned int idx;
>   	int err;
>   
> -	/* Check we can submit requests to all engines simultaneously. We
> +	/*
> +	 * Check we can submit requests to all engines simultaneously. We
>   	 * send a recursive batch to each engine - checking that we don't
>   	 * block doing so, and that they don't complete too soon.
>   	 */
>   
> +	request = kmalloc_array(nengines, sizeof(*request), GFP_KERNEL);

__GFP_ZERO, as in live_sequential, for the error unwind to work, I think.
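
A minimal sketch of why (reusing the unwind loop from further down this
hunk): if i915_request_create() fails on, say, the second engine, the
cleanup at out_request walks every engine and relies on the untouched slots
reading back as NULL; with plain GFP_KERNEL they hold whatever was in the
allocation, so the put can chase a wild pointer:

	idx = 0;
	for_each_uabi_engine(engine, i915) {
		if (request[idx])	/* only NULL if the array was zeroed */
			i915_request_put(request[idx]);
		idx++;
	}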

> +	if (!request)
> +		return -ENOMEM;
> +
>   	err = igt_live_test_begin(&t, i915, __func__, "");
>   	if (err)
> -		return err;
> +		goto out_free;
>   
>   	batch = recursive_batch(i915);
>   	if (IS_ERR(batch)) {
>   		err = PTR_ERR(batch);
>   		pr_err("%s: Unable to create batch, err=%d\n", __func__, err);
> -		return err;
> +		goto out_free;
>   	}
>   
> -	for_each_engine(engine, i915, id) {
> -		request[id] = i915_request_create(engine->kernel_context);
> -		if (IS_ERR(request[id])) {
> -			err = PTR_ERR(request[id]);
> +	idx = 0;
> +	for_each_uabi_engine(engine, i915) {
> +		request[idx] = i915_request_create(engine->kernel_context);
> +		if (IS_ERR(request[idx])) {
> +			err = PTR_ERR(request[idx]);
>   			pr_err("%s: Request allocation failed with err=%d\n",
>   			       __func__, err);
>   			goto out_request;
>   		}
>   
> -		err = engine->emit_bb_start(request[id],
> +		err = engine->emit_bb_start(request[idx],
>   					    batch->node.start,
>   					    batch->node.size,
>   					    0);
>   		GEM_BUG_ON(err);
> -		request[id]->batch = batch;
> +		request[idx]->batch = batch;
>   
>   		i915_vma_lock(batch);
> -		err = i915_request_await_object(request[id], batch->obj, 0);
> +		err = i915_request_await_object(request[idx], batch->obj, 0);
>   		if (err == 0)
> -			err = i915_vma_move_to_active(batch, request[id], 0);
> +			err = i915_vma_move_to_active(batch, request[idx], 0);
>   		i915_vma_unlock(batch);
>   		GEM_BUG_ON(err);
>   
> -		i915_request_get(request[id]);
> -		i915_request_add(request[id]);
> +		i915_request_get(request[idx]);
> +		i915_request_add(request[idx]);
> +		idx++;
>   	}
>   
> -	for_each_engine(engine, i915, id) {
> -		if (i915_request_completed(request[id])) {
> +	idx = 0;
> +	for_each_uabi_engine(engine, i915) {
> +		if (i915_request_completed(request[idx])) {
>   			pr_err("%s(%s): request completed too early!\n",
>   			       __func__, engine->name);
>   			err = -EINVAL;
>   			goto out_request;
>   		}
> +		idx++;
>   	}
>   
>   	err = recursive_batch_resolve(batch);
> @@ -866,10 +889,11 @@ static int live_all_engines(void *arg)
>   		goto out_request;
>   	}
>   
> -	for_each_engine(engine, i915, id) {
> +	idx = 0;
> +	for_each_uabi_engine(engine, i915) {
>   		long timeout;
>   
> -		timeout = i915_request_wait(request[id], 0,
> +		timeout = i915_request_wait(request[idx], 0,
>   					    MAX_SCHEDULE_TIMEOUT);
>   		if (timeout < 0) {
>   			err = timeout;
> @@ -878,43 +902,57 @@ static int live_all_engines(void *arg)
>   			goto out_request;
>   		}
>   
> -		GEM_BUG_ON(!i915_request_completed(request[id]));
> -		i915_request_put(request[id]);
> -		request[id] = NULL;
> +		GEM_BUG_ON(!i915_request_completed(request[idx]));
> +		i915_request_put(request[idx]);
> +		request[idx] = NULL;
> +		idx++;
>   	}
>   
>   	err = igt_live_test_end(&t);
>   
>   out_request:
> -	for_each_engine(engine, i915, id)
> -		if (request[id])
> -			i915_request_put(request[id]);
> +	idx = 0;
> +	for_each_uabi_engine(engine, i915) {
> +		if (request[idx])
> +			i915_request_put(request[idx]);
> +		idx++;
> +	}
>   	i915_vma_unpin(batch);
>   	i915_vma_put(batch);
> +out_free:
> +	kfree(request);
>   	return err;
>   }
>   
>   static int live_sequential_engines(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
> -	struct i915_request *request[I915_NUM_ENGINES] = {};
> +	const unsigned int nengines = num_uabi_engines(i915);
> +	struct i915_request **request;
>   	struct i915_request *prev = NULL;
>   	struct intel_engine_cs *engine;
>   	struct igt_live_test t;
> -	unsigned int id;
> +	unsigned int idx;
>   	int err;
>   
> -	/* Check we can submit requests to all engines sequentially, such
> +	/*
> +	 * Check we can submit requests to all engines sequentially, such
>   	 * that each successive request waits for the earlier ones. This
>   	 * tests that we don't execute requests out of order, even though
>   	 * they are running on independent engines.
>   	 */
>   
> +	request = kmalloc_array(nengines, sizeof(*request),
> +				GFP_KERNEL | __GFP_ZERO);
> +	if (!request)
> +		return -ENOMEM;
> +
>   	err = igt_live_test_begin(&t, i915, __func__, "");
>   	if (err)
> -		return err;
> +		goto out_free;
>   
> -	for_each_engine(engine, i915, id) {
> +	idx = 0;
> +	for_each_uabi_engine(engine, i915) {
>   		struct i915_vma *batch;
>   
>   		batch = recursive_batch(i915);
> @@ -922,66 +960,69 @@ static int live_sequential_engines(void *arg)
>   			err = PTR_ERR(batch);
>   			pr_err("%s: Unable to create batch for %s, err=%d\n",
>   			       __func__, engine->name, err);
> -			return err;
> +			goto out_free;
>   		}
>   
> -		request[id] = i915_request_create(engine->kernel_context);
> -		if (IS_ERR(request[id])) {
> -			err = PTR_ERR(request[id]);
> +		request[idx] = i915_request_create(engine->kernel_context);
> +		if (IS_ERR(request[idx])) {
> +			err = PTR_ERR(request[idx]);
>   			pr_err("%s: Request allocation failed for %s with err=%d\n",
>   			       __func__, engine->name, err);
>   			goto out_request;
>   		}
>   
>   		if (prev) {
> -			err = i915_request_await_dma_fence(request[id],
> +			err = i915_request_await_dma_fence(request[idx],
>   							   &prev->fence);
>   			if (err) {
> -				i915_request_add(request[id]);
> +				i915_request_add(request[idx]);
>   				pr_err("%s: Request await failed for %s with err=%d\n",
>   				       __func__, engine->name, err);
>   				goto out_request;
>   			}
>   		}
>   
> -		err = engine->emit_bb_start(request[id],
> +		err = engine->emit_bb_start(request[idx],
>   					    batch->node.start,
>   					    batch->node.size,
>   					    0);
>   		GEM_BUG_ON(err);
> -		request[id]->batch = batch;
> +		request[idx]->batch = batch;
>   
>   		i915_vma_lock(batch);
> -		err = i915_request_await_object(request[id], batch->obj, false);
> +		err = i915_request_await_object(request[idx],
> +						batch->obj, false);
>   		if (err == 0)
> -			err = i915_vma_move_to_active(batch, request[id], 0);
> +			err = i915_vma_move_to_active(batch, request[idx], 0);
>   		i915_vma_unlock(batch);
>   		GEM_BUG_ON(err);
>   
> -		i915_request_get(request[id]);
> -		i915_request_add(request[id]);
> +		i915_request_get(request[idx]);
> +		i915_request_add(request[idx]);
>   
> -		prev = request[id];
> +		prev = request[idx];
> +		idx++;
>   	}
>   
> -	for_each_engine(engine, i915, id) {
> +	idx = 0;
> +	for_each_uabi_engine(engine, i915) {
>   		long timeout;
>   
> -		if (i915_request_completed(request[id])) {
> +		if (i915_request_completed(request[idx])) {
>   			pr_err("%s(%s): request completed too early!\n",
>   			       __func__, engine->name);
>   			err = -EINVAL;
>   			goto out_request;
>   		}
>   
> -		err = recursive_batch_resolve(request[id]->batch);
> +		err = recursive_batch_resolve(request[idx]->batch);
>   		if (err) {
>   			pr_err("%s: failed to resolve batch, err=%d\n",
>   			       __func__, err);
>   			goto out_request;
>   		}
>   
> -		timeout = i915_request_wait(request[id], 0,
> +		timeout = i915_request_wait(request[idx], 0,
>   					    MAX_SCHEDULE_TIMEOUT);
>   		if (timeout < 0) {
>   			err = timeout;
> @@ -990,30 +1031,35 @@ static int live_sequential_engines(void *arg)
>   			goto out_request;
>   		}
>   
> -		GEM_BUG_ON(!i915_request_completed(request[id]));
> +		GEM_BUG_ON(!i915_request_completed(request[idx]));
> +		idx++;
>   	}
>   
>   	err = igt_live_test_end(&t);
>   
>   out_request:
> -	for_each_engine(engine, i915, id) {
> +	idx = 0;
> +	for_each_uabi_engine(engine, i915) {
>   		u32 *cmd;
>   
> -		if (!request[id])
> +		if (!request[idx])
>   			break;
>   
> -		cmd = i915_gem_object_pin_map(request[id]->batch->obj,
> +		cmd = i915_gem_object_pin_map(request[idx]->batch->obj,
>   					      I915_MAP_WC);
>   		if (!IS_ERR(cmd)) {
>   			*cmd = MI_BATCH_BUFFER_END;
>   			intel_gt_chipset_flush(engine->gt);
>   
> -			i915_gem_object_unpin_map(request[id]->batch->obj);
> +			i915_gem_object_unpin_map(request[idx]->batch->obj);
>   		}
>   
> -		i915_vma_put(request[id]->batch);
> -		i915_request_put(request[id]);
> +		i915_vma_put(request[idx]->batch);
> +		i915_request_put(request[idx]);
> +		idx++;
>   	}
> +out_free:
> +	kfree(request);
>   	return err;
>   }
>   
> @@ -1079,9 +1125,10 @@ static int live_parallel_engines(void *arg)
>   		__live_parallel_engineN,
>   		NULL,
>   	};
> +	const unsigned int nengines = num_uabi_engines(i915);
>   	struct intel_engine_cs *engine;
> -	enum intel_engine_id id;
>   	int (* const *fn)(void *arg);
> +	struct task_struct **tsk;
>   	int err = 0;
>   
>   	/*
> @@ -1089,42 +1136,49 @@ static int live_parallel_engines(void *arg)
>   	 * tests that we load up the system maximally.
>   	 */
>   
> +	tsk = kmalloc_array(nengines, sizeof(*tsk), GFP_KERNEL | __GFP_ZERO);
> +	if (!tsk)
> +		return -ENOMEM;
> +
>   	for (fn = func; !err && *fn; fn++) {
> -		struct task_struct *tsk[I915_NUM_ENGINES] = {};
>   		struct igt_live_test t;
> +		unsigned int idx;
>   
>   		err = igt_live_test_begin(&t, i915, __func__, "");
>   		if (err)
>   			break;
>   
> -		for_each_engine(engine, i915, id) {
> -			tsk[id] = kthread_run(*fn, engine,
> +		idx = 0;
> +		for_each_uabi_engine(engine, i915) {
> +			tsk[idx] = kthread_run(*fn, engine,
>   					      "igt/parallel:%s",
>   					      engine->name);
> -			if (IS_ERR(tsk[id])) {
> -				err = PTR_ERR(tsk[id]);
> +			if (IS_ERR(tsk[idx])) {
> +				err = PTR_ERR(tsk[idx]);
>   				break;
>   			}
> -			get_task_struct(tsk[id]);
> +			get_task_struct(tsk[idx++]);
>   		}
>   
> -		for_each_engine(engine, i915, id) {
> +		idx = 0;
> +		for_each_uabi_engine(engine, i915) {
>   			int status;
>   
> -			if (IS_ERR_OR_NULL(tsk[id]))
> -				continue;
> +			if (IS_ERR(tsk[idx]))
> +				break;
>   
> -			status = kthread_stop(tsk[id]);
> +			status = kthread_stop(tsk[idx]);
>   			if (status && !err)
>   				err = status;
>   
> -			put_task_struct(tsk[id]);
> +			put_task_struct(tsk[idx++]);
>   		}
>   
>   		if (igt_live_test_end(&t))
>   			err = -EIO;
>   	}
>   
> +	kfree(tsk);
>   	return err;
>   }
>   
> @@ -1168,16 +1222,16 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
>   static int live_breadcrumbs_smoketest(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
> -	struct smoketest t[I915_NUM_ENGINES];
> -	unsigned int ncpus = num_online_cpus();
> +	const unsigned int nengines = num_uabi_engines(i915);
> +	const unsigned int ncpus = num_online_cpus();
>   	unsigned long num_waits, num_fences;
>   	struct intel_engine_cs *engine;
>   	struct task_struct **threads;
>   	struct igt_live_test live;
> -	enum intel_engine_id id;
>   	intel_wakeref_t wakeref;
>   	struct drm_file *file;
> -	unsigned int n;
> +	struct smoketest *smoke;
> +	unsigned int n, idx;
>   	int ret = 0;
>   
>   	/*
> @@ -1196,28 +1250,31 @@ static int live_breadcrumbs_smoketest(void *arg)
>   		goto out_rpm;
>   	}
>   
> -	threads = kcalloc(ncpus * I915_NUM_ENGINES,
> -			  sizeof(*threads),
> -			  GFP_KERNEL);
> -	if (!threads) {
> +	smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL);
> +	if (!smoke) {
>   		ret = -ENOMEM;
>   		goto out_file;
>   	}
>   
> -	memset(&t[0], 0, sizeof(t[0]));
> -	t[0].request_alloc = __live_request_alloc;
> -	t[0].ncontexts = 64;
> -	t[0].contexts = kmalloc_array(t[0].ncontexts,
> -				      sizeof(*t[0].contexts),
> -				      GFP_KERNEL);
> -	if (!t[0].contexts) {
> +	threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL);
> +	if (!threads) {
> +		ret = -ENOMEM;
> +		goto out_smoke;
> +	}
> +
> +	smoke[0].request_alloc = __live_request_alloc;
> +	smoke[0].ncontexts = 64;
> +	smoke[0].contexts = kmalloc_array(smoke[0].ncontexts,
> +					  sizeof(*smoke[0].contexts),
> +					  GFP_KERNEL);
> +	if (!smoke[0].contexts) {
>   		ret = -ENOMEM;
>   		goto out_threads;
>   	}
>   
> -	for (n = 0; n < t[0].ncontexts; n++) {
> -		t[0].contexts[n] = live_context(i915, file);
> -		if (!t[0].contexts[n]) {
> +	for (n = 0; n < smoke[0].ncontexts; n++) {
> +		smoke[0].contexts[n] = live_context(i915, file);
> +		if (!smoke[0].contexts[n]) {
>   			ret = -ENOMEM;
>   			goto out_contexts;
>   		}
> @@ -1227,42 +1284,46 @@ static int live_breadcrumbs_smoketest(void *arg)
>   	if (ret)
>   		goto out_contexts;
>   
> -	for_each_engine(engine, i915, id) {
> -		t[id] = t[0];
> -		t[id].engine = engine;
> -		t[id].max_batch = max_batches(t[0].contexts[0], engine);
> -		if (t[id].max_batch < 0) {
> -			ret = t[id].max_batch;
> +	idx = 0;
> +	for_each_uabi_engine(engine, i915) {
> +		smoke[idx] = smoke[0];
> +		smoke[idx].engine = engine;
> +		smoke[idx].max_batch = max_batches(smoke[0].contexts[0], engine);
> +		if (smoke[idx].max_batch < 0) {
> +			ret = smoke[idx].max_batch;
>   			goto out_flush;
>   		}
>   		/* One ring interleaved between requests from all cpus */
> -		t[id].max_batch /= num_online_cpus() + 1;
> +		smoke[idx].max_batch /= num_online_cpus() + 1;
>   		pr_debug("Limiting batches to %d requests on %s\n",
> -			 t[id].max_batch, engine->name);
> +			 smoke[idx].max_batch, engine->name);
>   
>   		for (n = 0; n < ncpus; n++) {
>   			struct task_struct *tsk;
>   
>   			tsk = kthread_run(__igt_breadcrumbs_smoketest,
> -					  &t[id], "igt/%d.%d", id, n);
> +					  &smoke[idx], "igt/%d.%d", idx, n);
>   			if (IS_ERR(tsk)) {
>   				ret = PTR_ERR(tsk);
>   				goto out_flush;
>   			}
>   
>   			get_task_struct(tsk);
> -			threads[id * ncpus + n] = tsk;
> +			threads[idx * ncpus + n] = tsk;
>   		}
> +
> +		idx++;
>   	}
>   
>   	msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies));
>   
>   out_flush:
> +	idx = 0;
>   	num_waits = 0;
>   	num_fences = 0;
> -	for_each_engine(engine, i915, id) {
> +	for_each_uabi_engine(engine, i915) {
>   		for (n = 0; n < ncpus; n++) {
> -			struct task_struct *tsk = threads[id * ncpus + n];
> +			struct task_struct *tsk = threads[idx * ncpus + n];
>   			int err;
>   
>   			if (!tsk)
> @@ -1275,17 +1336,20 @@ static int live_breadcrumbs_smoketest(void *arg)
>   			put_task_struct(tsk);
>   		}
>   
> -		num_waits += atomic_long_read(&t[id].num_waits);
> -		num_fences += atomic_long_read(&t[id].num_fences);
> +		num_waits += atomic_long_read(&smoke[idx].num_waits);
> +		num_fences += atomic_long_read(&smoke[idx].num_fences);
> +		idx++;
>   	}
>   	pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n",
>   		num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus);
>   
>   	ret = igt_live_test_end(&live) ?: ret;
>   out_contexts:
> -	kfree(t[0].contexts);
> +	kfree(smoke[0].contexts);
>   out_threads:
>   	kfree(threads);
> +out_smoke:
> +	kfree(smoke);
>   out_file:
>   	mock_file_free(i915, file);
>   out_rpm:
> 

Rest looks fine.

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Will we end up with a for_each_uabi_engine_idx iterator? And with 
num_uabi_engines stored somewhere?
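
Something like the below is what I have in mind -- purely a hypothetical 
sketch, none of these names exist today and the iteration step is just a 
placeholder:

/*
 * Hypothetical sketch only; first_uabi_engine()/next_uabi_engine() are
 * made-up stand-ins for whatever the real iteration step would be.
 */
#define for_each_uabi_engine_idx(engine__, i915__, idx__) \
	for ((idx__) = 0, (engine__) = first_uabi_engine(i915__); \
	     (engine__); \
	     (engine__) = next_uabi_engine(engine__), (idx__)++)

That would let each caller drop the manual idx = 0 / idx++ bookkeeping.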

Regards,

Tvrtko

* Re: [PATCH] drm/i915/selftests: Teach requests to use all available engines
  2019-10-17 16:54 ` [PATCH] " Tvrtko Ursulin
@ 2019-10-17 20:09   ` Chris Wilson
  0 siblings, 0 replies; 6+ messages in thread
From: Chris Wilson @ 2019-10-17 20:09 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-17 17:54:55)
> Will we end up with a for_each_uabi_engine_idx iterator? And with 
> num_uabi_engines stored somewhere?

I see the repetition, yeah, but I'm not convinced from this set that we
want to proliferate the magic macros. I expect you will notice in future
if the pattern continues to hold, and then I might not have a good excuse
to be lazy. Hopefully someone else gets caught out first :)
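
For reference, the repetition in question is just the manual index
bookkeeping wrapped around the iterator, along the lines of:

	unsigned int idx;

	idx = 0;
	for_each_uabi_engine(engine, i915) {
		/* operate on request[idx], tsk[idx], smoke[idx], ... */
		idx++;
	}

which is exactly what a for_each_uabi_engine_idx() could fold away.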
-Chris

