* [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads
@ 2018-10-01 12:32 Chris Wilson
  2018-10-01 12:32 ` [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest Chris Wilson
                   ` (6 more replies)
  0 siblings, 7 replies; 12+ messages in thread
From: Chris Wilson @ 2018-10-01 12:32 UTC (permalink / raw)
  To: intel-gfx

When submitting chains to each engine, we can do so (mostly) in
parallel, so delegate submission to threads on a per-engine basis.
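
For reference, a minimal sketch of the fan-out/join pattern the patch adopts.
The kthread_run()/kthread_stop()/IS_ERR() calls are the regular kernel API,
but the toy_* names and the busy-loop body below are illustrative only, not
the i915 selftest code:

#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/sched.h>

#define NWORKERS 4			/* stands in for the engine count */

struct toy_arg {
	int id;
	unsigned long count;		/* per-thread result, read after join */
};

static int toy_worker(void *data)
{
	struct toy_arg *arg = data;

	/* Each worker loops until the parent asks it to stop. */
	while (!kthread_should_stop()) {
		arg->count++;		/* per-engine submission would go here */
		cond_resched();
	}

	return 0;
}

static int toy_fan_out(void)
{
	struct task_struct *tsk[NWORKERS] = {};
	struct toy_arg arg[NWORKERS] = {};
	unsigned long total = 0;
	int err = 0, i;

	/* Fan out: one kthread per worker slot, each with its own argument. */
	for (i = 0; i < NWORKERS; i++) {
		arg[i].id = i;
		tsk[i] = kthread_run(toy_worker, &arg[i], "toy/worker:%d", i);
		if (IS_ERR(tsk[i])) {
			err = PTR_ERR(tsk[i]);
			break;
		}
	}

	/* Join: kthread_stop() waits for the thread and returns its exit code. */
	for (i = 0; i < NWORKERS; i++) {
		int status;

		if (IS_ERR_OR_NULL(tsk[i]))
			continue;

		status = kthread_stop(tsk[i]);
		if (status && !err)
			err = status;

		total += arg[i].count;
	}

	pr_info("toy: %lu iterations across %d workers\n", total, NWORKERS);
	return err;
}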

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 73 ++++++++++++++++++----
 1 file changed, 61 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 3a474bb64c05..d68a924c530e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -587,8 +587,10 @@ static int random_priority(struct rnd_state *rnd)
 struct preempt_smoke {
 	struct drm_i915_private *i915;
 	struct i915_gem_context **contexts;
+	struct intel_engine_cs *engine;
 	unsigned int ncontext;
 	struct rnd_state prng;
+	unsigned long count;
 };
 
 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
@@ -597,31 +599,78 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
 							  &smoke->prng)];
 }
 
+static int smoke_crescendo_thread(void *arg)
+{
+	struct preempt_smoke *smoke = arg;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_gem_context *ctx = smoke_context(smoke);
+		struct i915_request *rq;
+
+		mutex_lock(&smoke->i915->drm.struct_mutex);
+
+		ctx->sched.priority = count % I915_PRIORITY_MAX;
+
+		rq = i915_request_alloc(smoke->engine, ctx);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&smoke->i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+
+		mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	smoke->count = count;
+	return 0;
+}
+
 static int smoke_crescendo(struct preempt_smoke *smoke)
 {
+	struct task_struct *tsk[I915_NUM_ENGINES] = {};
+	struct preempt_smoke arg[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	unsigned long count;
+	int err = 0;
+
+	mutex_unlock(&smoke->i915->drm.struct_mutex);
 
-	count = 0;
 	for_each_engine(engine, smoke->i915, id) {
-		IGT_TIMEOUT(end_time);
+		arg[id] = *smoke;
+		arg[id].engine = engine;
+		arg[id].count = 0;
+
+		tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
+				      "igt/smoke:%d", id);
+		if (IS_ERR(tsk[id])) {
+			err = PTR_ERR(tsk[id]);
+			break;
+		}
+	}
 
-		do {
-			struct i915_gem_context *ctx = smoke_context(smoke);
-			struct i915_request *rq;
+	count = 0;
+	for_each_engine(engine, smoke->i915, id) {
+		int status;
 
-			ctx->sched.priority = count % I915_PRIORITY_MAX;
+		if (IS_ERR_OR_NULL(tsk[id]))
+			continue;
 
-			rq = i915_request_alloc(engine, ctx);
-			if (IS_ERR(rq))
-				return PTR_ERR(rq);
+		status = kthread_stop(tsk[id]);
+		if (status && !err)
+			err = status;
 
-			i915_request_add(rq);
-			count++;
-		} while (!__igt_timeout(end_time, NULL));
+		count += arg[id].count;
 	}
 
+	mutex_lock(&smoke->i915->drm.struct_mutex);
+
 	pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
 		count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
 	return 0;
-- 
2.19.0


* [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest
  2018-10-01 12:32 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
@ 2018-10-01 12:32 ` Chris Wilson
  2018-10-01 14:01   ` Tvrtko Ursulin
  2018-10-01 12:32 ` [PATCH 3/4] drm/i915: Reserve some priority bits for internal use Chris Wilson
                   ` (5 subsequent siblings)
  6 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2018-10-01 12:32 UTC (permalink / raw)
  To: intel-gfx

Include a batch comprising a full page of arbitration points in order to
provide a window for inject_preempt_context() in the preemption smoketests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 134 ++++++++++++++++-----
 1 file changed, 101 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index d68a924c530e..d67fe8335ceb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -588,6 +588,7 @@ struct preempt_smoke {
 	struct drm_i915_private *i915;
 	struct i915_gem_context **contexts;
 	struct intel_engine_cs *engine;
+	struct drm_i915_gem_object *batch;
 	unsigned int ncontext;
 	struct rnd_state prng;
 	unsigned long count;
@@ -599,6 +600,49 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
 							  &smoke->prng)];
 }
 
+static int smoke_submit(struct preempt_smoke *smoke,
+			struct i915_gem_context *ctx, int prio,
+			struct drm_i915_gem_object *batch)
+{
+	struct i915_request *rq;
+	struct i915_vma *vma = NULL;
+	int err = 0;
+
+	if (batch) {
+		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
+		if (IS_ERR(vma))
+			return PTR_ERR(vma);
+
+		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		if (err)
+			return err;
+	}
+
+	ctx->sched.priority = prio;
+
+	rq = i915_request_alloc(smoke->engine, ctx);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto unpin;
+	}
+
+	if (vma) {
+		err = rq->engine->emit_bb_start(rq,
+						vma->node.start,
+						PAGE_SIZE, 0);
+		if (!err)
+			err = i915_vma_move_to_active(vma, rq, 0);
+	}
+
+	i915_request_add(rq);
+
+unpin:
+	if (vma)
+		i915_vma_unpin(vma);
+
+	return err;
+}
+
 static int smoke_crescendo_thread(void *arg)
 {
 	struct preempt_smoke *smoke = arg;
@@ -608,21 +652,15 @@ static int smoke_crescendo_thread(void *arg)
 	count = 0;
 	do {
 		struct i915_gem_context *ctx = smoke_context(smoke);
-		struct i915_request *rq;
+		int err;
 
 		mutex_lock(&smoke->i915->drm.struct_mutex);
-
-		ctx->sched.priority = count % I915_PRIORITY_MAX;
-
-		rq = i915_request_alloc(smoke->engine, ctx);
-		if (IS_ERR(rq)) {
-			mutex_unlock(&smoke->i915->drm.struct_mutex);
-			return PTR_ERR(rq);
-		}
-
-		i915_request_add(rq);
-
+		err = smoke_submit(smoke,
+				   ctx, count % I915_PRIORITY_MAX,
+				   smoke->batch);
 		mutex_unlock(&smoke->i915->drm.struct_mutex);
+		if (err)
+			return err;
 
 		count++;
 	} while (!__igt_timeout(end_time, NULL));
@@ -631,7 +669,8 @@ static int smoke_crescendo_thread(void *arg)
 	return 0;
 }
 
-static int smoke_crescendo(struct preempt_smoke *smoke)
+static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
+#define BATCH BIT(0)
 {
 	struct task_struct *tsk[I915_NUM_ENGINES] = {};
 	struct preempt_smoke arg[I915_NUM_ENGINES];
@@ -645,6 +684,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke)
 	for_each_engine(engine, smoke->i915, id) {
 		arg[id] = *smoke;
 		arg[id].engine = engine;
+		if (!(flags & BATCH))
+			arg[id].batch = NULL;
 		arg[id].count = 0;
 
 		tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
@@ -671,37 +712,37 @@ static int smoke_crescendo(struct preempt_smoke *smoke)
 
 	mutex_lock(&smoke->i915->drm.struct_mutex);
 
-	pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
-		count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
+		count, flags,
+		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
 	return 0;
 }
 
-static int smoke_random(struct preempt_smoke *smoke)
+static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
 {
-	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	IGT_TIMEOUT(end_time);
 	unsigned long count;
 
 	count = 0;
 	do {
-		for_each_engine(engine, smoke->i915, id) {
+		for_each_engine(smoke->engine, smoke->i915, id) {
 			struct i915_gem_context *ctx = smoke_context(smoke);
-			struct i915_request *rq;
-
-			ctx->sched.priority = random_priority(&smoke->prng);
+			int err;
 
-			rq = i915_request_alloc(engine, ctx);
-			if (IS_ERR(rq))
-				return PTR_ERR(rq);
+			err = smoke_submit(smoke,
+					   ctx, random_priority(&smoke->prng),
+					   flags & BATCH ? smoke->batch : NULL);
+			if (err)
+				return err;
 
-			i915_request_add(rq);
 			count++;
 		}
 	} while (!__igt_timeout(end_time, NULL));
 
-	pr_info("Submitted %lu random requests across %d engines and %d contexts\n",
-		count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
+	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
+		count, flags,
+		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
 	return 0;
 }
 
@@ -712,7 +753,9 @@ static int live_preempt_smoke(void *arg)
 		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
 		.ncontext = 1024,
 	};
+	const unsigned int phase[] = { 0, BATCH };
 	int err = -ENOMEM;
+	u32 *cs;
 	int n;
 
 	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
@@ -727,19 +770,41 @@ static int live_preempt_smoke(void *arg)
 	mutex_lock(&smoke.i915->drm.struct_mutex);
 	intel_runtime_pm_get(smoke.i915);
 
+	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
+	if (IS_ERR(smoke.batch)) {
+		err = PTR_ERR(smoke.batch);
+		goto err_unlock;
+	}
+
+	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
+	if (IS_ERR(cs)) {
+		err = PTR_ERR(cs);
+		goto err_batch;
+	}
+	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
+		cs[n] = MI_ARB_CHECK;
+	cs[n] = MI_BATCH_BUFFER_END;
+	i915_gem_object_unpin_map(smoke.batch);
+
+	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
+	if (err)
+		goto err_batch;
+
 	for (n = 0; n < smoke.ncontext; n++) {
 		smoke.contexts[n] = kernel_context(smoke.i915);
 		if (!smoke.contexts[n])
 			goto err_ctx;
 	}
 
-	err = smoke_crescendo(&smoke);
-	if (err)
-		goto err_ctx;
+	for (n = 0; n < ARRAY_SIZE(phase); n++) {
+		err = smoke_crescendo(&smoke, phase[n]);
+		if (err)
+			goto err_ctx;
 
-	err = smoke_random(&smoke);
-	if (err)
-		goto err_ctx;
+		err = smoke_random(&smoke, phase[n]);
+		if (err)
+			goto err_ctx;
+	}
 
 err_ctx:
 	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
@@ -751,6 +816,9 @@ static int live_preempt_smoke(void *arg)
 		kernel_context_close(smoke.contexts[n]);
 	}
 
+err_batch:
+	i915_gem_object_put(smoke.batch);
+err_unlock:
 	intel_runtime_pm_put(smoke.i915);
 	mutex_unlock(&smoke.i915->drm.struct_mutex);
 	kfree(smoke.contexts);
-- 
2.19.0


* [PATCH 3/4] drm/i915: Reserve some priority bits for internal use
  2018-10-01 12:32 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
  2018-10-01 12:32 ` [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest Chris Wilson
@ 2018-10-01 12:32 ` Chris Wilson
  2018-10-01 12:32 ` [PATCH 4/4] drm/i915: Combine multiple internal plists into the same i915_priolist bucket Chris Wilson
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2018-10-01 12:32 UTC (permalink / raw)
  To: intel-gfx

In the next few patches, we will want to give a small priority boost to
some requests/queues, but not so much that we perturb the user-controlled
order. As such, we will shift the user priority bits higher, leaving
ourselves a few low priority bits for our internal bumping.
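
To make the encoding concrete, a hedged worked example. This patch still
defines I915_USER_PRIORITY_SHIFT as 0 (so the encoding is a no-op for now);
the shift of 2 below is only an assumed illustration of where the series is
heading, using toy_* names rather than the real macros:

#include <linux/bits.h>

/* Assume a later patch raises the shift to 2, i.e. 4 internal sublevels. */
#define TOY_USER_PRIORITY_SHIFT	2
#define TOY_USER_PRIORITY(x)	((x) << TOY_USER_PRIORITY_SHIFT)
#define TOY_PRIORITY_COUNT	BIT(TOY_USER_PRIORITY_SHIFT)	/* 4 */
#define TOY_PRIORITY_MASK	(TOY_PRIORITY_COUNT - 1)	/* 0x3 */

static void toy_priority_example(void)
{
	int stored, boosted, user;

	stored  = TOY_USER_PRIORITY(3);	/* user level 3 stored as 12 */
	boosted = stored | 1;		/* internal bump: 13, still below level 4 (16) */
	user    = boosted >> TOY_USER_PRIORITY_SHIFT;	/* 3, as getparam would report */

	(void)user;
}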

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            | 2 +-
 drivers/gpu/drm/i915/i915_gem_context.c    | 9 +++++----
 drivers/gpu/drm/i915/i915_scheduler.h      | 6 ++++++
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 8 +++++---
 4 files changed, 17 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 30191523c309..b672ed0cac24 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3236,7 +3236,7 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_PRIORITY_MAX
+#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
 
 int __must_check
 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 15c92f75b1b8..8cbe58070561 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -337,7 +337,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	kref_init(&ctx->ref);
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
-	ctx->sched.priority = I915_PRIORITY_NORMAL;
+	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
 
 	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
 		struct intel_context *ce = &ctx->__engine[n];
@@ -504,7 +504,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 	}
 
 	i915_gem_context_clear_bannable(ctx);
-	ctx->sched.priority = prio;
+	ctx->sched.priority = I915_USER_PRIORITY(prio);
 	ctx->ring_size = PAGE_SIZE;
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -879,7 +879,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		args->value = i915_gem_context_is_bannable(ctx);
 		break;
 	case I915_CONTEXT_PARAM_PRIORITY:
-		args->value = ctx->sched.priority;
+		args->value = ctx->sched.priority >> I915_USER_PRIORITY_SHIFT;
 		break;
 	default:
 		ret = -EINVAL;
@@ -948,7 +948,8 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,
 				 !capable(CAP_SYS_NICE))
 				ret = -EPERM;
 			else
-				ctx->sched.priority = priority;
+				ctx->sched.priority =
+					I915_USER_PRIORITY(priority);
 		}
 		break;
 
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 70a42220358d..89d456312557 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -19,6 +19,12 @@ enum {
 	I915_PRIORITY_INVALID = INT_MIN
 };
 
+#define I915_USER_PRIORITY_SHIFT 0
+#define I915_USER_PRIORITY(x) ((x) << I915_USER_PRIORITY_SHIFT)
+
+#define I915_PRIORITY_COUNT BIT(I915_USER_PRIORITY_SHIFT)
+#define I915_PRIORITY_MASK (I915_PRIORITY_COUNT - 1)
+
 struct i915_sched_attr {
 	/**
 	 * @priority: execution and service priority
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index d67fe8335ceb..c8b7f03c35bd 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -292,12 +292,14 @@ static int live_preempt(void *arg)
 	ctx_hi = kernel_context(i915);
 	if (!ctx_hi)
 		goto err_spin_lo;
-	ctx_hi->sched.priority = I915_CONTEXT_MAX_USER_PRIORITY;
+	ctx_hi->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
 
 	ctx_lo = kernel_context(i915);
 	if (!ctx_lo)
 		goto err_ctx_hi;
-	ctx_lo->sched.priority = I915_CONTEXT_MIN_USER_PRIORITY;
+	ctx_lo->sched.priority =
+		I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
 
 	for_each_engine(engine, i915, id) {
 		struct i915_request *rq;
@@ -418,7 +420,7 @@ static int live_late_preempt(void *arg)
 			goto err_wedged;
 		}
 
-		attr.priority = I915_PRIORITY_MAX;
+		attr.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX);
 		engine->schedule(rq, &attr);
 
 		if (!wait_for_spinner(&spin_hi, rq)) {
-- 
2.19.0


* [PATCH 4/4] drm/i915: Combine multiple internal plists into the same i915_priolist bucket
  2018-10-01 12:32 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
  2018-10-01 12:32 ` [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest Chris Wilson
  2018-10-01 12:32 ` [PATCH 3/4] drm/i915: Reserve some priority bits for internal use Chris Wilson
@ 2018-10-01 12:32 ` Chris Wilson
  2018-10-01 12:45 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads Patchwork
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2018-10-01 12:32 UTC (permalink / raw)
  To: intel-gfx

As we are about to allow ourselves to slightly bump the user priority
into a few different sublevels, pack those internal priority lists into
the same i915_priolist to keep the rbtree compact and avoid having to
allocate the default user priority even after the internal bumping.
The downside to having a requests[] array rather than a node per active
list is that we then have to walk over the empty higher priority lists.
To compensate, we track the active buckets and use a small bitmap to
skip over any inactive ones.
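
A hedged, toy-named sketch of that bucket/bitmap idea; the driver's real
types and iterators are i915_priolist and the priolist_for_each_request*()
macros added below, so the toy_* names and the submit callback here are
illustrative only:

#include <linux/bitops.h>
#include <linux/list.h>

#define TOY_NBUCKETS 4		/* stands in for I915_PRIORITY_COUNT */

struct toy_request {		/* stands in for i915_request */
	struct list_head link;
};

struct toy_priolist {		/* stands in for i915_priolist */
	struct list_head requests[TOY_NBUCKETS]; /* slot 0 = highest sublevel */
	unsigned long used;	/* bit i set => requests[i] is non-empty */
};

/* Consume every queued request, highest sublevel first, skipping empty buckets. */
static void toy_drain(struct toy_priolist *p,
		      void (*submit)(struct toy_request *rq))
{
	int i;

	while ((i = ffs(p->used))) {
		struct toy_request *rq, *rn;

		list_for_each_entry_safe(rq, rn, &p->requests[i - 1], link) {
			list_del_init(&rq->link);
			submit(rq);
		}

		/* Bucket emptied: clear its bit so ffs() moves to the next one. */
		p->used &= ~BIT(i - 1);
	}
}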

v2: Use MASK of internal levels to simplify our usage.
v3: Prevent overflow when SHIFT is zero.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/intel_engine_cs.c      |  6 +-
 drivers/gpu/drm/i915/intel_guc_submission.c | 12 ++-
 drivers/gpu/drm/i915/intel_lrc.c            | 87 ++++++++++++++-------
 drivers/gpu/drm/i915/intel_ringbuffer.h     | 13 ++-
 4 files changed, 80 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 6726d57f018f..1c6143bdf5a4 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1534,10 +1534,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	count = 0;
 	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
-		struct i915_priolist *p =
-			rb_entry(rb, typeof(*p), node);
+		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+		int i;
 
-		list_for_each_entry(rq, &p->requests, sched.link) {
+		priolist_for_each_request(rq, p, i) {
 			if (count++ < MAX_REQUESTS_TO_SHOW - 1)
 				print_request(m, rq, "\t\tQ ");
 			else
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 4874a212754c..ac862b42f6a1 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -746,30 +746,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
+		int i;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+		priolist_for_each_request_consume(rq, rn, p, i) {
 			if (last && rq->hw_context != last->hw_context) {
-				if (port == last_port) {
-					__list_del_many(&p->requests,
-							&rq->sched.link);
+				if (port == last_port)
 					goto done;
-				}
 
 				if (submit)
 					port_assign(port, last);
 				port++;
 			}
 
-			INIT_LIST_HEAD(&rq->sched.link);
+			list_del_init(&rq->sched.link);
 
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
+
 			last = rq;
 			submit = true;
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 15345e74d8ce..4ee00f531153 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,14 +259,49 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 	ce->lrc_desc = desc;
 }
 
-static struct i915_priolist *
+static void assert_priolists(struct intel_engine_execlists * const execlists,
+			     long queue_priority)
+{
+	struct rb_node *rb;
+	long last_prio, i;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		return;
+
+	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
+		   rb_first(&execlists->queue.rb_root));
+
+	last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+		struct i915_priolist *p = to_priolist(rb);
+
+		GEM_BUG_ON(p->priority >= last_prio);
+		last_prio = p->priority;
+
+		GEM_BUG_ON(!p->used);
+		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
+			if (list_empty(&p->requests[i]))
+				continue;
+
+			GEM_BUG_ON(!(p->used & BIT(i)));
+		}
+	}
+}
+
+static struct list_head *
 lookup_priolist(struct intel_engine_cs *engine, int prio)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_priolist *p;
 	struct rb_node **parent, *rb;
 	bool first = true;
+	int idx, i;
+
+	assert_priolists(execlists, INT_MAX);
 
+	/* buckets sorted from highest [in slot 0] to lowest priority */
+	idx = I915_PRIORITY_COUNT - (prio & I915_PRIORITY_MASK) - 1;
+	prio >>= I915_USER_PRIORITY_SHIFT;
 	if (unlikely(execlists->no_priolist))
 		prio = I915_PRIORITY_NORMAL;
 
@@ -283,7 +318,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
 			parent = &rb->rb_right;
 			first = false;
 		} else {
-			return p;
+			goto out;
 		}
 	}
 
@@ -309,11 +344,15 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
 	}
 
 	p->priority = prio;
-	INIT_LIST_HEAD(&p->requests);
+	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+		INIT_LIST_HEAD(&p->requests[i]);
 	rb_link_node(&p->node, rb, parent);
 	rb_insert_color_cached(&p->node, &execlists->queue, first);
+	p->used = 0;
 
-	return p;
+out:
+	p->used |= BIT(idx);
+	return &p->requests[idx];
 }
 
 static void unwind_wa_tail(struct i915_request *rq)
@@ -325,7 +364,7 @@ static void unwind_wa_tail(struct i915_request *rq)
 static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq, *rn;
-	struct i915_priolist *uninitialized_var(p);
+	struct list_head *uninitialized_var(pl);
 	int last_prio = I915_PRIORITY_INVALID;
 
 	lockdep_assert_held(&engine->timeline.lock);
@@ -342,12 +381,11 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
 		if (rq_prio(rq) != last_prio) {
 			last_prio = rq_prio(rq);
-			p = lookup_priolist(engine, last_prio);
+			pl = lookup_priolist(engine, last_prio);
 		}
 		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 
-		GEM_BUG_ON(p->priority != rq_prio(rq));
-		list_add(&rq->sched.link, &p->requests);
+		list_add(&rq->sched.link, pl);
 	}
 }
 
@@ -664,8 +702,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
+		int i;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+		priolist_for_each_request_consume(rq, rn, p, i) {
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not
@@ -684,11 +723,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * combine this request with the last, then we
 				 * are done.
 				 */
-				if (port == last_port) {
-					__list_del_many(&p->requests,
-							&rq->sched.link);
+				if (port == last_port)
 					goto done;
-				}
 
 				/*
 				 * If GVT overrides us we only ever submit
@@ -698,11 +734,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * request) to the second port.
 				 */
 				if (ctx_single_port_submission(last->hw_context) ||
-				    ctx_single_port_submission(rq->hw_context)) {
-					__list_del_many(&p->requests,
-							&rq->sched.link);
+				    ctx_single_port_submission(rq->hw_context))
 					goto done;
-				}
 
 				GEM_BUG_ON(last->hw_context == rq->hw_context);
 
@@ -713,15 +746,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(port_isset(port));
 			}
 
-			INIT_LIST_HEAD(&rq->sched.link);
+			list_del_init(&rq->sched.link);
+
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
+
 			last = rq;
 			submit = true;
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
@@ -745,6 +779,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 */
 	execlists->queue_priority =
 		port != execlists->port ? rq_prio(last) : INT_MIN;
+	assert_priolists(execlists, execlists->queue_priority);
 
 	if (submit) {
 		port_assign(port, last);
@@ -856,16 +891,16 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	/* Flush the queued requests to the timeline list (for retiring). */
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
+		int i;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-			INIT_LIST_HEAD(&rq->sched.link);
+		priolist_for_each_request_consume(rq, rn, p, i) {
+			list_del_init(&rq->sched.link);
 
 			dma_fence_set_error(&rq->fence, -EIO);
 			__i915_request_submit(rq);
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
@@ -1071,8 +1106,7 @@ static void queue_request(struct intel_engine_cs *engine,
 			  struct i915_sched_node *node,
 			  int prio)
 {
-	list_add_tail(&node->link,
-		      &lookup_priolist(engine, prio)->requests);
+	list_add_tail(&node->link, lookup_priolist(engine, prio));
 }
 
 static void __update_queue(struct intel_engine_cs *engine, int prio)
@@ -1142,7 +1176,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 static void execlists_schedule(struct i915_request *request,
 			       const struct i915_sched_attr *attr)
 {
-	struct i915_priolist *uninitialized_var(pl);
+	struct list_head *uninitialized_var(pl);
 	struct intel_engine_cs *engine, *last;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
@@ -1241,8 +1275,7 @@ static void execlists_schedule(struct i915_request *request,
 				pl = lookup_priolist(engine, prio);
 				last = engine;
 			}
-			GEM_BUG_ON(pl->priority != prio);
-			list_move_tail(&node->link, &pl->requests);
+			list_move_tail(&node->link, pl);
 		} else {
 			/*
 			 * If the request is not in the priolist queue because
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..1534de5bb852 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -190,11 +190,22 @@ enum intel_engine_id {
 };
 
 struct i915_priolist {
+	struct list_head requests[I915_PRIORITY_COUNT];
 	struct rb_node node;
-	struct list_head requests;
+	unsigned long used;
 	int priority;
 };
 
+#define priolist_for_each_request(it, plist, idx) \
+	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+		list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+	for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+		list_for_each_entry_safe(it, n, \
+					 &(plist)->requests[idx - 1], \
+					 sched.link)
+
 struct st_preempt_hang {
 	struct completion completion;
 	bool inject_hang;
-- 
2.19.0


* ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads
  2018-10-01 12:32 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
                   ` (2 preceding siblings ...)
  2018-10-01 12:32 ` [PATCH 4/4] drm/i915: Combine multiple internal plists into the same i915_priolist bucket Chris Wilson
@ 2018-10-01 12:45 ` Patchwork
  2018-10-01 13:08 ` ✓ Fi.CI.BAT: success " Patchwork
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-10-01 12:45 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads
URL   : https://patchwork.freedesktop.org/series/50389/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
7cbb475e40b4 drm/i915/selftests: Split preemption smoke test into threads
-:33: WARNING:LINE_SPACING: Missing a blank line after declarations
#33: FILE: drivers/gpu/drm/i915/selftests/intel_lrc.c:605:
+	struct preempt_smoke *smoke = arg;
+	IGT_TIMEOUT(end_time);

total: 0 errors, 1 warnings, 0 checks, 100 lines checked
e3babdb4224b drm/i915/selftests: Include arbitration points in preemption smoketest
a4e4f339f7f9 drm/i915: Reserve some priority bits for internal use
f6db93f476e4 drm/i915: Combine multiple internal plists into the same i915_priolist bucket
-:168: WARNING:FUNCTION_ARGUMENTS: function definition argument 'pl' should also have an identifier name
#168: FILE: drivers/gpu/drm/i915/intel_lrc.c:367:
+	struct list_head *uninitialized_var(pl);

-:286: WARNING:FUNCTION_ARGUMENTS: function definition argument 'pl' should also have an identifier name
#286: FILE: drivers/gpu/drm/i915/intel_lrc.c:1179:
+	struct list_head *uninitialized_var(pl);

-:315: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'plist' - possible side-effects?
#315: FILE: drivers/gpu/drm/i915/intel_ringbuffer.h:199:
+#define priolist_for_each_request(it, plist, idx) \
+	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+		list_for_each_entry(it, &(plist)->requests[idx], sched.link)

-:315: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'idx' - possible side-effects?
#315: FILE: drivers/gpu/drm/i915/intel_ringbuffer.h:199:
+#define priolist_for_each_request(it, plist, idx) \
+	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+		list_for_each_entry(it, &(plist)->requests[idx], sched.link)

-:319: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'plist' - possible side-effects?
#319: FILE: drivers/gpu/drm/i915/intel_ringbuffer.h:203:
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+	for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+		list_for_each_entry_safe(it, n, \
+					 &(plist)->requests[idx - 1], \
+					 sched.link)

-:319: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'idx' - possible side-effects?
#319: FILE: drivers/gpu/drm/i915/intel_ringbuffer.h:203:
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+	for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+		list_for_each_entry_safe(it, n, \
+					 &(plist)->requests[idx - 1], \
+					 sched.link)

total: 0 errors, 2 warnings, 4 checks, 272 lines checked


* ✓ Fi.CI.BAT: success for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads
  2018-10-01 12:32 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
                   ` (3 preceding siblings ...)
  2018-10-01 12:45 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads Patchwork
@ 2018-10-01 13:08 ` Patchwork
  2018-10-01 13:16 ` [PATCH 1/4] " Tvrtko Ursulin
  2018-10-01 14:41 ` ✓ Fi.CI.IGT: success for series starting with [1/4] " Patchwork
  6 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-10-01 13:08 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads
URL   : https://patchwork.freedesktop.org/series/50389/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_4906 -> Patchwork_10306 =

== Summary - SUCCESS ==

  No regressions found.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/50389/revisions/1/mbox/

== Known issues ==

  Here are the changes found in Patchwork_10306 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@kms_flip@basic-flip-vs-modeset:
      fi-ilk-650:         PASS -> DMESG-WARN (fdo#106387)

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b:
      fi-bdw-samus:       NOTRUN -> INCOMPLETE (fdo#107773)

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-c:
      fi-icl-u:           PASS -> INCOMPLETE (fdo#107713)

    igt@pm_rpm@module-reload:
      fi-skl-caroline:    NOTRUN -> INCOMPLETE (fdo#107807)

    
    ==== Possible fixes ====

    igt@gem_exec_suspend@basic-s3:
      fi-bdw-samus:       INCOMPLETE (fdo#107773) -> PASS

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
      fi-byt-clapper:     FAIL (fdo#107362, fdo#103191) -> PASS

    
  fdo#103191 https://bugs.freedesktop.org/show_bug.cgi?id=103191
  fdo#106387 https://bugs.freedesktop.org/show_bug.cgi?id=106387
  fdo#107362 https://bugs.freedesktop.org/show_bug.cgi?id=107362
  fdo#107713 https://bugs.freedesktop.org/show_bug.cgi?id=107713
  fdo#107773 https://bugs.freedesktop.org/show_bug.cgi?id=107773
  fdo#107807 https://bugs.freedesktop.org/show_bug.cgi?id=107807


== Participating hosts (50 -> 45) ==

  Additional (2): fi-skl-caroline fi-snb-2520m 
  Missing    (7): fi-ilk-m540 fi-byt-squawks fi-icl-u2 fi-bsw-cyan fi-ctg-p8600 fi-gdg-551 fi-pnv-d510 


== Build changes ==

    * Linux: CI_DRM_4906 -> Patchwork_10306

  CI_DRM_4906: 187637a6495f71dd240d02badbf2fecc1e3c1bb2 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4658: cab89ce2c5da684d01deff402d4e8e11441beadb @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_10306: f6db93f476e4b04720ff6dabb561daa96b3fba6f @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

f6db93f476e4 drm/i915: Combine multiple internal plists into the same i915_priolist bucket
a4e4f339f7f9 drm/i915: Reserve some priority bits for internal use
e3babdb4224b drm/i915/selftests: Include arbitration points in preemption smoketest
7cbb475e40b4 drm/i915/selftests: Split preemption smoke test into threads

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_10306/issues.html

* Re: [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads
  2018-10-01 12:32 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
                   ` (4 preceding siblings ...)
  2018-10-01 13:08 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2018-10-01 13:16 ` Tvrtko Ursulin
  2018-10-01 14:41 ` ✓ Fi.CI.IGT: success for series starting with [1/4] " Patchwork
  6 siblings, 0 replies; 12+ messages in thread
From: Tvrtko Ursulin @ 2018-10-01 13:16 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 01/10/2018 13:32, Chris Wilson wrote:
> When submitting chains to each engine, we can do so (mostly) in
> parallel, so delegate submission to threads on a per-engine basis.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/selftests/intel_lrc.c | 73 ++++++++++++++++++----
>   1 file changed, 61 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> index 3a474bb64c05..d68a924c530e 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> @@ -587,8 +587,10 @@ static int random_priority(struct rnd_state *rnd)
>   struct preempt_smoke {
>   	struct drm_i915_private *i915;
>   	struct i915_gem_context **contexts;
> +	struct intel_engine_cs *engine;
>   	unsigned int ncontext;
>   	struct rnd_state prng;
> +	unsigned long count;
>   };
>   
>   static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
> @@ -597,31 +599,78 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
>   							  &smoke->prng)];
>   }
>   
> +static int smoke_crescendo_thread(void *arg)
> +{
> +	struct preempt_smoke *smoke = arg;
> +	IGT_TIMEOUT(end_time);
> +	unsigned long count;
> +
> +	count = 0;
> +	do {
> +		struct i915_gem_context *ctx = smoke_context(smoke);
> +		struct i915_request *rq;
> +
> +		mutex_lock(&smoke->i915->drm.struct_mutex);
> +
> +		ctx->sched.priority = count % I915_PRIORITY_MAX;
> +
> +		rq = i915_request_alloc(smoke->engine, ctx);
> +		if (IS_ERR(rq)) {
> +			mutex_unlock(&smoke->i915->drm.struct_mutex);
> +			return PTR_ERR(rq);
> +		}
> +
> +		i915_request_add(rq);
> +
> +		mutex_unlock(&smoke->i915->drm.struct_mutex);
> +
> +		count++;
> +	} while (!__igt_timeout(end_time, NULL));
> +
> +	smoke->count = count;
> +	return 0;
> +}
> +
>   static int smoke_crescendo(struct preempt_smoke *smoke)
>   {
> +	struct task_struct *tsk[I915_NUM_ENGINES] = {};
> +	struct preempt_smoke arg[I915_NUM_ENGINES];
>   	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
>   	unsigned long count;
> +	int err = 0;
> +
> +	mutex_unlock(&smoke->i915->drm.struct_mutex);
>   
> -	count = 0;
>   	for_each_engine(engine, smoke->i915, id) {
> -		IGT_TIMEOUT(end_time);
> +		arg[id] = *smoke;
> +		arg[id].engine = engine;
> +		arg[id].count = 0;
> +
> +		tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
> +				      "igt/smoke:%d", id);
> +		if (IS_ERR(tsk[id])) {
> +			err = PTR_ERR(tsk[id]);
> +			break;
> +		}
> +	}
>   
> -		do {
> -			struct i915_gem_context *ctx = smoke_context(smoke);
> -			struct i915_request *rq;
> +	count = 0;
> +	for_each_engine(engine, smoke->i915, id) {
> +		int status;
>   
> -			ctx->sched.priority = count % I915_PRIORITY_MAX;
> +		if (IS_ERR_OR_NULL(tsk[id]))
> +			continue;
>   
> -			rq = i915_request_alloc(engine, ctx);
> -			if (IS_ERR(rq))
> -				return PTR_ERR(rq);
> +		status = kthread_stop(tsk[id]);
> +		if (status && !err)
> +			err = status;
>   
> -			i915_request_add(rq);
> -			count++;
> -		} while (!__igt_timeout(end_time, NULL));
> +		count += arg[id].count;
>   	}
>   
> +	mutex_lock(&smoke->i915->drm.struct_mutex);
> +
>   	pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
>   		count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
>   	return 0;
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

* Re: [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest
  2018-10-01 12:32 ` [PATCH 2/4] drm/i915/selftests: Include arbitration points in preemption smoketest Chris Wilson
@ 2018-10-01 14:01   ` Tvrtko Ursulin
  0 siblings, 0 replies; 12+ messages in thread
From: Tvrtko Ursulin @ 2018-10-01 14:01 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 01/10/2018 13:32, Chris Wilson wrote:
> Include a batch comprising a full page of arbitration points in order to
> provide a window for inject_preempt_context() in the preemption smoketests.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/selftests/intel_lrc.c | 134 ++++++++++++++++-----
>   1 file changed, 101 insertions(+), 33 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> index d68a924c530e..d67fe8335ceb 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> @@ -588,6 +588,7 @@ struct preempt_smoke {
>   	struct drm_i915_private *i915;
>   	struct i915_gem_context **contexts;
>   	struct intel_engine_cs *engine;
> +	struct drm_i915_gem_object *batch;
>   	unsigned int ncontext;
>   	struct rnd_state prng;
>   	unsigned long count;
> @@ -599,6 +600,49 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
>   							  &smoke->prng)];
>   }
>   
> +static int smoke_submit(struct preempt_smoke *smoke,
> +			struct i915_gem_context *ctx, int prio,
> +			struct drm_i915_gem_object *batch)
> +{
> +	struct i915_request *rq;
> +	struct i915_vma *vma = NULL;
> +	int err = 0;
> +
> +	if (batch) {
> +		vma = i915_vma_instance(batch, &ctx->ppgtt->vm, NULL);
> +		if (IS_ERR(vma))
> +			return PTR_ERR(vma);
> +
> +		err = i915_vma_pin(vma, 0, 0, PIN_USER);
> +		if (err)
> +			return err;
> +	}
> +
> +	ctx->sched.priority = prio;
> +
> +	rq = i915_request_alloc(smoke->engine, ctx);
> +	if (IS_ERR(rq)) {
> +		err = PTR_ERR(rq);
> +		goto unpin;
> +	}
> +
> +	if (vma) {
> +		err = rq->engine->emit_bb_start(rq,
> +						vma->node.start,
> +						PAGE_SIZE, 0);
> +		if (!err)
> +			err = i915_vma_move_to_active(vma, rq, 0);
> +	}
> +
> +	i915_request_add(rq);
> +
> +unpin:
> +	if (vma)
> +		i915_vma_unpin(vma);
> +
> +	return err;
> +}
> +
>   static int smoke_crescendo_thread(void *arg)
>   {
>   	struct preempt_smoke *smoke = arg;
> @@ -608,21 +652,15 @@ static int smoke_crescendo_thread(void *arg)
>   	count = 0;
>   	do {
>   		struct i915_gem_context *ctx = smoke_context(smoke);
> -		struct i915_request *rq;
> +		int err;
>   
>   		mutex_lock(&smoke->i915->drm.struct_mutex);
> -
> -		ctx->sched.priority = count % I915_PRIORITY_MAX;
> -
> -		rq = i915_request_alloc(smoke->engine, ctx);
> -		if (IS_ERR(rq)) {
> -			mutex_unlock(&smoke->i915->drm.struct_mutex);
> -			return PTR_ERR(rq);
> -		}
> -
> -		i915_request_add(rq);
> -
> +		err = smoke_submit(smoke,
> +				   ctx, count % I915_PRIORITY_MAX,
> +				   smoke->batch);
>   		mutex_unlock(&smoke->i915->drm.struct_mutex);
> +		if (err)
> +			return err;
>   
>   		count++;
>   	} while (!__igt_timeout(end_time, NULL));
> @@ -631,7 +669,8 @@ static int smoke_crescendo_thread(void *arg)
>   	return 0;
>   }
>   
> -static int smoke_crescendo(struct preempt_smoke *smoke)
> +static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags)
> +#define BATCH BIT(0)
>   {
>   	struct task_struct *tsk[I915_NUM_ENGINES] = {};
>   	struct preempt_smoke arg[I915_NUM_ENGINES];
> @@ -645,6 +684,8 @@ static int smoke_crescendo(struct preempt_smoke *smoke)
>   	for_each_engine(engine, smoke->i915, id) {
>   		arg[id] = *smoke;
>   		arg[id].engine = engine;
> +		if (!(flags & BATCH))
> +			arg[id].batch = NULL;
>   		arg[id].count = 0;
>   
>   		tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
> @@ -671,37 +712,37 @@ static int smoke_crescendo(struct preempt_smoke *smoke)
>   
>   	mutex_lock(&smoke->i915->drm.struct_mutex);
>   
> -	pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
> -		count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
> +	pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n",
> +		count, flags,
> +		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
>   	return 0;
>   }
>   
> -static int smoke_random(struct preempt_smoke *smoke)
> +static int smoke_random(struct preempt_smoke *smoke, unsigned int flags)
>   {
> -	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
>   	IGT_TIMEOUT(end_time);
>   	unsigned long count;
>   
>   	count = 0;
>   	do {
> -		for_each_engine(engine, smoke->i915, id) {
> +		for_each_engine(smoke->engine, smoke->i915, id) {
>   			struct i915_gem_context *ctx = smoke_context(smoke);
> -			struct i915_request *rq;
> -
> -			ctx->sched.priority = random_priority(&smoke->prng);
> +			int err;
>   
> -			rq = i915_request_alloc(engine, ctx);
> -			if (IS_ERR(rq))
> -				return PTR_ERR(rq);
> +			err = smoke_submit(smoke,
> +					   ctx, random_priority(&smoke->prng),
> +					   flags & BATCH ? smoke->batch : NULL);
> +			if (err)
> +				return err;
>   
> -			i915_request_add(rq);
>   			count++;
>   		}
>   	} while (!__igt_timeout(end_time, NULL));
>   
> -	pr_info("Submitted %lu random requests across %d engines and %d contexts\n",
> -		count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
> +	pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n",
> +		count, flags,
> +		INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
>   	return 0;
>   }
>   
> @@ -712,7 +753,9 @@ static int live_preempt_smoke(void *arg)
>   		.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed),
>   		.ncontext = 1024,
>   	};
> +	const unsigned int phase[] = { 0, BATCH };
>   	int err = -ENOMEM;
> +	u32 *cs;
>   	int n;
>   
>   	if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915))
> @@ -727,19 +770,41 @@ static int live_preempt_smoke(void *arg)
>   	mutex_lock(&smoke.i915->drm.struct_mutex);
>   	intel_runtime_pm_get(smoke.i915);
>   
> +	smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE);
> +	if (IS_ERR(smoke.batch)) {
> +		err = PTR_ERR(smoke.batch);
> +		goto err_unlock;
> +	}
> +
> +	cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB);
> +	if (IS_ERR(cs)) {
> +		err = PTR_ERR(cs);
> +		goto err_batch;
> +	}
> +	for (n = 0; n < PAGE_SIZE / sizeof(*cs) - 1; n++)
> +		cs[n] = MI_ARB_CHECK;
> +	cs[n] = MI_BATCH_BUFFER_END;
> +	i915_gem_object_unpin_map(smoke.batch);
> +
> +	err = i915_gem_object_set_to_gtt_domain(smoke.batch, false);
> +	if (err)
> +		goto err_batch;
> +
>   	for (n = 0; n < smoke.ncontext; n++) {
>   		smoke.contexts[n] = kernel_context(smoke.i915);
>   		if (!smoke.contexts[n])
>   			goto err_ctx;
>   	}
>   
> -	err = smoke_crescendo(&smoke);
> -	if (err)
> -		goto err_ctx;
> +	for (n = 0; n < ARRAY_SIZE(phase); n++) {
> +		err = smoke_crescendo(&smoke, phase[n]);
> +		if (err)
> +			goto err_ctx;
>   
> -	err = smoke_random(&smoke);
> -	if (err)
> -		goto err_ctx;
> +		err = smoke_random(&smoke, phase[n]);
> +		if (err)
> +			goto err_ctx;
> +	}
>   
>   err_ctx:
>   	if (igt_flush_test(smoke.i915, I915_WAIT_LOCKED))
> @@ -751,6 +816,9 @@ static int live_preempt_smoke(void *arg)
>   		kernel_context_close(smoke.contexts[n]);
>   	}
>   
> +err_batch:
> +	i915_gem_object_put(smoke.batch);
> +err_unlock:
>   	intel_runtime_pm_put(smoke.i915);
>   	mutex_unlock(&smoke.i915->drm.struct_mutex);
>   	kfree(smoke.contexts);
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

* ✓ Fi.CI.IGT: success for series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads
  2018-10-01 12:32 [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads Chris Wilson
                   ` (5 preceding siblings ...)
  2018-10-01 13:16 ` [PATCH 1/4] " Tvrtko Ursulin
@ 2018-10-01 14:41 ` Patchwork
  6 siblings, 0 replies; 12+ messages in thread
From: Patchwork @ 2018-10-01 14:41 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Split preemption smoke test into threads
URL   : https://patchwork.freedesktop.org/series/50389/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_4906_full -> Patchwork_10306_full =

== Summary - SUCCESS ==

  No regressions found.

  

== Known issues ==

  Here are the changes found in Patchwork_10306_full that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@drv_suspend@shrink:
      shard-snb:          PASS -> INCOMPLETE (fdo#105411, fdo#106886)

    igt@gem_exec_await@wide-contexts:
      shard-apl:          PASS -> FAIL (fdo#106680)

    igt@gem_exec_big:
      shard-hsw:          PASS -> TIMEOUT (fdo#107937)

    igt@kms_flip@flip-vs-panning-vs-hang:
      shard-glk:          PASS -> INCOMPLETE (fdo#103359, k.org#198133)

    igt@kms_setmode@basic:
      shard-kbl:          PASS -> FAIL (fdo#99912)

    
    ==== Possible fixes ====

    igt@kms_busy@extended-pageflip-hang-newfb-render-c:
      shard-glk:          DMESG-WARN (fdo#107956) -> PASS

    igt@kms_color@pipe-c-ctm-blue-to-red:
      shard-kbl:          DMESG-WARN (fdo#103558, fdo#105602) -> PASS +24

    igt@kms_cursor_legacy@cursora-vs-flipa-toggle:
      shard-glk:          DMESG-WARN (fdo#105763, fdo#106538) -> PASS

    igt@kms_draw_crc@draw-method-xrgb8888-render-ytiled:
      shard-glk:          FAIL (fdo#103232) -> PASS

    igt@kms_flip@flip-vs-expired-vblank-interruptible:
      shard-glk:          FAIL (fdo#105363) -> PASS

    
    ==== Warnings ====

    igt@kms_available_modes_crc@available_mode_test_crc:
      shard-kbl:          DMESG-WARN (fdo#103558, fdo#105602) -> FAIL (fdo#106641)

    
  fdo#103232 https://bugs.freedesktop.org/show_bug.cgi?id=103232
  fdo#103359 https://bugs.freedesktop.org/show_bug.cgi?id=103359
  fdo#103558 https://bugs.freedesktop.org/show_bug.cgi?id=103558
  fdo#105363 https://bugs.freedesktop.org/show_bug.cgi?id=105363
  fdo#105411 https://bugs.freedesktop.org/show_bug.cgi?id=105411
  fdo#105602 https://bugs.freedesktop.org/show_bug.cgi?id=105602
  fdo#105763 https://bugs.freedesktop.org/show_bug.cgi?id=105763
  fdo#106538 https://bugs.freedesktop.org/show_bug.cgi?id=106538
  fdo#106641 https://bugs.freedesktop.org/show_bug.cgi?id=106641
  fdo#106680 https://bugs.freedesktop.org/show_bug.cgi?id=106680
  fdo#106886 https://bugs.freedesktop.org/show_bug.cgi?id=106886
  fdo#107937 https://bugs.freedesktop.org/show_bug.cgi?id=107937
  fdo#107956 https://bugs.freedesktop.org/show_bug.cgi?id=107956
  fdo#99912 https://bugs.freedesktop.org/show_bug.cgi?id=99912
  k.org#198133 https://bugzilla.kernel.org/show_bug.cgi?id=198133


== Participating hosts (6 -> 5) ==

  Missing    (1): shard-skl 


== Build changes ==

    * Linux: CI_DRM_4906 -> Patchwork_10306

  CI_DRM_4906: 187637a6495f71dd240d02badbf2fecc1e3c1bb2 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4658: cab89ce2c5da684d01deff402d4e8e11441beadb @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_10306: f6db93f476e4b04720ff6dabb561daa96b3fba6f @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_10306/shards.html

* Re: [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads
  2018-10-01 10:38 ` Tvrtko Ursulin
@ 2018-10-01 10:53   ` Chris Wilson
  0 siblings, 0 replies; 12+ messages in thread
From: Chris Wilson @ 2018-10-01 10:53 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2018-10-01 11:38:37)
> 
> On 28/09/2018 14:58, Chris Wilson wrote:
> > When submitting chains to each engine, we can do so (mostly) in
> > parallel, so delegate submission to threads on a per-engine basis.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >   drivers/gpu/drm/i915/selftests/intel_lrc.c | 73 ++++++++++++++++++----
> >   1 file changed, 61 insertions(+), 12 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> > index 3a474bb64c05..d68a924c530e 100644
> > --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> > @@ -587,8 +587,10 @@ static int random_priority(struct rnd_state *rnd)
> >   struct preempt_smoke {
> >       struct drm_i915_private *i915;
> >       struct i915_gem_context **contexts;
> > +     struct intel_engine_cs *engine;
> >       unsigned int ncontext;
> >       struct rnd_state prng;
> > +     unsigned long count;
> >   };
> >   
> >   static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
> > @@ -597,31 +599,78 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
> >                                                         &smoke->prng)];
> >   }
> >   
> > +static int smoke_crescendo_thread(void *arg)
> > +{
> > +     struct preempt_smoke *smoke = arg;
> > +     IGT_TIMEOUT(end_time);
> > +     unsigned long count;
> > +
> > +     count = 0;
> > +     do {
> > +             struct i915_gem_context *ctx = smoke_context(smoke);
> > +             struct i915_request *rq;
> > +
> > +             mutex_lock(&smoke->i915->drm.struct_mutex);
> > +
> > +             ctx->sched.priority = count % I915_PRIORITY_MAX;
> > +
> > +             rq = i915_request_alloc(smoke->engine, ctx);
> > +             if (IS_ERR(rq)) {
> > +                     mutex_unlock(&smoke->i915->drm.struct_mutex);
> > +                     return PTR_ERR(rq);
> > +             }
> > +
> > +             i915_request_add(rq);
> > +
> > +             mutex_unlock(&smoke->i915->drm.struct_mutex);
> > +
> > +             count++;
> 
> Very little outside the mutex so I am not sure if parallelization will 
> work that well. Every thread could probably fill the ring in its
> timeslice?

Very unlikely due to the randomised ring. And we are working on that,
right? :)

> And then it blocks the others until there is space. It will 
> heavily rely on scheduler behaviour and mutex fairness I think.

But it does bring the overall subtest time down from num_engines seconds
to 1s, for the same pattern.
-Chris

* Re: [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads
  2018-09-28 13:58 [PATCH 1/4] " Chris Wilson
@ 2018-10-01 10:38 ` Tvrtko Ursulin
  2018-10-01 10:53   ` Chris Wilson
  0 siblings, 1 reply; 12+ messages in thread
From: Tvrtko Ursulin @ 2018-10-01 10:38 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 28/09/2018 14:58, Chris Wilson wrote:
> When submitting chains to each engine, we can do so (mostly) in
> parallel, so delegate submission to threads on a per-engine basis.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/selftests/intel_lrc.c | 73 ++++++++++++++++++----
>   1 file changed, 61 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> index 3a474bb64c05..d68a924c530e 100644
> --- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
> @@ -587,8 +587,10 @@ static int random_priority(struct rnd_state *rnd)
>   struct preempt_smoke {
>   	struct drm_i915_private *i915;
>   	struct i915_gem_context **contexts;
> +	struct intel_engine_cs *engine;
>   	unsigned int ncontext;
>   	struct rnd_state prng;
> +	unsigned long count;
>   };
>   
>   static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
> @@ -597,31 +599,78 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
>   							  &smoke->prng)];
>   }
>   
> +static int smoke_crescendo_thread(void *arg)
> +{
> +	struct preempt_smoke *smoke = arg;
> +	IGT_TIMEOUT(end_time);
> +	unsigned long count;
> +
> +	count = 0;
> +	do {
> +		struct i915_gem_context *ctx = smoke_context(smoke);
> +		struct i915_request *rq;
> +
> +		mutex_lock(&smoke->i915->drm.struct_mutex);
> +
> +		ctx->sched.priority = count % I915_PRIORITY_MAX;
> +
> +		rq = i915_request_alloc(smoke->engine, ctx);
> +		if (IS_ERR(rq)) {
> +			mutex_unlock(&smoke->i915->drm.struct_mutex);
> +			return PTR_ERR(rq);
> +		}
> +
> +		i915_request_add(rq);
> +
> +		mutex_unlock(&smoke->i915->drm.struct_mutex);
> +
> +		count++;

Very little happens outside the mutex, so I am not sure parallelization
will work that well. Every thread could probably fill the ring in its
timeslice? And then it blocks the others until there is space. It will
heavily rely on scheduler behaviour and mutex fairness, I think.

Regards,

Tvrtko
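
(A minimal userspace sketch of that concern, using a single pthread mutex in
the role of struct_mutex; it is not the i915 code. When essentially all of an
iteration runs under one shared lock, adding workers does not raise the
aggregate iteration rate; it mostly decides who holds the lock next, which is
exactly the scheduler/fairness dependence described above.)

/* Userspace analogue only: all useful work happens under one big lock.
 * Build with: cc -O2 -pthread contention_sketch.c
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 4
#define ITERS_PER_WORKER 200

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* plays struct_mutex */
static unsigned long total;

static void *worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < ITERS_PER_WORKER; i++) {
		pthread_mutex_lock(&big_lock);
		usleep(1000); /* the "request submission": all of it under the lock */
		total++;
		pthread_mutex_unlock(&big_lock);
	}
	return NULL;
}

int main(void)
{
	pthread_t tsk[NWORKERS];
	int i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tsk[i], NULL, worker, NULL);
	for (i = 0; i < NWORKERS; i++)
		pthread_join(tsk[i], NULL);

	/* Total runtime scales with NWORKERS * ITERS_PER_WORKER: the lock
	 * serialises the workers even though they run on separate threads.
	 */
	printf("total iterations: %lu\n", total);
	return 0;
}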

> +	} while (!__igt_timeout(end_time, NULL));
> +
> +	smoke->count = count;
> +	return 0;
> +}
> +
>   static int smoke_crescendo(struct preempt_smoke *smoke)
>   {
> +	struct task_struct *tsk[I915_NUM_ENGINES] = {};
> +	struct preempt_smoke arg[I915_NUM_ENGINES];
>   	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
>   	unsigned long count;
> +	int err = 0;
> +
> +	mutex_unlock(&smoke->i915->drm.struct_mutex);
>   
> -	count = 0;
>   	for_each_engine(engine, smoke->i915, id) {
> -		IGT_TIMEOUT(end_time);
> +		arg[id] = *smoke;
> +		arg[id].engine = engine;
> +		arg[id].count = 0;
> +
> +		tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
> +				      "igt/smoke:%d", id);
> +		if (IS_ERR(tsk[id])) {
> +			err = PTR_ERR(tsk[id]);
> +			break;
> +		}
> +	}
>   
> -		do {
> -			struct i915_gem_context *ctx = smoke_context(smoke);
> -			struct i915_request *rq;
> +	count = 0;
> +	for_each_engine(engine, smoke->i915, id) {
> +		int status;
>   
> -			ctx->sched.priority = count % I915_PRIORITY_MAX;
> +		if (IS_ERR_OR_NULL(tsk[id]))
> +			continue;
>   
> -			rq = i915_request_alloc(engine, ctx);
> -			if (IS_ERR(rq))
> -				return PTR_ERR(rq);
> +		status = kthread_stop(tsk[id]);
> +		if (status && !err)
> +			err = status;
>   
> -			i915_request_add(rq);
> -			count++;
> -		} while (!__igt_timeout(end_time, NULL));
> +		count += arg[id].count;
>   	}
>   
> +	mutex_lock(&smoke->i915->drm.struct_mutex);
> +
>   	pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
>   		count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
>   	return 0;
> 

* [PATCH 1/4] drm/i915/selftests: Split preemption smoke test into threads
@ 2018-09-28 13:58 Chris Wilson
  2018-10-01 10:38 ` Tvrtko Ursulin
  0 siblings, 1 reply; 12+ messages in thread
From: Chris Wilson @ 2018-09-28 13:58 UTC (permalink / raw)
  To: intel-gfx

When submitting chains to each engine, we can do so (mostly) in
parallel, so delegate submission to threads on a per-engine basis.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 73 ++++++++++++++++++----
 1 file changed, 61 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 3a474bb64c05..d68a924c530e 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -587,8 +587,10 @@ static int random_priority(struct rnd_state *rnd)
 struct preempt_smoke {
 	struct drm_i915_private *i915;
 	struct i915_gem_context **contexts;
+	struct intel_engine_cs *engine;
 	unsigned int ncontext;
 	struct rnd_state prng;
+	unsigned long count;
 };
 
 static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
@@ -597,31 +599,78 @@ static struct i915_gem_context *smoke_context(struct preempt_smoke *smoke)
 							  &smoke->prng)];
 }
 
+static int smoke_crescendo_thread(void *arg)
+{
+	struct preempt_smoke *smoke = arg;
+	IGT_TIMEOUT(end_time);
+	unsigned long count;
+
+	count = 0;
+	do {
+		struct i915_gem_context *ctx = smoke_context(smoke);
+		struct i915_request *rq;
+
+		mutex_lock(&smoke->i915->drm.struct_mutex);
+
+		ctx->sched.priority = count % I915_PRIORITY_MAX;
+
+		rq = i915_request_alloc(smoke->engine, ctx);
+		if (IS_ERR(rq)) {
+			mutex_unlock(&smoke->i915->drm.struct_mutex);
+			return PTR_ERR(rq);
+		}
+
+		i915_request_add(rq);
+
+		mutex_unlock(&smoke->i915->drm.struct_mutex);
+
+		count++;
+	} while (!__igt_timeout(end_time, NULL));
+
+	smoke->count = count;
+	return 0;
+}
+
 static int smoke_crescendo(struct preempt_smoke *smoke)
 {
+	struct task_struct *tsk[I915_NUM_ENGINES] = {};
+	struct preempt_smoke arg[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 	unsigned long count;
+	int err = 0;
+
+	mutex_unlock(&smoke->i915->drm.struct_mutex);
 
-	count = 0;
 	for_each_engine(engine, smoke->i915, id) {
-		IGT_TIMEOUT(end_time);
+		arg[id] = *smoke;
+		arg[id].engine = engine;
+		arg[id].count = 0;
+
+		tsk[id] = kthread_run(smoke_crescendo_thread, &arg,
+				      "igt/smoke:%d", id);
+		if (IS_ERR(tsk[id])) {
+			err = PTR_ERR(tsk[id]);
+			break;
+		}
+	}
 
-		do {
-			struct i915_gem_context *ctx = smoke_context(smoke);
-			struct i915_request *rq;
+	count = 0;
+	for_each_engine(engine, smoke->i915, id) {
+		int status;
 
-			ctx->sched.priority = count % I915_PRIORITY_MAX;
+		if (IS_ERR_OR_NULL(tsk[id]))
+			continue;
 
-			rq = i915_request_alloc(engine, ctx);
-			if (IS_ERR(rq))
-				return PTR_ERR(rq);
+		status = kthread_stop(tsk[id]);
+		if (status && !err)
+			err = status;
 
-			i915_request_add(rq);
-			count++;
-		} while (!__igt_timeout(end_time, NULL));
+		count += arg[id].count;
 	}
 
+	mutex_lock(&smoke->i915->drm.struct_mutex);
+
 	pr_info("Submitted %lu crescendo requests across %d engines and %d contexts\n",
 		count, INTEL_INFO(smoke->i915)->num_rings, smoke->ncontext);
 	return 0;
-- 
2.19.0


