* [Intel-gfx] [PATCH 1/4] drm/i915/selftests: Add tests for timeslicing virtual engines
@ 2020-05-14 16:54 Chris Wilson
  2020-05-14 16:54 ` [Intel-gfx] [PATCH 2/4] drm/i915/gt: Kick virtual siblings on timeslice out Chris Wilson
                   ` (5 more replies)
  0 siblings, 6 replies; 8+ messages in thread
From: Chris Wilson @ 2020-05-14 16:54 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

Make sure that we can execute a virtual request on an already busy
engine, and conversely that we can execute a normal request if the
engines are already fully occupied by virtual requests.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/selftest_lrc.c | 177 +++++++++++++++++++++++++
 1 file changed, 177 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 824f99c4cc7c..523c05a3c7f8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -3766,6 +3766,182 @@ static int live_virtual_mask(void *arg)
 	return 0;
 }
 
+static int slicein_virtual_engine(struct intel_gt *gt,
+				  struct intel_engine_cs **siblings,
+				  unsigned int nsibling)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct igt_spinner spin;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * Virtual requests must take part in timeslicing on the target engines.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for (n = 0; n < nsibling; n++) {
+		ce = intel_context_create(siblings[n]);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_add(rq);
+	}
+
+	ce = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out;
+	}
+
+	rq = intel_context_create_request(ce);
+	intel_context_put(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out;
+	}
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+		GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+			  __func__, rq->engine->name);
+		GEM_TRACE_DUMP();
+		intel_gt_set_wedged(gt);
+		err = -EIO;
+	}
+	i915_request_put(rq);
+
+out:
+	igt_spinner_end(&spin);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int sliceout_virtual_engine(struct intel_gt *gt,
+				   struct intel_engine_cs **siblings,
+				   unsigned int nsibling)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+	struct igt_spinner spin;
+	unsigned int n;
+	int err = 0;
+
+	/*
+	 * Virtual requests must allow others a fair timeslice.
+	 */
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for (n = 0; n < nsibling; n++) {
+		ce = intel_execlists_create_virtual(siblings, nsibling);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			goto out;
+		}
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		intel_context_put(ce);
+
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+
+		i915_request_add(rq);
+	}
+
+	ce = intel_execlists_create_virtual(siblings, nsibling);
+	if (IS_ERR(ce)) {
+		err = PTR_ERR(ce);
+		goto out;
+	}
+
+	rq = intel_context_create_request(ce);
+	intel_context_put(ce);
+
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out;
+	}
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (i915_request_wait(rq, 0, HZ / 10) < 0) {
+		GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+				__func__, rq->engine->name);
+		GEM_TRACE_DUMP();
+		intel_gt_set_wedged(gt);
+		err = -EIO;
+	}
+	i915_request_put(rq);
+
+out:
+	igt_spinner_end(&spin);
+	if (igt_flush_test(gt->i915))
+		err = -EIO;
+	igt_spinner_fini(&spin);
+	return err;
+}
+
+static int live_virtual_slice(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+	unsigned int class, inst;
+	int err;
+
+	if (intel_uc_uses_guc_submission(&gt->uc))
+		return 0;
+
+	for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+		unsigned int nsibling;
+
+		nsibling = 0;
+		for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+			struct intel_engine_cs *engine;
+
+			engine = gt->engine_class[class][inst];
+			if (!engine)
+				break;
+
+			if (!intel_engine_has_timeslices(engine))
+				continue;
+
+			siblings[nsibling++] = engine;
+		}
+		if (nsibling < 2)
+			continue;
+
+		err = slicein_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+
+		err = sliceout_virtual_engine(gt, siblings, nsibling);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int preserved_virtual_engine(struct intel_gt *gt,
 				    struct intel_engine_cs **siblings,
 				    unsigned int nsibling)
@@ -4329,6 +4505,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_virtual_engine),
 		SUBTEST(live_virtual_mask),
 		SUBTEST(live_virtual_preserved),
+		SUBTEST(live_virtual_slice),
 		SUBTEST(live_virtual_bond),
 		SUBTEST(live_virtual_reset),
 	};
-- 
2.20.1


* [Intel-gfx] [PATCH 2/4] drm/i915/gt: Kick virtual siblings on timeslice out
  2020-05-14 16:54 [Intel-gfx] [PATCH 1/4] drm/i915/selftests: Add tests for timeslicing virtual engines Chris Wilson
@ 2020-05-14 16:54 ` Chris Wilson
  2020-05-14 16:54 ` [Intel-gfx] [PATCH 3/4] drm/i915/gt: Incorporate the virtual engine into timeslicing Chris Wilson
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-05-14 16:54 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

If we decide to timeslice out the current virtual request, we will
unsubmit it while it is still busy (ve->context.inflight == sibling[0]).
If the virtual tasklet and then the other sibling tasklets run before we
completely schedule out the active virtual request for the preemption,
those other tasklets will see that the virtual request is still inflight
on sibling[0] and leave it be. Therefore, when we finally schedule out
the virtual request, if we see that we have passed it back to the
virtual engine, reschedule the virtual tasklet so that it may be
resubmitted on any of the siblings.
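
For illustration only (not part of the patch), here is a stand-alone
userspace toy of the new condition; the request struct and masks are
made up, only the predicate mirrors the one-liner below:

/* toy model, not kernel code: when to kick the virtual tasklet */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct toy_request { unsigned int execution_mask; };

static bool should_kick(const struct toy_request *next,
			const struct toy_request *rq)
{
	/*
	 * Kick if the request we just scheduled out was handed back to the
	 * virtual engine (next == rq), or if the queued virtual request may
	 * run on a sibling other than the one rq last ran on.
	 */
	return next == rq ||
	       (next && (next->execution_mask & ~rq->execution_mask));
}

int main(void)
{
	struct toy_request rq   = { .execution_mask = 0x1 }; /* vcs0 only */
	struct toy_request next = { .execution_mask = 0x3 }; /* vcs0 | vcs1 */

	printf("%d\n", should_kick(&rq, &rq));   /* 1: passed back to veng */
	printf("%d\n", should_kick(&next, &rq)); /* 1: may still run on vcs1 */
	printf("%d\n", should_kick(NULL, &rq));  /* 0: nothing queued */
	return 0;
}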

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 87e6c5bdd2dc..d550fe871be5 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1402,7 +1402,7 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
 	struct i915_request *next = READ_ONCE(ve->request);
 
-	if (next && next->execution_mask & ~rq->execution_mask)
+	if (next == rq || (next && next->execution_mask & ~rq->execution_mask))
 		tasklet_schedule(&ve->base.execlists.tasklet);
 }
 
-- 
2.20.1


* [Intel-gfx] [PATCH 3/4] drm/i915/gt: Incorporate the virtual engine into timeslicing
  2020-05-14 16:54 [Intel-gfx] [PATCH 1/4] drm/i915/selftests: Add tests for timeslicing virtual engines Chris Wilson
  2020-05-14 16:54 ` [Intel-gfx] [PATCH 2/4] drm/i915/gt: Kick virtual siblings on timeslice out Chris Wilson
@ 2020-05-14 16:54 ` Chris Wilson
  2020-05-14 16:54 ` [Intel-gfx] [PATCH 4/4] drm/i915/execlists: Optimise away false timeslicing on virtual engines Chris Wilson
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-05-14 16:54 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

It was quite the oversight to only factor in the normal queue to decide
the timeslicing switch priority. By leaving out the next virtual request
from the priority decision, we would not timeslice the current engine if
there was an available virtual request.
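
As a stand-alone toy of that decision (grossly simplified: the real
code also folds in the next request on the engine and compares against
effective_prio()), ignoring the waiting virtual request means an empty
normal queue never triggers a slice:

/* toy model, not kernel code */
#include <limits.h>
#include <stdio.h>

static int max_prio(int a, int b) { return a > b ? a : b; }

static int need_timeslice(int queue_hint, int virtual_prio, int active_prio)
{
	/* with this patch, the next virtual request also raises the hint */
	int hint = max_prio(queue_hint, virtual_prio);

	return hint >= active_prio;
}

int main(void)
{
	/* empty normal queue (hint == INT_MIN), equal-priority virtual rq */
	printf("old: %d\n", need_timeslice(INT_MIN, INT_MIN, 0)); /* 0 */
	printf("new: %d\n", need_timeslice(INT_MIN, 0, 0));       /* 1 */
	return 0;
}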

Testcase: igt/gem_exec_balancer/sliced
Fixes: 3df2deed411e ("drm/i915/execlists: Enable timeslice on partial virtual engine dequeue")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 34 +++++++++++++++++++++++------
 1 file changed, 27 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index d550fe871be5..398f597b15a3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1895,7 +1895,8 @@ static void defer_active(struct intel_engine_cs *engine)
 
 static bool
 need_timeslice(const struct intel_engine_cs *engine,
-	       const struct i915_request *rq)
+	       const struct i915_request *rq,
+	       const struct rb_node *rb)
 {
 	int hint;
 
@@ -1903,6 +1904,24 @@ need_timeslice(const struct intel_engine_cs *engine,
 		return false;
 
 	hint = engine->execlists.queue_priority_hint;
+
+	if (rb) {
+		const struct virtual_engine *ve =
+			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+		const struct intel_engine_cs *inflight =
+			intel_context_inflight(&ve->context);
+
+		if (!inflight || inflight == engine) {
+			struct i915_request *next;
+
+			rcu_read_lock();
+			next = READ_ONCE(ve->request);
+			if (next)
+				hint = max(hint, rq_prio(next));
+			rcu_read_unlock();
+		}
+	}
+
 	if (!list_is_last(&rq->sched.link, &engine->active.requests))
 		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 
@@ -1977,10 +1996,9 @@ static void set_timeslice(struct intel_engine_cs *engine)
 	set_timer_ms(&engine->execlists.timer, duration);
 }
 
-static void start_timeslice(struct intel_engine_cs *engine)
+static void start_timeslice(struct intel_engine_cs *engine, int prio)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
-	const int prio = queue_prio(execlists);
 	unsigned long duration;
 
 	if (!intel_engine_has_timeslices(engine))
@@ -2140,7 +2158,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			__unwind_incomplete_requests(engine);
 
 			last = NULL;
-		} else if (need_timeslice(engine, last) &&
+		} else if (need_timeslice(engine, last, rb) &&
 			   timeslice_expired(execlists, last)) {
 			if (i915_request_completed(last)) {
 				tasklet_hi_schedule(&execlists->tasklet);
@@ -2188,7 +2206,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				start_timeslice(engine);
+				start_timeslice(engine, queue_prio(execlists));
 				return;
 			}
 		}
@@ -2223,7 +2241,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
-				start_timeslice(engine);
+				start_timeslice(engine, rq_prio(rq));
 				return; /* leave this for another sibling */
 			}
 
@@ -5519,7 +5537,9 @@ static void virtual_submission_tasklet(unsigned long data)
 submit_engine:
 		GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
 		node->prio = prio;
-		if (first && prio > sibling->execlists.queue_priority_hint)
+		if (first &&
+		    prio >= min(sibling->execlists.switch_priority_hint,
+				sibling->execlists.queue_priority_hint))
 			tasklet_hi_schedule(&sibling->execlists.tasklet);
 
 		spin_unlock(&sibling->active.lock);
-- 
2.20.1


* [Intel-gfx] [PATCH 4/4] drm/i915/execlists: Optimise away false timeslicing on virtual engines
  2020-05-14 16:54 [Intel-gfx] [PATCH 1/4] drm/i915/selftests: Add tests for timeslicing virtual engines Chris Wilson
  2020-05-14 16:54 ` [Intel-gfx] [PATCH 2/4] drm/i915/gt: Kick virtual siblings on timeslice out Chris Wilson
  2020-05-14 16:54 ` [Intel-gfx] [PATCH 3/4] drm/i915/gt: Incorporate the virtual engine into timeslicing Chris Wilson
@ 2020-05-14 16:54 ` Chris Wilson
  2020-05-15  9:18   ` Chris Wilson
  2020-05-15 15:45 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Add tests for timeslicing " Patchwork
                   ` (2 subsequent siblings)
  5 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2020-05-14 16:54 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

The virtual timeslicing failed under a series of unfortunate slices.
Suppose the user submitted a virtual request over [vcs0, vcs1], and also
a request to vcs0 that depended upon the result of the virtual request.

submit {veng, vcs0}
time slice expires: unsubmit vcs0, veng
submit { vcs0 } # no timeslicing
virtual tasklet requeues veng

veng saw that it was still in flight on vcs0 and did not queue onto
vcs1, and on vcs0 it saw no reason to preempt the active request.
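
For context (not part of this patch), a rough sketch of how userspace
sets up that scenario through the engine-map uAPI, along the lines of
what igt/gem_exec_balancer does. Error handling and fd/context creation
are omitted and the slot layout is only illustrative:

/* build roughly with: cc $(pkg-config --cflags libdrm) balance.c */
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void set_balanced_engines(int fd, uint32_t ctx)
{
	/* Load-balance extension filling slot 0 with veng{vcs0, vcs1}. */
	I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, 2) = {
		.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE,
		.engine_index = 0,
		.num_siblings = 2,
		.engines = {
			{ I915_ENGINE_CLASS_VIDEO, 0 }, /* vcs0 */
			{ I915_ENGINE_CLASS_VIDEO, 1 }, /* vcs1 */
		},
	};

	/* Engine map: slot 0 left invalid for the balancer, slot 1 = vcs0. */
	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
		.extensions = (uintptr_t)&balance,
		.engines = {
			{ I915_ENGINE_CLASS_INVALID,
			  I915_ENGINE_CLASS_INVALID_NONE },
			{ I915_ENGINE_CLASS_VIDEO, 0 }, /* vcs0 */
		},
	};

	struct drm_i915_gem_context_param param = {
		.ctx_id = ctx,
		.param = I915_CONTEXT_PARAM_ENGINES,
		.size = sizeof(engines),
		.value = (uintptr_t)&engines,
	};

	ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &param);

	/*
	 * Execbufs with flags = 0 now go to the virtual engine and may land
	 * on either vcs0 or vcs1; flags = 1 pins work to vcs0, e.g. for the
	 * request that depends on the virtual request's result.
	 */
}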

Patch "drm/i915/gt: Kick virtual siblings on timeslice out" fixes not
waking up the siblings after timeslicing, and patch "drm/i915/gt:
Incorporate the virtual engine into timeslicing" fixes the issue of not
restarting timeslicing after the replacement of veng.

After applying those patches, we should then expire the timeslice on
vcs0, resubmit {veng, vcs0} and reschedule the timeslices, repeating
the sequence until veng finally completes. Our usual strategy for
recognising such data-dependent timeslicing is to notice the
resubmission of the same pair of requests and so skip the ELSP write,
leaving them executing on the HW. (This also disables timeslicing until
either completes or we see a new request.)
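
Purely as a simplified stand-alone model (the real bookkeeping lives in
execlists_dequeue() and the pending/inflight tracking), that "same pair"
recognition amounts to:

/* toy model, not kernel code */
#include <stdio.h>

struct toy_request { int id; };

static int elsp_needs_write(const struct toy_request *pending[2],
			    const struct toy_request *inflight[2])
{
	/* identical resubmission: leave the HW alone, stop timeslicing */
	return pending[0] != inflight[0] || pending[1] != inflight[1];
}

int main(void)
{
	struct toy_request veng = { 0 }, vcs0_rq = { 1 };
	const struct toy_request *inflight[2] = { &veng, &vcs0_rq };
	const struct toy_request *same[2] = { &veng, &vcs0_rq };
	const struct toy_request *other[2] = { &vcs0_rq, NULL };

	printf("%d\n", elsp_needs_write(same, inflight));  /* 0: skip write */
	printf("%d\n", elsp_needs_write(other, inflight)); /* 1: must write */
	return 0;
}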

In order for the virtual request to be resubmitted immediately after
expiring the timeslice, we need to ensure that we make the virtual
engine immediately available again for submission in this execlists
tasklet.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 102 +++++++++++++++++-----------
 1 file changed, 63 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 398f597b15a3..c0666229cf9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -451,7 +451,7 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
 
 static inline bool need_preempt(const struct intel_engine_cs *engine,
 				const struct i915_request *rq,
-				struct rb_node *rb)
+				struct virtual_engine *ve)
 {
 	int last_prio;
 
@@ -488,9 +488,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 	    rq_prio(list_next_entry(rq, sched.link)) > last_prio)
 		return true;
 
-	if (rb) {
-		struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+	if (ve) {
 		bool preempt = false;
 
 		if (engine == ve->siblings[0]) { /* only preempt one sibling */
@@ -1099,6 +1097,23 @@ static const u8 *reg_offsets(const struct intel_engine_cs *engine)
 	}
 }
 
+static void resubmit_virtual_request(struct intel_engine_cs *engine,
+				     struct i915_request *rq)
+{
+	struct ve_node * const node =
+		&to_virtual_engine(rq->engine)->nodes[engine->id];
+	struct intel_engine_execlists *el = &engine->execlists;
+
+	if (!RB_EMPTY_ROOT(&el->virtual.rb_root))
+		return;
+
+	GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+
+	rb_link_node(&node->rb, NULL, &el->virtual.rb_root.rb_node);
+	rb_insert_color_cached(&node->rb, &el->virtual, true);
+	node->prio = rq_prio(rq);
+}
+
 static struct i915_request *
 __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
@@ -1153,6 +1168,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 				spin_unlock(&rq->lock);
 			}
 			WRITE_ONCE(rq->engine, owner);
+			resubmit_virtual_request(engine, rq);
 			owner->submit_request(rq);
 			active = NULL;
 		}
@@ -1812,6 +1828,35 @@ static bool virtual_matches(const struct virtual_engine *ve,
 	return true;
 }
 
+static struct virtual_engine *
+first_virtual_engine(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *el = &engine->execlists;
+	struct rb_node *rb = rb_first_cached(&el->virtual);
+
+	while (rb) {
+		struct virtual_engine *ve =
+			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+		struct i915_request *rq = READ_ONCE(ve->request);
+
+		if (!rq) { /* lazily cleanup after another engine handled rq */
+			rb_erase_cached(rb, &el->virtual);
+			RB_CLEAR_NODE(rb);
+			rb = rb_first_cached(&el->virtual);
+			continue;
+		}
+
+		if (!virtual_matches(ve, rq, engine)) {
+			rb = rb_next(rb);
+			continue;
+		}
+
+		return ve;
+	}
+
+	return NULL;
+}
+
 static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
 {
 	/*
@@ -1896,7 +1941,7 @@ static void defer_active(struct intel_engine_cs *engine)
 static bool
 need_timeslice(const struct intel_engine_cs *engine,
 	       const struct i915_request *rq,
-	       const struct rb_node *rb)
+	       struct virtual_engine *ve)
 {
 	int hint;
 
@@ -1905,9 +1950,7 @@ need_timeslice(const struct intel_engine_cs *engine,
 
 	hint = engine->execlists.queue_priority_hint;
 
-	if (rb) {
-		const struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+	if (ve) {
 		const struct intel_engine_cs *inflight =
 			intel_context_inflight(&ve->context);
 
@@ -2059,6 +2102,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct i915_request ** const last_port = port + execlists->port_mask;
 	struct i915_request * const *active;
 	struct i915_request *last;
+	struct virtual_engine *ve;
 	struct rb_node *rb;
 	bool submit = false;
 
@@ -2084,25 +2128,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 * and context switches) submission.
 	 */
 
-	for (rb = rb_first_cached(&execlists->virtual); rb; ) {
-		struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
-		struct i915_request *rq = READ_ONCE(ve->request);
-
-		if (!rq) { /* lazily cleanup after another engine handled rq */
-			rb_erase_cached(rb, &execlists->virtual);
-			RB_CLEAR_NODE(rb);
-			rb = rb_first_cached(&execlists->virtual);
-			continue;
-		}
-
-		if (!virtual_matches(ve, rq, engine)) {
-			rb = rb_next(rb);
-			continue;
-		}
-
-		break;
-	}
 
 	/*
 	 * If the queue is higher priority than the last
@@ -2127,7 +2152,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 */
 
 	if ((last = *active)) {
-		if (need_preempt(engine, last, rb)) {
+		ve = first_virtual_engine(engine);
+
+		if (need_preempt(engine, last, ve)) {
 			if (i915_request_completed(last)) {
 				tasklet_hi_schedule(&execlists->tasklet);
 				return;
@@ -2158,7 +2185,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			__unwind_incomplete_requests(engine);
 
 			last = NULL;
-		} else if (need_timeslice(engine, last, rb) &&
+		} else if (need_timeslice(engine, last, ve) &&
 			   timeslice_expired(execlists, last)) {
 			if (i915_request_completed(last)) {
 				tasklet_hi_schedule(&execlists->tasklet);
@@ -2212,9 +2239,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		}
 	}
 
-	while (rb) { /* XXX virtual is always taking precedence */
-		struct virtual_engine *ve =
-			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+	/* XXX virtual is always taking precedence */
+	while ((ve = first_virtual_engine(engine))) {
 		struct i915_request *rq;
 
 		spin_lock(&ve->base.active.lock);
@@ -2222,9 +2248,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		rq = ve->request;
 		if (unlikely(!rq)) { /* lost the race to a sibling */
 			spin_unlock(&ve->base.active.lock);
+
+			rb = &ve->nodes[engine->id].rb;
 			rb_erase_cached(rb, &execlists->virtual);
 			RB_CLEAR_NODE(rb);
-			rb = rb_first_cached(&execlists->virtual);
 			continue;
 		}
 
@@ -2233,11 +2260,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		GEM_BUG_ON(rq->context != &ve->context);
 
 		if (rq_prio(rq) >= queue_prio(execlists)) {
-			if (!virtual_matches(ve, rq, engine)) {
-				spin_unlock(&ve->base.active.lock);
-				rb = rb_next(rb);
-				continue;
-			}
+			GEM_BUG_ON(!virtual_matches(ve, rq, engine));
 
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
@@ -2257,6 +2280,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			WRITE_ONCE(ve->request, NULL);
 			WRITE_ONCE(ve->base.execlists.queue_priority_hint,
 				   INT_MIN);
+
+			rb = &ve->nodes[engine->id].rb;
 			rb_erase_cached(rb, &execlists->virtual);
 			RB_CLEAR_NODE(rb);
 
@@ -2309,7 +2334,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 */
 			if (!submit) {
 				spin_unlock(&ve->base.active.lock);
-				rb = rb_first_cached(&execlists->virtual);
 				continue;
 			}
 		}
-- 
2.20.1


* Re: [Intel-gfx] [PATCH 4/4] drm/i915/execlists: Optimise away false timeslicing on virtual engines
  2020-05-14 16:54 ` [Intel-gfx] [PATCH 4/4] drm/i915/execlists: Optimise away false timeslicing on virtual engines Chris Wilson
@ 2020-05-15  9:18   ` Chris Wilson
  0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-05-15  9:18 UTC (permalink / raw)
  To: intel-gfx

Quoting Chris Wilson (2020-05-14 17:54:36)
> The virtual timeslicing failed under a series of unfortunate slices.
> Suppose the user submitted a virtual request, over [vcs0, vcs1] and also
> a request that dependent upon the result of the virtal request to vcs0.
> 
> submit {veng, vcs0}
> time slice expires: unsubmit vcs0, veng
> submit { vcs0 } # no timeslicing
> virtual tasklet requeues veng
> 
> veng saw that it was still in flight on vcs0 and did not queue onto
> vcs1, and on vcs0 it saw no reason to preempt the active request.
> 
> Patch "drm/i915/gt: Kick virtual siblings on timeslice out" fixes not
> waking up the siblings after timeslicing, and patch "drm/i915/gt:
> Incorporate the virtual engine into timeslicing" fixes the issue of not
> restarting timeslicing after the replacement of veng.
> 
> After applying those patches, we should then expire the timeslice on
> vcs0 and resubmit {veng, vcs0} and reschedule the timeslices. Repeating
> the sequence until the veng finally completes. Our usual strategy to
> recognise the data dependent timeslicing is to notice the resubmission
> of the same pair of requests, and so skip the ELSP write, leaving them
> executing on the HW. (This will also then disable the timeslicing until
> either completes or we see a new request.)
> 
> In order for the virtual request to be resubmitted immediately after
> expiring the timeslice, we need to ensure that we make the virtual
> engine immediately available again for submission in this execlists
> tasklet.

Of course Tvrtko has examples where mixing load-balanced requests with
explicit engine requests requires us to migrate on timeslice expiry. I
hear the nails being driven into the

> -       while (rb) { /* XXX virtual is always taking precedence */
> -               struct virtual_engine *ve =
> -                       rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
> +       /* XXX virtual is always taking precedence */
> +       while ((ve = first_virtual_engine(engine))) {

coffin.
-Chris

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Add tests for timeslicing virtual engines
  2020-05-14 16:54 [Intel-gfx] [PATCH 1/4] drm/i915/selftests: Add tests for timeslicing virtual engines Chris Wilson
                   ` (2 preceding siblings ...)
  2020-05-14 16:54 ` [Intel-gfx] [PATCH 4/4] drm/i915/execlists: Optimise away false timeslicing on virtual engines Chris Wilson
@ 2020-05-15 15:45 ` Patchwork
  2020-05-15 16:09 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
  2020-05-15 19:05 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
  5 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-05-15 15:45 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Add tests for timeslicing virtual engines
URL   : https://patchwork.freedesktop.org/series/77274/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
175d05822d38 drm/i915/selftests: Add tests for timeslicing virtual engines
-:72: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#72: FILE: drivers/gpu/drm/i915/gt/selftest_lrc.c:3821:
+		GEM_TRACE_ERR("%s(%s) failed to slice in virtual request\n",
+			  __func__, rq->engine->name);

-:140: CHECK:PARENTHESIS_ALIGNMENT: Alignment should match open parenthesis
#140: FILE: drivers/gpu/drm/i915/gt/selftest_lrc.c:3889:
+		GEM_TRACE_ERR("%s(%s) failed to slice out virtual request\n",
+				__func__, siblings[n]->name);

total: 0 errors, 0 warnings, 2 checks, 189 lines checked
155df023a532 drm/i915/gt: Kick virtual siblings on timeslice out
408949995022 drm/i915/gt: Incorporate the virtual engine into timeslicing
c391d6f84f90 drm/i915/execlists: Optimise away false timeslicing on virtual engines
-:9: WARNING:TYPO_SPELLING: 'virtal' may be misspelled - perhaps 'virtual'?
#9: 
a request that dependent upon the result of the virtal request to vcs0.

total: 0 errors, 1 warnings, 0 checks, 200 lines checked


* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/4] drm/i915/selftests: Add tests for timeslicing virtual engines
  2020-05-14 16:54 [Intel-gfx] [PATCH 1/4] drm/i915/selftests: Add tests for timeslicing virtual engines Chris Wilson
                   ` (3 preceding siblings ...)
  2020-05-15 15:45 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/selftests: Add tests for timeslicing " Patchwork
@ 2020-05-15 16:09 ` Patchwork
  2020-05-15 19:05 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
  5 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-05-15 16:09 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Add tests for timeslicing virtual engines
URL   : https://patchwork.freedesktop.org/series/77274/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_8488 -> Patchwork_17666
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/index.html

Known issues
------------

  Here are the changes found in Patchwork_17666 that come from known issues:

### IGT changes ###

#### Possible fixes ####

  * igt@i915_selftest@live@execlists:
    - fi-whl-u:           [INCOMPLETE][1] ([i915#656]) -> [PASS][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/fi-whl-u/igt@i915_selftest@live@execlists.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/fi-whl-u/igt@i915_selftest@live@execlists.html

  
  [i915#656]: https://gitlab.freedesktop.org/drm/intel/issues/656


Participating hosts (51 -> 44)
------------------------------

  Missing    (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_8488 -> Patchwork_17666

  CI-20190529: 20190529
  CI_DRM_8488: d40ec60813532e485e9c63623c91babf556cfbe3 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5655: 2cc4c1edc3065590f9917930b6d049a90c4a38fd @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_17666: c391d6f84f905212fdc986357b120dae52e1989b @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

c391d6f84f90 drm/i915/execlists: Optimise away false timeslicing on virtual engines
408949995022 drm/i915/gt: Incorporate the virtual engine into timeslicing
155df023a532 drm/i915/gt: Kick virtual siblings on timeslice out
175d05822d38 drm/i915/selftests: Add tests for timeslicing virtual engines

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/index.html

* [Intel-gfx] ✗ Fi.CI.IGT: failure for series starting with [1/4] drm/i915/selftests: Add tests for timeslicing virtual engines
  2020-05-14 16:54 [Intel-gfx] [PATCH 1/4] drm/i915/selftests: Add tests for timeslicing virtual engines Chris Wilson
                   ` (4 preceding siblings ...)
  2020-05-15 16:09 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
@ 2020-05-15 19:05 ` Patchwork
  5 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-05-15 19:05 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/selftests: Add tests for timeslicing virtual engines
URL   : https://patchwork.freedesktop.org/series/77274/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_8488_full -> Patchwork_17666_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_17666_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_17666_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_17666_full:

### IGT changes ###

#### Possible regressions ####

  * igt@gem_exec_balancer@nop:
    - shard-tglb:         [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-tglb3/igt@gem_exec_balancer@nop.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-tglb7/igt@gem_exec_balancer@nop.html

  
Known issues
------------

  Here are the changes found in Patchwork_17666_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_fenced_exec_thrash@2-spare-fences:
    - shard-snb:          [PASS][3] -> [INCOMPLETE][4] ([i915#82])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-snb4/igt@gem_fenced_exec_thrash@2-spare-fences.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-snb1/igt@gem_fenced_exec_thrash@2-spare-fences.html

  * igt@i915_suspend@fence-restore-tiled2untiled:
    - shard-skl:          [PASS][5] -> [INCOMPLETE][6] ([i915#69])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl1/igt@i915_suspend@fence-restore-tiled2untiled.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl10/igt@i915_suspend@fence-restore-tiled2untiled.html

  * igt@kms_draw_crc@draw-method-rgb565-blt-ytiled:
    - shard-skl:          [PASS][7] -> [FAIL][8] ([i915#52] / [i915#54])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl10/igt@kms_draw_crc@draw-method-rgb565-blt-ytiled.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl9/igt@kms_draw_crc@draw-method-rgb565-blt-ytiled.html

  * igt@kms_frontbuffer_tracking@fbc-suspend:
    - shard-apl:          [PASS][9] -> [DMESG-WARN][10] ([i915#180] / [i915#95])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-apl1/igt@kms_frontbuffer_tracking@fbc-suspend.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-apl1/igt@kms_frontbuffer_tracking@fbc-suspend.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
    - shard-kbl:          [PASS][11] -> [DMESG-WARN][12] ([i915#180]) +5 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-kbl6/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-kbl6/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html

  * igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
    - shard-skl:          [PASS][13] -> [FAIL][14] ([fdo#108145] / [i915#265]) +2 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl7/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl10/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_blt:
    - shard-iclb:         [PASS][15] -> [SKIP][16] ([fdo#109441]) +2 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-iclb2/igt@kms_psr@psr2_cursor_blt.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-iclb1/igt@kms_psr@psr2_cursor_blt.html

  
#### Possible fixes ####

  * igt@gem_workarounds@suspend-resume-fd:
    - shard-skl:          [INCOMPLETE][17] ([i915#69]) -> [PASS][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl6/igt@gem_workarounds@suspend-resume-fd.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl1/igt@gem_workarounds@suspend-resume-fd.html

  * igt@kms_color@pipe-a-ctm-green-to-red:
    - shard-skl:          [FAIL][19] ([i915#129]) -> [PASS][20]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl6/igt@kms_color@pipe-a-ctm-green-to-red.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl8/igt@kms_color@pipe-a-ctm-green-to-red.html

  * {igt@kms_flip@flip-vs-expired-vblank@b-edp1}:
    - shard-skl:          [FAIL][21] ([i915#79]) -> [PASS][22]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl2/igt@kms_flip@flip-vs-expired-vblank@b-edp1.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl4/igt@kms_flip@flip-vs-expired-vblank@b-edp1.html

  * {igt@kms_flip@flip-vs-suspend-interruptible@a-dp1}:
    - shard-kbl:          [DMESG-WARN][23] ([i915#180]) -> [PASS][24] +5 similar issues
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-kbl7/igt@kms_flip@flip-vs-suspend-interruptible@a-dp1.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-kbl6/igt@kms_flip@flip-vs-suspend-interruptible@a-dp1.html

  * {igt@kms_flip@flip-vs-suspend@a-vga1}:
    - shard-snb:          [DMESG-WARN][25] ([i915#42]) -> [PASS][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-snb2/igt@kms_flip@flip-vs-suspend@a-vga1.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-snb6/igt@kms_flip@flip-vs-suspend@a-vga1.html

  * {igt@kms_flip@flip-vs-suspend@c-dp1}:
    - shard-apl:          [DMESG-WARN][27] ([i915#180]) -> [PASS][28]
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-apl7/igt@kms_flip@flip-vs-suspend@c-dp1.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-apl7/igt@kms_flip@flip-vs-suspend@c-dp1.html

  * {igt@kms_flip@plain-flip-fb-recreate@b-edp1}:
    - shard-skl:          [FAIL][29] ([i915#1883]) -> [PASS][30]
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl6/igt@kms_flip@plain-flip-fb-recreate@b-edp1.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl2/igt@kms_flip@plain-flip-fb-recreate@b-edp1.html

  * igt@kms_frontbuffer_tracking@psr-suspend:
    - shard-skl:          [INCOMPLETE][31] ([i915#123] / [i915#69]) -> [PASS][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl8/igt@kms_frontbuffer_tracking@psr-suspend.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl9/igt@kms_frontbuffer_tracking@psr-suspend.html

  * igt@kms_hdr@bpc-switch:
    - shard-skl:          [FAIL][33] ([i915#1188]) -> [PASS][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl6/igt@kms_hdr@bpc-switch.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl8/igt@kms_hdr@bpc-switch.html

  * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
    - shard-skl:          [FAIL][35] ([fdo#108145] / [i915#265]) -> [PASS][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-skl4/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-skl8/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html

  * igt@kms_psr@psr2_sprite_render:
    - shard-iclb:         [SKIP][37] ([fdo#109441]) -> [PASS][38] +1 similar issue
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-iclb6/igt@kms_psr@psr2_sprite_render.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-iclb2/igt@kms_psr@psr2_sprite_render.html

  * {igt@perf@blocking-parameterized}:
    - shard-iclb:         [FAIL][39] ([i915#1542]) -> [PASS][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-iclb8/igt@perf@blocking-parameterized.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-iclb5/igt@perf@blocking-parameterized.html

  
#### Warnings ####

  * igt@i915_pm_rpm@cursor-dpms:
    - shard-snb:          [INCOMPLETE][41] ([i915#82]) -> [SKIP][42] ([fdo#109271])
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-snb1/igt@i915_pm_rpm@cursor-dpms.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-snb1/igt@i915_pm_rpm@cursor-dpms.html

  * igt@kms_content_protection@atomic:
    - shard-apl:          [FAIL][43] ([fdo#110321] / [fdo#110336] / [i915#95]) -> [TIMEOUT][44] ([i915#1319])
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-apl6/igt@kms_content_protection@atomic.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-apl4/igt@kms_content_protection@atomic.html

  * igt@kms_content_protection@legacy:
    - shard-apl:          [FAIL][45] ([fdo#110321] / [fdo#110336]) -> [TIMEOUT][46] ([i915#1319])
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-apl3/igt@kms_content_protection@legacy.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-apl6/igt@kms_content_protection@legacy.html

  * igt@kms_fbcon_fbt@fbc-suspend:
    - shard-kbl:          [DMESG-FAIL][47] ([i915#180] / [i915#95]) -> [FAIL][48] ([i915#1121] / [i915#93] / [i915#95])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_8488/shard-kbl4/igt@kms_fbcon_fbt@fbc-suspend.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/shard-kbl3/igt@kms_fbcon_fbt@fbc-suspend.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110321]: https://bugs.freedesktop.org/show_bug.cgi?id=110321
  [fdo#110336]: https://bugs.freedesktop.org/show_bug.cgi?id=110336
  [i915#1121]: https://gitlab.freedesktop.org/drm/intel/issues/1121
  [i915#1188]: https://gitlab.freedesktop.org/drm/intel/issues/1188
  [i915#123]: https://gitlab.freedesktop.org/drm/intel/issues/123
  [i915#129]: https://gitlab.freedesktop.org/drm/intel/issues/129
  [i915#1319]: https://gitlab.freedesktop.org/drm/intel/issues/1319
  [i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#1883]: https://gitlab.freedesktop.org/drm/intel/issues/1883
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#42]: https://gitlab.freedesktop.org/drm/intel/issues/42
  [i915#52]: https://gitlab.freedesktop.org/drm/intel/issues/52
  [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
  [i915#69]: https://gitlab.freedesktop.org/drm/intel/issues/69
  [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
  [i915#82]: https://gitlab.freedesktop.org/drm/intel/issues/82
  [i915#93]: https://gitlab.freedesktop.org/drm/intel/issues/93
  [i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95


Participating hosts (11 -> 11)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * Linux: CI_DRM_8488 -> Patchwork_17666

  CI-20190529: 20190529
  CI_DRM_8488: d40ec60813532e485e9c63623c91babf556cfbe3 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5655: 2cc4c1edc3065590f9917930b6d049a90c4a38fd @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_17666: c391d6f84f905212fdc986357b120dae52e1989b @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_17666/index.html
