All of lore.kernel.org
 help / color / mirror / Atom feed
* [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch
@ 2020-01-13 10:44 Chris Wilson
  2020-01-13 10:44 ` [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine Chris Wilson
                   ` (7 more replies)
  0 siblings, 8 replies; 17+ messages in thread
From: Chris Wilson @ 2020-01-13 10:44 UTC (permalink / raw)
  To: intel-gfx

Currently, we reset the timer after a pre-emption event. This has the
side-effect that the timeslice runs into the second context after the
first is completed. To be more fair, we want to reset the clock after
promotion as well.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 9af1b2b493f4..a6ac37dece0a 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2285,7 +2285,6 @@ static void process_csb(struct intel_engine_cs *engine)
 
 			/* Point active to the new ELSP; prevent overwriting */
 			WRITE_ONCE(execlists->active, execlists->pending);
-			set_timeslice(engine);
 
 			if (!inject_preempt_hang(execlists))
 				ring_set_paused(engine, 0);
@@ -2325,6 +2324,9 @@ static void process_csb(struct intel_engine_cs *engine)
 		}
 	} while (head != tail);
 
+	if (execlists_active(execlists))
+		set_timeslice(engine);
+
 	execlists->csb_head = head;
 
 	/*
-- 
2.25.0.rc2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine
  2020-01-13 10:44 [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
@ 2020-01-13 10:44 ` Chris Wilson
  2020-01-14 11:13   ` Tvrtko Ursulin
  2020-01-13 10:44 ` [Intel-gfx] [PATCH 3/4] drm/i915/gt: Allow temporary suspension of inflight requests Chris Wilson
                   ` (6 subsequent siblings)
  7 siblings, 1 reply; 17+ messages in thread
From: Chris Wilson @ 2020-01-13 10:44 UTC (permalink / raw)
  To: intel-gfx

Since commit 422d7df4f090 ("drm/i915: Replace engine->timeline with a
plain list"), we used the default embedded priotree slot for the virtual
engine request queue, which means we can also use the same solitary slot
with the scheduler. However, the priolist is expected to be guarded by
the engine->active.lock, but this is not true for the virtual engine.

References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c   |  3 +++
 drivers/gpu/drm/i915/i915_request.c   |  4 +++-
 drivers/gpu/drm/i915/i915_request.h   | 16 ++++++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.c |  3 +--
 4 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a6ac37dece0a..685659f079a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 
 			list_move(&rq->sched.link, pl);
+			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
 			active = rq;
 		} else {
 			struct intel_engine_cs *owner = rq->context->engine;
@@ -2473,6 +2475,7 @@ static void execlists_submit_request(struct i915_request *request)
 	spin_lock_irqsave(&engine->active.lock, flags);
 
 	queue_request(engine, &request->sched, rq_prio(request));
+	set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
 
 	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 	GEM_BUG_ON(list_empty(&request->sched.link));
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index be185886e4fc..9ed0d3bc7249 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
 xfer:	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
 		list_move_tail(&request->sched.link, &engine->active.requests);
+		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+	}
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 031433691a06..f3e50ec989b8 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -70,6 +70,17 @@ enum {
 	 */
 	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
 
+	/*
+	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
+	 *
+	 * Using the scheduler, when a request is ready for execution it is put
+	 * into the priority queue. We want to track its membership within that
+	 * queue so that we can easily check before rescheduling.
+	 *
+	 * See i915_request_in_priority_queue()
+	 */
+	I915_FENCE_FLAG_PQUEUE,
+
 	/*
 	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
 	 *
@@ -361,6 +372,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
 	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 }
 
+static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
+{
+	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
 /**
  * Returns true if seq1 is later than seq2.
  */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index bf87c70bfdd9..4f6e4d6c590a 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -338,8 +338,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 			continue;
 		}
 
-		if (!intel_engine_is_virtual(engine) &&
-		    !i915_request_is_active(node_to_request(node))) {
+		if (i915_request_in_priority_queue(node_to_request(node))) {
 			if (!cache.priolist)
 				cache.priolist =
 					i915_sched_lookup_priolist(engine,
-- 
2.25.0.rc2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH 3/4] drm/i915/gt: Allow temporary suspension of inflight requests
  2020-01-13 10:44 [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
  2020-01-13 10:44 ` [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine Chris Wilson
@ 2020-01-13 10:44 ` Chris Wilson
  2020-01-14 18:33   ` Tvrtko Ursulin
  2020-01-13 10:44 ` [Intel-gfx] [PATCH 4/4] drm/i915/execlists: Offline error capture Chris Wilson
                   ` (5 subsequent siblings)
  7 siblings, 1 reply; 17+ messages in thread
From: Chris Wilson @ 2020-01-13 10:44 UTC (permalink / raw)
  To: intel-gfx

In order to support out-of-line error capture, we need to remove the
active request from HW and put it to one side while a worker compresses
and stores all the details associated with that request. (As that
compression may take an arbitrary user-controlled amount of time, we
want to let the engine continue running on other workloads while the
hanging request is dumped.) Not only do we need to remove the active
request, but we also have to remove its context and all requests that
were dependent on it (whether in flight, queued, or awaiting future
submission).

Finally once the capture is complete, we need to be able to resubmit the
request and its dependents and allow them to execute.

References: https://gitlab.freedesktop.org/drm/intel/issues/738
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_cs.c    |   1 +
 drivers/gpu/drm/i915/gt/intel_engine_types.h |   1 +
 drivers/gpu/drm/i915/gt/intel_lrc.c          | 109 ++++++++++++++++++-
 drivers/gpu/drm/i915/gt/selftest_lrc.c       | 103 ++++++++++++++++++
 drivers/gpu/drm/i915/i915_request.h          |  22 ++++
 5 files changed, 231 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f451ef376548..c296aaf381e7 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -671,6 +671,7 @@ void
 intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
 {
 	INIT_LIST_HEAD(&engine->active.requests);
+	INIT_LIST_HEAD(&engine->active.hold);
 
 	spin_lock_init(&engine->active.lock);
 	lockdep_set_subclass(&engine->active.lock, subclass);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 00287515e7af..6f4f9912a364 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -295,6 +295,7 @@ struct intel_engine_cs {
 	struct {
 		spinlock_t lock;
 		struct list_head requests;
+		struct list_head hold;
 	} active;
 
 	struct llist_head barrier_tasks;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 685659f079a2..e0347e05e577 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2355,6 +2355,77 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 	}
 }
 
+static void __execlists_hold(struct i915_request *rq)
+{
+	struct i915_dependency *p;
+
+	if (!i915_request_completed(rq)) {
+		RQ_TRACE(rq, "on hold\n");
+		if (i915_request_is_active(rq))
+			__i915_request_unsubmit(rq);
+		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+		list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+		i915_request_set_hold(rq);
+	}
+
+	list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+		struct i915_request *w =
+			container_of(p->waiter, typeof(*w), sched);
+
+		/* Leave semaphores spinning on the other engines */
+		if (w->engine != rq->engine)
+			continue;
+
+		__execlists_hold(w);
+	}
+}
+
+__maybe_unused
+static void execlists_hold(struct intel_engine_cs *engine,
+			   struct i915_request *rq)
+{
+	spin_lock_irq(&engine->active.lock);
+	__execlists_hold(rq);
+	spin_unlock_irq(&engine->active.lock);
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+	struct i915_dependency *p;
+
+	if (!i915_request_has_hold(rq))
+		return;
+
+	i915_request_clear_hold(rq);
+	list_move_tail(&rq->sched.link,
+		       i915_sched_lookup_priolist(rq->engine, rq_prio(rq)));
+	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+	RQ_TRACE(rq, "hold release\n");
+
+	list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+		struct i915_request *w =
+			container_of(p->waiter, typeof(*w), sched);
+
+		if (w->engine != rq->engine)
+			continue;
+
+		__execlists_unhold(w);
+	}
+}
+
+__maybe_unused
+static void execlists_unhold(struct intel_engine_cs *engine,
+			     struct i915_request *rq)
+{
+	spin_lock_irq(&engine->active.lock);
+	__execlists_unhold(rq);
+	if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+		engine->execlists.queue_priority_hint = rq_prio(rq);
+		tasklet_hi_schedule(&engine->execlists.tasklet);
+	}
+	spin_unlock_irq(&engine->active.lock);
+}
+
 static noinline void preempt_reset(struct intel_engine_cs *engine)
 {
 	const unsigned int bit = I915_RESET_ENGINE + engine->id;
@@ -2466,6 +2537,29 @@ static void submit_queue(struct intel_engine_cs *engine,
 	__submit_queue_imm(engine);
 }
 
+static bool hold_request(const struct i915_request *rq)
+{
+	struct i915_dependency *p;
+
+	list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
+		const struct i915_request *s =
+			container_of(p->signaler, typeof(*s), sched);
+
+		if (s->engine != rq->engine)
+			continue;
+
+		return i915_request_has_hold(s);
+	}
+
+	return false;
+}
+
+static bool on_hold(const struct intel_engine_cs *engine,
+		    const struct i915_request *rq)
+{
+	return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
 static void execlists_submit_request(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
@@ -2474,13 +2568,18 @@ static void execlists_submit_request(struct i915_request *request)
 	/* Will be called from irq-context when using foreign fences. */
 	spin_lock_irqsave(&engine->active.lock, flags);
 
-	queue_request(engine, &request->sched, rq_prio(request));
-	set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+	if (unlikely(on_hold(engine, request))) {
+		i915_request_set_hold(request);
+		list_add_tail(&request->sched.link, &engine->active.hold);
+	} else {
+		queue_request(engine, &request->sched, rq_prio(request));
+		set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
 
-	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-	GEM_BUG_ON(list_empty(&request->sched.link));
+		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+		GEM_BUG_ON(list_empty(&request->sched.link));
 
-	submit_queue(engine, request);
+		submit_queue(engine, request);
+	}
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 15cda024e3e4..78501d79c0ea 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -285,6 +285,108 @@ static int live_unlite_preempt(void *arg)
 	return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
 }
 
+static int live_hold_reset(void *arg)
+{
+	struct intel_gt *gt = arg;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	struct igt_spinner spin;
+	int err = 0;
+
+	/*
+	 * In order to support offline error capture for fast preempt reset,
+	 * we need to decouple the guilty request and ensure that it and its
+	 * descendents are not executed while the capture is in progress.
+	 */
+
+	if (!intel_has_reset_engine(gt))
+		return 0;
+
+	if (igt_spinner_init(&spin, gt))
+		return -ENOMEM;
+
+	for_each_engine(engine, gt, id) {
+		struct intel_context *ce;
+		unsigned long heartbeat;
+		struct i915_request *rq;
+
+		ce = intel_context_create(engine);
+		if (IS_ERR(ce)) {
+			err = PTR_ERR(ce);
+			break;
+		}
+
+		engine_heartbeat_disable(engine, &heartbeat);
+
+		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto out;
+		}
+		i915_request_add(rq);
+
+		if (!igt_wait_for_spinner(&spin, rq)) {
+			intel_gt_set_wedged(gt);
+			err = -ETIME;
+			goto out;
+		}
+
+		/* We have our request executing, now remove it and reset */
+
+		if (test_and_set_bit(I915_RESET_ENGINE + id,
+				     &gt->reset.flags)) {
+			spin_unlock_irq(&engine->active.lock);
+			intel_gt_set_wedged(gt);
+			err = -EBUSY;
+			goto out;
+		}
+		tasklet_disable(&engine->execlists.tasklet);
+
+		engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+		execlists_hold(engine, rq);
+		GEM_BUG_ON(!i915_request_has_hold(rq));
+
+		intel_engine_reset(engine, NULL);
+		GEM_BUG_ON(rq->fence.error != -EIO);
+
+		tasklet_enable(&engine->execlists.tasklet);
+		clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+				      &gt->reset.flags);
+
+		/* Check that we do not resubmit the held request */
+		i915_request_get(rq);
+		if (!i915_request_wait(rq, 0, HZ / 5)) {
+			pr_err("%s: on hold request completed!\n",
+			       engine->name);
+			i915_request_put(rq);
+			err = -EIO;
+			goto out;
+		}
+		GEM_BUG_ON(!i915_request_has_hold(rq));
+
+		/* But is resubmitted on release */
+		execlists_unhold(engine, rq);
+		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+			pr_err("%s: held request did not complete!\n",
+			       engine->name);
+			intel_gt_set_wedged(gt);
+			err = -ETIME;
+		}
+		i915_request_put(rq);
+
+out:
+		engine_heartbeat_enable(engine, heartbeat);
+		intel_context_put(ce);
+		if (err)
+			break;
+	}
+
+	igt_spinner_fini(&spin);
+	return err;
+}
+
 static int
 emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
 {
@@ -3315,6 +3417,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_sanitycheck),
 		SUBTEST(live_unlite_switch),
 		SUBTEST(live_unlite_preempt),
+		SUBTEST(live_hold_reset),
 		SUBTEST(live_timeslice_preempt),
 		SUBTEST(live_timeslice_queue),
 		SUBTEST(live_busywait_preempt),
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index f3e50ec989b8..2da8cfb3f3a1 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -89,6 +89,13 @@ enum {
 	 */
 	I915_FENCE_FLAG_SIGNAL,
 
+	/*
+	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
+	 *
+	 * This request has been suspended, pending an ongoing investigation.
+	 */
+	I915_FENCE_FLAG_HOLD,
+
 	/*
 	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
 	 *
@@ -499,6 +506,21 @@ static inline bool i915_request_has_sentinel(const struct i915_request *rq)
 	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
 }
 
+static inline bool i915_request_has_hold(const struct i915_request *rq)
+{
+	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
+}
+
+static inline void i915_request_set_hold(struct i915_request *rq)
+{
+	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+}
+
+static inline void i915_request_clear_hold(struct i915_request *rq)
+{
+	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+}
+
 static inline struct intel_timeline *
 i915_request_timeline(struct i915_request *rq)
 {
-- 
2.25.0.rc2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH 4/4] drm/i915/execlists: Offline error capture
  2020-01-13 10:44 [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
  2020-01-13 10:44 ` [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine Chris Wilson
  2020-01-13 10:44 ` [Intel-gfx] [PATCH 3/4] drm/i915/gt: Allow temporary suspension of inflight requests Chris Wilson
@ 2020-01-13 10:44 ` Chris Wilson
  2020-01-13 11:32 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch Patchwork
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 17+ messages in thread
From: Chris Wilson @ 2020-01-13 10:44 UTC (permalink / raw)
  To: intel-gfx

Currently, we skip error capture upon forced preemption. We apply forced
preemption when there is a higher priority request that should be
running but is being blocked, and we skip inline error capture so that
the preemption request is not further delayed by a user controlled
capture -- extending the denial of service.

However, preemption reset is also used for heartbeats and regular GPU
hangs. By skipping the error capture, we remove the ability to debug GPU
hangs.

In order to capture the error without delaying the preemption request
further, we can do an out-of-line capture by removing the guilty request
from the execution queue and scheduling a work to dump that request.
When removing a request, we need to remove the entire context and all
descendants from the execution queue, so that they do not jump past it.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/738
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 99 ++++++++++++++++++++++++++++-
 1 file changed, 97 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index e0347e05e577..ad474b4bc2f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2380,7 +2380,6 @@ static void __execlists_hold(struct i915_request *rq)
 	}
 }
 
-__maybe_unused
 static void execlists_hold(struct intel_engine_cs *engine,
 			   struct i915_request *rq)
 {
@@ -2413,7 +2412,12 @@ static void __execlists_unhold(struct i915_request *rq)
 	}
 }
 
-__maybe_unused
+struct execlists_capture {
+	struct work_struct work;
+	struct i915_request *rq;
+	struct i915_gpu_coredump *error;
+};
+
 static void execlists_unhold(struct intel_engine_cs *engine,
 			     struct i915_request *rq)
 {
@@ -2426,6 +2430,96 @@ static void execlists_unhold(struct intel_engine_cs *engine,
 	spin_unlock_irq(&engine->active.lock);
 }
 
+static void execlists_capture_work(struct work_struct *work)
+{
+	struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+	const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+	struct intel_engine_cs *engine = cap->rq->engine;
+	struct intel_gt_coredump *gt = cap->error->gt;
+	struct intel_engine_capture_vma *vma;
+
+	vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+	if (vma) {
+		struct i915_vma_compress *comp = i915_vma_capture_prepare(gt);
+
+		intel_engine_coredump_add_vma(gt->engine, vma, comp);
+		i915_vma_capture_finish(gt, comp);
+	}
+
+	gt->simulated = gt->engine->simulated;
+	cap->error->simulated = gt->simulated;
+
+	i915_error_state_store(cap->error);
+	i915_gpu_coredump_put(cap->error);
+
+	execlists_unhold(engine, cap->rq);
+
+	kfree(cap);
+}
+
+static struct i915_gpu_coredump *capture_regs(struct intel_engine_cs *engine)
+{
+	const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+	struct i915_gpu_coredump *e;
+
+	e = i915_gpu_coredump_alloc(engine->i915, gfp);
+	if (!e)
+		return NULL;
+
+	e->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+	if (!e->gt)
+		goto err;
+
+	e->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+	if (!e->gt->engine)
+		goto err_gt;
+
+	return e;
+
+err_gt:
+	kfree(e->gt);
+err:
+	kfree(e);
+	return NULL;
+}
+
+static void execlists_capture(struct intel_engine_cs *engine)
+{
+	struct execlists_capture *cap;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+		return;
+
+	cap = kmalloc(sizeof(*cap), GFP_ATOMIC);
+	if (!cap)
+		return;
+
+	cap->rq = execlists_active(&engine->execlists);
+	GEM_BUG_ON(!cap->rq);
+
+	/*
+	 * We need to _quickly_ capture the engine state before we reset.
+	 * We are inside an atomic section (softirq) here and we are delaying
+	 * the forced preemption event.
+	 */
+	cap->error = capture_regs(engine);
+	if (!cap->error) {
+		kfree(cap);
+		return;
+	}
+
+	/*
+	 * Remove the request from the execlists queue, and take ownership
+	 * of the request. We pass it to our worker who will _slowly_ compress
+	 * all the pages the _user_ requested for debugging their batch, after
+	 * which we return it to the queue for signaling.
+	 */
+	execlists_hold(engine, cap->rq);
+
+	INIT_WORK(&cap->work, execlists_capture_work);
+	schedule_work(&cap->work);
+}
+
 static noinline void preempt_reset(struct intel_engine_cs *engine)
 {
 	const unsigned int bit = I915_RESET_ENGINE + engine->id;
@@ -2443,6 +2537,7 @@ static noinline void preempt_reset(struct intel_engine_cs *engine)
 	ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
 		     READ_ONCE(engine->props.preempt_timeout_ms),
 		     jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+	execlists_capture(engine);
 	intel_engine_reset(engine, "preemption time out");
 
 	tasklet_enable(&engine->execlists.tasklet);
-- 
2.25.0.rc2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch
  2020-01-13 10:44 [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
                   ` (2 preceding siblings ...)
  2020-01-13 10:44 ` [Intel-gfx] [PATCH 4/4] drm/i915/execlists: Offline error capture Chris Wilson
@ 2020-01-13 11:32 ` Patchwork
  2020-01-13 12:59 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 17+ messages in thread
From: Patchwork @ 2020-01-13 11:32 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch
URL   : https://patchwork.freedesktop.org/series/71951/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
83ec2e3b2a82 drm/i915/gt: Always reset the timeslice after a context switch
6279ccd05671 drm/i915: Use common priotree lists for virtual engine
-:12: WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#12: 
References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")

-:12: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")'
#12: 
References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")

total: 1 errors, 1 warnings, 0 checks, 63 lines checked
eeed45e73d81 drm/i915/gt: Allow temporary suspension of inflight requests
27f79e3f9f38 drm/i915/execlists: Offline error capture

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [Intel-gfx] ✗ Fi.CI.BAT: failure for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch
  2020-01-13 10:44 [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
                   ` (3 preceding siblings ...)
  2020-01-13 11:32 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch Patchwork
@ 2020-01-13 12:59 ` Patchwork
  2020-01-13 13:00 ` [Intel-gfx] ✗ Fi.CI.BUILD: warning " Patchwork
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 17+ messages in thread
From: Patchwork @ 2020-01-13 12:59 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch
URL   : https://patchwork.freedesktop.org/series/71951/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7729 -> Patchwork_16068
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_16068 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_16068, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_16068:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_pm_rpm@module-reload:
    - fi-icl-u3:          [PASS][1] -> [FAIL][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-icl-u3/igt@i915_pm_rpm@module-reload.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-icl-u3/igt@i915_pm_rpm@module-reload.html

  
Known issues
------------

  Here are the changes found in Patchwork_16068 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_module_load@reload-with-fault-injection:
    - fi-skl-lmem:        [PASS][3] -> [INCOMPLETE][4] ([i915#671])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-skl-lmem/igt@i915_module_load@reload-with-fault-injection.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-skl-lmem/igt@i915_module_load@reload-with-fault-injection.html

  * igt@i915_selftest@live_execlists:
    - fi-icl-y:           [PASS][5] -> [DMESG-FAIL][6] ([fdo#108569])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-icl-y/igt@i915_selftest@live_execlists.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-icl-y/igt@i915_selftest@live_execlists.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][7] -> [FAIL][8] ([fdo#111096] / [i915#323])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  * igt@kms_cursor_legacy@basic-flip-after-cursor-atomic:
    - fi-icl-u2:          [PASS][9] -> [DMESG-WARN][10] ([i915#263])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-icl-u2/igt@kms_cursor_legacy@basic-flip-after-cursor-atomic.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-icl-u2/igt@kms_cursor_legacy@basic-flip-after-cursor-atomic.html

  
#### Possible fixes ####

  * igt@gem_exec_fence@basic-busy-default:
    - {fi-ehl-1}:         [INCOMPLETE][11] ([i915#937]) -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-ehl-1/igt@gem_exec_fence@basic-busy-default.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-ehl-1/igt@gem_exec_fence@basic-busy-default.html

  * igt@gem_exec_gttfill@basic:
    - fi-bsw-n3050:       [TIMEOUT][13] ([fdo#112271]) -> [PASS][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-bsw-n3050/igt@gem_exec_gttfill@basic.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-bsw-n3050/igt@gem_exec_gttfill@basic.html

  * igt@i915_module_load@reload:
    - fi-icl-u2:          [DMESG-WARN][15] ([i915#289]) -> [PASS][16] +3 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-icl-u2/igt@i915_module_load@reload.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-icl-u2/igt@i915_module_load@reload.html

  * igt@i915_module_load@reload-with-fault-injection:
    - fi-cfl-8700k:       [DMESG-WARN][17] ([i915#889]) -> [PASS][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-cfl-8700k/igt@i915_module_load@reload-with-fault-injection.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-cfl-8700k/igt@i915_module_load@reload-with-fault-injection.html

  * igt@i915_pm_rpm@module-reload:
    - fi-cfl-8700k:       [INCOMPLETE][19] ([i915#148]) -> [PASS][20]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-cfl-8700k/igt@i915_pm_rpm@module-reload.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-cfl-8700k/igt@i915_pm_rpm@module-reload.html

  * igt@i915_selftest@live_blt:
    - fi-ivb-3770:        [DMESG-FAIL][21] ([i915#725]) -> [PASS][22]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-ivb-3770/igt@i915_selftest@live_blt.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-ivb-3770/igt@i915_selftest@live_blt.html
    - fi-hsw-4770:        [DMESG-FAIL][23] ([i915#553] / [i915#725]) -> [PASS][24]
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-hsw-4770/igt@i915_selftest@live_blt.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-hsw-4770/igt@i915_selftest@live_blt.html

  * igt@i915_selftest@live_coherency:
    - fi-cfl-guc:         [DMESG-FAIL][25] ([i915#889]) -> [PASS][26] +7 similar issues
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-cfl-guc/igt@i915_selftest@live_coherency.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-cfl-guc/igt@i915_selftest@live_coherency.html

  * igt@i915_selftest@live_gt_timelines:
    - fi-cfl-guc:         [DMESG-WARN][27] ([i915#889]) -> [PASS][28] +23 similar issues
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-cfl-guc/igt@i915_selftest@live_gt_timelines.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-cfl-guc/igt@i915_selftest@live_gt_timelines.html

  
#### Warnings ####

  * igt@kms_chamelium@common-hpd-after-suspend:
    - fi-icl-u2:          [DMESG-WARN][29] ([IGT#4] / [i915#263]) -> [FAIL][30] ([i915#323])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7729/fi-icl-u2/igt@kms_chamelium@common-hpd-after-suspend.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/fi-icl-u2/igt@kms_chamelium@common-hpd-after-suspend.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [IGT#4]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/4
  [fdo#108569]: https://bugs.freedesktop.org/show_bug.cgi?id=108569
  [fdo#111096]: https://bugs.freedesktop.org/show_bug.cgi?id=111096
  [fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271
  [i915#148]: https://gitlab.freedesktop.org/drm/intel/issues/148
  [i915#263]: https://gitlab.freedesktop.org/drm/intel/issues/263
  [i915#289]: https://gitlab.freedesktop.org/drm/intel/issues/289
  [i915#323]: https://gitlab.freedesktop.org/drm/intel/issues/323
  [i915#553]: https://gitlab.freedesktop.org/drm/intel/issues/553
  [i915#671]: https://gitlab.freedesktop.org/drm/intel/issues/671
  [i915#725]: https://gitlab.freedesktop.org/drm/intel/issues/725
  [i915#889]: https://gitlab.freedesktop.org/drm/intel/issues/889
  [i915#937]: https://gitlab.freedesktop.org/drm/intel/issues/937


Participating hosts (42 -> 47)
------------------------------

  Additional (9): fi-hsw-4770r fi-bdw-5557u fi-skl-6770hq fi-glk-dsi fi-snb-2520m fi-whl-u fi-bsw-kefka fi-kbl-7560u fi-skl-6600u 
  Missing    (4): fi-ctg-p8600 fi-byt-squawks fi-bsw-cyan fi-hsw-4200u 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7729 -> Patchwork_16068

  CI-20190529: 20190529
  CI_DRM_7729: 99867b455970a3243157609efd76f32b19f062ea @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5364: b7cb6ffdb65cbd233f5ddee2f2dabf97b34fa640 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16068: 27f79e3f9f3883e033b0ef83a62ff2e9960811bf @ git://anongit.freedesktop.org/gfx-ci/linux


== Kernel 32bit build ==

Warning: Kernel 32bit buildtest failed:
https://intel-gfx-ci.01.org/Patchwork_16068/build_32bit.log

  CALL    scripts/checksyscalls.sh
  CALL    scripts/atomic/check-atomics.sh
  CHK     include/generated/compile.h
Kernel: arch/x86/boot/bzImage is ready  (#1)
  Building modules, stage 2.
  MODPOST 122 modules
ERROR: "__udivdi3" [drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined!
scripts/Makefile.modpost:93: recipe for target '__modpost' failed
make[1]: *** [__modpost] Error 1
Makefile:1282: recipe for target 'modules' failed
make: *** [modules] Error 2


== Linux commits ==

27f79e3f9f38 drm/i915/execlists: Offline error capture
eeed45e73d81 drm/i915/gt: Allow temporary suspension of inflight requests
6279ccd05671 drm/i915: Use common priotree lists for virtual engine
83ec2e3b2a82 drm/i915/gt: Always reset the timeslice after a context switch

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [Intel-gfx] ✗ Fi.CI.BUILD: warning for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch
  2020-01-13 10:44 [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
                   ` (4 preceding siblings ...)
  2020-01-13 12:59 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
@ 2020-01-13 13:00 ` Patchwork
  2020-01-13 17:26 ` [Intel-gfx] [PATCH 1/4] " Tvrtko Ursulin
  2020-01-14 12:51 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch (rev2) Patchwork
  7 siblings, 0 replies; 17+ messages in thread
From: Patchwork @ 2020-01-13 13:00 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch
URL   : https://patchwork.freedesktop.org/series/71951/
State : warning

== Summary ==

CALL    scripts/checksyscalls.sh
  CALL    scripts/atomic/check-atomics.sh
  CHK     include/generated/compile.h
Kernel: arch/x86/boot/bzImage is ready  (#1)
  Building modules, stage 2.
  MODPOST 122 modules
ERROR: "__udivdi3" [drivers/gpu/drm/amd/amdgpu/amdgpu.ko] undefined!
scripts/Makefile.modpost:93: recipe for target '__modpost' failed
make[1]: *** [__modpost] Error 1
Makefile:1282: recipe for target 'modules' failed
make: *** [modules] Error 2

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16068/build_32bit.log
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch
  2020-01-13 10:44 [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
                   ` (5 preceding siblings ...)
  2020-01-13 13:00 ` [Intel-gfx] ✗ Fi.CI.BUILD: warning " Patchwork
@ 2020-01-13 17:26 ` Tvrtko Ursulin
  2020-01-14 12:51 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch (rev2) Patchwork
  7 siblings, 0 replies; 17+ messages in thread
From: Tvrtko Ursulin @ 2020-01-13 17:26 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 13/01/2020 10:44, Chris Wilson wrote:
> Currently, we reset the timer after a pre-emption event. This has the
> side-effect that the timeslice runs into the second context after the
> first is completed. To be more fair, we want to reset the clock after
> promotion as well.

You mean after completion? If so then it makes sense to me.
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_lrc.c | 4 +++-
>   1 file changed, 3 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 9af1b2b493f4..a6ac37dece0a 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -2285,7 +2285,6 @@ static void process_csb(struct intel_engine_cs *engine)
>   
>   			/* Point active to the new ELSP; prevent overwriting */
>   			WRITE_ONCE(execlists->active, execlists->pending);
> -			set_timeslice(engine);
>   
>   			if (!inject_preempt_hang(execlists))
>   				ring_set_paused(engine, 0);
> @@ -2325,6 +2324,9 @@ static void process_csb(struct intel_engine_cs *engine)
>   		}
>   	} while (head != tail);
>   
> +	if (execlists_active(execlists))
> +		set_timeslice(engine);
> +
>   	execlists->csb_head = head;
>   
>   	/*
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine
  2020-01-13 10:44 ` [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine Chris Wilson
@ 2020-01-14 11:13   ` Tvrtko Ursulin
  2020-01-14 11:20     ` Chris Wilson
                       ` (2 more replies)
  0 siblings, 3 replies; 17+ messages in thread
From: Tvrtko Ursulin @ 2020-01-14 11:13 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx



On 13/01/2020 10:44, Chris Wilson wrote:
> Since commit 422d7df4f090 ("drm/i915: Replace engine->timeline with a
> plain list"), we used the default embedded priotree slot for the virtual
> engine request queue, which means we can also use the same solitary slot
> with the scheduler. However, the priolist is expected to be guarded by
> the engine->active.lock, but this is not true for the virtual engine
> 
> References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_lrc.c   |  3 +++
>   drivers/gpu/drm/i915/i915_request.c   |  4 +++-
>   drivers/gpu/drm/i915/i915_request.h   | 16 ++++++++++++++++
>   drivers/gpu/drm/i915/i915_scheduler.c |  3 +--
>   4 files changed, 23 insertions(+), 3 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index a6ac37dece0a..685659f079a2 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
>   			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
>   
>   			list_move(&rq->sched.link, pl);
> +			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> +
>   			active = rq;
>   		} else {
>   			struct intel_engine_cs *owner = rq->context->engine;
> @@ -2473,6 +2475,7 @@ static void execlists_submit_request(struct i915_request *request)
>   	spin_lock_irqsave(&engine->active.lock, flags);
>   
>   	queue_request(engine, &request->sched, rq_prio(request));
> +	set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);

Move into queue_request so it is closer to priolist management, just like at other call sites?

Also, these are all under the engine active lock so non-atomic set/clear could be used, no?

>   
>   	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
>   	GEM_BUG_ON(list_empty(&request->sched.link));
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index be185886e4fc..9ed0d3bc7249 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
>   xfer:	/* We may be recursing from the signal callback of another i915 fence */
>   	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
>   
> -	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
> +	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
>   		list_move_tail(&request->sched.link, &engine->active.requests);
> +		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
> +	}
>   
>   	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
>   	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 031433691a06..f3e50ec989b8 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -70,6 +70,17 @@ enum {
>   	 */
>   	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
>   
> +	/*
> +	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
> +	 *
> +	 * Using the scheduler, when a request is ready for execution it is put
> +	 * into the priority queue. We want to track its membership within that
> +	 * queue so that we can easily check before rescheduling.
> +	 *
> +	 * See i915_request_in_priority_queue()
> +	 */
> +	I915_FENCE_FLAG_PQUEUE,
> +
>   	/*
>   	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
>   	 *
> @@ -361,6 +372,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
>   	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
>   }
>   
> +static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
> +{
> +	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> +}
> +
>   /**
>    * Returns true if seq1 is later than seq2.
>    */
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index bf87c70bfdd9..4f6e4d6c590a 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -338,8 +338,7 @@ static void __i915_schedule(struct i915_sched_node *node,
>   			continue;
>   		}
>   
> -		if (!intel_engine_is_virtual(engine) &&
> -		    !i915_request_is_active(node_to_request(node))) {
> +		if (i915_request_in_priority_queue(node_to_request(node))) {

Not shown in this diff before this if block we have:

	if (list_empty(&node->link)) {
		/*
		 * If the request is not in the priolist queue because
		 * it is not yet runnable, then it doesn't contribute
		 * to our preemption decisions. On the other hand,
		 * if the request is on the HW, it too is not in the
		 * queue; but in that case we may still need to reorder
		 * the inflight requests.
		 */
		continue;
	}

What is the difference between list_empty(&node->link) and !i915_request_in_priority_queue?

Regards,

Tvrtko

>   			if (!cache.priolist)
>   				cache.priolist =
>   					i915_sched_lookup_priolist(engine,
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine
  2020-01-14 11:13   ` Tvrtko Ursulin
@ 2020-01-14 11:20     ` Chris Wilson
  2020-01-14 13:48       ` Tvrtko Ursulin
  2020-01-14 11:38     ` Chris Wilson
  2020-01-14 11:55     ` [Intel-gfx] [PATCH v2] " Chris Wilson
  2 siblings, 1 reply; 17+ messages in thread
From: Chris Wilson @ 2020-01-14 11:20 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-01-14 11:13:06)
> 
> 
> On 13/01/2020 10:44, Chris Wilson wrote:
> > Since commit 422d7df4f090 ("drm/i915: Replace engine->timeline with a
> > plain list"), we used the default embedded priotree slot for the virtual
> > engine request queue, which means we can also use the same solitary slot
> > with the scheduler. However, the priolist is expected to be guarded by
> > the engine->active.lock, but this is not true for the virtual engine
> > 
> > References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gt/intel_lrc.c   |  3 +++
> >   drivers/gpu/drm/i915/i915_request.c   |  4 +++-
> >   drivers/gpu/drm/i915/i915_request.h   | 16 ++++++++++++++++
> >   drivers/gpu/drm/i915/i915_scheduler.c |  3 +--
> >   4 files changed, 23 insertions(+), 3 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > index a6ac37dece0a..685659f079a2 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > @@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
> >                       GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
> >   
> >                       list_move(&rq->sched.link, pl);
> > +                     set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> > +
> >                       active = rq;
> >               } else {
> >                       struct intel_engine_cs *owner = rq->context->engine;
> > @@ -2473,6 +2475,7 @@ static void execlists_submit_request(struct i915_request *request)
> >       spin_lock_irqsave(&engine->active.lock, flags);
> >   
> >       queue_request(engine, &request->sched, rq_prio(request));
> > +     set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
> 
> Move into queue_request so it is closer to priolist management, just like at other call sites?
> 
> Also, these are all under the engine active lock so non-atomic set/clear could be used, no?

It's not the bit that is important, but if there may be any other
concurrent access to the dword.

Thread A:			Thread B:
__set_bit(0, &rq->flags)	__set_bit(31, &rq->flags)

*does* cause an issue, speaking from sad experience. So if in doubt, and
here there's always doubt with preempt-to-busy and background signaling,
go atomic.

> 
> >   
> >       GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
> >       GEM_BUG_ON(list_empty(&request->sched.link));
> > diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> > index be185886e4fc..9ed0d3bc7249 100644
> > --- a/drivers/gpu/drm/i915/i915_request.c
> > +++ b/drivers/gpu/drm/i915/i915_request.c
> > @@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
> >   xfer:       /* We may be recursing from the signal callback of another i915 fence */
> >       spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
> >   
> > -     if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
> > +     if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
> >               list_move_tail(&request->sched.link, &engine->active.requests);
> > +             clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
> > +     }
> >   
> >       if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
> >           !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
> > diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> > index 031433691a06..f3e50ec989b8 100644
> > --- a/drivers/gpu/drm/i915/i915_request.h
> > +++ b/drivers/gpu/drm/i915/i915_request.h
> > @@ -70,6 +70,17 @@ enum {
> >        */
> >       I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
> >   
> > +     /*
> > +      * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
> > +      *
> > +      * Using the scheduler, when a request is ready for execution it is put
> > +      * into the priority queue. We want to track its membership within that
> > +      * queue so that we can easily check before rescheduling.
> > +      *
> > +      * See i915_request_in_priority_queue()
> > +      */
> > +     I915_FENCE_FLAG_PQUEUE,
> > +
> >       /*
> >        * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
> >        *
> > @@ -361,6 +372,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
> >       return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
> >   }
> >   
> > +static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
> > +{
> > +     return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> > +}
> > +
> >   /**
> >    * Returns true if seq1 is later than seq2.
> >    */
> > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> > index bf87c70bfdd9..4f6e4d6c590a 100644
> > --- a/drivers/gpu/drm/i915/i915_scheduler.c
> > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> > @@ -338,8 +338,7 @@ static void __i915_schedule(struct i915_sched_node *node,
> >                       continue;
> >               }
> >   
> > -             if (!intel_engine_is_virtual(engine) &&
> > -                 !i915_request_is_active(node_to_request(node))) {
> > +             if (i915_request_in_priority_queue(node_to_request(node))) {
> 
> Not shown in this diff before this if block we have:
> 
>         if (list_empty(&node->link)) {
>                 /*
>                  * If the request is not in the priolist queue because
>                  * it is not yet runnable, then it doesn't contribute
>                  * to our preemption decisions. On the other hand,
>                  * if the request is on the HW, it too is not in the
>                  * queue; but in that case we may still need to reorder
>                  * the inflight requests.

This second sentence is obsolete, we now use node->link for active.

>                  */
>                 continue;
>         }
> 
> What is the difference between list_empty(&node->link) and !i915_request_in_priority_queue?

list_empty() -> prior to being ready, we will put it into the plist upon
submit_request()

Once ready, we only want to fiddle with its place in the priority lists,
if it is in the plist.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine
  2020-01-14 11:13   ` Tvrtko Ursulin
  2020-01-14 11:20     ` Chris Wilson
@ 2020-01-14 11:38     ` Chris Wilson
  2020-01-14 11:55     ` [Intel-gfx] [PATCH v2] " Chris Wilson
  2 siblings, 0 replies; 17+ messages in thread
From: Chris Wilson @ 2020-01-14 11:38 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-01-14 11:13:06)
> 
> 
> On 13/01/2020 10:44, Chris Wilson wrote:
> > @@ -2473,6 +2475,7 @@ static void execlists_submit_request(struct i915_request *request)
> >       spin_lock_irqsave(&engine->active.lock, flags);
> >   
> >       queue_request(engine, &request->sched, rq_prio(request));
> > +     set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
> 
> Move into queue_request so it is closer to priolist management, just like at other call sites?

Sure. When going through them, it felt natural to treat this as the list
operation. (That's what I was thinking :)
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* [Intel-gfx] [PATCH v2] drm/i915: Use common priotree lists for virtual engine
  2020-01-14 11:13   ` Tvrtko Ursulin
  2020-01-14 11:20     ` Chris Wilson
  2020-01-14 11:38     ` Chris Wilson
@ 2020-01-14 11:55     ` Chris Wilson
  2 siblings, 0 replies; 17+ messages in thread
From: Chris Wilson @ 2020-01-14 11:55 UTC (permalink / raw)
  To: intel-gfx

Since commit 422d7df4f090 ("drm/i915: Replace engine->timeline with a
plain list"), we used the default embedded priotree slot for the virtual
engine request queue, which means we can also use the same solitary slot
with the scheduler. However, the priolist is expected to be guarded by
the engine->active.lock, but this is not true for the virtual engine

v2: Update i915_sched_node.link explanation for current usage where it
is a link on both the queue and on the runlists.

References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c   | 13 ++++++++-----
 drivers/gpu/drm/i915/i915_request.c   |  4 +++-
 drivers/gpu/drm/i915/i915_request.h   | 17 +++++++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.c | 22 ++++++++++------------
 4 files changed, 38 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index e9e42de9a9c3..09cfe7a8ecd0 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 
 			list_move(&rq->sched.link, pl);
+			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
 			active = rq;
 		} else {
 			struct intel_engine_cs *owner = rq->context->engine;
@@ -2452,11 +2454,12 @@ static void execlists_preempt(struct timer_list *timer)
 }
 
 static void queue_request(struct intel_engine_cs *engine,
-			  struct i915_sched_node *node,
-			  int prio)
+			  struct i915_request *rq)
 {
-	GEM_BUG_ON(!list_empty(&node->link));
-	list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
+	GEM_BUG_ON(!list_empty(&rq->sched.link));
+	list_add_tail(&rq->sched.link,
+		      i915_sched_lookup_priolist(engine, rq_prio(rq)));
+	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 }
 
 static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -2492,7 +2495,7 @@ static void execlists_submit_request(struct i915_request *request)
 	/* Will be called from irq-context when using foreign fences. */
 	spin_lock_irqsave(&engine->active.lock, flags);
 
-	queue_request(engine, &request->sched, rq_prio(request));
+	queue_request(engine, request);
 
 	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 	GEM_BUG_ON(list_empty(&request->sched.link));
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f5696698d234..705ade87d166 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
 xfer:	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
 		list_move_tail(&request->sched.link, &engine->active.requests);
+		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+	}
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 031433691a06..a9f0d3c8d8b7 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -70,6 +70,18 @@ enum {
 	 */
 	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
 
+	/*
+	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
+	 *
+	 * Using the scheduler, when a request is ready for execution it is put
+	 * into the priority queue, and removed from the queue when transferred
+	 * to the HW runlists. We want to track its membership within that
+	 * queue so that we can easily check before rescheduling.
+	 *
+	 * See i915_request_in_priority_queue()
+	 */
+	I915_FENCE_FLAG_PQUEUE,
+
 	/*
 	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
 	 *
@@ -361,6 +373,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
 	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 }
 
+static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
+{
+	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
 /**
  * Returns true if seq1 is later than seq2.
  */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index bf87c70bfdd9..db3da81b7f05 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -326,20 +326,18 @@ static void __i915_schedule(struct i915_sched_node *node,
 
 		node->attr.priority = prio;
 
-		if (list_empty(&node->link)) {
-			/*
-			 * If the request is not in the priolist queue because
-			 * it is not yet runnable, then it doesn't contribute
-			 * to our preemption decisions. On the other hand,
-			 * if the request is on the HW, it too is not in the
-			 * queue; but in that case we may still need to reorder
-			 * the inflight requests.
-			 */
+		/*
+		 * Once the request is ready, it will be placed into the
+		 * priority lists and then onto the HW runlist. Before the
+		 * request is ready, it does not contribute to our preemption
+		 * decisions and we can safely ignore it, as it will, and
+		 * any preemption required, be dealt with upon submission.
+		 * See engine->submit_request()
+		 */
+		if (list_empty(&node->link))
 			continue;
-		}
 
-		if (!intel_engine_is_virtual(engine) &&
-		    !i915_request_is_active(node_to_request(node))) {
+		if (i915_request_in_priority_queue(node_to_request(node))) {
 			if (!cache.priolist)
 				cache.priolist =
 					i915_sched_lookup_priolist(engine,
-- 
2.25.0.rc2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 17+ messages in thread

* [Intel-gfx] ✗ Fi.CI.BUILD: failure for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch (rev2)
  2020-01-13 10:44 [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
                   ` (6 preceding siblings ...)
  2020-01-13 17:26 ` [Intel-gfx] [PATCH 1/4] " Tvrtko Ursulin
@ 2020-01-14 12:51 ` Patchwork
  7 siblings, 0 replies; 17+ messages in thread
From: Patchwork @ 2020-01-14 12:51 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch (rev2)
URL   : https://patchwork.freedesktop.org/series/71951/
State : failure

== Summary ==

Applying: drm/i915/gt: Always reset the timeslice after a context switch
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/gt/intel_lrc.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/gt/intel_lrc.c
Applying: drm/i915: Use common priotree lists for virtual engine
Applying: drm/i915/gt: Allow temporary suspension of inflight requests
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/gt/intel_lrc.c
M	drivers/gpu/drm/i915/i915_request.h
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/i915_request.h
Auto-merging drivers/gpu/drm/i915/gt/intel_lrc.c
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/gt/intel_lrc.c
error: Failed to merge in the changes.
hint: Use 'git am --show-current-patch' to see the failed patch
Patch failed at 0003 drm/i915/gt: Allow temporary suspension of inflight requests
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine
  2020-01-14 11:20     ` Chris Wilson
@ 2020-01-14 13:48       ` Tvrtko Ursulin
  2020-01-14 13:53         ` Chris Wilson
  0 siblings, 1 reply; 17+ messages in thread
From: Tvrtko Ursulin @ 2020-01-14 13:48 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 14/01/2020 11:20, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2020-01-14 11:13:06)
>> On 13/01/2020 10:44, Chris Wilson wrote:
>>> Since commit 422d7df4f090 ("drm/i915: Replace engine->timeline with a
>>> plain list"), we used the default embedded priotree slot for the virtual
>>> engine request queue, which means we can also use the same solitary slot
>>> with the scheduler. However, the priolist is expected to be guarded by
>>> the engine->active.lock, but this is not true for the virtual engine
>>>
>>> References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
>>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>> ---
>>>    drivers/gpu/drm/i915/gt/intel_lrc.c   |  3 +++
>>>    drivers/gpu/drm/i915/i915_request.c   |  4 +++-
>>>    drivers/gpu/drm/i915/i915_request.h   | 16 ++++++++++++++++
>>>    drivers/gpu/drm/i915/i915_scheduler.c |  3 +--
>>>    4 files changed, 23 insertions(+), 3 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
>>> index a6ac37dece0a..685659f079a2 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
>>> @@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
>>>                        GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
>>>    
>>>                        list_move(&rq->sched.link, pl);
>>> +                     set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
>>> +
>>>                        active = rq;
>>>                } else {
>>>                        struct intel_engine_cs *owner = rq->context->engine;
>>> @@ -2473,6 +2475,7 @@ static void execlists_submit_request(struct i915_request *request)
>>>        spin_lock_irqsave(&engine->active.lock, flags);
>>>    
>>>        queue_request(engine, &request->sched, rq_prio(request));
>>> +     set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
>>
>> Move into queue_request so it is closer to priolist management, just like at other call sites?
>>
>> Also, these are all under the engine active lock so non-atomic set/clear could be used, no?
> 
> It's not the bit that is important, but if there may be any other
> concurrent access to the dword.
> 
> Thread A:			Thread B:
> __set_bit(0, &rq->flags)	__set_bit(31, &rq->flags)
> 
> *does* cause an issue, speaking from sad experience. So if in doubt, and
> here there's always doubt with preempt-to-busy and background signaling,
> go atomic.
> 
>>
>>>    
>>>        GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
>>>        GEM_BUG_ON(list_empty(&request->sched.link));
>>> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
>>> index be185886e4fc..9ed0d3bc7249 100644
>>> --- a/drivers/gpu/drm/i915/i915_request.c
>>> +++ b/drivers/gpu/drm/i915/i915_request.c
>>> @@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
>>>    xfer:       /* We may be recursing from the signal callback of another i915 fence */
>>>        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
>>>    
>>> -     if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
>>> +     if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
>>>                list_move_tail(&request->sched.link, &engine->active.requests);
>>> +             clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
>>> +     }
>>>    
>>>        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
>>>            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
>>> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
>>> index 031433691a06..f3e50ec989b8 100644
>>> --- a/drivers/gpu/drm/i915/i915_request.h
>>> +++ b/drivers/gpu/drm/i915/i915_request.h
>>> @@ -70,6 +70,17 @@ enum {
>>>         */
>>>        I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
>>>    
>>> +     /*
>>> +      * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
>>> +      *
>>> +      * Using the scheduler, when a request is ready for execution it is put
>>> +      * into the priority queue. We want to track its membership within that
>>> +      * queue so that we can easily check before rescheduling.
>>> +      *
>>> +      * See i915_request_in_priority_queue()
>>> +      */
>>> +     I915_FENCE_FLAG_PQUEUE,
>>> +
>>>        /*
>>>         * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
>>>         *
>>> @@ -361,6 +372,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
>>>        return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
>>>    }
>>>    
>>> +static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
>>> +{
>>> +     return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
>>> +}
>>> +
>>>    /**
>>>     * Returns true if seq1 is later than seq2.
>>>     */
>>> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
>>> index bf87c70bfdd9..4f6e4d6c590a 100644
>>> --- a/drivers/gpu/drm/i915/i915_scheduler.c
>>> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
>>> @@ -338,8 +338,7 @@ static void __i915_schedule(struct i915_sched_node *node,
>>>                        continue;
>>>                }
>>>    
>>> -             if (!intel_engine_is_virtual(engine) &&
>>> -                 !i915_request_is_active(node_to_request(node))) {
>>> +             if (i915_request_in_priority_queue(node_to_request(node))) {
>>
>> Not shown in this diff before this if block we have:
>>
>>          if (list_empty(&node->link)) {
>>                  /*
>>                   * If the request is not in the priolist queue because
>>                   * it is not yet runnable, then it doesn't contribute
>>                   * to our preemption decisions. On the other hand,
>>                   * if the request is on the HW, it too is not in the
>>                   * queue; but in that case we may still need to reorder
>>                   * the inflight requests.
> 
> This second sentence is obsolete, we now use node->link for active.

Which active?  I915_FENCE_FLAG_ACTIVE is set when request is not on the 
priority queue any more, no?

> 
>>                   */
>>                  continue;
>>          }
>>
>> What is the difference between list_empty(&node->link) and !i915_request_in_priority_queue?
> 
> list_empty() -> prior to being ready, we will put into the plist upon
> submit_request()
> 
> Once ready, we only want to fiddle with its place in the priority lists,
> if it is in the plist.

Yes brain fart on the list_empty check.

However I need to go a step back and ask what is the whole point of this 
block:

	if (!intel_engine_is_virtual(engine) &&
	    !i915_request_is_active(node_to_request(node))) {

For active request, they are already on the hw so no need to push them 
up. But I forgot why are virtual ones special? VE is single context so 
in order, or in other words no need to track prio levels for it, is that it?

But then also commit says "Use common priotree lists for virtual 
engine". It already uses that before the patch because of the 
!intel_engine_is_virtual condition. So I am confused.

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine
  2020-01-14 13:48       ` Tvrtko Ursulin
@ 2020-01-14 13:53         ` Chris Wilson
  0 siblings, 0 replies; 17+ messages in thread
From: Chris Wilson @ 2020-01-14 13:53 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-01-14 13:48:11)
> 
> On 14/01/2020 11:20, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2020-01-14 11:13:06)
> >> On 13/01/2020 10:44, Chris Wilson wrote:
> >>> Since commit 422d7df4f090 ("drm/i915: Replace engine->timeline with a
> >>> plain list"), we used the default embedded priotree slot for the virtual
> >>> engine request queue, which means we can also use the same solitary slot
> >>> with the scheduler. However, the priolist is expected to be guarded by
> >>> the engine->active.lock, but this is not true for the virtual engine
> >>>
> >>> References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")
> >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> >>> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> >>> ---
> >>>    drivers/gpu/drm/i915/gt/intel_lrc.c   |  3 +++
> >>>    drivers/gpu/drm/i915/i915_request.c   |  4 +++-
> >>>    drivers/gpu/drm/i915/i915_request.h   | 16 ++++++++++++++++
> >>>    drivers/gpu/drm/i915/i915_scheduler.c |  3 +--
> >>>    4 files changed, 23 insertions(+), 3 deletions(-)
> >>>
> >>> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> >>> index a6ac37dece0a..685659f079a2 100644
> >>> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> >>> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> >>> @@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
> >>>                        GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
> >>>    
> >>>                        list_move(&rq->sched.link, pl);
> >>> +                     set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> >>> +
> >>>                        active = rq;
> >>>                } else {
> >>>                        struct intel_engine_cs *owner = rq->context->engine;
> >>> @@ -2473,6 +2475,7 @@ static void execlists_submit_request(struct i915_request *request)
> >>>        spin_lock_irqsave(&engine->active.lock, flags);
> >>>    
> >>>        queue_request(engine, &request->sched, rq_prio(request));
> >>> +     set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
> >>
> >> Move into queue_request so it is closer to priolist management, just like at other call sites?
> >>
> >> Also, these are all under the engine active lock so non-atomic set/clear could be used, no?
> > 
> > It's not the bit that is important, but if there may be any other
> > concurrent access to the dword.
> > 
> > Thread A:                     Thread B:
> > __set_bit(0, &rq->flags)      __set_bit(31, &rq->flags)
> > 
> > *does* cause an issue, speaking from sad experience. So if in doubt, and
> > here there's always doubt with preempt-to-busy and background signaling,
> > go atomic.
> > 
> >>
> >>>    
> >>>        GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
> >>>        GEM_BUG_ON(list_empty(&request->sched.link));
> >>> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> >>> index be185886e4fc..9ed0d3bc7249 100644
> >>> --- a/drivers/gpu/drm/i915/i915_request.c
> >>> +++ b/drivers/gpu/drm/i915/i915_request.c
> >>> @@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
> >>>    xfer:       /* We may be recursing from the signal callback of another i915 fence */
> >>>        spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
> >>>    
> >>> -     if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
> >>> +     if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
> >>>                list_move_tail(&request->sched.link, &engine->active.requests);
> >>> +             clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
> >>> +     }
> >>>    
> >>>        if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
> >>>            !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
> >>> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> >>> index 031433691a06..f3e50ec989b8 100644
> >>> --- a/drivers/gpu/drm/i915/i915_request.h
> >>> +++ b/drivers/gpu/drm/i915/i915_request.h
> >>> @@ -70,6 +70,17 @@ enum {
> >>>         */
> >>>        I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
> >>>    
> >>> +     /*
> >>> +      * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
> >>> +      *
> >>> +      * Using the scheduler, when a request is ready for execution it is put
> >>> +      * into the priority queue. We want to track its membership within that
> >>> +      * queue so that we can easily check before rescheduling.
> >>> +      *
> >>> +      * See i915_request_in_priority_queue()
> >>> +      */
> >>> +     I915_FENCE_FLAG_PQUEUE,
> >>> +
> >>>        /*
> >>>         * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
> >>>         *
> >>> @@ -361,6 +372,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
> >>>        return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
> >>>    }
> >>>    
> >>> +static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
> >>> +{
> >>> +     return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> >>> +}
> >>> +
> >>>    /**
> >>>     * Returns true if seq1 is later than seq2.
> >>>     */
> >>> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> >>> index bf87c70bfdd9..4f6e4d6c590a 100644
> >>> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> >>> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> >>> @@ -338,8 +338,7 @@ static void __i915_schedule(struct i915_sched_node *node,
> >>>                        continue;
> >>>                }
> >>>    
> >>> -             if (!intel_engine_is_virtual(engine) &&
> >>> -                 !i915_request_is_active(node_to_request(node))) {
> >>> +             if (i915_request_in_priority_queue(node_to_request(node))) {
> >>
> >> Not shown in this diff before this if block we have:
> >>
> >>          if (list_empty(&node->link)) {
> >>                  /*
> >>                   * If the request is not in the priolist queue because
> >>                   * it is not yet runnable, then it doesn't contribute
> >>                   * to our preemption decisions. On the other hand,
> >>                   * if the request is on the HW, it too is not in the
> >>                   * queue; but in that case we may still need to reorder
> >>                   * the inflight requests.
> > 
> > This second sentence is obsolete, we now use node->link for active.
> 
> Which active?  I915_FENCE_FLAG_ACTIVE is set when request is not on the 
> priority queue any more, no?

In the next patch, we introduce an in-between state: ready, but not active
and not in the queue.

> 
> > 
> >>                   */
> >>                  continue;
> >>          }
> >>
> >> What is the difference between list_empty(&node->link) and !i915_request_in_priority_queue?
> > 
> > list_empty() -> prior to being ready, we will put into the plist upon
> > submit_request()
> > 
> > Once ready, we only want to fiddle with its place in the priority lists,
> > if it is in the plist.
> 
> Yes brain fart on the list_empty check.
> 
> However I need to go a step back and ask what is the whole point of this 
> block:
> 
>         if (!intel_engine_is_virtual(engine) &&
>             !i915_request_is_active(node_to_request(node))) {
> 
> For active request, they are already on the hw so no need to push them 
> up. But I forgot why are virtual ones special? VE is single context so 
> in order, or in other words no need to track prio levels for it, is that it?

Yeah, it's because the lookup_prio didn't use to work for virtual as it
didn't have the tree.

> But then also commit says "Use common priotree lists for virtual 
> engine". It already uses that before the patch because of the 
> !intel_engine_is_virtual condition. So I am confused.

First iteration of this patch tweaked it so this block worked on virtual
with no hassle. Then I realised I had a problem with on-hold requests,
so took the patch a step further.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 3/4] drm/i915/gt: Allow temporary suspension of inflight requests
  2020-01-13 10:44 ` [Intel-gfx] [PATCH 3/4] drm/i915/gt: Allow temporary suspension of inflight requests Chris Wilson
@ 2020-01-14 18:33   ` Tvrtko Ursulin
  2020-01-14 18:57     ` Chris Wilson
  0 siblings, 1 reply; 17+ messages in thread
From: Tvrtko Ursulin @ 2020-01-14 18:33 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 13/01/2020 10:44, Chris Wilson wrote:
> In order to support out-of-line error capture, we need to remove the
> active request from HW and put it to one side while a worker compresses
> and stores all the details associated with that request. (As that
> compression may take an arbitrary user-controlled amount of time, we
> want to let the engine continue running on other workloads while the
> hanging request is dumped.) Not only do we need to remove the active
> request, but we also have to remove its context and all requests that
> were dependent on it (both in flight, queued and future submission).
> 
> Finally once the capture is complete, we need to be able to resubmit the
> request and its dependents and allow them to execute.
> 
> References: https://gitlab.freedesktop.org/drm/intel/issues/738
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_engine_cs.c    |   1 +
>   drivers/gpu/drm/i915/gt/intel_engine_types.h |   1 +
>   drivers/gpu/drm/i915/gt/intel_lrc.c          | 109 ++++++++++++++++++-
>   drivers/gpu/drm/i915/gt/selftest_lrc.c       | 103 ++++++++++++++++++
>   drivers/gpu/drm/i915/i915_request.h          |  22 ++++
>   5 files changed, 231 insertions(+), 5 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> index f451ef376548..c296aaf381e7 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> @@ -671,6 +671,7 @@ void
>   intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
>   {
>   	INIT_LIST_HEAD(&engine->active.requests);
> +	INIT_LIST_HEAD(&engine->active.hold);
>   
>   	spin_lock_init(&engine->active.lock);
>   	lockdep_set_subclass(&engine->active.lock, subclass);
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index 00287515e7af..6f4f9912a364 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -295,6 +295,7 @@ struct intel_engine_cs {
>   	struct {
>   		spinlock_t lock;
>   		struct list_head requests;
> +		struct list_head hold;
>   	} active;
>   
>   	struct llist_head barrier_tasks;
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 685659f079a2..e0347e05e577 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -2355,6 +2355,77 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
>   	}
>   }
>   
> +static void __execlists_hold(struct i915_request *rq)
> +{
> +	struct i915_dependency *p;
> +
> +	if (!i915_request_completed(rq)) {
> +		RQ_TRACE(rq, "on hold\n");
> +		if (i915_request_is_active(rq))
> +			__i915_request_unsubmit(rq);
> +		clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> +		list_move_tail(&rq->sched.link, &rq->engine->active.hold);
> +		i915_request_set_hold(rq);
> +	}
> +
> +	list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
> +		struct i915_request *w =
> +			container_of(p->waiter, typeof(*w), sched);
> +
> +		/* Leave semaphores spinning on the other engines */
> +		if (w->engine != rq->engine)
> +			continue;
> +
> +		__execlists_hold(w);

Seems like an easy stack overflow attack due to recursion.

> +	}
> +}
> +
> +__maybe_unused
> +static void execlists_hold(struct intel_engine_cs *engine,
> +			   struct i915_request *rq)
> +{
> +	spin_lock_irq(&engine->active.lock);
> +	__execlists_hold(rq);

Can this get called with rq->engine != engine?

> +	spin_unlock_irq(&engine->active.lock);
> +}
> +
> +static void __execlists_unhold(struct i915_request *rq)
> +{
> +	struct i915_dependency *p;
> +
> +	if (!i915_request_has_hold(rq))
> +		return;
> +
> +	i915_request_clear_hold(rq);
> +	list_move_tail(&rq->sched.link,
> +		       i915_sched_lookup_priolist(rq->engine, rq_prio(rq)));
> +	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> +	RQ_TRACE(rq, "hold release\n");
> +
> +	list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
> +		struct i915_request *w =
> +			container_of(p->waiter, typeof(*w), sched);
> +
> +		if (w->engine != rq->engine)
> +			continue;
> +
> +		__execlists_unhold(w);
> +	}
> +}
> +
> +__maybe_unused
> +static void execlists_unhold(struct intel_engine_cs *engine,
> +			     struct i915_request *rq)
> +{
> +	spin_lock_irq(&engine->active.lock);
> +	__execlists_unhold(rq);
> +	if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
> +		engine->execlists.queue_priority_hint = rq_prio(rq);
> +		tasklet_hi_schedule(&engine->execlists.tasklet);
> +	}
> +	spin_unlock_irq(&engine->active.lock);
> +}
> +
>   static noinline void preempt_reset(struct intel_engine_cs *engine)
>   {
>   	const unsigned int bit = I915_RESET_ENGINE + engine->id;
> @@ -2466,6 +2537,29 @@ static void submit_queue(struct intel_engine_cs *engine,
>   	__submit_queue_imm(engine);
>   }
>   
> +static bool hold_request(const struct i915_request *rq)
> +{
> +	struct i915_dependency *p;
> +
> +	list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
> +		const struct i915_request *s =
> +			container_of(p->signaler, typeof(*s), sched);
> +
> +		if (s->engine != rq->engine)
> +			continue;
> +
> +		return i915_request_has_hold(s);
> +	}
> +
> +	return false;
> +}

I am not sure what question this function is answering. Needs a comment 
or a better name I think.

> +
> +static bool on_hold(const struct intel_engine_cs *engine,
> +		    const struct i915_request *rq)
> +{
> +	return !list_empty(&engine->active.hold) && hold_request(rq);

When is engine != rq->engine?

Secondly, first part of the conditional seems to be asking whether this 
engine has any requests on hold. Second part whether a particular rq has 
a dependency on hold, on the same engine as rq->engine. Needs to be 
explained with a comment.

Is the first check just an optimisation?

> +}
> +
>   static void execlists_submit_request(struct i915_request *request)
>   {
>   	struct intel_engine_cs *engine = request->engine;
> @@ -2474,13 +2568,18 @@ static void execlists_submit_request(struct i915_request *request)
>   	/* Will be called from irq-context when using foreign fences. */
>   	spin_lock_irqsave(&engine->active.lock, flags);
>   
> -	queue_request(engine, &request->sched, rq_prio(request));
> -	set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
> +	if (unlikely(on_hold(engine, request))) {
> +		i915_request_set_hold(request);
> +		list_add_tail(&request->sched.link, &engine->active.hold);

Or explained here.

But in general this part is for when new dependencies of a held request 
become runnable?

Regards,

Tvrtko

> +	} else {
> +		queue_request(engine, &request->sched, rq_prio(request));
> +		set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
>   
> -	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
> -	GEM_BUG_ON(list_empty(&request->sched.link));
> +		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
> +		GEM_BUG_ON(list_empty(&request->sched.link));
>   
> -	submit_queue(engine, request);
> +		submit_queue(engine, request);
> +	}
>   
>   	spin_unlock_irqrestore(&engine->active.lock, flags);
>   }
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index 15cda024e3e4..78501d79c0ea 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -285,6 +285,108 @@ static int live_unlite_preempt(void *arg)
>   	return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
>   }
>   
> +static int live_hold_reset(void *arg)
> +{
> +	struct intel_gt *gt = arg;
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +	struct igt_spinner spin;
> +	int err = 0;
> +
> +	/*
> +	 * In order to support offline error capture for fast preempt reset,
> +	 * we need to decouple the guilty request and ensure that it and its
> +	 * descendents are not executed while the capture is in progress.
> +	 */
> +
> +	if (!intel_has_reset_engine(gt))
> +		return 0;
> +
> +	if (igt_spinner_init(&spin, gt))
> +		return -ENOMEM;
> +
> +	for_each_engine(engine, gt, id) {
> +		struct intel_context *ce;
> +		unsigned long heartbeat;
> +		struct i915_request *rq;
> +
> +		ce = intel_context_create(engine);
> +		if (IS_ERR(ce)) {
> +			err = PTR_ERR(ce);
> +			break;
> +		}
> +
> +		engine_heartbeat_disable(engine, &heartbeat);
> +
> +		rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
> +		if (IS_ERR(rq)) {
> +			err = PTR_ERR(rq);
> +			goto out;
> +		}
> +		i915_request_add(rq);
> +
> +		if (!igt_wait_for_spinner(&spin, rq)) {
> +			intel_gt_set_wedged(gt);
> +			err = -ETIME;
> +			goto out;
> +		}
> +
> +		/* We have our request executing, now remove it and reset */
> +
> +		if (test_and_set_bit(I915_RESET_ENGINE + id,
> +				     &gt->reset.flags)) {
> +			spin_unlock_irq(&engine->active.lock);
> +			intel_gt_set_wedged(gt);
> +			err = -EBUSY;
> +			goto out;
> +		}
> +		tasklet_disable(&engine->execlists.tasklet);
> +
> +		engine->execlists.tasklet.func(engine->execlists.tasklet.data);
> +		GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
> +
> +		execlists_hold(engine, rq);
> +		GEM_BUG_ON(!i915_request_has_hold(rq));
> +
> +		intel_engine_reset(engine, NULL);
> +		GEM_BUG_ON(rq->fence.error != -EIO);
> +
> +		tasklet_enable(&engine->execlists.tasklet);
> +		clear_and_wake_up_bit(I915_RESET_ENGINE + id,
> +				      &gt->reset.flags);
> +
> +		/* Check that we do not resubmit the held request */
> +		i915_request_get(rq);
> +		if (!i915_request_wait(rq, 0, HZ / 5)) {
> +			pr_err("%s: on hold request completed!\n",
> +			       engine->name);
> +			i915_request_put(rq);
> +			err = -EIO;
> +			goto out;
> +		}
> +		GEM_BUG_ON(!i915_request_has_hold(rq));
> +
> +		/* But is resubmitted on release */
> +		execlists_unhold(engine, rq);
> +		if (i915_request_wait(rq, 0, HZ / 5) < 0) {
> +			pr_err("%s: held request did not complete!\n",
> +			       engine->name);
> +			intel_gt_set_wedged(gt);
> +			err = -ETIME;
> +		}
> +		i915_request_put(rq);
> +
> +out:
> +		engine_heartbeat_enable(engine, heartbeat);
> +		intel_context_put(ce);
> +		if (err)
> +			break;
> +	}
> +
> +	igt_spinner_fini(&spin);
> +	return err;
> +}
> +
>   static int
>   emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
>   {
> @@ -3315,6 +3417,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
>   		SUBTEST(live_sanitycheck),
>   		SUBTEST(live_unlite_switch),
>   		SUBTEST(live_unlite_preempt),
> +		SUBTEST(live_hold_reset),
>   		SUBTEST(live_timeslice_preempt),
>   		SUBTEST(live_timeslice_queue),
>   		SUBTEST(live_busywait_preempt),
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index f3e50ec989b8..2da8cfb3f3a1 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -89,6 +89,13 @@ enum {
>   	 */
>   	I915_FENCE_FLAG_SIGNAL,
>   
> +	/*
> +	 * I915_FENCE_FLAG_HOLD - this request is currently on hold
> +	 *
> +	 * This request has been suspended, pending an ongoing investigation.
> +	 */
> +	I915_FENCE_FLAG_HOLD,
> +
>   	/*
>   	 * I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
>   	 *
> @@ -499,6 +506,21 @@ static inline bool i915_request_has_sentinel(const struct i915_request *rq)
>   	return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
>   }
>   
> +static inline bool i915_request_has_hold(const struct i915_request *rq)
> +{
> +	return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
> +}
> +
> +static inline void i915_request_set_hold(struct i915_request *rq)
> +{
> +	set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
> +}
> +
> +static inline void i915_request_clear_hold(struct i915_request *rq)
> +{
> +	clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
> +}
> +
>   static inline struct intel_timeline *
>   i915_request_timeline(struct i915_request *rq)
>   {
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

* Re: [Intel-gfx] [PATCH 3/4] drm/i915/gt: Allow temporary suspension of inflight requests
  2020-01-14 18:33   ` Tvrtko Ursulin
@ 2020-01-14 18:57     ` Chris Wilson
  0 siblings, 0 replies; 17+ messages in thread
From: Chris Wilson @ 2020-01-14 18:57 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2020-01-14 18:33:07)
> 
> On 13/01/2020 10:44, Chris Wilson wrote:
> > In order to support out-of-line error capture, we need to remove the
> > active request from HW and put it to one side while a worker compresses
> > and stores all the details associated with that request. (As that
> > compression may take an arbitrary user-controlled amount of time, we
> > want to let the engine continue running on other workloads while the
> > hanging request is dumped.) Not only do we need to remove the active
> > request, but we also have to remove its context and all requests that
> > were dependent on it (both in flight, queued and future submission).
> > 
> > Finally once the capture is complete, we need to be able to resubmit the
> > request and its dependents and allow them to execute.
> > 
> > References: https://gitlab.freedesktop.org/drm/intel/issues/738
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gt/intel_engine_cs.c    |   1 +
> >   drivers/gpu/drm/i915/gt/intel_engine_types.h |   1 +
> >   drivers/gpu/drm/i915/gt/intel_lrc.c          | 109 ++++++++++++++++++-
> >   drivers/gpu/drm/i915/gt/selftest_lrc.c       | 103 ++++++++++++++++++
> >   drivers/gpu/drm/i915/i915_request.h          |  22 ++++
> >   5 files changed, 231 insertions(+), 5 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> > index f451ef376548..c296aaf381e7 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> > @@ -671,6 +671,7 @@ void
> >   intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
> >   {
> >       INIT_LIST_HEAD(&engine->active.requests);
> > +     INIT_LIST_HEAD(&engine->active.hold);
> >   
> >       spin_lock_init(&engine->active.lock);
> >       lockdep_set_subclass(&engine->active.lock, subclass);
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> > index 00287515e7af..6f4f9912a364 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> > @@ -295,6 +295,7 @@ struct intel_engine_cs {
> >       struct {
> >               spinlock_t lock;
> >               struct list_head requests;
> > +             struct list_head hold;
> >       } active;
> >   
> >       struct llist_head barrier_tasks;
> > diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > index 685659f079a2..e0347e05e577 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > @@ -2355,6 +2355,77 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
> >       }
> >   }
> >   
> > +static void __execlists_hold(struct i915_request *rq)
> > +{
> > +     struct i915_dependency *p;
> > +
> > +     if (!i915_request_completed(rq)) {
> > +             RQ_TRACE(rq, "on hold\n");
> > +             if (i915_request_is_active(rq))
> > +                     __i915_request_unsubmit(rq);
> > +             clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> > +             list_move_tail(&rq->sched.link, &rq->engine->active.hold);
> > +             i915_request_set_hold(rq);
> > +     }
> > +
> > +     list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
> > +             struct i915_request *w =
> > +                     container_of(p->waiter, typeof(*w), sched);
> > +
> > +             /* Leave semaphores spinning on the other engines */
> > +             if (w->engine != rq->engine)
> > +                     continue;
> > +
> > +             __execlists_hold(w);
> 
> Seems like an easy stack overflow attack due to recursion.

Beh, not that easy with 8k stack and a context limit, and finite ring
sizes. We can definitely reduce the stack to a local list_head, just
need to think carefully about list ordering. (We can use the dfs_link as
we can't take the spinlock. One day, we'll find a way to avoid the
global dfs_lock. Just not today.)

> 
> > +     }
> > +}
> > +
> > +__maybe_unused
> > +static void execlists_hold(struct intel_engine_cs *engine,
> > +                        struct i915_request *rq)
> > +{
> > +     spin_lock_irq(&engine->active.lock);
> > +     __execlists_hold(rq);
> 
> Can this get called with rq->engine != engine?

Not intentionally.

> > +     spin_unlock_irq(&engine->active.lock);
> > +}
> > +
> > +static void __execlists_unhold(struct i915_request *rq)
> > +{
> > +     struct i915_dependency *p;
> > +
> > +     if (!i915_request_has_hold(rq))
> > +             return;
> > +
> > +     i915_request_clear_hold(rq);
> > +     list_move_tail(&rq->sched.link,
> > +                    i915_sched_lookup_priolist(rq->engine, rq_prio(rq)));
> > +     set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
> > +     RQ_TRACE(rq, "hold release\n");
> > +
> > +     list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
> > +             struct i915_request *w =
> > +                     container_of(p->waiter, typeof(*w), sched);
> > +
> > +             if (w->engine != rq->engine)
> > +                     continue;
> > +
> > +             __execlists_unhold(w);
> > +     }
> > +}
> > +
> > +__maybe_unused
> > +static void execlists_unhold(struct intel_engine_cs *engine,
> > +                          struct i915_request *rq)
> > +{
> > +     spin_lock_irq(&engine->active.lock);
> > +     __execlists_unhold(rq);
> > +     if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
> > +             engine->execlists.queue_priority_hint = rq_prio(rq);
> > +             tasklet_hi_schedule(&engine->execlists.tasklet);
> > +     }
> > +     spin_unlock_irq(&engine->active.lock);
> > +}
> > +
> >   static noinline void preempt_reset(struct intel_engine_cs *engine)
> >   {
> >       const unsigned int bit = I915_RESET_ENGINE + engine->id;
> > @@ -2466,6 +2537,29 @@ static void submit_queue(struct intel_engine_cs *engine,
> >       __submit_queue_imm(engine);
> >   }
> >   
> > +static bool hold_request(const struct i915_request *rq)
> > +{
> > +     struct i915_dependency *p;
> > +
> > +     list_for_each_entry(p, &rq->sched.signalers_list, signal_link) {
> > +             const struct i915_request *s =
> > +                     container_of(p->signaler, typeof(*s), sched);
> > +
> > +             if (s->engine != rq->engine)
> > +                     continue;
> > +
> > +             return i915_request_has_hold(s);
> > +     }
> > +
> > +     return false;
> > +}
> 
> I am not sure what question this function is answering. Needs a comment 
> or a better name I think.

if (hold_request(rq))

/* Do we want to hold this request? */

Seems redundant. If you want a better verb, suggest one, but not
suspend.

> > +
> > +static bool on_hold(const struct intel_engine_cs *engine,
> > +                 const struct i915_request *rq)
> > +{
> > +     return !list_empty(&engine->active.hold) && hold_request(rq);
> 
> When is engine != rq->engine?

Since we already have engine = rq->engine in a local and this is just a
descriptive helper predicate.

> Secondly, first part of the conditional seems to be asking whether this 
> engine has any requests on hold. Second part whether a particular rq has 
> a dependency on hold, on the same engine as rq->engine. Needs to be 
> explained with a comment.

Really, seems to be self-explanatory. Not much more one can say that
isn't said by the code.

/* Are we holding any requests? Are we holding this request? */
> 
> Is the first check just an optimisation?

Not a small optimisation, but yes, just an optimisation.

> > +}
> > +
> >   static void execlists_submit_request(struct i915_request *request)
> >   {
> >       struct intel_engine_cs *engine = request->engine;
> > @@ -2474,13 +2568,18 @@ static void execlists_submit_request(struct i915_request *request)
> >       /* Will be called from irq-context when using foreign fences. */
> >       spin_lock_irqsave(&engine->active.lock, flags);
> >   
> > -     queue_request(engine, &request->sched, rq_prio(request));
> > -     set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
> > +     if (unlikely(on_hold(engine, request))) {
> > +             i915_request_set_hold(request);
> > +             list_add_tail(&request->sched.link, &engine->active.hold);
> 
> Or explained here.
> 
> But in general this part is for when new dependencies of a held request 
> become runnable?

Yes. Seemed pretty clear :-p
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 17+ messages in thread

end of thread, other threads:[~2020-01-14 18:57 UTC | newest]

Thread overview: 17+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-01-13 10:44 [Intel-gfx] [PATCH 1/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
2020-01-13 10:44 ` [Intel-gfx] [PATCH 2/4] drm/i915: Use common priotree lists for virtual engine Chris Wilson
2020-01-14 11:13   ` Tvrtko Ursulin
2020-01-14 11:20     ` Chris Wilson
2020-01-14 13:48       ` Tvrtko Ursulin
2020-01-14 13:53         ` Chris Wilson
2020-01-14 11:38     ` Chris Wilson
2020-01-14 11:55     ` [Intel-gfx] [PATCH v2] " Chris Wilson
2020-01-13 10:44 ` [Intel-gfx] [PATCH 3/4] drm/i915/gt: Allow temporary suspension of inflight requests Chris Wilson
2020-01-14 18:33   ` Tvrtko Ursulin
2020-01-14 18:57     ` Chris Wilson
2020-01-13 10:44 ` [Intel-gfx] [PATCH 4/4] drm/i915/execlists: Offline error capture Chris Wilson
2020-01-13 11:32 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch Patchwork
2020-01-13 12:59 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2020-01-13 13:00 ` [Intel-gfx] ✗ Fi.CI.BUILD: warning " Patchwork
2020-01-13 17:26 ` [Intel-gfx] [PATCH 1/4] " Tvrtko Ursulin
2020-01-14 12:51 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for series starting with [1/4] drm/i915/gt: Always reset the timeslice after a context switch (rev2) Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.