From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 04/62] drm/i915/gt: Defer schedule_out until after the next dequeue
Date: Wed, 23 Dec 2020 11:10:28 +0000
Message-ID: <20201223111126.3338-4-chris@chris-wilson.co.uk>
In-Reply-To: <20201223111126.3338-1-chris@chris-wilson.co.uk>

Inside schedule_out, we do extra work upon idling the context, such as
updating the runtime, kicking off retires, and kicking virtual engines.
However, if we are processing a series of single requests per context,
we may find ourselves scheduling out the context only to immediately
schedule it back in during the next dequeue. This is extra work we can
avoid if we keep the context marked as inflight across the dequeue.
This becomes more significant later on for minimising virtual engine
misses.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
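A condensed sketch of the resulting control flow in
execlists_submission_tasklet(), simplified from the hunks below (the
preempt-timeout and reset paths are omitted): process_csb() no longer
calls execlists_schedule_out() itself, it only collects the requests
into a local array, which is flushed by post_process_csb() after the
next dequeue has had a chance to reuse the still-inflight contexts.

	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
	struct i915_request **inactive;

	rcu_read_lock();
	/* Collect completed/preempted requests; do not schedule them out yet */
	inactive = process_csb(engine, post);

	if (!engine->execlists.pending[0])
		execlists_dequeue_irq(engine); /* may re-pin the same contexts */

	/* Deferred execlists_schedule_out() for everything collected above */
	post_process_csb(post, inactive);
	rcu_read_unlock();
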
 drivers/gpu/drm/i915/gt/intel_context_types.h |   8 +-
 .../drm/i915/gt/intel_execlists_submission.c  | 174 +++++++++++-------
 2 files changed, 115 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 52fa9c132746..f7a0fb6f3a2e 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -58,8 +58,12 @@ struct intel_context {
 
 	struct intel_engine_cs *engine;
 	struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
+#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
+#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
+#define intel_context_inflight(ce) \
+	__intel_context_inflight(READ_ONCE((ce)->inflight))
+#define intel_context_inflight_count(ce) \
+	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
 
 	struct i915_address_space *vm;
 	struct i915_gem_context __rcu *gem_context;
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index fbd0572ed834..b5f256be73dd 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -205,7 +205,7 @@ static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
 
 static void mark_eio(struct i915_request *rq)
 {
-	if (i915_request_completed(rq))
+	if (__i915_request_is_complete(rq))
 		return;
 
 	GEM_BUG_ON(i915_request_signaled(rq));
@@ -221,7 +221,7 @@ active_request(const struct intel_timeline * const tl, struct i915_request *rq)
 
 	rcu_read_lock();
 	list_for_each_entry_continue_reverse(rq, &tl->requests, link) {
-		if (i915_request_completed(rq))
+		if (__i915_request_is_complete(rq))
 			break;
 
 		active = rq;
@@ -381,7 +381,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	list_for_each_entry_safe_reverse(rq, rn,
 					 &engine->active.requests,
 					 sched.link) {
-		if (i915_request_completed(rq)) {
+		if (__i915_request_is_complete(rq)) {
 			list_del_init(&rq->sched.link);
 			continue;
 		}
@@ -506,7 +506,7 @@ static void reset_active(struct i915_request *rq,
 		     rq->fence.context, rq->fence.seqno);
 
 	/* On resubmission of the active request, payload will be scrubbed */
-	if (i915_request_completed(rq))
+	if (__i915_request_is_complete(rq))
 		head = rq->tail;
 	else
 		head = active_request(ce->timeline, rq)->head;
@@ -607,7 +607,7 @@ __execlists_schedule_out(struct i915_request *rq,
 	 * idle and we want to re-enter powersaving.
 	 */
 	if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
-	    i915_request_completed(rq))
+	    __i915_request_is_complete(rq))
 		intel_engine_add_retire(engine, ce->timeline);
 
 	ccid >>= GEN11_SW_CTX_ID_SHIFT - 32;
@@ -728,8 +728,8 @@ dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq)
 		 prefix,
 		 rq->context->lrc.ccid,
 		 rq->fence.context, rq->fence.seqno,
-		 i915_request_completed(rq) ? "!" :
-		 i915_request_started(rq) ? "*" :
+		 __i915_request_is_complete(rq) ? "!" :
+		 __i915_request_has_started(rq) ? "*" :
 		 "",
 		 rq_prio(rq));
 
@@ -831,7 +831,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 		if (!spin_trylock_irqsave(&rq->lock, flags))
 			continue;
 
-		if (i915_request_completed(rq))
+		if (__i915_request_is_complete(rq))
 			goto unlock;
 
 		if (i915_active_is_idle(&ce->active) &&
@@ -944,7 +944,7 @@ static bool can_merge_rq(const struct i915_request *prev,
 	 * contexts, despite the best efforts of preempt-to-busy to confuse
 	 * us.
 	 */
-	if (i915_request_completed(next))
+	if (__i915_request_is_complete(next))
 		return true;
 
 	if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
@@ -1065,8 +1065,8 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
 
 			/* No waiter should start before its signaler */
 			GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) &&
-				   i915_request_started(w) &&
-				   !i915_request_completed(rq));
+				   __i915_request_has_started(w) &&
+				   !__i915_request_is_complete(rq));
 
 			GEM_BUG_ON(i915_request_is_active(w));
 			if (!i915_request_is_ready(w))
@@ -1159,7 +1159,7 @@ static unsigned long active_timeslice(const struct intel_engine_cs *engine)
 	const struct intel_engine_execlists *execlists = &engine->execlists;
 	const struct i915_request *rq = *execlists->active;
 
-	if (!rq || i915_request_completed(rq))
+	if (!rq || __i915_request_is_complete(rq))
 		return 0;
 
 	if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
@@ -1232,19 +1232,6 @@ static void set_preempt_timeout(struct intel_engine_cs *engine,
 		     active_preempt_timeout(engine, rq));
 }
 
-static inline void clear_ports(struct i915_request **ports, int count)
-{
-	memset_p((void **)ports, NULL, count);
-}
-
-static inline void
-copy_ports(struct i915_request **dst, struct i915_request **src, int count)
-{
-	/* A memcpy_p() would be very useful here! */
-	while (count--)
-		WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
-}
-
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1299,7 +1286,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 */
 
 	if (last) {
-		if (i915_request_completed(last)) {
+		if (__i915_request_is_complete(last)) {
 			goto check_secondary;
 		} else if (need_preempt(engine, last)) {
 			ENGINE_TRACE(engine,
@@ -1409,8 +1396,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			     "virtual rq=%llx:%lld%s, new engine? %s\n",
 			     rq->fence.context,
 			     rq->fence.seqno,
-			     i915_request_completed(rq) ? "!" :
-			     i915_request_started(rq) ? "*" :
+			     __i915_request_is_complete(rq) ? "!" :
+			     __i915_request_has_started(rq) ? "*" :
 			     "",
 			     yesno(engine != ve->siblings[0]));
 
@@ -1593,18 +1580,32 @@ static void execlists_dequeue_irq(struct intel_engine_cs *engine)
 	local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */
 }
 
-static void
-cancel_port_requests(struct intel_engine_execlists * const execlists)
+static inline void clear_ports(struct i915_request **ports, int count)
+{
+	memset_p((void **)ports, NULL, count);
+}
+
+static inline void
+copy_ports(struct i915_request **dst, struct i915_request **src, int count)
+{
+	/* A memcpy_p() would be very useful here! */
+	while (count--)
+		WRITE_ONCE(*dst++, *src++); /* avoid write tearing */
+}
+
+static struct i915_request **
+cancel_port_requests(struct intel_engine_execlists * const execlists,
+		     struct i915_request **inactive)
 {
 	struct i915_request * const *port;
 
 	for (port = execlists->pending; *port; port++)
-		execlists_schedule_out(*port);
+		*inactive++ = *port;
 	clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending));
 
 	/* Mark the end of active before we overwrite *active */
 	for (port = xchg(&execlists->active, execlists->pending); *port; port++)
-		execlists_schedule_out(*port);
+		*inactive++ = *port;
 	clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
 
 	smp_wmb(); /* complete the seqlock for execlists_active() */
@@ -1614,6 +1615,8 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
 	GEM_BUG_ON(execlists->pending[0]);
 	cancel_timer(&execlists->timer);
 	cancel_timer(&execlists->preempt);
+
+	return inactive;
 }
 
 static inline void
@@ -1741,7 +1744,8 @@ csb_read(const struct intel_engine_cs *engine, u64 * const csb)
 	return entry;
 }
 
-static void process_csb(struct intel_engine_cs *engine)
+static struct i915_request **
+process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	u64 * const buf = execlists->csb_status;
@@ -1770,7 +1774,7 @@ static void process_csb(struct intel_engine_cs *engine)
 	head = execlists->csb_head;
 	tail = READ_ONCE(*execlists->csb_write);
 	if (unlikely(head == tail))
-		return;
+		return inactive;
 
 	/*
 	 * We will consume all events from HW, or at least pretend to.
@@ -1850,7 +1854,7 @@ static void process_csb(struct intel_engine_cs *engine)
 			/* cancel old inflight, prepare for switch */
 			trace_ports(execlists, "preempted", old);
 			while (*old)
-				execlists_schedule_out(*old++);
+				*inactive++ = *old++;
 
 			/* switch pending to inflight */
 			GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
@@ -1884,7 +1888,7 @@ static void process_csb(struct intel_engine_cs *engine)
 			 * itself...
 			 */
 			if (GEM_SHOW_DEBUG() &&
-			    !i915_request_completed(*execlists->active)) {
+			    !__i915_request_is_complete(*execlists->active)) {
 				struct i915_request *rq = *execlists->active;
 				const u32 *regs __maybe_unused =
 					rq->context->lrc_reg_state;
@@ -1912,7 +1916,7 @@ static void process_csb(struct intel_engine_cs *engine)
 					     regs[CTX_RING_TAIL]);
 			}
 
-			execlists_schedule_out(*execlists->active++);
+			*inactive++ = *execlists->active++;
 
 			GEM_BUG_ON(execlists->active - execlists->inflight >
 				   execlists_num_ports(execlists));
@@ -1933,6 +1937,15 @@ static void process_csb(struct intel_engine_cs *engine)
 	 * invalidation before.
 	 */
 	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+
+	return inactive;
+}
+
+static void post_process_csb(struct i915_request **port,
+			     struct i915_request **last)
+{
+	while (port != last)
+		execlists_schedule_out(*port++);
 }
 
 static void __execlists_hold(struct i915_request *rq)
@@ -1961,7 +1974,7 @@ static void __execlists_hold(struct i915_request *rq)
 			if (!i915_request_is_ready(w))
 				continue;
 
-			if (i915_request_completed(w))
+			if (__i915_request_is_complete(w))
 				continue;
 
 			if (i915_request_on_hold(w))
@@ -1982,7 +1995,7 @@ static bool execlists_hold(struct intel_engine_cs *engine,
 
 	spin_lock_irq(&engine->active.lock);
 
-	if (i915_request_completed(rq)) { /* too late! */
+	if (__i915_request_is_complete(rq)) { /* too late! */
 		rq = NULL;
 		goto unlock;
 	}
@@ -2208,8 +2221,8 @@ active_context(struct intel_engine_cs *engine, u32 ccid)
 	for (port = el->active; (rq = *port); port++) {
 		if (rq->context->lrc.ccid == ccid) {
 			ENGINE_TRACE(engine,
-				     "ccid found at active:%zd\n",
-				     port - el->active);
+				     "ccid:%x found at active:%zd\n",
+				     ccid, port - el->active);
 			return rq;
 		}
 	}
@@ -2217,8 +2230,8 @@ active_context(struct intel_engine_cs *engine, u32 ccid)
 	for (port = el->pending; (rq = *port); port++) {
 		if (rq->context->lrc.ccid == ccid) {
 			ENGINE_TRACE(engine,
-				     "ccid found at pending:%zd\n",
-				     port - el->pending);
+				     "ccid:%x found at pending:%zd\n",
+				     ccid, port - el->pending);
 			return rq;
 		}
 	}
@@ -2336,8 +2349,12 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
 static void execlists_submission_tasklet(unsigned long data)
 {
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+	struct i915_request **inactive;
 
-	process_csb(engine);
+	rcu_read_lock();
+	inactive = process_csb(engine, post);
+	GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
 
 	if (unlikely(preempt_timeout(engine))) {
 		cancel_timer(&engine->execlists.preempt);
@@ -2363,6 +2380,9 @@ static void execlists_submission_tasklet(unsigned long data)
 
 	if (!engine->execlists.pending[0])
 		execlists_dequeue_irq(engine);
+
+	post_process_csb(post, inactive);
+	rcu_read_unlock();
 }
 
 static void __execlists_kick(struct intel_engine_execlists *execlists)
@@ -2735,8 +2755,6 @@ static void enable_execlists(struct intel_engine_cs *engine)
 	ENGINE_POSTING_READ(engine, RING_HWS_PGA);
 
 	enable_error_interrupt(engine);
-
-	engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
 }
 
 static bool unexpected_starting_state(struct intel_engine_cs *engine)
@@ -2806,22 +2824,30 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
 	engine->execlists.reset_ccid = active_ccid(engine);
 }
 
-static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
+static struct i915_request **
+reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct intel_context *ce;
-	struct i915_request *rq;
-	u32 head;
 
 	mb(); /* paranoia: read the CSB pointers from after the reset */
 	clflush(execlists->csb_write);
 	mb();
 
-	process_csb(engine); /* drain preemption events */
+	inactive = process_csb(engine, inactive); /* drain preemption events */
 
 	/* Following the reset, we need to reload the CSB read/write pointers */
 	reset_csb_pointers(engine);
 
+	return inactive;
+}
+
+static void
+execlists_reset_active(struct intel_engine_cs *engine, bool stalled)
+{
+	struct intel_context *ce;
+	struct i915_request *rq;
+	u32 head;
+
 	/*
 	 * Save the currently executing context, even if we completed
 	 * its request, it was still running at the time of the
@@ -2829,12 +2855,12 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 */
 	rq = active_context(engine, engine->execlists.reset_ccid);
 	if (!rq)
-		goto unwind;
+		return;
 
 	ce = rq->context;
 	GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
 
-	if (i915_request_completed(rq)) {
+	if (__i915_request_is_complete(rq)) {
 		/* Idle context; tidy up the ring so we can restart afresh */
 		head = intel_ring_wrap(ce->ring, rq->tail);
 		goto out_replay;
@@ -2862,7 +2888,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 	 * Otherwise, if we have not started yet, the request should replay
 	 * perfectly and we do not need to flag the result as being erroneous.
 	 */
-	if (!i915_request_started(rq))
+	if (!__i915_request_has_started(rq))
 		goto out_replay;
 
 	/*
@@ -2891,11 +2917,22 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
 		     head, ce->ring->tail);
 	lrc_reset_regs(ce, engine);
 	ce->lrc.lrca = lrc_update_regs(ce, engine, head);
+}
 
-unwind:
-	/* Push back any incomplete requests for replay after the reset. */
-	cancel_port_requests(execlists);
-	__unwind_incomplete_requests(engine);
+static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
+{
+	struct intel_engine_execlists * const execlists = &engine->execlists;
+	struct i915_request *post[2 * EXECLIST_MAX_PORTS];
+	struct i915_request **inactive;
+
+	rcu_read_lock();
+	inactive = reset_csb(engine, post);
+
+	execlists_reset_active(engine, true);
+
+	inactive = cancel_port_requests(execlists, inactive);
+	post_process_csb(post, inactive);
+	rcu_read_unlock();
 }
 
 static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
@@ -2904,11 +2941,15 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
 
 	ENGINE_TRACE(engine, "\n");
 
-	spin_lock_irqsave(&engine->active.lock, flags);
-
-	__execlists_reset(engine, stalled);
+	/* Process the csb, find the guilty context and throw away */
+	execlists_reset_csb(engine, stalled);
 
+	/* Push back any incomplete requests for replay after the reset. */
+	rcu_read_lock();
+	spin_lock_irqsave(&engine->active.lock, flags);
+	__unwind_incomplete_requests(engine);
 	spin_unlock_irqrestore(&engine->active.lock, flags);
+	rcu_read_unlock();
 }
 
 static void nop_submission_tasklet(unsigned long data)
@@ -2942,9 +2983,10 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	 * submission's irq state, we also wish to remind ourselves that
 	 * it is irq state.)
 	 */
-	spin_lock_irqsave(&engine->active.lock, flags);
+	execlists_reset_csb(engine, true);
 
-	__execlists_reset(engine, true);
+	rcu_read_lock();
+	spin_lock_irqsave(&engine->active.lock, flags);
 
 	/* Mark all executing requests as skipped. */
 	list_for_each_entry(rq, &engine->active.requests, sched.link)
@@ -3000,6 +3042,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	execlists->tasklet.func = nop_submission_tasklet;
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
+	rcu_read_unlock();
 }
 
 static void execlists_reset_finish(struct intel_engine_cs *engine)
@@ -3211,6 +3254,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 	else
 		execlists->csb_size = GEN11_CSB_ENTRIES;
 
+	engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0);
 	if (INTEL_GEN(engine->i915) >= 11) {
 		execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32);
 		execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32);
@@ -3515,12 +3559,12 @@ static void virtual_submit_request(struct i915_request *rq)
 
 	old = ve->request;
 	if (old) { /* background completion event from preempt-to-busy */
-		GEM_BUG_ON(!i915_request_completed(old));
+		GEM_BUG_ON(!__i915_request_is_complete(old));
 		__i915_request_submit(old);
 		i915_request_put(old);
 	}
 
-	if (i915_request_completed(rq)) {
+	if (__i915_request_is_complete(rq)) {
 		__i915_request_submit(rq);
 
 		ve->base.execlists.queue_priority_hint = INT_MIN;
-- 
2.20.1

Thread overview: 71+ messages
2020-12-23 11:10 [Intel-gfx] [PATCH 01/62] drm/i915/gt: Replace direct submit with direct call to tasklet Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 02/62] drm/i915/gt: Use virtual_engine during execlists_dequeue Chris Wilson
2020-12-24 12:07   ` Matthew Auld
2020-12-23 11:10 ` [Intel-gfx] [PATCH 03/62] drm/i915/gt: Decouple inflight virtual engines Chris Wilson
2020-12-24 12:57   ` Matthew Auld
2020-12-23 11:10 ` Chris Wilson [this message]
2020-12-24 13:28   ` [Intel-gfx] [PATCH 04/62] drm/i915/gt: Defer schedule_out until after the next dequeue Matthew Auld
2020-12-23 11:10 ` [Intel-gfx] [PATCH 05/62] drm/i915/gt: Remove virtual breadcrumb before transfer Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 06/62] drm/i915/gt: Shrink the critical section for irq signaling Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 07/62] drm/i915/gt: Resubmit the virtual engine on schedule-out Chris Wilson
2020-12-24 13:49   ` Matthew Auld
2020-12-23 11:10 ` [Intel-gfx] [PATCH 08/62] drm/i915/gt: Simplify virtual engine handling for execlists_hold() Chris Wilson
2020-12-24 13:52   ` Matthew Auld
2020-12-23 11:10 ` [Intel-gfx] [PATCH 09/62] drm/i915/gt: ce->inflight updates are now serialised Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 10/62] drm/i915/selftests: Confirm CS_TIMESTAMP / CTX_TIMESTAMP share a clock Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 11/62] drm/i915/gt: Consolidate the CS timestamp clocks Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 12/62] drm/i915: Drop i915_request.lock serialisation around await_start Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 13/62] drm/i915: Drop i915_request.lock requirement for intel_rps_boost() Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 14/62] drm/i915/gem: Reduce ctx->engine_mutex for reading the clone source Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 15/62] drm/i915/gem: Reduce ctx->engines_mutex for get_engines() Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 16/62] drm/i915: Reduce test_and_set_bit to set_bit in i915_request_submit() Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 17/62] drm/i915/gt: Drop atomic for engine->fw_active tracking Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 18/62] drm/i915/gt: Extract busy-stats for ring-scheduler Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 19/62] drm/i915/gt: Convert stats.active to plain unsigned int Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 20/62] drm/i915/gt: Refactor heartbeat request construction and submission Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 21/62] drm/i915/gt: Do not suspend bonded requests if one hangs Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 22/62] drm/i915/gt: Remove timeslice suppression Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 23/62] drm/i915: Strip out internal priorities Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 24/62] drm/i915: Remove I915_USER_PRIORITY_SHIFT Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 25/62] drm/i915: Replace engine->schedule() with a known request operation Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 26/62] drm/i915: Teach the i915_dependency to use a double-lock Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 27/62] drm/i915: Restructure priority inheritance Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 28/62] drm/i915/selftests: Measure set-priority duration Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 29/62] drm/i915/selftests: Exercise priority inheritance around an engine loop Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 30/62] drm/i915: Improve DFS for priority inheritance Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 31/62] drm/i915: Extract request submission from execlists Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 32/62] drm/i915: Extract request rewinding from execlists Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 33/62] drm/i915: Extract request suspension from the execlists backend Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 34/62] drm/i915: Extract the ability to defer and rerun a request later Chris Wilson
2020-12-23 11:10 ` [Intel-gfx] [PATCH 35/62] drm/i915: Fix the iterative dfs for defering requests Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 36/62] drm/i915: Move common active lists from engine to i915_scheduler Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 37/62] drm/i915: Move scheduler queue Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 38/62] drm/i915: Move tasklet from execlists to sched Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 39/62] drm/i915: Replace priolist rbtree with a skiplist Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 40/62] drm/i915: Wrap cmpxchg64 with try_cmpxchg64() helper Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 41/62] drm/i915: Fair low-latency scheduling Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 42/62] drm/i915/gt: Specify a deadline for the heartbeat Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 43/62] drm/i915: Extend the priority boosting for the display with a deadline Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 44/62] drm/i915/gt: Skip over completed active execlists, again Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 45/62] drm/i915/gt: Support virtual engine queues Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 46/62] drm/i915: Move saturated workload detection back to the context Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 47/62] drm/i915: Bump default timeslicing quantum to 5ms Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 48/62] drm/i915/gt: Wrap intel_timeline.has_initial_breadcrumb Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 49/62] drm/i915/gt: Track timeline GGTT offset separately from subpage offset Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 50/62] drm/i915/gt: Add timeline "mode" Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 51/62] drm/i915/gt: Use indices for writing into relative timelines Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 52/62] drm/i915/selftests: Exercise relative timeline modes Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 53/62] drm/i915/gt: Use ppHWSP for unshared non-semaphore related timelines Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 54/62] Restore "drm/i915: drop engine_pin/unpin_breadcrumbs_irq" Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 55/62] drm/i915/gt: Couple tasklet scheduling for all CS interrupts Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 56/62] drm/i915/gt: Support creation of 'internal' rings Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 57/62] drm/i915/gt: Use client timeline address for seqno writes Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 58/62] drm/i915/gt: Infrastructure for ring scheduling Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 59/62] drm/i915/gt: Enable busy-stats for ring-scheduler Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 60/62] drm/i915/gt: Implement ring scheduler for gen6/7 Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 61/62] drm/i915/gt: Enable ring scheduling for gen6/7 Chris Wilson
2020-12-23 11:11 ` [Intel-gfx] [PATCH 62/62] drm/i915: Mark up protected uses of 'i915_request_completed' Chris Wilson
2020-12-23 14:20 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/62] drm/i915/gt: Replace direct submit with direct call to tasklet Patchwork
2020-12-23 14:22 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [01/62] drm/i915/gt: Replace direct submit with direct call to tasklet Patchwork
2020-12-23 14:50 ` [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [01/62] drm/i915/gt: Replace direct submit with direct call to tasklet Patchwork
2020-12-23 17:57 ` [Intel-gfx] ✓ Fi.CI.IGT: success for series starting with [01/62] drm/i915/gt: Replace direct submit with direct call to tasklet Patchwork
