From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 20/42] drm/i915/gt: Resubmit the virtual engine on schedule-out
Date: Sun,  2 Aug 2020 17:43:50 +0100
Message-ID: <20200802164412.2738-21-chris@chris-wilson.co.uk>
In-Reply-To: <20200802164412.2738-1-chris@chris-wilson.co.uk>

Having recognised that we do not change the sibling until we schedule
out, we can defer the decision to resubmit the virtual engine from the
unwind of the active queue to the schedule-out of the virtual context.
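
In outline (a condensed excerpt of the kick_siblings() hunk below),
the decision now lives in the schedule-out path:

	if (i915_request_in_priority_queue(rq) &&
	    rq->execution_mask != rq->engine->mask)
		resubmit_virtual_request(rq, ve);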

By keeping the unwind order intact on the local engine, we preserve
data-dependency ordering while performing the preempt-to-busy pass, up
until we have determined the new ELSP. This means that if we try to
timeslice between a virtual request and a data-dependent ordinary
request, the pair maintain their relative ordering, and so we avoid the
resubmission, cancelling the timeslice until the situation changes.
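
With the virtual/native split removed, __unwind_incomplete_requests()
(first hunk below) returns every incomplete request, virtual or not,
to the local priority list in submission order:

	list_move(&rq->sched.link, pl);
	set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);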

The dilemma, though, is that we may then end up in a situation where
the 'demotion' of the virtual request to an ordinary request in the
engine queue fills the ELSP[] with virtual requests instead of
spreading the load across the engines. To compensate, we mark each
virtual request and refuse to resubmit a virtual request into the
secondary ELSP slots, forcing subsequent virtual requests to be
scheduled out after timeslicing. By delaying the decision until we
schedule out, we avoid unnecessary resubmission.
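
The dequeue side enforces this by refusing to coalesce a virtual
request into the secondary ports (see the execlists_dequeue() hunk
below):

	if (rq->execution_mask != engine->mask)
		goto done;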

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c    | 124 +++++++++++++++----------
 drivers/gpu/drm/i915/gt/selftest_lrc.c |   2 +-
 2 files changed, 78 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a9ae0638e117..d1a45d5e4225 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1111,39 +1111,23 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 
 		__i915_request_unsubmit(rq);
 
-		/*
-		 * Push the request back into the queue for later resubmission.
-		 * If this request is not native to this physical engine (i.e.
-		 * it came from a virtual source), push it back onto the virtual
-		 * engine so that it can be moved across onto another physical
-		 * engine as load dictates.
-		 */
-		if (likely(rq->execution_mask == engine->mask)) {
-			GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
-			if (rq_prio(rq) != prio) {
-				prio = rq_prio(rq);
-				pl = i915_sched_lookup_priolist(engine, prio);
-			}
-			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
-
-			list_move(&rq->sched.link, pl);
-			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
+		if (rq_prio(rq) != prio) {
+			prio = rq_prio(rq);
+			pl = i915_sched_lookup_priolist(engine, prio);
+		}
+		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 
-			/* Check in case we rollback so far we wrap [size/2] */
-			if (intel_ring_direction(rq->ring,
-						 intel_ring_wrap(rq->ring,
-								 rq->tail),
-						 rq->ring->tail) > 0)
-				rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
+		list_move(&rq->sched.link, pl);
+		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
 
-			active = rq;
-		} else {
-			struct intel_engine_cs *owner = rq->context->engine;
+		/* Check in case we rollback so far we wrap [size/2] */
+		if (intel_ring_direction(rq->ring,
+					 intel_ring_wrap(rq->ring, rq->tail),
+					 rq->ring->tail) > 0)
+			rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE;
 
-			WRITE_ONCE(rq->engine, owner);
-			owner->submit_request(rq);
-			active = NULL;
-		}
+		active = rq;
 	}
 
 	return active;
@@ -1387,12 +1371,37 @@ static inline void execlists_schedule_in(struct i915_request *rq, int idx)
 	GEM_BUG_ON(intel_context_inflight(ce) != rq->engine);
 }
 
+static void
+resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
+{
+	struct intel_engine_cs *engine = rq->engine;
+	unsigned long flags;
+
+	spin_lock_irqsave(&engine->active.lock, flags);
+
+	clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+	WRITE_ONCE(rq->engine, &ve->base);
+	ve->base.submit_request(rq);
+
+	spin_unlock_irqrestore(&engine->active.lock, flags);
+}
+
 static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 {
 	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
 
 	if (READ_ONCE(ve->request))
 		tasklet_hi_schedule(&ve->base.execlists.tasklet);
+
+	/*
+	 * This engine is now too busy to run this virtual request, so
+	 * see if we can find an alternative engine for it to execute on.
+	 * Once a request has become bonded to this engine, we treat it the
+	 * same as any other native request.
+	 */
+	if (i915_request_in_priority_queue(rq) &&
+	    rq->execution_mask != rq->engine->mask)
+		resubmit_virtual_request(rq, ve);
 }
 
 static inline void
@@ -1635,6 +1644,20 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 		}
 		sentinel = i915_request_has_sentinel(rq);
 
+		/*
+		 * We want virtual requests to only be in the first slot so
+		 * that they are never stuck behind a hog and can be immediately
+		 * transferred onto the next idle engine.
+		 */
+		if (rq->execution_mask != engine->mask &&
+		    port != execlists->pending) {
+			GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n",
+				      engine->name,
+				      ce->timeline->fence_context,
+				      port - execlists->pending);
+			return false;
+		}
+
 		/* Hold tightly onto the lock to prevent concurrent retires! */
 		if (!spin_trylock_irqsave(&rq->lock, flags))
 			continue;
@@ -2310,6 +2333,15 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				if (i915_request_has_sentinel(last))
 					goto done;
 
+				/*
+				 * We avoid submitting virtual requests into
+				 * the secondary ports so that we can migrate
+				 * the request immediately to another engine
+				 * rather than wait for the primary request.
+				 */
+				if (rq->execution_mask != engine->mask)
+					goto done;
+
 				/*
 				 * If GVT overrides us we only ever submit
 				 * port[0], leaving port[1] empty. Note that we
@@ -5577,7 +5609,6 @@ static void virtual_submission_tasklet(unsigned long data)
 static void virtual_submit_request(struct i915_request *rq)
 {
 	struct virtual_engine *ve = to_virtual_engine(rq->engine);
-	struct i915_request *old;
 	unsigned long flags;
 
 	ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
@@ -5588,28 +5619,27 @@ static void virtual_submit_request(struct i915_request *rq)
 
 	spin_lock_irqsave(&ve->base.active.lock, flags);
 
-	old = ve->request;
-	if (old) { /* background completion event from preempt-to-busy */
-		GEM_BUG_ON(!i915_request_completed(old));
-		__i915_request_submit(old);
-		i915_request_put(old);
-	}
-
+	/* By the time we resubmit a request, it may be completed */
 	if (i915_request_completed(rq)) {
 		__i915_request_submit(rq);
+		goto unlock;
+	}
 
-		ve->base.execlists.queue_priority_hint = INT_MIN;
-		ve->request = NULL;
-	} else {
-		ve->base.execlists.queue_priority_hint = rq_prio(rq);
-		ve->request = i915_request_get(rq);
+	if (ve->request) { /* background completion from preempt-to-busy */
+		GEM_BUG_ON(!i915_request_completed(ve->request));
+		__i915_request_submit(ve->request);
+		i915_request_put(ve->request);
+	}
 
-		GEM_BUG_ON(!list_empty(virtual_queue(ve)));
-		list_move_tail(&rq->sched.link, virtual_queue(ve));
+	ve->base.execlists.queue_priority_hint = rq_prio(rq);
+	ve->request = i915_request_get(rq);
 
-		tasklet_hi_schedule(&ve->base.execlists.tasklet);
-	}
+	GEM_BUG_ON(!list_empty(virtual_queue(ve)));
+	list_move_tail(&rq->sched.link, virtual_queue(ve));
 
+	tasklet_hi_schedule(&ve->base.execlists.tasklet);
+
+unlock:
 	spin_unlock_irqrestore(&ve->base.active.lock, flags);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 3686ad3e32e6..79c4a2d80770 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -4589,7 +4589,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
 	spin_lock_irq(&engine->active.lock);
 	__unwind_incomplete_requests(engine);
 	spin_unlock_irq(&engine->active.lock);
-	GEM_BUG_ON(rq->engine != ve->engine);
+	GEM_BUG_ON(rq->engine != engine);
 
 	/* Reset the engine while keeping our active request on hold */
 	execlists_hold(engine, rq);
-- 
2.20.1
