From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH 08/14] drm/i915: Use common priotree lists for virtual engine
Date: Thu,  9 Jan 2020 08:58:33 +0000
Message-ID: <20200109085839.873553-8-chris@chris-wilson.co.uk>
In-Reply-To: <20200109085839.873553-1-chris@chris-wilson.co.uk>

Since commit 422d7df4f090 ("drm/i915: Replace engine->timeline with a
plain list"), we have used the default embedded priotree slot for the
virtual engine request queue, which means we can also use that same
solitary slot with the scheduler. However, the priolist is expected to
be guarded by the engine->active.lock, and that does not hold for the
virtual engine. To cope, track a request's membership of a priority
queue explicitly with a new I915_FENCE_FLAG_PQUEUE flag: set it as the
request is added to a priolist and clear it as the request is moved
onto the engine's active list, so that the scheduler can simply test
the flag before rescheduling.

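As a purely illustrative sketch (not part of the patch itself; the
helper names below are hypothetical), the intended lifecycle of the
flag is roughly:

	/* Hypothetical helpers for illustration only; the real changes
	 * live in execlists_submit_request() and __i915_request_submit().
	 */
	static void sketch_queue_request(struct intel_engine_cs *engine,
					 struct i915_request *rq)
	{
		lockdep_assert_held(&engine->active.lock);

		queue_request(engine, &rq->sched, rq_prio(rq));
		/* rq now sits in a priolist; record its membership */
		set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
	}

	static void sketch_submit_request(struct intel_engine_cs *engine,
					  struct i915_request *rq)
	{
		if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE,
				      &rq->fence.flags)) {
			list_move_tail(&rq->sched.link,
				       &engine->active.requests);
			/* rq has left the priolist for the active list */
			clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
		}
	}
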
References: 422d7df4f090 ("drm/i915: Replace engine->timeline with a plain list")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c   |  3 +++
 drivers/gpu/drm/i915/i915_request.c   |  4 +++-
 drivers/gpu/drm/i915/i915_request.h   | 16 ++++++++++++++++
 drivers/gpu/drm/i915/i915_scheduler.c |  3 +--
 4 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 08d5087e7582..45f90cfbe2d6 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -985,6 +985,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
 			GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 
 			list_move(&rq->sched.link, pl);
+			set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
 			active = rq;
 		} else {
 			struct intel_engine_cs *owner = rq->context->engine;
@@ -2499,6 +2501,7 @@ static void execlists_submit_request(struct i915_request *request)
 	spin_lock_irqsave(&engine->active.lock, flags);
 
 	queue_request(engine, &request->sched, rq_prio(request));
+	set_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
 
 	GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 	GEM_BUG_ON(list_empty(&request->sched.link));
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index be185886e4fc..9ed0d3bc7249 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -408,8 +408,10 @@ bool __i915_request_submit(struct i915_request *request)
 xfer:	/* We may be recursing from the signal callback of another i915 fence */
 	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
 
-	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+	if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
 		list_move_tail(&request->sched.link, &engine->active.requests);
+		clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+	}
 
 	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
 	    !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 031433691a06..f3e50ec989b8 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -70,6 +70,17 @@ enum {
 	 */
 	I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
 
+	/*
+	 * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
+	 *
+	 * Using the scheduler, when a request is ready for execution it is put
+	 * into the priority queue. We want to track its membership within that
+	 * queue so that we can easily check before rescheduling.
+	 *
+	 * See i915_request_in_priority_queue()
+	 */
+	I915_FENCE_FLAG_PQUEUE,
+
 	/*
 	 * I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
 	 *
@@ -361,6 +372,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
 	return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
 }
 
+static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
+{
+	return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
 /**
  * Returns true if seq1 is later than seq2.
  */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index bf87c70bfdd9..4f6e4d6c590a 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -338,8 +338,7 @@ static void __i915_schedule(struct i915_sched_node *node,
 			continue;
 		}
 
-		if (!intel_engine_is_virtual(engine) &&
-		    !i915_request_is_active(node_to_request(node))) {
+		if (i915_request_in_priority_queue(node_to_request(node))) {
 			if (!cache.priolist)
 				cache.priolist =
 					i915_sched_lookup_priolist(engine,
-- 
2.25.0.rc1
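
For context, a hedged sketch of how the new
i915_request_in_priority_queue() predicate is intended to be used when
rescheduling, mirroring the i915_scheduler.c hunk above with the
surrounding dependency walk and priolist caching elided:

	if (i915_request_in_priority_queue(node_to_request(node))) {
		/* The request still sits in a priolist, so a priority
		 * change must move it to the list for its new priority.
		 */
		if (!cache.priolist)
			cache.priolist =
				i915_sched_lookup_priolist(engine, prio);
		list_move_tail(&node->link, cache.priolist);
	}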

