From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 14/40] drm/i915: Combine multiple internal plists into the same i915_priolist bucket
Date: Wed, 19 Sep 2018 20:55:18 +0100
Message-ID: <20180919195544.1511-14-chris@chris-wilson.co.uk>
In-Reply-To: <20180919195544.1511-1-chris@chris-wilson.co.uk>

As we are about to allow ourselves to slightly bump the user priority
into a few different sublevels, pack those internal priority lists
into the same i915_priolist to keep the rbtree compact and avoid having
to allocate the default user priority even after the internal bumping.
The downside to having a requests[] array rather than a node per active
list is that we then have to walk over the empty higher priority lists.
To compensate, we track the active buckets and use a small bitmap to
skip over any inactive ones.
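
For illustration, a rough self-contained sketch of the scheme (made-up
names and bucket count, not the driver code): one rbtree node carries a
small array of request lists plus a bitmap recording which slots are
non-empty, and the consuming walk takes the lowest set bit via ffs() so
empty buckets are skipped for free.

#include <stdio.h>
#include <strings.h> /* ffs() */

#define PRIO_BUCKETS 4 /* made-up sublevel count for this sketch */

struct priolist_sketch {
	const char *requests[PRIO_BUCKETS]; /* slot 0 = highest sublevel */
	unsigned int used; /* bit i set => requests[i] is non-empty */
};

int main(void)
{
	struct priolist_sketch p = { .used = 0 };
	int idx;

	/* Queue into two of the four buckets. */
	p.requests[1] = "bumped request";
	p.used |= 1u << 1;
	p.requests[3] = "default request";
	p.used |= 1u << 3;

	/*
	 * Consume highest-first, skipping empty buckets via the
	 * bitmap, mirroring priolist_for_each_request_consume().
	 * ffs() is 1-based, hence the idx - 1.
	 */
	while ((idx = ffs((int)p.used))) {
		printf("bucket %d: %s\n", idx - 1, p.requests[idx - 1]);
		p.used &= ~(1u << (idx - 1));
	}

	return 0;
}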

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_engine_cs.c      |  6 +-
 drivers/gpu/drm/i915/intel_guc_submission.c | 12 ++-
 drivers/gpu/drm/i915/intel_lrc.c            | 87 ++++++++++++++-------
 drivers/gpu/drm/i915/intel_ringbuffer.h     | 13 ++-
 4 files changed, 80 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 217ed3ee1cab..83f2f7774c1f 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1534,10 +1534,10 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 	count = 0;
 	drm_printf(m, "\t\tQueue priority: %d\n", execlists->queue_priority);
 	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
-		struct i915_priolist *p =
-			rb_entry(rb, typeof(*p), node);
+		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+		int i;
 
-		list_for_each_entry(rq, &p->requests, sched.link) {
+		priolist_for_each_request(rq, p, i) {
 			if (count++ < MAX_REQUESTS_TO_SHOW - 1)
 				print_request(m, rq, "\t\tQ ");
 			else
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 6f693ef62c64..8531bd917ec3 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -726,30 +726,28 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
+		int i;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+		priolist_for_each_request_consume(rq, rn, p, i) {
 			if (last && rq->hw_context != last->hw_context) {
-				if (port == last_port) {
-					__list_del_many(&p->requests,
-							&rq->sched.link);
+				if (port == last_port)
 					goto done;
-				}
 
 				if (submit)
 					port_assign(port, last);
 				port++;
 			}
 
-			INIT_LIST_HEAD(&rq->sched.link);
+			list_del_init(&rq->sched.link);
 
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
+
 			last = rq;
 			submit = true;
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e8de250c3413..aeae82b5223c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -259,14 +259,49 @@ intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
 	ce->lrc_desc = desc;
 }
 
-static struct i915_priolist *
+static void assert_priolists(struct intel_engine_execlists * const execlists,
+			     int queue_priority)
+{
+	struct rb_node *rb;
+	int last_prio, i;
+
+	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+		return;
+
+	GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
+		   rb_first(&execlists->queue.rb_root));
+
+	last_prio = (queue_priority >> I915_USER_PRIORITY_SHIFT) + 1;
+	for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+		struct i915_priolist *p = to_priolist(rb);
+
+		GEM_BUG_ON(p->priority >= last_prio);
+		last_prio = p->priority;
+
+		GEM_BUG_ON(!p->used);
+		for (i = 0; i < ARRAY_SIZE(p->requests); i++) {
+			if (list_empty(&p->requests[i]))
+				continue;
+
+			GEM_BUG_ON(!(p->used & BIT(i)));
+		}
+	}
+}
+
+static struct list_head *
 lookup_priolist(struct intel_engine_cs *engine, int prio)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_priolist *p;
 	struct rb_node **parent, *rb;
 	bool first = true;
+	int idx, i;
+
+	assert_priolists(execlists, INT_MAX);
 
+	/* buckets sorted from highest [in slot 0] to lowest priority */
+	idx = I915_PRIORITY_COUNT - (prio & ~I915_PRIORITY_MASK) - 1;
+	prio >>= I915_USER_PRIORITY_SHIFT;
 	if (unlikely(execlists->no_priolist))
 		prio = I915_PRIORITY_NORMAL;
 
@@ -283,7 +318,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
 			parent = &rb->rb_right;
 			first = false;
 		} else {
-			return p;
+			goto out;
 		}
 	}
 
@@ -309,11 +344,15 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
 	}
 
 	p->priority = prio;
-	INIT_LIST_HEAD(&p->requests);
+	for (i = 0; i < ARRAY_SIZE(p->requests); i++)
+		INIT_LIST_HEAD(&p->requests[i]);
 	rb_link_node(&p->node, rb, parent);
 	rb_insert_color_cached(&p->node, &execlists->queue, first);
+	p->used = 0;
 
-	return p;
+out:
+	p->used |= BIT(idx);
+	return &p->requests[idx];
 }
 
 static void unwind_wa_tail(struct i915_request *rq)
@@ -325,7 +364,7 @@ static void unwind_wa_tail(struct i915_request *rq)
 static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq, *rn;
-	struct i915_priolist *uninitialized_var(p);
+	struct list_head *uninitialized_var(pl);
 	int last_prio = I915_PRIORITY_INVALID;
 
 	lockdep_assert_held(&engine->timeline.lock);
@@ -342,12 +381,11 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 		GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
 		if (rq_prio(rq) != last_prio) {
 			last_prio = rq_prio(rq);
-			p = lookup_priolist(engine, last_prio);
+			pl = lookup_priolist(engine, last_prio);
 		}
 		GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
 
-		GEM_BUG_ON(p->priority != rq_prio(rq));
-		list_add(&rq->sched.link, &p->requests);
+		list_add(&rq->sched.link, pl);
 	}
 }
 
@@ -665,8 +703,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
 		struct i915_request *rq, *rn;
+		int i;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
+		priolist_for_each_request_consume(rq, rn, p, i) {
 			/*
 			 * Can we combine this request with the current port?
 			 * It has to be the same context/ringbuffer and not
@@ -685,11 +724,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * combine this request with the last, then we
 				 * are done.
 				 */
-				if (port == last_port) {
-					__list_del_many(&p->requests,
-							&rq->sched.link);
+				if (port == last_port)
 					goto done;
-				}
 
 				/*
 				 * If GVT overrides us we only ever submit
@@ -699,11 +735,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * request) to the second port.
 				 */
 				if (ctx_single_port_submission(last->hw_context) ||
-				    ctx_single_port_submission(rq->hw_context)) {
-					__list_del_many(&p->requests,
-							&rq->sched.link);
+				    ctx_single_port_submission(rq->hw_context))
 					goto done;
-				}
 
 				GEM_BUG_ON(last->hw_context == rq->hw_context);
 
@@ -714,15 +747,16 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				GEM_BUG_ON(port_isset(port));
 			}
 
-			INIT_LIST_HEAD(&rq->sched.link);
+			list_del_init(&rq->sched.link);
+
 			__i915_request_submit(rq);
 			trace_i915_request_in(rq, port_index(port, execlists));
+
 			last = rq;
 			submit = true;
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
@@ -746,6 +780,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 */
 	execlists->queue_priority =
 		port != execlists->port ? rq_prio(last) : INT_MIN;
+	assert_priolists(execlists, execlists->queue_priority);
 
 	if (submit) {
 		port_assign(port, last);
@@ -857,16 +892,16 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 	/* Flush the queued requests to the timeline list (for retiring). */
 	while ((rb = rb_first_cached(&execlists->queue))) {
 		struct i915_priolist *p = to_priolist(rb);
+		int i;
 
-		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-			INIT_LIST_HEAD(&rq->sched.link);
+		priolist_for_each_request_consume(rq, rn, p, i) {
+			list_del_init(&rq->sched.link);
 
 			dma_fence_set_error(&rq->fence, -EIO);
 			__i915_request_submit(rq);
 		}
 
 		rb_erase_cached(&p->node, &execlists->queue);
-		INIT_LIST_HEAD(&p->requests);
 		if (p->priority != I915_PRIORITY_NORMAL)
 			kmem_cache_free(engine->i915->priorities, p);
 	}
@@ -1072,8 +1107,7 @@ static void queue_request(struct intel_engine_cs *engine,
 			  struct i915_sched_node *node,
 			  int prio)
 {
-	list_add_tail(&node->link,
-		      &lookup_priolist(engine, prio)->requests);
+	list_add_tail(&node->link, lookup_priolist(engine, prio));
 }
 
 static void __update_queue(struct intel_engine_cs *engine, int prio)
@@ -1143,7 +1177,7 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 static void execlists_schedule(struct i915_request *request,
 			       const struct i915_sched_attr *attr)
 {
-	struct i915_priolist *uninitialized_var(pl);
+	struct list_head *uninitialized_var(pl);
 	struct intel_engine_cs *engine, *last;
 	struct i915_dependency *dep, *p;
 	struct i915_dependency stack;
@@ -1242,8 +1276,7 @@ static void execlists_schedule(struct i915_request *request,
 				pl = lookup_priolist(engine, prio);
 				last = engine;
 			}
-			GEM_BUG_ON(pl->priority != prio);
-			list_move_tail(&node->link, &pl->requests);
+			list_move_tail(&node->link, pl);
 		} else {
 			/*
 			 * If the request is not in the priolist queue because
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfa585712c2..1534de5bb852 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -190,11 +190,22 @@ enum intel_engine_id {
 };
 
 struct i915_priolist {
+	struct list_head requests[I915_PRIORITY_COUNT];
 	struct rb_node node;
-	struct list_head requests;
+	unsigned long used;
 	int priority;
 };
 
+#define priolist_for_each_request(it, plist, idx) \
+	for (idx = 0; idx < ARRAY_SIZE((plist)->requests); idx++) \
+		list_for_each_entry(it, &(plist)->requests[idx], sched.link)
+
+#define priolist_for_each_request_consume(it, n, plist, idx) \
+	for (; (idx = ffs((plist)->used)); (plist)->used &= ~BIT(idx - 1)) \
+		list_for_each_entry_safe(it, n, \
+					 &(plist)->requests[idx - 1], \
+					 sched.link)
+
 struct st_preempt_hang {
 	struct completion completion;
 	bool inject_hang;
-- 
2.19.0
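
As a worked example of the bucket selection in lookup_priolist() above:
assuming the sublevel bits sit below I915_USER_PRIORITY_SHIFT (the real
constants come from patch 13/40 and are not shown in this mail), the low
bits pick a slot counted down from the highest, while the remaining bits
form the rbtree key, so every sublevel of one user priority shares a
single i915_priolist:

#include <stdio.h>

/* Assumed stand-ins for the constants defined in patch 13/40. */
#define SHIFT 2            /* I915_USER_PRIORITY_SHIFT */
#define COUNT (1 << SHIFT) /* I915_PRIORITY_COUNT */

int main(void)
{
	int prio;

	for (prio = 0; prio < 2 * COUNT; prio++) {
		int internal = prio & (COUNT - 1); /* sublevel bits */
		int idx = COUNT - internal - 1;    /* slot 0 = highest */
		int key = prio >> SHIFT;           /* rbtree key */

		printf("prio=%d -> tree key %d, bucket %d\n",
		       prio, key, idx);
	}

	return 0;
}

A request given a larger internal bump therefore lands in a
lower-numbered slot and is drained first by the ffs()-based iterator.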
