From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 34/56] drm/i915: Replace priolist rbtree with a skiplist
Date: Tue, 29 Dec 2020 12:01:23 +0000
Message-ID: <20201229120145.26045-34-chris@chris-wilson.co.uk>
In-Reply-To: <20201229120145.26045-1-chris@chris-wilson.co.uk>

Replace the priolist rbtree with a skiplist. The crucial difference is
that walking and removing the first element of a skiplist is O(1), but
O(lgN) for an rbtree, as we need to rebalance on remove. This is a
hindrance for submission latency as it occurs between picking a request
from the priolist and submitting it to hardware, as well as effectively
tripling the number of O(lgN) operations required under the irqoff lock.
This is critical to reducing the latency jitter with multiple clients.
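
For reference, a minimal standalone sketch of the property relied on
above (hypothetical C, not taken from this patch): popping the first
node of a skiplist only copies that node's forward pointers into the
sentinel, so there is no rebalancing and the removal is O(1).

#include <stddef.h>

#define HEIGHT 12			/* cf. I915_PRIOLIST_HEIGHT */

struct node {
	int priority;
	int level;			/* highest valid index into next[] */
	struct node *next[HEIGHT];
};

struct slist {
	struct node sentinel;		/* next[i] == &sentinel when empty */
};

static void slist_init(struct slist *sl)
{
	int lvl;

	for (lvl = 0; lvl < HEIGHT; lvl++)
		sl->sentinel.next[lvl] = &sl->sentinel;
	sl->sentinel.level = 0;
}

/*
 * O(1): unlink the head by copying its forward pointers into the
 * sentinel (shrinking of the sentinel's level, as done by
 * i915_priolist_advance() in the patch, is omitted for brevity).
 */
static struct node *slist_pop_first(struct slist *sl)
{
	struct node *n = sl->sentinel.next[0];
	int lvl;

	if (n == &sl->sentinel)		/* empty */
		return NULL;

	for (lvl = 0; lvl <= n->level; lvl++)
		sl->sentinel.next[lvl] = n->next[lvl];

	return n;
}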

The downsides to skiplists are that lookup/insertion is only
probabilistically O(lgN) and there is a significant memory penalty,
as each skip node is larger than the rbtree equivalent. Furthermore, we
don't use dynamic arrays for the skiplist, so the allocation is fixed
and imposes an upper bound on scalability wrt the number of
inflight requests.
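
As an aside, the level of each new node is drawn from a geometric
distribution, which is where the "probabilistically O(lgN)" above comes
from. A sketch of that choice (hypothetical helper, not the patch's
code, which derives an equivalent distribution from __ffs(prng) / 2):

#include <stdlib.h>

#define HEIGHT 12			/* cf. I915_PRIOLIST_HEIGHT */

/*
 * Promote a new node to the next level with probability 1/4, capped by
 * the fixed next[] array. That fixed height is what bounds how large
 * the list can grow while keeping the expected O(lgN) search.
 */
static int random_level(void)
{
	int lvl = 0;

	while (lvl < HEIGHT - 1 && (rand() & 3) == 0)
		lvl++;

	return lvl;
}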

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../drm/i915/gt/intel_execlists_submission.c  |  63 ++---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c |  30 +--
 drivers/gpu/drm/i915/i915_priolist_types.h    |  29 ++-
 drivers/gpu/drm/i915/i915_scheduler.c         | 233 +++++++++++++-----
 drivers/gpu/drm/i915/i915_scheduler.h         |  11 +-
 drivers/gpu/drm/i915/i915_scheduler_types.h   |   2 +-
 .../drm/i915/selftests/i915_mock_selftests.h  |   1 +
 .../gpu/drm/i915/selftests/i915_scheduler.c   |  42 +++-
 8 files changed, 294 insertions(+), 117 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 8ba7db3d6004..a8820ef0282a 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -256,11 +256,6 @@ ring_set_paused(const struct intel_engine_cs *engine, int state)
 		wmb();
 }
 
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
-	return rb_entry(rb, struct i915_priolist, node);
-}
-
 static inline int rq_prio(const struct i915_request *rq)
 {
 	return READ_ONCE(rq->sched.attr.priority);
@@ -284,15 +279,31 @@ static int effective_prio(const struct i915_request *rq)
 	return prio;
 }
 
-static int queue_prio(const struct i915_sched_engine *se)
+static struct i915_request *first_request(struct i915_sched_engine *se)
 {
-	struct rb_node *rb;
+	struct i915_priolist *pl;
+
+	for_each_priolist(pl, &se->queue) {
+		if (likely(!list_empty(&pl->requests)))
+			return list_first_entry(&pl->requests,
+						struct i915_request,
+						sched.link);
+
+		i915_priolist_advance(&se->queue, pl);
+	}
+
+	return NULL;
+}
 
-	rb = rb_first_cached(&se->queue);
-	if (!rb)
+static int queue_prio(struct i915_sched_engine *se)
+{
+	struct i915_request *rq;
+
+	rq = first_request(se);
+	if (!rq)
 		return INT_MIN;
 
-	return to_priolist(rb)->priority;
+	return rq_prio(rq);
 }
 
 static int virtual_prio(const struct intel_engine_execlists *el)
@@ -302,7 +313,7 @@ static int virtual_prio(const struct intel_engine_execlists *el)
 	return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
 }
 
-static inline bool need_preempt(const struct intel_engine_cs *engine,
+static inline bool need_preempt(struct intel_engine_cs *engine,
 				const struct i915_request *rq)
 {
 	int last_prio;
@@ -1142,6 +1153,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	struct i915_request ** const last_port = port + execlists->port_mask;
 	struct i915_request *last, * const *active;
 	struct virtual_engine *ve;
+	struct i915_priolist *pl;
 	struct rb_node *rb;
 	bool submit = false;
 
@@ -1341,11 +1353,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			break;
 	}
 
-	while ((rb = rb_first_cached(&engine->active.queue))) {
-		struct i915_priolist *p = to_priolist(rb);
+	for_each_priolist(pl, &engine->active.queue) {
 		struct i915_request *rq, *rn;
 
-		priolist_for_each_request_consume(rq, rn, p) {
+		priolist_for_each_request_safe(rq, rn, pl) {
 			bool merge = true;
 
 			/*
@@ -1420,8 +1431,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			}
 		}
 
-		rb_erase_cached(&p->node, &engine->active.queue);
-		i915_priolist_free(p);
+		i915_priolist_advance(&engine->active.queue, pl);
 	}
 done:
 	*port++ = i915_request_get(last);
@@ -2646,6 +2656,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request *rq, *rn;
+	struct i915_priolist *pl;
 	struct rb_node *rb;
 	unsigned long flags;
 
@@ -2676,16 +2687,12 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	intel_engine_signal_breadcrumbs(engine);
 
 	/* Flush the queued requests to the timeline list (for retiring). */
-	while ((rb = rb_first_cached(&engine->active.queue))) {
-		struct i915_priolist *p = to_priolist(rb);
-
-		priolist_for_each_request_consume(rq, rn, p) {
+	for_each_priolist(pl, &engine->active.queue) {
+		priolist_for_each_request_safe(rq, rn, pl) {
 			mark_eio(rq);
 			__i915_request_submit(rq);
 		}
-
-		rb_erase_cached(&p->node, &engine->active.queue);
-		i915_priolist_free(p);
+		i915_priolist_advance(&engine->active.queue, pl);
 	}
 	GEM_BUG_ON(!i915_sched_is_idle(&engine->active));
 
@@ -2718,7 +2725,6 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue_priority_hint = INT_MIN;
-	engine->active.queue = RB_ROOT_CACHED;
 
 	GEM_BUG_ON(__tasklet_is_enabled(&engine->active.tasklet));
 	engine->active.tasklet.func = nop_submission_tasklet;
@@ -3101,6 +3107,8 @@ static void virtual_context_exit(struct intel_context *ce)
 
 	for (n = 0; n < ve->num_siblings; n++)
 		intel_engine_pm_put(ve->siblings[n]);
+
+	i915_sched_park_engine(&ve->base.active);
 }
 
 static const struct intel_context_ops virtual_context_ops = {
@@ -3511,6 +3519,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 {
 	const struct intel_engine_execlists *execlists = &engine->execlists;
 	struct i915_request *rq, *last;
+	struct i915_priolist *pl;
 	unsigned long flags;
 	unsigned int count;
 	struct rb_node *rb;
@@ -3540,10 +3549,8 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 
 	last = NULL;
 	count = 0;
-	for (rb = rb_first_cached(&engine->active.queue); rb; rb = rb_next(rb)) {
-		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
-
-		priolist_for_each_request(rq, p) {
+	for_each_priolist(pl, &engine->active.queue) {
+		priolist_for_each_request(rq, pl) {
 			if (count++ < max - 1)
 				show_request(m, rq, "\t\t", 0);
 			else
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index b9cb6807d101..34be8dc8949e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -55,11 +55,6 @@
  *
  */
 
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
-	return rb_entry(rb, struct i915_priolist, node);
-}
-
 static struct guc_stage_desc *__get_stage_desc(struct intel_guc *guc, u32 id)
 {
 	struct guc_stage_desc *base = guc->stage_desc_pool_vaddr;
@@ -292,8 +287,8 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	struct i915_request ** const last_port = first + execlists->port_mask;
 	struct i915_request *last = first[0];
 	struct i915_request **port;
+	struct i915_priolist *pl;
 	bool submit = false;
-	struct rb_node *rb;
 
 	lockdep_assert_held(&engine->active.lock);
 
@@ -310,11 +305,10 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	 * event.
 	 */
 	port = first;
-	while ((rb = rb_first_cached(&engine->active.queue))) {
-		struct i915_priolist *p = to_priolist(rb);
+	for_each_priolist(pl, &engine->active.queue) {
 		struct i915_request *rq, *rn;
 
-		priolist_for_each_request_consume(rq, rn, p) {
+		priolist_for_each_request_safe(rq, rn, pl) {
 			if (last && rq->context != last->context) {
 				if (port == last_port)
 					goto done;
@@ -330,12 +324,11 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 			last = rq;
 		}
 
-		rb_erase_cached(&p->node, &engine->active.queue);
-		i915_priolist_free(p);
+		i915_priolist_advance(&engine->active.queue, pl);
 	}
 done:
 	execlists->queue_priority_hint =
-		rb ? to_priolist(rb)->priority : INT_MIN;
+		pl != &engine->active.queue.sentinel ? pl->priority : INT_MIN;
 	if (submit) {
 		*port = schedule_in(last, port - execlists->inflight);
 		*++port = NULL;
@@ -450,7 +443,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
 	struct i915_request *rq, *rn;
-	struct rb_node *rb;
+	struct i915_priolist *p;
 	unsigned long flags;
 
 	ENGINE_TRACE(engine, "\n");
@@ -481,25 +474,20 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
 	}
 
 	/* Flush the queued requests to the timeline list (for retiring). */
-	while ((rb = rb_first_cached(&engine->active.queue))) {
-		struct i915_priolist *p = to_priolist(rb);
-
-		priolist_for_each_request_consume(rq, rn, p) {
+	for_each_priolist(p, &engine->active.queue) {
+		priolist_for_each_request_safe(rq, rn, p) {
 			list_del_init(&rq->sched.link);
 			__i915_request_submit(rq);
 			dma_fence_set_error(&rq->fence, -EIO);
 			i915_request_mark_complete(rq);
 		}
-
-		rb_erase_cached(&p->node, &engine->active.queue);
-		i915_priolist_free(p);
+		i915_priolist_advance(&engine->active.queue, p);
 	}
 	GEM_BUG_ON(!i915_sched_is_idle(&engine->active));
 
 	/* Remaining _unready_ requests will be nop'ed when submitted */
 
 	execlists->queue_priority_hint = INT_MIN;
-	engine->active.queue = RB_ROOT_CACHED;
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index bc2fa84f98a8..807cd8f65481 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -38,10 +38,37 @@ enum {
 #define I915_PRIORITY_UNPREEMPTABLE INT_MAX
 #define I915_PRIORITY_BARRIER (I915_PRIORITY_UNPREEMPTABLE - 1)
 
+#ifdef CONFIG_64BIT
+#define I915_PRIOLIST_HEIGHT 12
+#else
+#define I915_PRIOLIST_HEIGHT 11
+#endif
+
 struct i915_priolist {
 	struct list_head requests;
-	struct rb_node node;
 	int priority;
+
+	int level;
+	struct i915_priolist *next[I915_PRIOLIST_HEIGHT];
+};
+
+struct i915_priolist_root {
+	struct i915_priolist sentinel;
+	u32 prng;
 };
 
+#define priolist_first(root) ((root)->sentinel.next[0])
+#define i915_priolist_is_empty(root) (priolist_first(root) == &(root)->sentinel)
+
+#define for_each_priolist(p, root) \
+	for ((p) = priolist_first(root); \
+	     (p) != &(root)->sentinel; \
+	     (p) = (p)->next[0])
+
+#define priolist_for_each_request(it, plist) \
+	list_for_each_entry(it, &(plist)->requests, sched.link)
+
+#define priolist_for_each_request_safe(it, n, plist) \
+	list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
+
 #endif /* _I915_PRIOLIST_TYPES_H_ */
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index f99d757d4d5c..afb4860868d1 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -4,7 +4,9 @@
  * Copyright © 2018 Intel Corporation
  */
 
+#include <linux/bitops.h>
 #include <linux/mutex.h>
+#include <linux/prandom.h>
 
 #include "gt/intel_ring.h"
 #include "gt/intel_lrc_reg.h"
@@ -91,15 +93,23 @@ static void i915_sched_init_ipi(struct i915_sched_ipi *ipi)
 	ipi->list = NULL;
 }
 
+static void init_priolist(struct i915_priolist_root *const root)
+{
+	struct i915_priolist *pl = &root->sentinel;
+
+	memset_p((void **)pl->next, pl, ARRAY_SIZE(pl->next));
+	pl->priority = INT_MIN;
+}
+
 void i915_sched_init_engine(struct i915_sched_engine *se,
 			    unsigned int subclass)
 {
 	spin_lock_init(&se->lock);
 	lockdep_set_subclass(&se->lock, subclass);
 
+	init_priolist(&se->queue);
 	INIT_LIST_HEAD(&se->requests);
 	INIT_LIST_HEAD(&se->hold);
-	se->queue = RB_ROOT_CACHED;
 
 	i915_sched_init_ipi(&se->ipi);
 
@@ -116,8 +126,57 @@ void i915_sched_init_engine(struct i915_sched_engine *se,
 #endif
 }
 
+__maybe_unused static bool priolist_idle(struct i915_priolist_root *root)
+{
+	struct i915_priolist *pl = &root->sentinel;
+	int lvl;
+
+	for (lvl = 0; lvl < ARRAY_SIZE(pl->next); lvl++) {
+		if (pl->next[lvl] != pl) {
+			GEM_TRACE_ERR("root[%d] is not empty\n", lvl);
+			return false;
+		}
+	}
+
+	if (pl->level) {
+		GEM_TRACE_ERR("root is not clear: %d\n", pl->level);
+		return false;
+	}
+
+	return true;
+}
+
+static void pl_push(struct i915_priolist *pl, struct list_head *head)
+{
+	pl->requests.next = head->next;
+	head->next = &pl->requests;
+}
+
+static struct i915_priolist *pl_pop(struct list_head *head)
+{
+	struct i915_priolist *pl;
+
+	pl = container_of(head->next, typeof(*pl), requests);
+	head->next = pl->requests.next;
+
+	return pl;
+}
+
+static bool pl_empty(struct list_head *head)
+{
+	return !head->next;
+}
+
 void i915_sched_park_engine(struct i915_sched_engine *se)
 {
+	struct i915_priolist_root *root = &se->queue;
+	struct list_head *list = &root->sentinel.requests;
+
+	GEM_BUG_ON(!priolist_idle(root));
+
+	while (!pl_empty(list))
+		kmem_cache_free(global.slab_priorities, pl_pop(list));
+
 	GEM_BUG_ON(!i915_sched_is_idle(se));
 	se->no_priolist = false;
 }
@@ -183,71 +242,55 @@ static inline bool node_signaled(const struct i915_sched_node *node)
 	return i915_request_completed(node_to_request(node));
 }
 
-static inline struct i915_priolist *to_priolist(struct rb_node *rb)
-{
-	return rb_entry(rb, struct i915_priolist, node);
-}
-
-static void assert_priolists(struct i915_sched_engine * const se)
+static inline unsigned int random_level(struct i915_priolist_root *root)
 {
-	struct rb_node *rb;
-	long last_prio;
-
-	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
-		return;
-
-	GEM_BUG_ON(rb_first_cached(&se->queue) !=
-		   rb_first(&se->queue.rb_root));
-
-	last_prio = INT_MAX;
-	for (rb = rb_first_cached(&se->queue); rb; rb = rb_next(rb)) {
-		const struct i915_priolist *p = to_priolist(rb);
-
-		GEM_BUG_ON(p->priority > last_prio);
-		last_prio = p->priority;
-	}
+	root->prng = next_pseudo_random32(root->prng);
+	return min_t(int, __ffs(root->prng) / 2, I915_PRIOLIST_HEIGHT - 1);
 }
 
 static struct list_head *
 lookup_priolist(struct intel_engine_cs *engine, int prio)
 {
+	struct i915_priolist *update[I915_PRIOLIST_HEIGHT];
 	struct i915_sched_engine * const se = &engine->active;
-	struct i915_priolist *p;
-	struct rb_node **parent, *rb;
-	bool first = true;
-
-	lockdep_assert_held(&engine->active.lock);
-	assert_priolists(se);
+	struct i915_priolist_root *root = &se->queue;
+	struct i915_priolist *pl, *tmp;
+	int lvl;
 
+	lockdep_assert_held(&se->lock);
 	if (unlikely(se->no_priolist))
 		prio = I915_PRIORITY_NORMAL;
 
-find_priolist:
-	/* most positive priority is scheduled first, equal priorities fifo */
-	rb = NULL;
-	parent = &se->queue.rb_root.rb_node;
-	while (*parent) {
-		rb = *parent;
-		p = to_priolist(rb);
-		if (prio > p->priority) {
-			parent = &rb->rb_left;
-		} else if (prio < p->priority) {
-			parent = &rb->rb_right;
-			first = false;
-		} else {
-			return &p->requests;
-		}
+	for_each_priolist(pl, root) { /* recycle any empty elements before us */
+		if (pl->priority >= prio || !list_empty(&pl->requests))
+			break;
+
+		i915_priolist_advance(root, pl);
 	}
 
+find_priolist:
+	pl = &root->sentinel;
+	lvl = pl->level;
+	do {
+		while (tmp = pl->next[lvl], tmp->priority >= prio)
+			pl = tmp;
+		if (pl->priority == prio)
+			goto out;
+		update[lvl] = pl;
+	} while (--lvl >= 0);
+
 	if (prio == I915_PRIORITY_NORMAL) {
-		p = &se->default_priolist;
+		pl = &se->default_priolist;
+	} else if (!pl_empty(&root->sentinel.requests)) {
+		pl = pl_pop(&root->sentinel.requests);
 	} else {
-		p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
+		pl = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
 		/* Convert an allocation failure to a priority bump */
-		if (unlikely(!p)) {
+		if (unlikely(!pl)) {
 			prio = I915_PRIORITY_NORMAL; /* recurses just once */
 
-			/* To maintain ordering with all rendering, after an
+			/*
+			 * To maintain ordering with all rendering, after an
 			 * allocation failure we have to disable all scheduling.
 			 * Requests will then be executed in fifo, and schedule
 			 * will ensure that dependencies are emitted in fifo.
@@ -260,18 +303,91 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
 		}
 	}
 
-	p->priority = prio;
-	INIT_LIST_HEAD(&p->requests);
+	pl->priority = prio;
+	INIT_LIST_HEAD(&pl->requests);
+
+	lvl = random_level(root);
+	if (lvl > root->sentinel.level) {
+		lvl = ++root->sentinel.level;
+		update[lvl] = &root->sentinel;
+	}
+
+	pl->level = lvl;
+	do {
+		tmp = update[lvl];
+		pl->next[lvl] = update[lvl]->next[lvl];
+		tmp->next[lvl] = pl;
+	} while (--lvl >= 0);
+
+	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+		struct i915_priolist *chk;
+
+		chk = &root->sentinel;
+		lvl = chk->level;
+		do {
+			while (tmp = chk->next[lvl], tmp->priority >= prio)
+				chk = tmp;
+		} while (--lvl >= 0);
+
+		GEM_BUG_ON(chk != pl);
+	}
 
-	rb_link_node(&p->node, rb, parent);
-	rb_insert_color_cached(&p->node, &se->queue, first);
+out:
+	GEM_BUG_ON(pl == &root->sentinel);
+	return &pl->requests;
+}
 
-	return &p->requests;
+static void remove_priolist(struct intel_engine_cs *engine,
+			    struct list_head *plist)
+{
+	struct i915_sched_engine * const se = &engine->active;
+	struct i915_priolist_root *root = &se->queue;
+	struct i915_priolist *pl, *tmp;
+	struct i915_priolist *old =
+		container_of(plist, struct i915_priolist, requests);
+	int prio = old->priority;
+	int lvl;
+
+	lockdep_assert_held(&se->lock);
+	GEM_BUG_ON(!list_empty(plist));
+
+	pl = &root->sentinel;
+	lvl = pl->level;
+
+	pl_push(old, &pl->requests);
+	do {
+		while (tmp = pl->next[lvl], tmp->priority > prio)
+			pl = tmp;
+		if (lvl <= old->level) {
+			pl->next[lvl] = old->next[lvl];
+			if (pl == &root->sentinel && old->next[lvl] == pl)
+				pl->level--;
+		}
+	} while (--lvl >= 0);
+	GEM_BUG_ON(tmp != old);
 }
 
-void __i915_priolist_free(struct i915_priolist *p)
+void i915_priolist_advance(struct i915_priolist_root *root,
+			   struct i915_priolist *pl)
 {
-	kmem_cache_free(global.slab_priorities, p);
+	int lvl;
+
+	GEM_BUG_ON(!list_empty(&pl->requests));
+	GEM_BUG_ON(pl == &root->sentinel);
+
+	if (pl->priority != I915_PRIORITY_NORMAL)
+		pl_push(pl, &root->sentinel.requests);
+
+	for (lvl = 0; lvl <= pl->level; lvl++)
+		root->sentinel.next[lvl] = pl->next[lvl];
+
+	if (pl->level > 0 && pl->level == root->sentinel.level) {
+		pl = &root->sentinel;
+		lvl = pl->level;
+		while (lvl > 0 && pl->next[lvl] == pl)
+			lvl--;
+		pl->level = lvl;
+	}
 }
 
 static struct i915_request *
@@ -420,8 +536,13 @@ static void __i915_request_set_priority(struct i915_request *rq, int prio)
 			continue;
 
 		GEM_BUG_ON(rq->engine != engine);
-		if (i915_request_in_priority_queue(rq))
+		if (i915_request_in_priority_queue(rq)) {
+			struct list_head *prev = rq->sched.link.prev;
+
 			list_move_tail(&rq->sched.link, plist);
+			if (list_empty(prev))
+				remove_priolist(engine, prev);
+		}
 
 		/* Defer (tasklet) submission until after all updates. */
 		kick_submission(engine, rq, prio);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index a68747d682d2..4432aab230b6 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -16,12 +16,6 @@
 
 struct drm_printer;
 
-#define priolist_for_each_request(it, plist) \
-	list_for_each_entry(it, &(plist)->requests, sched.link)
-
-#define priolist_for_each_request_consume(it, n, plist) \
-	list_for_each_entry_safe(it, n, &(plist)->requests, sched.link)
-
 void i915_sched_node_init(struct i915_sched_node *node);
 void i915_sched_node_reinit(struct i915_sched_node *node);
 
@@ -64,7 +58,7 @@ static inline void i915_priolist_free(struct i915_priolist *p)
 
 static inline bool i915_sched_is_idle(const struct i915_sched_engine *se)
 {
-	return RB_EMPTY_ROOT(&se->queue.rb_root);
+	return i915_priolist_is_empty(&se->queue);
 }
 
 static inline bool
@@ -94,6 +88,9 @@ static inline void i915_sched_kick(struct i915_sched_engine *se)
 	tasklet_hi_schedule(&se->tasklet);
 }
 
+void i915_priolist_advance(struct i915_priolist_root *root,
+			   struct i915_priolist *old);
+
 void i915_request_show_with_schedule(struct drm_printer *m,
 				     const struct i915_request *rq,
 				     const char *prefix,
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 92332fb71b14..c155e4faa30b 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -89,7 +89,7 @@ struct i915_sched_engine {
 	/**
 	 * @queue: queue of requests, in priority lists
 	 */
-	struct rb_root_cached queue;
+	struct i915_priolist_root queue;
 
 	struct i915_sched_ipi ipi;
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
index 3db34d3eea58..946c93441c1f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h
@@ -25,6 +25,7 @@ selftest(ring, intel_ring_mock_selftests)
 selftest(engine, intel_engine_cs_mock_selftests)
 selftest(timelines, intel_timeline_mock_selftests)
 selftest(requests, i915_request_mock_selftests)
+selftest(scheduler, i915_scheduler_mock_selftests)
 selftest(objects, i915_gem_object_mock_selftests)
 selftest(phys, i915_gem_phys_mock_selftests)
 selftest(dmabuf, i915_gem_dmabuf_mock_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 5b1dd227e24e..a851a156d460 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -12,6 +12,43 @@
 #include "selftests/igt_spinner.h"
 #include "selftests/i915_random.h"
 
+static int mock_skiplist_levels(void *dummy)
+{
+	struct i915_priolist_root root = {};
+	struct i915_priolist *pl = &root.sentinel;
+	int count, lvl;
+
+	for (count = 0; count < 16384; count++) {
+		lvl = random_level(&root);
+		if (lvl > pl->level)
+			lvl = ++pl->level;
+
+		pl->next[lvl] = ptr_inc(pl->next[lvl]);
+	}
+
+	for (lvl = 0; lvl <= pl->level; lvl++) {
+		int x = ilog2((unsigned long)pl->next[lvl]);
+		char row[80];
+
+		memset(row, '*', x);
+		row[x] = '\0';
+
+		pr_info("[%2d] %5lu %s\n",
+			lvl, (unsigned long)pl->next[lvl], row);
+	}
+
+	return 0;
+}
+
+int i915_scheduler_mock_selftests(void)
+{
+	static const struct i915_subtest tests[] = {
+		SUBTEST(mock_skiplist_levels),
+	};
+
+	return i915_subtests(tests, NULL);
+}
+
 static void scheduling_disable(struct intel_engine_cs *engine)
 {
 	engine->props.preempt_timeout_ms = 0;
@@ -80,9 +117,9 @@ static int all_engines(struct drm_i915_private *i915,
 static bool check_context_order(struct intel_engine_cs *engine)
 {
 	u64 last_seqno, last_context;
+	struct i915_priolist *p;
 	unsigned long count;
 	bool result = false;
-	struct rb_node *rb;
 	int last_prio;
 
 	/* We expect the execution order to follow ascending fence-context */
@@ -92,8 +129,7 @@ static bool check_context_order(struct intel_engine_cs *engine)
 	last_context = 0;
 	last_seqno = 0;
 	last_prio = 0;
-	for (rb = rb_first_cached(&engine->active.queue); rb; rb = rb_next(rb)) {
-		struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
+	for_each_priolist(p, &engine->active.queue) {
 		struct i915_request *rq;
 
 		priolist_for_each_request(rq, p) {
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

