From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: stable@vger.kernel.org, Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 38/66] drm/i915/gt: Use virtual_engine during execlists_dequeue
Date: Wed, 15 Jul 2020 12:51:19 +0100
Message-ID: <20200715115147.11866-38-chris@chris-wilson.co.uk>
In-Reply-To: <20200715115147.11866-1-chris@chris-wilson.co.uk>
Rather than going back and forth between the rb_node entry and the
virtual_engine type, look the virtual_engine up once, store it in a
local and reuse it. As the container_of conversion from rb_node to
virtual_engine requires a variable offset, performing that conversion
just once shaves off a bit of code.
v2: Keep a single virtual engine lookup, for typical use.
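To illustrate the variable offset: each virtual engine embeds one
rb_node per physical sibling, indexed by engine->id, so the offset back
to the container depends on a runtime value. A minimal standalone
sketch of the pattern, using stand-in types rather than the real i915
definitions:

struct rb_node_stub { void *links[3]; };	/* stand-in for struct rb_node */

struct ve_node_stub {
	struct rb_node_stub rb;
	int prio;
};

struct virtual_engine_stub {
	/* one node per sibling engine; index matches engine->id */
	struct ve_node_stub nodes[8];
};

static struct virtual_engine_stub *
ve_from_rb(struct rb_node_stub *rb, unsigned int id)
{
	/*
	 * rb is the first member of ve_node_stub, and nodes[] is the
	 * first member of the container, so stepping back id elements
	 * lands on nodes[0], i.e. the virtual engine itself. The
	 * subtraction scales by the runtime value of id - that is the
	 * variable offset referred to above, and why caching the
	 * result in a local is worthwhile.
	 */
	struct ve_node_stub *node = (struct ve_node_stub *)rb;

	return (struct virtual_engine_stub *)(node - id);
}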
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: <stable@vger.kernel.org> # v5.4+
---
drivers/gpu/drm/i915/gt/intel_lrc.c | 254 ++++++++++++----------------
1 file changed, 111 insertions(+), 143 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index fabb20a6800b..ec533dfe3be9 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -453,9 +453,15 @@ static int queue_prio(const struct intel_engine_execlists *execlists)
return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
}
+static int virtual_prio(const struct intel_engine_execlists *el)
+{
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN;
+}
+
static inline bool need_preempt(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- struct rb_node *rb)
+ const struct i915_request *rq)
{
int last_prio;
@@ -492,25 +498,6 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
rq_prio(list_next_entry(rq, sched.link)) > last_prio)
return true;
- if (rb) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- bool preempt = false;
-
- if (engine == ve->siblings[0]) { /* only preempt one sibling */
- struct i915_request *next;
-
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- preempt = rq_prio(next) > last_prio;
- rcu_read_unlock();
- }
-
- if (preempt)
- return preempt;
- }
-
/*
* If the inflight context did not trigger the preemption, then maybe
* it was the set of queued requests? Pick the highest priority in
@@ -521,7 +508,8 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
* ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
* context, its priority would not exceed ELSP[0] aka last_prio.
*/
- return queue_prio(&engine->execlists) > last_prio;
+ return max(virtual_prio(&engine->execlists),
+ queue_prio(&engine->execlists)) > last_prio;
}
__maybe_unused static inline bool
@@ -1806,6 +1794,35 @@ static bool virtual_matches(const struct virtual_engine *ve,
return true;
}
+static struct virtual_engine *
+first_virtual_engine(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *el = &engine->execlists;
+ struct rb_node *rb = rb_first_cached(&el->virtual);
+
+ while (rb) {
+ struct virtual_engine *ve =
+ rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq = READ_ONCE(ve->request);
+
+ /* lazily cleanup after another engine handled rq */
+ if (!rq) {
+ rb_erase_cached(rb, &el->virtual);
+ RB_CLEAR_NODE(rb);
+ rb = rb_first_cached(&el->virtual);
+ continue;
+ }
+
+ if (!virtual_matches(ve, rq, engine)) {
+ rb = rb_next(rb);
+ continue;
+ }
+ return ve;
+ }
+
+ return NULL;
+}
+
static void virtual_xfer_breadcrumbs(struct virtual_engine *ve)
{
/*
@@ -1889,32 +1906,15 @@ static void defer_active(struct intel_engine_cs *engine)
static bool
need_timeslice(const struct intel_engine_cs *engine,
- const struct i915_request *rq,
- const struct rb_node *rb)
+ const struct i915_request *rq)
{
int hint;
if (!intel_engine_has_timeslices(engine))
return false;
- hint = engine->execlists.queue_priority_hint;
-
- if (rb) {
- const struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- const struct intel_engine_cs *inflight =
- intel_context_inflight(&ve->context);
-
- if (!inflight || inflight == engine) {
- struct i915_request *next;
-
- rcu_read_lock();
- next = READ_ONCE(ve->request);
- if (next)
- hint = max(hint, rq_prio(next));
- rcu_read_unlock();
- }
- }
+ hint = max(engine->execlists.queue_priority_hint,
+ virtual_prio(&engine->execlists));
if (!list_is_last(&rq->sched.link, &engine->active.requests))
hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
@@ -2053,6 +2053,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct i915_request **port = execlists->pending;
struct i915_request ** const last_port = port + execlists->port_mask;
struct i915_request * const *active = execlists->active;
+ struct virtual_engine *ve;
struct i915_request *last;
unsigned long flags;
struct rb_node *rb;
@@ -2081,26 +2082,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*/
spin_lock_irqsave(&engine->active.lock, flags);
- for (rb = rb_first_cached(&execlists->virtual); rb; ) {
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
- struct i915_request *rq = READ_ONCE(ve->request);
-
- if (!rq) { /* lazily cleanup after another engine handled rq */
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
-
- if (!virtual_matches(ve, rq, engine)) {
- rb = rb_next(rb);
- continue;
- }
-
- break;
- }
-
/*
* If the queue is higher priority than the last
* request in the currently active context, submit afresh.
@@ -2123,7 +2104,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if ((last = *active)) {
if (i915_request_completed(last)) {
goto check_secondary;
- } else if (need_preempt(engine, last, rb)) {
+ } else if (need_preempt(engine, last)) {
ENGINE_TRACE(engine,
"preempting last=%llx:%lld, prio=%d, hint=%d\n",
last->fence.context,
@@ -2149,7 +2130,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
__unwind_incomplete_requests(engine);
last = NULL;
- } else if (need_timeslice(engine, last, rb) &&
+ } else if (need_timeslice(engine, last) &&
timeslice_expired(execlists, last)) {
ENGINE_TRACE(engine,
"expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
@@ -2201,111 +2182,98 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
}
- while (rb) { /* XXX virtual is always taking precedence */
- struct virtual_engine *ve =
- rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ /* XXX virtual is always taking precedence */
+ while ((ve = first_virtual_engine(engine))) {
struct i915_request *rq;
spin_lock(&ve->base.active.lock);
rq = ve->request;
- if (unlikely(!rq)) { /* lost the race to a sibling */
- spin_unlock(&ve->base.active.lock);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
- rb = rb_first_cached(&execlists->virtual);
- continue;
- }
+ if (unlikely(!rq)) /* lost the race to a sibling */
+ goto unlock;
- GEM_BUG_ON(rq != ve->request);
GEM_BUG_ON(rq->engine != &ve->base);
GEM_BUG_ON(rq->context != &ve->context);
- if (rq_prio(rq) >= queue_prio(execlists)) {
- if (!virtual_matches(ve, rq, engine)) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_next(rb);
- continue;
- }
+ if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
+ spin_unlock(&ve->base.active.lock);
+ break;
+ }
- if (last && !can_merge_rq(last, rq)) {
- spin_unlock(&ve->base.active.lock);
- spin_unlock_irqrestore(&engine->active.lock, flags);
- start_timeslice(engine, rq_prio(rq));
- return; /* leave this for another sibling */
- }
+ GEM_BUG_ON(!virtual_matches(ve, rq, engine));
- ENGINE_TRACE(engine,
- "virtual rq=%llx:%lld%s, new engine? %s\n",
- rq->fence.context,
- rq->fence.seqno,
- i915_request_completed(rq) ? "!" :
- i915_request_started(rq) ? "*" :
- "",
- yesno(engine != ve->siblings[0]));
-
- WRITE_ONCE(ve->request, NULL);
- WRITE_ONCE(ve->base.execlists.queue_priority_hint,
- INT_MIN);
- rb_erase_cached(rb, &execlists->virtual);
- RB_CLEAR_NODE(rb);
+ if (last && !can_merge_rq(last, rq)) {
+ spin_unlock(&ve->base.active.lock);
+ spin_unlock_irqrestore(&engine->active.lock, flags);
+ start_timeslice(engine, rq_prio(rq));
+ return; /* leave this for another sibling */
+ }
- GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- WRITE_ONCE(rq->engine, engine);
+ ENGINE_TRACE(engine,
+ "virtual rq=%llx:%lld%s, new engine? %s\n",
+ rq->fence.context,
+ rq->fence.seqno,
+ i915_request_completed(rq) ? "!" :
+ i915_request_started(rq) ? "*" :
+ "",
+ yesno(engine != ve->siblings[0]));
- if (engine != ve->siblings[0]) {
- u32 *regs = ve->context.lrc_reg_state;
- unsigned int n;
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint, INT_MIN);
- GEM_BUG_ON(READ_ONCE(ve->context.inflight));
+ rb = &ve->nodes[engine->id].rb;
+ rb_erase_cached(rb, &execlists->virtual);
+ RB_CLEAR_NODE(rb);
- if (!intel_engine_has_relative_mmio(engine))
- virtual_update_register_offsets(regs,
- engine);
+ GEM_BUG_ON(!(rq->execution_mask & engine->mask));
+ WRITE_ONCE(rq->engine, engine);
- if (!list_empty(&ve->context.signals))
- virtual_xfer_breadcrumbs(ve);
+ if (engine != ve->siblings[0]) {
+ u32 *regs = ve->context.lrc_reg_state;
+ unsigned int n;
- /*
- * Move the bound engine to the top of the list
- * for future execution. We then kick this
- * tasklet first before checking others, so that
- * we preferentially reuse this set of bound
- * registers.
- */
- for (n = 1; n < ve->num_siblings; n++) {
- if (ve->siblings[n] == engine) {
- swap(ve->siblings[n],
- ve->siblings[0]);
- break;
- }
- }
+ GEM_BUG_ON(READ_ONCE(ve->context.inflight));
- GEM_BUG_ON(ve->siblings[0] != engine);
- }
+ if (!intel_engine_has_relative_mmio(engine))
+ virtual_update_register_offsets(regs, engine);
- if (__i915_request_submit(rq)) {
- submit = true;
- last = rq;
- }
- i915_request_put(rq);
+ if (!list_empty(&ve->context.signals))
+ virtual_xfer_breadcrumbs(ve);
/*
- * Hmm, we have a bunch of virtual engine requests,
- * but the first one was already completed (thanks
- * preempt-to-busy!). Keep looking at the veng queue
- * until we have no more relevant requests (i.e.
- * the normal submit queue has higher priority).
+ * Move the bound engine to the top of the list for
+ * future execution. We then kick this tasklet first
+ * before checking others, so that we preferentially
+ * reuse this set of bound registers.
*/
- if (!submit) {
- spin_unlock(&ve->base.active.lock);
- rb = rb_first_cached(&execlists->virtual);
- continue;
+ for (n = 1; n < ve->num_siblings; n++) {
+ if (ve->siblings[n] == engine) {
+ swap(ve->siblings[n], ve->siblings[0]);
+ break;
+ }
}
+
+ GEM_BUG_ON(ve->siblings[0] != engine);
+ }
+
+ if (__i915_request_submit(rq)) {
+ submit = true;
+ last = rq;
}
+ i915_request_put(rq);
+unlock:
spin_unlock(&ve->base.active.lock);
- break;
+
+ /*
+ * Hmm, we have a bunch of virtual engine requests,
+ * but the first one was already completed (thanks
+ * preempt-to-busy!). Keep looking at the veng queue
+ * until we have no more relevant requests (i.e.
+ * the normal submit queue has higher priority).
+ */
+ if (submit)
+ break;
}
while ((rb = rb_first_cached(&execlists->queue))) {
--
2.20.1
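For readers following the sibling rotation retained above ("Move the
bound engine to the top of the list"), it amounts to a simple
move-to-front scheme. A standalone sketch with simplified stand-in
types (not the driver's structs):

struct engine_stub { int id; };

/*
 * Rotate the sibling that just received the request into slot 0 so
 * the next dequeue preferentially reuses its bound register state.
 */
static void promote_sibling(struct engine_stub **siblings,
			    unsigned int count,
			    const struct engine_stub *winner)
{
	unsigned int n;

	for (n = 1; n < count; n++) {
		if (siblings[n] == winner) {
			struct engine_stub *tmp = siblings[0];

			siblings[0] = siblings[n];
			siblings[n] = tmp;
			break;
		}
	}
}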