* [PATCH v2 1/2] drm/i915/execlists: Suppress preempting self
From: Chris Wilson @ 2019-01-23 17:44 UTC (permalink / raw)
  To: intel-gfx

In order to avoid preempting ourselves, we currently refuse to schedule
the tasklet if we reschedule an inflight context. However, this glosses
over a few issues, such as what happens when, after a CS completion
event, we preempt the newly executing context with itself, or when
something else causes a tasklet_schedule that triggers the same
evaluation and preempts the active context with itself.

To avoid the extra complications, after deciding that we have
potentially queued a request with higher priority than the currently
executing request, inspect the head of the queue to see if it is indeed
a higher priority request from another context.

v2: We can simplify a bunch of tests based on the knowledge that PI will
ensure that earlier requests along the same context will have the highest
priority.

References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_scheduler.c | 20 ++++--
 drivers/gpu/drm/i915/intel_lrc.c      | 91 ++++++++++++++++++++++++---
 2 files changed, 100 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 340faea6c08a..fb5d953430e5 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -239,6 +239,18 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 	return engine;
 }
 
+static bool inflight(const struct i915_request *rq,
+		     const struct intel_engine_cs *engine)
+{
+	const struct i915_request *active;
+
+	if (!rq->global_seqno)
+		return false;
+
+	active = port_request(engine->execlists.port);
+	return active->hw_context == rq->hw_context;
+}
+
 static void __i915_schedule(struct i915_request *rq,
 			    const struct i915_sched_attr *attr)
 {
@@ -328,6 +340,7 @@ static void __i915_schedule(struct i915_request *rq,
 		INIT_LIST_HEAD(&dep->dfs_link);
 
 		engine = sched_lock_engine(node, engine);
+		lockdep_assert_held(&engine->timeline.lock);
 
 		/* Recheck after acquiring the engine->timeline.lock */
 		if (prio <= node->attr.priority || node_signaled(node))
@@ -356,17 +369,16 @@ static void __i915_schedule(struct i915_request *rq,
 		if (prio <= engine->execlists.queue_priority)
 			continue;
 
+		engine->execlists.queue_priority = prio;
+
 		/*
 		 * If we are already the currently executing context, don't
 		 * bother evaluating if we should preempt ourselves.
 		 */
-		if (node_to_request(node)->global_seqno &&
-		    i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
-				      node_to_request(node)->global_seqno))
+		if (inflight(node_to_request(node), engine))
 			continue;
 
 		/* Defer (tasklet) submission until after all of our updates. */
-		engine->execlists.queue_priority = prio;
 		tasklet_hi_schedule(&engine->execlists.tasklet);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8aa8a4862543..b61235304734 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -181,13 +181,89 @@ static inline int rq_prio(const struct i915_request *rq)
 	return rq->sched.attr.priority;
 }
 
+static int queue_prio(const struct intel_engine_execlists *execlists)
+{
+	struct i915_priolist *p;
+	struct rb_node *rb;
+
+	rb = rb_first_cached(&execlists->queue);
+	if (!rb)
+		return INT_MIN;
+
+	/*
+	 * As the priolist[] is inverted, with the highest priority in [0],
+	 * we have to flip the index value back into a priority.
+	 */
+	p = to_priolist(rb);
+	return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
+}
+
 static inline bool need_preempt(const struct intel_engine_cs *engine,
-				const struct i915_request *last,
-				int prio)
+				const struct i915_request *rq,
+				int q_prio)
 {
-	return (intel_engine_has_preemption(engine) &&
-		__execlists_need_preempt(prio, rq_prio(last)) &&
-		!i915_request_completed(last));
+	const struct intel_context *ctx = rq->hw_context;
+	const int last_prio = rq_prio(rq);
+
+	if (!intel_engine_has_preemption(engine))
+		return false;
+
+	if (i915_request_completed(rq))
+		return false;
+
+	/*
+	 * Check if the current queue_priority merits a preemption attempt.
+	 *
+	 * However, the queue_priority is a mere hint that we may need to
+	 * preempt. If that hint is stale or we may be trying to preempt
+	 * ourselves, ignore the request.
+	 */
+	if (!__execlists_need_preempt(q_prio, last_prio))
+		return false;
+
+	/*
+	 * Check against the first request in ELSP[1], it will, thanks to the
+	 * power of PI, be the highest priority of that context.
+	 */
+	if (!list_is_last(&rq->link, &engine->timeline.requests)) {
+		rq = list_next_entry(rq, link);
+		GEM_BUG_ON(rq->hw_context == ctx);
+		if (rq_prio(rq) > last_prio)
+			return true;
+	}
+
+	/*
+	 * If the inflight context did not trigger the preemption, then maybe
+	 * it was the set of queued requests? Pick the highest priority in
+	 * the queue (the first active priolist) and see if it deserves to be
+	 * running instead of ELSP[0].
+	 *
+	 * The highest priority request in the queue can not be either
+	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
+	 * context, its priority would not exceed ELSP[0] aka last_prio.
+	 */
+	return queue_prio(&engine->execlists) > last_prio;
+}
+
+__maybe_unused static inline bool
+assert_priority_queue(const struct intel_engine_execlists *execlists,
+		      const struct i915_request *prev,
+		      const struct i915_request *next)
+{
+	if (!prev)
+		return true;
+
+	/*
+	 * Without preemption, the prev may refer to the still active element
+	 * which we refuse to let go.
+	 *
+	 * Even with premption, there are times when we think it is better not
+	 * to preempt and leave an ostensibly lower priority request in flight.
+	 */
+	if (port_request(execlists->port) == prev)
+		return true;
+
+	return rq_prio(prev) >= rq_prio(next);
 }
 
 /*
@@ -626,8 +702,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		int i;
 
 		priolist_for_each_request_consume(rq, rn, p, i) {
-			GEM_BUG_ON(last &&
-				   need_preempt(engine, last, rq_prio(rq)));
+			GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
 
 			/*
 			 * Can we combine this request with the current port?
@@ -872,6 +947,8 @@ static void process_csb(struct intel_engine_cs *engine)
 	const u32 * const buf = execlists->csb_status;
 	u8 head, tail;
 
+	lockdep_assert_held(&engine->timeline.lock);
+
 	/*
 	 * Note that csb_write, csb_status may be either in HWSP or mmio.
 	 * When reading from the csb_write mmio register, we have to be
-- 
2.20.1
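
As an aside, the index-to-priority flip in queue_prio() can be sanity
checked with a small userspace sketch. The I915_USER_PRIORITY_SHIFT
value and the inverted sub-level layout below are assumptions mirroring
the patch, not the kernel headers:

#include <assert.h>
#include <strings.h> /* ffs() */

#define I915_USER_PRIORITY_SHIFT 2 /* assumed: 2 internal bits per user level */

/* Mirrors queue_prio()'s arithmetic on a single priolist node. */
static int sketch_queue_prio(int priority, unsigned int used)
{
	/*
	 * requests[0] holds the highest internal sub-level, so the first
	 * set bit of 'used' (ffs() counts from 1) is subtracted from the
	 * bottom of the next user level to recover the absolute priority.
	 */
	return ((priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(used);
}

int main(void)
{
	/* User level 0, boosted sub-level [0] in use: (0+1)<<2 - 1 == 3. */
	assert(sketch_queue_prio(0, 1u << 0) == 3);
	/* User level 0, plain sub-level [3] in use: 4 - 4 == 0. */
	assert(sketch_queue_prio(0, 1u << 3) == 0);
	return 0;
}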


* [PATCH v2 2/2] drm/i915/execlists: Suppress redundant preemption
From: Chris Wilson @ 2019-01-23 17:44 UTC (permalink / raw)
  To: intel-gfx

On unwinding the active request we give it a small (limited to internal
priority levels) boost to prevent it from being gazumped a second time.
However, this means that it can be promoted to above the request that
triggered the preemption request, causing a preempt-to-idle cycle for no
change. We can avoid this if we take the boost into account when
checking if the preemption request is valid.

v2: After preemption the active request will be after the preemptee if
they end up with equal priority.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_lrc.c | 39 ++++++++++++++++++++++++++++----
 1 file changed, 35 insertions(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b61235304734..e11b31eab7d2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -163,6 +163,8 @@
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
+#define ACTIVE_PRIORITY (I915_PRIORITY_NEWCLIENT)
+
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 					    struct intel_engine_cs *engine,
 					    struct intel_context *ce);
@@ -181,6 +183,34 @@ static inline int rq_prio(const struct i915_request *rq)
 	return rq->sched.attr.priority;
 }
 
+static inline int active_prio(const struct i915_request *rq)
+{
+	int prio = rq_prio(rq);
+
+	/*
+	 * On unwinding the active request, we give it a priority bump
+	 * equivalent to a freshly submitted request. This protects it from
+	 * being gazumped again, but it would be preferable if we didn't
+	 * let it be gazumped in the first place!
+	 *
+	 * See __unwind_incomplete_requests()
+	 */
+	if ((prio & ACTIVE_PRIORITY) != ACTIVE_PRIORITY &&
+	    i915_request_started(rq)) {
+		/*
+		 * After preemption, we insert the active request at the
+		 * end of the new priority level. This means that we will be
+		 * _lower_ priority than the preemptee, all else being equal (and
+		 * so the preemption is valid), so adjust our comparison
+		 * accordingly.
+		 */
+		prio |= ACTIVE_PRIORITY;
+		prio--;
+	}
+
+	return prio;
+}
+
 static int queue_prio(const struct intel_engine_execlists *execlists)
 {
 	struct i915_priolist *p;
@@ -203,7 +233,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 				int q_prio)
 {
 	const struct intel_context *ctx = rq->hw_context;
-	const int last_prio = rq_prio(rq);
+	int last_prio;
 
 	if (!intel_engine_has_preemption(engine))
 		return false;
@@ -218,6 +248,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 	 * preempt. If that hint is stale or we may be trying to preempt
 	 * ourselves, ignore the request.
 	 */
+	last_prio = active_prio(rq);
 	if (!__execlists_need_preempt(q_prio, last_prio))
 		return false;
 
@@ -344,7 +375,7 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 {
 	struct i915_request *rq, *rn, *active = NULL;
 	struct list_head *uninitialized_var(pl);
-	int prio = I915_PRIORITY_INVALID | I915_PRIORITY_NEWCLIENT;
+	int prio = I915_PRIORITY_INVALID | ACTIVE_PRIORITY;
 
 	lockdep_assert_held(&engine->timeline.lock);
 
@@ -376,8 +407,8 @@ static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
 	 * stream, so give it the equivalent small priority bump to prevent
 	 * it being gazumped a second time by another peer.
 	 */
-	if (!(prio & I915_PRIORITY_NEWCLIENT)) {
-		prio |= I915_PRIORITY_NEWCLIENT;
+	if ((prio & ACTIVE_PRIORITY) != ACTIVE_PRIORITY) {
+		prio |= ACTIVE_PRIORITY;
 		active->sched.attr.priority = prio;
 		list_move_tail(&active->sched.link,
 			       i915_sched_lookup_priolist(engine, prio));
-- 
2.20.1
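
A similar userspace sketch of the active_prio() adjustment; the
ACTIVE_PRIORITY value here is a hypothetical stand-in for
I915_PRIORITY_NEWCLIENT, chosen only to make the arithmetic visible:

#include <assert.h>

#define ACTIVE_PRIORITY 2 /* hypothetical stand-in for I915_PRIORITY_NEWCLIENT */

/* Mirrors active_prio(): the priority the running request competes at. */
static int sketch_active_prio(int prio, int started)
{
	if ((prio & ACTIVE_PRIORITY) != ACTIVE_PRIORITY && started) {
		/*
		 * Credit the unwind boost the request would receive, then
		 * step one below it: after preemption the unwound request
		 * requeues behind an equal-priority preemptee.
		 */
		prio |= ACTIVE_PRIORITY;
		prio--;
	}
	return prio;
}

int main(void)
{
	/* A started, unboosted request at prio 4 competes as (4|2) - 1 == 5,
	 * so a queued request also at 5 no longer forces a preempt-to-idle. */
	assert(sketch_active_prio(4, 1) == 5);
	/* An already-boosted request keeps its priority as-is. */
	assert(sketch_active_prio(6, 1) == 6);
	return 0;
}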


* ✗ Fi.CI.CHECKPATCH: warning for series starting with [v2,1/2] drm/i915/execlists: Suppress preempting self
From: Patchwork @ 2019-01-23 17:51 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/2] drm/i915/execlists: Suppress preempting self
URL   : https://patchwork.freedesktop.org/series/55648/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
b7170f749928 drm/i915/execlists: Suppress preempting self
-:22: WARNING:COMMIT_LOG_LONG_LINE: Possible unwrapped commit description (prefer a maximum 75 chars per line)
#22: 
References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")

-:22: ERROR:GIT_COMMIT_ID: Please use git commit description style 'commit <12+ chars of sha1> ("<title line>")' - ie: 'commit a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")'
#22: 
References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")

-:167: WARNING:TYPO_SPELLING: 'premption' may be misspelled - perhaps 'preemption'?
#167: FILE: drivers/gpu/drm/i915/intel_lrc.c:260:
+	 * Even with premption, there are times when we think it is better not

total: 1 errors, 2 warnings, 0 checks, 156 lines checked
5a4d8c8df0ab drm/i915/execlists: Suppress redundant preemption


* ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/2] drm/i915/execlists: Suppress preempting self
From: Patchwork @ 2019-01-23 17:52 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/2] drm/i915/execlists: Suppress preempting self
URL   : https://patchwork.freedesktop.org/series/55648/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.5.2
Commit: drm/i915/execlists: Suppress preempting self
-drivers/gpu/drm/i915/intel_ringbuffer.h:602:23: warning: expression using sizeof(void)

Commit: drm/i915/execlists: Suppress redundant preemption
Okay!


* ✓ Fi.CI.BAT: success for series starting with [v2,1/2] drm/i915/execlists: Suppress preempting self
From: Patchwork @ 2019-01-23 18:09 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/2] drm/i915/execlists: Suppress preempting self
URL   : https://patchwork.freedesktop.org/series/55648/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_5471 -> Patchwork_12019
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/55648/revisions/1/mbox/

Known issues
------------

  Here are the changes found in Patchwork_12019 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@kms_frontbuffer_tracking@basic:
    - fi-byt-clapper:     PASS -> FAIL [fdo#103167]

  * igt@kms_pipe_crc_basic@read-crc-pipe-b-frame-sequence:
    - fi-byt-clapper:     PASS -> FAIL [fdo#103191] / [fdo#107362]

  * igt@pm_rpm@basic-rte:
    - fi-byt-j1900:       PASS -> FAIL [fdo#108800]

  
#### Possible fixes ####

  * igt@kms_busy@basic-flip-c:
    - fi-kbl-7500u:       {SKIP} [fdo#109271] / [fdo#109278] -> PASS +2

  * igt@kms_chamelium@dp-hpd-fast:
    - fi-kbl-7500u:       DMESG-WARN [fdo#102505] / [fdo#103558] / [fdo#105602] -> PASS

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       FAIL [fdo#108767] -> PASS

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - fi-byt-clapper:     FAIL [fdo#103191] / [fdo#107362] -> PASS

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b:
    - fi-blb-e6850:       INCOMPLETE [fdo#107718] -> PASS

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#102505]: https://bugs.freedesktop.org/show_bug.cgi?id=102505
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103191]: https://bugs.freedesktop.org/show_bug.cgi?id=103191
  [fdo#103558]: https://bugs.freedesktop.org/show_bug.cgi?id=103558
  [fdo#105602]: https://bugs.freedesktop.org/show_bug.cgi?id=105602
  [fdo#107362]: https://bugs.freedesktop.org/show_bug.cgi?id=107362
  [fdo#107718]: https://bugs.freedesktop.org/show_bug.cgi?id=107718
  [fdo#108767]: https://bugs.freedesktop.org/show_bug.cgi?id=108767
  [fdo#108800]: https://bugs.freedesktop.org/show_bug.cgi?id=108800
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278


Participating hosts (46 -> 41)
------------------------------

  Missing    (5): fi-kbl-soraka fi-ilk-m540 fi-byt-squawks fi-bsw-cyan fi-bdw-samus 


Build changes
-------------

    * Linux: CI_DRM_5471 -> Patchwork_12019

  CI_DRM_5471: 198addb18e12d2469bc41d57f9ed63e1072a7f82 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4786: 85cf76182087c09604bcae2bbee9e58b33bcb4f2 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_12019: 5a4d8c8df0abe349b2351fdd3820246e660d8dd9 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

5a4d8c8df0ab drm/i915/execlists: Suppress redundant preemption
b7170f749928 drm/i915/execlists: Suppress preempting self

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_12019/

* ✓ Fi.CI.IGT: success for series starting with [v2,1/2] drm/i915/execlists: Suppress preempting self
From: Patchwork @ 2019-01-23 20:18 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/2] drm/i915/execlists: Suppress preempting self
URL   : https://patchwork.freedesktop.org/series/55648/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_5471_full -> Patchwork_12019_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_12019_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@kms_color@pipe-c-degamma:
    - shard-apl:          PASS -> FAIL [fdo#104782]

  * igt@kms_cursor_crc@cursor-128x128-onscreen:
    - shard-apl:          PASS -> FAIL [fdo#103232] +1

  * igt@kms_cursor_crc@cursor-128x128-random:
    - shard-glk:          PASS -> FAIL [fdo#103232]

  * igt@kms_flip@2x-flip-vs-expired-vblank-interruptible:
    - shard-glk:          PASS -> FAIL [fdo#105363]

  * igt@kms_plane_multiple@atomic-pipe-a-tiling-y:
    - shard-apl:          PASS -> FAIL [fdo#103166] +1

  * igt@kms_plane_multiple@atomic-pipe-a-tiling-yf:
    - shard-glk:          PASS -> FAIL [fdo#103166] +1

  
#### Possible fixes ####

  * igt@kms_ccs@pipe-a-crc-sprite-planes-basic:
    - shard-apl:          FAIL [fdo#106510] / [fdo#108145] -> PASS

  * igt@kms_cursor_crc@cursor-128x128-suspend:
    - shard-glk:          FAIL [fdo#103232] -> PASS +2

  * igt@kms_cursor_crc@cursor-256x256-random:
    - shard-apl:          FAIL [fdo#103232] -> PASS +1

  * igt@kms_plane@pixel-format-pipe-a-planes-source-clamping:
    - shard-apl:          FAIL [fdo#108948] -> PASS

  * igt@kms_plane_alpha_blend@pipe-c-constant-alpha-max:
    - shard-glk:          FAIL [fdo#108145] -> PASS

  * igt@kms_plane_multiple@atomic-pipe-c-tiling-y:
    - shard-glk:          FAIL [fdo#103166] -> PASS

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#103166]: https://bugs.freedesktop.org/show_bug.cgi?id=103166
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#104782]: https://bugs.freedesktop.org/show_bug.cgi?id=104782
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#106510]: https://bugs.freedesktop.org/show_bug.cgi?id=106510
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108948]: https://bugs.freedesktop.org/show_bug.cgi?id=108948
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278


Participating hosts (7 -> 5)
------------------------------

  Missing    (2): shard-skl shard-iclb 


Build changes
-------------

    * Linux: CI_DRM_5471 -> Patchwork_12019

  CI_DRM_5471: 198addb18e12d2469bc41d57f9ed63e1072a7f82 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4786: 85cf76182087c09604bcae2bbee9e58b33bcb4f2 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_12019: 5a4d8c8df0abe349b2351fdd3820246e660d8dd9 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_12019/

* Re: [PATCH v2 1/2] drm/i915/execlists: Suppress preempting self
From: Tvrtko Ursulin @ 2019-01-24 14:18 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 23/01/2019 17:44, Chris Wilson wrote:
> In order to avoid preempting ourselves, we currently refuse to schedule
> the tasklet if we reschedule an inflight context. However, this glosses
> over a few issues, such as what happens when, after a CS completion
> event, we preempt the newly executing context with itself, or when
> something else causes a tasklet_schedule that triggers the same
> evaluation and preempts the active context with itself.
> 
> To avoid the extra complications, after deciding that we have
> potentially queued a request with higher priority than the currently
> executing request, inspect the head of the queue to see if it is indeed
> a higher priority request from another context.
> 
> v2: We can simplify a bunch of tests based on the knowledge that PI will
> ensure that earlier requests along the same context will have the highest
> priority.
> 
> References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Is there a bug or a testcase for this?

> ---
>   drivers/gpu/drm/i915/i915_scheduler.c | 20 ++++--
>   drivers/gpu/drm/i915/intel_lrc.c      | 91 ++++++++++++++++++++++++---
>   2 files changed, 100 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> index 340faea6c08a..fb5d953430e5 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler.c
> +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> @@ -239,6 +239,18 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
>   	return engine;
>   }
>   
> +static bool inflight(const struct i915_request *rq,
> +		     const struct intel_engine_cs *engine)
> +{
> +	const struct i915_request *active;
> +
> +	if (!rq->global_seqno)
> +		return false;
> +
> +	active = port_request(engine->execlists.port);
> +	return active->hw_context == rq->hw_context;
> +}
> +
>   static void __i915_schedule(struct i915_request *rq,
>   			    const struct i915_sched_attr *attr)
>   {
> @@ -328,6 +340,7 @@ static void __i915_schedule(struct i915_request *rq,
>   		INIT_LIST_HEAD(&dep->dfs_link);
>   
>   		engine = sched_lock_engine(node, engine);
> +		lockdep_assert_held(&engine->timeline.lock);
>   
>   		/* Recheck after acquiring the engine->timeline.lock */
>   		if (prio <= node->attr.priority || node_signaled(node))
> @@ -356,17 +369,16 @@ static void __i915_schedule(struct i915_request *rq,
>   		if (prio <= engine->execlists.queue_priority)
>   			continue;
>   
> +		engine->execlists.queue_priority = prio;

This is a fix in its own right? Making sure queue_priority always
reflects the real top of the tree.

> +
>   		/*
>   		 * If we are already the currently executing context, don't
>   		 * bother evaluating if we should preempt ourselves.
>   		 */
> -		if (node_to_request(node)->global_seqno &&
> -		    i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
> -				      node_to_request(node)->global_seqno))
> +		if (inflight(node_to_request(node), engine))
>   			continue;

Before, the check was whether someone was doing a priority bump on the
currently executing request, and we skipped it if so.

With this change we also skip queuing the tasklet if any new requests 
have arrived in the meantime on the same context. Those requests haven't 
been dequeued yet into port0 due to having the same priority. Then priority elevation
comes in and decides to skip queuing the tasklet.

So we end up waiting for context complete before we queue more of the 
same context in. Which may be alright from the point of view of tracking 
priorities per request (ignoring the side note that it is not future proof),
but previously the code would attempt to coalesce those new ones into port0.

In one way the old code had priority inheritance even on the executing
requests, while the new one does not. Which is better I don't know.
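
For illustration, the two skip conditions compare like this in a
standalone sketch (hypothetical types standing in for the kernel
structures, and ignoring seqno wraparound):

#include <assert.h>
#include <stdbool.h>

struct ctx { int id; };
struct request { unsigned int global_seqno; const struct ctx *hw_context; };

/* Old test: skip only once the engine has passed this request's seqno. */
static bool old_skip(const struct request *rq, const struct request *port0)
{
	return rq->global_seqno && port0->global_seqno >= rq->global_seqno;
}

/* New test: skip whenever ELSP[0] is executing the same context. */
static bool inflight(const struct request *rq, const struct request *port0)
{
	if (!rq->global_seqno)
		return false;
	return port0->hw_context == rq->hw_context;
}

int main(void)
{
	const struct ctx a = { 1 };
	const struct request port0 = { .global_seqno = 10, .hw_context = &a };
	/* A later request on the same context, submitted after the ports
	 * were filled: the old test still kicks the tasklet (allowing a
	 * lite-restore), the new test defers to the CS completion event. */
	const struct request later = { .global_seqno = 12, .hw_context = &a };

	assert(!old_skip(&later, &port0));
	assert(inflight(&later, &port0));
	return 0;
}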

>   
>   		/* Defer (tasklet) submission until after all of our updates. */
> -		engine->execlists.queue_priority = prio;
>   		tasklet_hi_schedule(&engine->execlists.tasklet);
>   	}
>   
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 8aa8a4862543..b61235304734 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -181,13 +181,89 @@ static inline int rq_prio(const struct i915_request *rq)
>   	return rq->sched.attr.priority;
>   }
>   
> +static int queue_prio(const struct intel_engine_execlists *execlists)
> +{
> +	struct i915_priolist *p;
> +	struct rb_node *rb;
> +
> +	rb = rb_first_cached(&execlists->queue);
> +	if (!rb)
> +		return INT_MIN;
> +
> +	/*
> > +      * As the priolist[] is inverted, with the highest priority in [0],
> > +      * we have to flip the index value back into a priority.
> +	 */
> +	p = to_priolist(rb);
> +	return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);

I need to remind myself of this later.

> +}
> +
>   static inline bool need_preempt(const struct intel_engine_cs *engine,
> -				const struct i915_request *last,
> -				int prio)
> +				const struct i915_request *rq,
> +				int q_prio)
>   {
> -	return (intel_engine_has_preemption(engine) &&
> -		__execlists_need_preempt(prio, rq_prio(last)) &&
> -		!i915_request_completed(last));
> +	const struct intel_context *ctx = rq->hw_context;
> +	const int last_prio = rq_prio(rq);
> +
> +	if (!intel_engine_has_preemption(engine))
> +		return false;
> +
> +	if (i915_request_completed(rq))
> +		return false;
> +
> +	/*
> +	 * Check if the current queue_priority merits a preemption attempt.
> +	 *
> +	 * However, the queue_priority is a mere hint that we may need to
> +	 * preempt. If that hint is stale or we may be trying to preempt
> +	 * ourselves, ignore the request.
> +	 */
> +	if (!__execlists_need_preempt(q_prio, last_prio))
> +		return false;
> +
> +	/*
> +	 * Check against the first request in ELSP[1], it will, thanks to the
> +	 * power of PI, be the highest priority of that context.
> +	 */
> +	if (!list_is_last(&rq->link, &engine->timeline.requests)) {
> +		rq = list_next_entry(rq, link);
> +		GEM_BUG_ON(rq->hw_context == ctx);
> +		if (rq_prio(rq) > last_prio)
> +			return true;
> +	}

So this is because queue_priority might now be referring to the context
in port0, or any other context, not just port1.

Could we just unsubmit from the engine timelines at re-schedule time in 
this case? Hard I guess, we'd need to find what requests, or at least 
what context, got overtaken to unsubmit them.

> +
> +	/*
> +	 * If the inflight context did not trigger the preemption, then maybe
> +	 * it was the set of queued requests? Pick the highest priority in
> +	 * the queue (the first active priolist) and see if it deserves to be
> +	 * running instead of ELSP[0].
> +	 *
> +	 * The highest priority request in the queue can not be either
> +	 * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
> > +      * context, its priority would not exceed ELSP[0] aka last_prio.
> +	 */
> +	return queue_prio(&engine->execlists) > last_prio;

Could we avoid this check if we only knew the current/latest priority of 
ctx in port0? Submitted or not, depending on our policy. But if we see
that queue_priority == port0->ctx->priority we can avoid preempting
ourselves. I guess that could be defeated by a priority ctx set param after
submission but do we care?

Regards,

Tvrtko

> +}
> +
> +__maybe_unused static inline bool
> +assert_priority_queue(const struct intel_engine_execlists *execlists,
> +		      const struct i915_request *prev,
> +		      const struct i915_request *next)
> +{
> +	if (!prev)
> +		return true;
> +
> +	/*
> +	 * Without preemption, the prev may refer to the still active element
> +	 * which we refuse to let go.
> +	 *
> +	 * Even with premption, there are times when we think it is better not
> +	 * to preempt and leave an ostensibly lower priority request in flight.
> +	 */
> +	if (port_request(execlists->port) == prev)
> +		return true;
> +
> +	return rq_prio(prev) >= rq_prio(next);
>   }
>   
>   /*
> @@ -626,8 +702,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   		int i;
>   
>   		priolist_for_each_request_consume(rq, rn, p, i) {
> -			GEM_BUG_ON(last &&
> -				   need_preempt(engine, last, rq_prio(rq)));
> +			GEM_BUG_ON(!assert_priority_queue(execlists, last, rq));
>   
>   			/*
>   			 * Can we combine this request with the current port?
> @@ -872,6 +947,8 @@ static void process_csb(struct intel_engine_cs *engine)
>   	const u32 * const buf = execlists->csb_status;
>   	u8 head, tail;
>   
> +	lockdep_assert_held(&engine->timeline.lock);
> +
>   	/*
>   	 * Note that csb_write, csb_status may be either in HWSP or mmio.
>   	 * When reading from the csb_write mmio register, we have to be
> 

* Re: [PATCH v2 1/2] drm/i915/execlists: Suppress preempting self
From: Chris Wilson @ 2019-01-24 14:40 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-01-24 14:18:54)
> 
> On 23/01/2019 17:44, Chris Wilson wrote:
> > In order to avoid preempting ourselves, we currently refuse to schedule
> > the tasklet if we reschedule an inflight context. However, this glosses
> > over a few issues, such as what happens when, after a CS completion
> > event, we preempt the newly executing context with itself, or when
> > something else causes a tasklet_schedule that triggers the same
> > evaluation and preempts the active context with itself.
> > 
> > To avoid the extra complications, after deciding that we have
> > potentially queued a request with higher priority than the currently
> > executing request, inspect the head of the queue to see if it is indeed
> > a higher priority request from another context.
> > 
> > v2: We can simplify a bunch of tests based on the knowledge that PI will
> > ensure that earlier requests along the same context will have the highest
> > priority.
> > 
> > References: a2bf92e8cc16 ("drm/i915/execlists: Avoid kicking priority on the current context")
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> Is there a bug or a testcase for this?

It should just be a perf optimisation, on the order of 30us on a good
day.

I think it's a contributing factor to
https://bugs.freedesktop.org/show_bug.cgi?id=108598
but for that the rearrangement made for i915_request_wait later should
fix the minor regression I measured. (The reported regression is 10x
worse than I've been able to reproduce.)

The previous fix was the root cause of the media-bench surprise
(multi-client perf lower with HW semaphores).

> >   drivers/gpu/drm/i915/i915_scheduler.c | 20 ++++--
> >   drivers/gpu/drm/i915/intel_lrc.c      | 91 ++++++++++++++++++++++++---
> >   2 files changed, 100 insertions(+), 11 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> > index 340faea6c08a..fb5d953430e5 100644
> > --- a/drivers/gpu/drm/i915/i915_scheduler.c
> > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> > @@ -239,6 +239,18 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
> >       return engine;
> >   }
> >   
> > +static bool inflight(const struct i915_request *rq,
> > +                  const struct intel_engine_cs *engine)
> > +{
> > +     const struct i915_request *active;
> > +
> > +     if (!rq->global_seqno)
> > +             return false;
> > +
> > +     active = port_request(engine->execlists.port);
> > +     return active->hw_context == rq->hw_context;
> > +}
> > +
> >   static void __i915_schedule(struct i915_request *rq,
> >                           const struct i915_sched_attr *attr)
> >   {
> > @@ -328,6 +340,7 @@ static void __i915_schedule(struct i915_request *rq,
> >               INIT_LIST_HEAD(&dep->dfs_link);
> >   
> >               engine = sched_lock_engine(node, engine);
> > +             lockdep_assert_held(&engine->timeline.lock);
> >   
> >               /* Recheck after acquiring the engine->timeline.lock */
> >               if (prio <= node->attr.priority || node_signaled(node))
> > @@ -356,17 +369,16 @@ static void __i915_schedule(struct i915_request *rq,
> >               if (prio <= engine->execlists.queue_priority)
> >                       continue;
> >   
> > +             engine->execlists.queue_priority = prio;
> 
> This is a fix in its own right? Making sure queue_priority always
> reflects the real top of the tree.

It's a consistency fix (it also ensures we don't kick the tasklet more
than we really have to), but we used the inconsistency to our advantage
before. We only skipped the update iff we knew this would not cause
preemption, but because we left it at a lower value than the queue, we
would more eagerly kick the submission tasklet than was strictly
necessary. (If another request was added to the top of the queue, then
it would reflect the right value; otherwise it was lower, which wouldn't
cause preemption, and that was ok as the inflight request already held the
right priority.)
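
A runnable condensation of that ordering difference (simplified
stand-ins, not the kernel code):

#include <assert.h>
#include <stdbool.h>

static int queue_priority;
static int kicks;

static void tasklet_hi_schedule(void) { kicks++; }

/* Old tail of __i915_schedule(): the hint and the kick were tied. */
static void old_schedule(int prio, bool is_inflight)
{
	if (prio <= queue_priority || is_inflight)
		return;
	queue_priority = prio;
	tasklet_hi_schedule();
}

/* New tail: the hint always tracks the queue; only the kick is skipped. */
static void new_schedule(int prio, bool is_inflight)
{
	if (prio <= queue_priority)
		return;
	queue_priority = prio;
	if (!is_inflight)
		tasklet_hi_schedule();
}

int main(void)
{
	/* Bump the inflight context to prio 4: the old code leaves a stale
	 * hint behind, so a later prio-3 request kicks the tasklet even
	 * though nothing outranks the inflight work. */
	queue_priority = 0, kicks = 0;
	old_schedule(4, true);
	old_schedule(3, false);
	assert(queue_priority == 3 && kicks == 1);

	/* The new code records prio 4 up front, so the prio-3 request is
	 * filtered out without a spurious kick. */
	queue_priority = 0, kicks = 0;
	new_schedule(4, true);
	new_schedule(3, false);
	assert(queue_priority == 4 && kicks == 0);
	return 0;
}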

> >               /*
> >                * If we are already the currently executing context, don't
> >                * bother evaluating if we should preempt ourselves.
> >                */
> > -             if (node_to_request(node)->global_seqno &&
> > -                 i915_seqno_passed(port_request(engine->execlists.port)->global_seqno,
> > -                                   node_to_request(node)->global_seqno))
> > +             if (inflight(node_to_request(node), engine))
> >                       continue;
> 
> Before, the check was whether someone was doing a priority bump on the
> currently executing request, and we skipped it if so.
> 
> With this change we also skip queuing the tasklet if any new requests 
> have arrived in the meantime on the same context. Those requests haven't 
> been dequeued yet into port0 due to having the same priority. Then priority elevation
> comes in and decides to skip queuing the tasklet.
> 
> So we end up waiting for context complete before we queue more of the 
> same context in. Which may be alright from the point of view of tracking 
> priorities per request (ignoring the side note that it is not future proof),
> but previously the code would attempt to coalesce those new ones into port0.
> 
> In one way the old code had priority inheritance even on the executing
> requests, while the new one does not. Which is better I don't know.

No. The old way forced a preempt-to-idle to perform the "lite-restore"
you are suggesting, exactly as we would do now except after the CS.

There's no clear cut favourite. Both have a bubble and add the same
latency to the overall execution.

> >               /* Defer (tasklet) submission until after all of our updates. */
> > -             engine->execlists.queue_priority = prio;
> >               tasklet_hi_schedule(&engine->execlists.tasklet);
> >       }
> >   
> > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > index 8aa8a4862543..b61235304734 100644
> > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > @@ -181,13 +181,89 @@ static inline int rq_prio(const struct i915_request *rq)
> >       return rq->sched.attr.priority;
> >   }
> >   
> > +static int queue_prio(const struct intel_engine_execlists *execlists)
> > +{
> > +     struct i915_priolist *p;
> > +     struct rb_node *rb;
> > +
> > +     rb = rb_first_cached(&execlists->queue);
> > +     if (!rb)
> > +             return INT_MIN;
> > +
> > +     /*
> > +      * As the priolist[] is inverted, with the highest priority in [0],
> > +      * we have to flip the index value back into a priority.
> > +      */
> > +     p = to_priolist(rb);
> > +     return ((p->priority + 1) << I915_USER_PRIORITY_SHIFT) - ffs(p->used);
> 
> I need to remind myself of this later.
> 
> > +}
> > +
> >   static inline bool need_preempt(const struct intel_engine_cs *engine,
> > -                             const struct i915_request *last,
> > -                             int prio)
> > +                             const struct i915_request *rq,
> > +                             int q_prio)
> >   {
> > -     return (intel_engine_has_preemption(engine) &&
> > -             __execlists_need_preempt(prio, rq_prio(last)) &&
> > -             !i915_request_completed(last));
> > +     const struct intel_context *ctx = rq->hw_context;
> > +     const int last_prio = rq_prio(rq);
> > +
> > +     if (!intel_engine_has_preemption(engine))
> > +             return false;
> > +
> > +     if (i915_request_completed(rq))
> > +             return false;
> > +
> > +     /*
> > +      * Check if the current queue_priority merits a preemption attempt.
> > +      *
> > +      * However, the queue_priority is a mere hint that we may need to
> > +      * preempt. If that hint is stale or we may be trying to preempt
> > +      * ourselves, ignore the request.
> > +      */
> > +     if (!__execlists_need_preempt(q_prio, last_prio))
> > +             return false;
> > +
> > +     /*
> > +      * Check against the first request in ELSP[1], it will, thanks to the
> > +      * power of PI, be the highest priority of that context.
> > +      */
> > +     if (!list_is_last(&rq->link, &engine->timeline.requests)) {
> > +             rq = list_next_entry(rq, link);
> > +             GEM_BUG_ON(rq->hw_context == ctx);
> > +             if (rq_prio(rq) > last_prio)
> > +                     return true;
> > +     }
> 
> So this is because queue_priority might now be referring to the context
> in port0, or any other context, not just port1.
> 
> Could we just unsubmit from the engine timelines at re-schedule time in 
> this case? Hard I guess, we'd need to find what requests, or at least 
> what context, got overtaken to unsubmit them.

Now you are talking... You are starting along the path to preempt-busy
:)

> > +     /*
> > +      * If the inflight context did not trigger the preemption, then maybe
> > +      * it was the set of queued requests? Pick the highest priority in
> > +      * the queue (the first active priolist) and see if it deserves to be
> > +      * running instead of ELSP[0].
> > +      *
> > +      * The highest priority request in the queue can not be either
> > +      * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
> > +      * context, its priority would not exceed ELSP[0] aka last_prio.
> > +      */
> > +     return queue_prio(&engine->execlists) > last_prio;
> 
> Could we avoid this check if we only knew the current/latest priority of 
> > ctx in port0? Submitted or not, depending on our policy. But if we see
> > that queue_priority == port0->ctx->priority we can avoid preempting
> > ourselves. I guess that could be defeated by a priority ctx set param after
> submission but do we care?

It's upset because at this point we are no longer certain that
execlists->queue_priority refers to anything. We may have been trying to
schedule a preemption point on behalf of a request that has long since
completed.
-Chris

* Re: [PATCH v2 1/2] drm/i915/execlists: Suppress preempting self
From: Chris Wilson @ 2019-01-24 15:07 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Chris Wilson (2019-01-24 14:40:40)
> Quoting Tvrtko Ursulin (2019-01-24 14:18:54)
> > 
> > On 23/01/2019 17:44, Chris Wilson wrote:
> > > +     /*
> > > +      * If the inflight context did not trigger the preemption, then maybe
> > > +      * it was the set of queued requests? Pick the highest priority in
> > > +      * the queue (the first active priolist) and see if it deserves to be
> > > +      * running instead of ELSP[0].
> > > +      *
> > > +      * The highest priority request in the queue can not be either
> > > +      * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same
> > > +      * context, its priority would not exceed ELSP[0] aka last_prio.
> > > +      */
> > > +     return queue_prio(&engine->execlists) > last_prio;
> > 
> > Could we avoid this check if we only knew the current/latest priority of 
> > ctx in port0? Submitted or not, depending on our policy. But if we see
> > that queue_priority == port0->ctx->priority we can avoid preempting
> > ourselves. I guess that could be defeated by a priority ctx set param after
> > submission but do we care?
> 
> It's upset because at this point we are no longer certain that
> execlists->queue_priority refers to anything. We may have been trying to
> schedule a preemption point on behalf of a request that has long since
> completed.

Or even run on an entirely different engine for veng.
-Chris
