intel-gfx.lists.freedesktop.org archive mirror
* [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking
@ 2021-01-06 16:36 Chris Wilson
  2021-01-06 16:36 ` [Intel-gfx] [PATCH v2 2/2] drm/i915/gt: Remove timeslice suppression Chris Wilson
                   ` (7 more replies)
  0 siblings, 8 replies; 13+ messages in thread
From: Chris Wilson @ 2021-01-06 16:36 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

Refactor our timer_list.expires checking into its own timer_active()
helper.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_utils.c | 2 +-
 drivers/gpu/drm/i915/i915_utils.h | 7 ++++++-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index 4c305d838016..f9e780dee9de 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -87,7 +87,7 @@ bool i915_error_injected(void)
 
 void cancel_timer(struct timer_list *t)
 {
-	if (!READ_ONCE(t->expires))
+	if (!timer_active(t))
 		return;
 
 	del_timer(t);
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 54773371e6bd..abd4dcd9f79c 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -438,9 +438,14 @@ static inline void __add_taint_for_CI(unsigned int taint)
 void cancel_timer(struct timer_list *t);
 void set_timer_ms(struct timer_list *t, unsigned long timeout);
 
+static inline bool timer_active(const struct timer_list *t)
+{
+	return READ_ONCE(t->expires);
+}
+
 static inline bool timer_expired(const struct timer_list *t)
 {
-	return READ_ONCE(t->expires) && !timer_pending(t);
+	return timer_active(t) && !timer_pending(t);
 }
 
 /*
-- 
2.20.1
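
For reference, these helpers only make sense for timers managed through
i915's own set_timer_ms()/cancel_timer(), which keep t->expires at zero
while the timer is idle. A minimal sketch of that convention, assuming
(it is not shown in the hunk above) that cancel_timer() also writes
t->expires back to zero after del_timer():

	/*
	 * Sketch only: expires == 0 means "idle", so arming stores a
	 * non-zero expiry and cancelling stores zero back. Plain
	 * mod_timer()/del_timer() users do not maintain this invariant.
	 */
	static void sketch_arm_ms(struct timer_list *t, unsigned long timeout_ms)
	{
		/* non-zero expiry: timer_active(t) now reports true */
		mod_timer(t, jiffies + msecs_to_jiffies(timeout_ms));
	}

	static void sketch_cancel(struct timer_list *t)
	{
		if (!timer_active(t))
			return;

		del_timer(t);
		WRITE_ONCE(t->expires, 0); /* timer_active(t) is false again */
	}

With that invariant, timer_expired(t) can distinguish "armed and fired"
(expires != 0 && !timer_pending()) from "never armed" (expires == 0).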


* [Intel-gfx] [PATCH v2 2/2] drm/i915/gt: Remove timeslice suppression
  2021-01-06 16:36 [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking Chris Wilson
@ 2021-01-06 16:36 ` Chris Wilson
  2021-01-07 11:01   ` [Intel-gfx] [PATCH v3] " Chris Wilson
  2021-01-07 11:05   ` Chris Wilson
  2021-01-06 16:49 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking Patchwork
                   ` (6 subsequent siblings)
  7 siblings, 2 replies; 13+ messages in thread
From: Chris Wilson @ 2021-01-06 16:36 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

In the next^W future patch, we remove the strict priority system and
continuously re-evaluate the relative priority of tasks. As such we need
to enable the timeslice whenever there is more than one context in the
pipeline. This simplifies the decision and removes some of the tweaks to
suppress timeslicing, allowing us to lift the timeslice enabling to a
common spot at the end of running the submission tasklet.

One consequence of the suppression is that it was reducing fairness
between virtual engines on an oversaturated system, undermining the
principle of timeslicing.
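
Condensed, the new rule amounts to the needs_timeslice() helper added
below, restated here in one place for readability (the same logic as in
the diff, not a separate implementation):

	static bool sketch_needs_timeslice(const struct intel_engine_cs *engine,
					   const struct i915_request *rq)
	{
		if (!intel_engine_has_timeslices(engine))
			return false;

		/* nothing running, or about to switch: wait for the next event */
		if (!rq || __i915_request_is_complete(rq))
			return false;

		/* do not start the slice until the HW has ACKed the ELSP write */
		if (READ_ONCE(engine->execlists.pending[0]))
			return false;

		/* slice iff anyone else is waiting: ELSP[1], queue or virtual */
		return !list_is_last_rcu(&rq->sched.link, &engine->active.requests) ||
		       !RB_EMPTY_ROOT(&engine->execlists.queue.rb_root) ||
		       !RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root);
	}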

v2: Commentary

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2802
Testcase: igt/gem_exec_balancer/fairslice
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |  10 -
 .../drm/i915/gt/intel_execlists_submission.c  | 195 ++++++++----------
 2 files changed, 90 insertions(+), 115 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 430066e5884c..df62e793e747 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -238,16 +238,6 @@ struct intel_engine_execlists {
 	 */
 	unsigned int port_mask;
 
-	/**
-	 * @switch_priority_hint: Second context priority.
-	 *
-	 * We submit multiple contexts to the HW simultaneously and would
-	 * like to occasionally switch between them to emulate timeslicing.
-	 * To know when timeslicing is suitable, we track the priority of
-	 * the context submitted second.
-	 */
-	int switch_priority_hint;
-
 	/**
 	 * @queue_priority_hint: Highest pending priority.
 	 *
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index a5b442683c18..e6c89a8bf80a 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1148,25 +1148,6 @@ static void defer_active(struct intel_engine_cs *engine)
 	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
 }
 
-static bool
-need_timeslice(const struct intel_engine_cs *engine,
-	       const struct i915_request *rq)
-{
-	int hint;
-
-	if (!intel_engine_has_timeslices(engine))
-		return false;
-
-	hint = max(engine->execlists.queue_priority_hint,
-		   virtual_prio(&engine->execlists));
-
-	if (!list_is_last(&rq->sched.link, &engine->active.requests))
-		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
-
-	GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
-	return hint >= effective_prio(rq);
-}
-
 static bool
 timeslice_yield(const struct intel_engine_execlists *el,
 		const struct i915_request *rq)
@@ -1186,76 +1167,74 @@ timeslice_yield(const struct intel_engine_execlists *el,
 	return rq->context->lrc.ccid == READ_ONCE(el->yield);
 }
 
-static bool
-timeslice_expired(const struct intel_engine_execlists *el,
-		  const struct i915_request *rq)
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+			    const struct i915_request *rq)
 {
+	if (!intel_engine_has_timeslices(engine))
+		return false;
+
+	/* If not currently active, or about to switch, wait for next event */
+	if (!rq || __i915_request_is_complete(rq))
+		return false;
+
+	/* We do not need to start the timeslice until after the ACK */
+	if (READ_ONCE(engine->execlists.pending[0]))
+		return false;
+
+	/* If ELSP[1] is occupied, always check to see if worth slicing */
+	if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests))
+		return true;
+
+	/* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
+		return true;
+
+	return !RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root);
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+	const struct intel_engine_execlists *el = &engine->execlists;
+
+	if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+		return false;
+
+	if (!needs_timeslice(engine, rq))
+		return false;
+
 	return timer_expired(&el->timer) || timeslice_yield(el, rq);
 }
 
-static int
-switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
-{
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return engine->execlists.queue_priority_hint;
-
-	return rq_prio(list_next_entry(rq, sched.link));
-}
-
-static inline unsigned long
-timeslice(const struct intel_engine_cs *engine)
+static unsigned long timeslice(const struct intel_engine_cs *engine)
 {
 	return READ_ONCE(engine->props.timeslice_duration_ms);
 }
 
-static unsigned long active_timeslice(const struct intel_engine_cs *engine)
-{
-	const struct intel_engine_execlists *execlists = &engine->execlists;
-	const struct i915_request *rq = *execlists->active;
-
-	if (!rq || __i915_request_is_complete(rq))
-		return 0;
-
-	if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
-		return 0;
-
-	return timeslice(engine);
-}
-
-static void set_timeslice(struct intel_engine_cs *engine)
+static void start_timeslice(struct intel_engine_cs *engine)
 {
+	struct intel_engine_execlists *el = &engine->execlists;
 	unsigned long duration;
 
-	if (!intel_engine_has_timeslices(engine))
-		return;
+	/* Disable the timer if there is nothing to switch to */
+	duration = 0;
+	if (needs_timeslice(engine, *el->active)) {
+		/* Avoid continually prolonging an active timeslice */
+		if (timer_active(&el->timer)) {
+			/*
+			 * If we just submitted a new ELSP after an old
+			 * context, that context may have already consumed
+			 * its timeslice, so recheck.
+			 */
+			if (!timer_pending(&el->timer))
+				tasklet_hi_schedule(&engine->execlists.tasklet);
+			return;
+		}
 
-	duration = active_timeslice(engine);
-	ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+		duration = timeslice(engine);
+	}
 
-	set_timer_ms(&engine->execlists.timer, duration);
-}
-
-static void start_timeslice(struct intel_engine_cs *engine, int prio)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-	unsigned long duration;
-
-	if (!intel_engine_has_timeslices(engine))
-		return;
-
-	WRITE_ONCE(execlists->switch_priority_hint, prio);
-	if (prio == INT_MIN)
-		return;
-
-	if (timer_pending(&execlists->timer))
-		return;
-
-	duration = timeslice(engine);
-	ENGINE_TRACE(engine,
-		     "start timeslicing, prio:%d, interval:%lu",
-		     prio, duration);
-
-	set_timer_ms(&execlists->timer, duration);
+	set_timer_ms(&el->timer, duration);
 }
 
 static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1368,16 +1347,32 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			__unwind_incomplete_requests(engine);
 
 			last = NULL;
-		} else if (need_timeslice(engine, last) &&
-			   timeslice_expired(execlists, last)) {
+		} else if (timeslice_expired(engine, last)) {
 			ENGINE_TRACE(engine,
-				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
-				     last->fence.context,
-				     last->fence.seqno,
-				     last->sched.attr.priority,
+				     "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+				     yesno(timer_expired(&execlists->timer)),
+				     last->fence.context, last->fence.seqno,
+				     rq_prio(last),
 				     execlists->queue_priority_hint,
 				     yesno(timeslice_yield(execlists, last)));
 
+			/*
+			 * Consume this timeslice; ensure we start a new one.
+			 *
+			 * The timeslice expired, and we will unwind the
+			 * running contexts and recompute the next ELSP.
+			 * If that submit will be the same pair of contexts
+			 * (due to dependency ordering), we will skip the
+			 * submission. If we don't cancel the timer now,
+			 * we will see that the timer has expired and
+			 * reschedule the tasklet; continually until the
+			 * next context switch or other preemption event.
+			 *
+			 * Since we have decided to reschedule based on
+			 * consumption of this timeslice, if we submit the
+			 * same context again, grant it a full timeslice.
+			 */
+			cancel_timer(&execlists->timer);
 			ring_set_paused(engine, 1);
 			defer_active(engine);
 
@@ -1413,7 +1408,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * of timeslices, our queue might be.
 				 */
 				spin_unlock(&engine->active.lock);
-				start_timeslice(engine, queue_prio(execlists));
 				return;
 			}
 		}
@@ -1440,7 +1434,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		if (last && !can_merge_rq(last, rq)) {
 			spin_unlock(&ve->base.active.lock);
 			spin_unlock(&engine->active.lock);
-			start_timeslice(engine, rq_prio(rq));
 			return; /* leave this for another sibling */
 		}
 
@@ -1604,29 +1597,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	execlists->queue_priority_hint = queue_prio(execlists);
 	spin_unlock(&engine->active.lock);
 
-	if (submit) {
-		/*
-		 * Skip if we ended up with exactly the same set of requests,
-		 * e.g. trying to timeslice a pair of ordered contexts
-		 */
-		if (!memcmp(execlists->active,
-			    execlists->pending,
-			    (port - execlists->pending) * sizeof(*port)))
-			goto skip_submit;
-
+	/*
+	 * We can skip poking the HW if we ended up with exactly the same set
+	 * of requests as currently running, e.g. trying to timeslice a pair
+	 * of ordered contexts.
+	 */
+	if (submit &&
+	    memcmp(execlists->active,
+		   execlists->pending,
+		   (port - execlists->pending) * sizeof(*port))) {
 		*port = NULL;
 		while (port-- != execlists->pending)
 			execlists_schedule_in(*port, port - execlists->pending);
 
-		execlists->switch_priority_hint =
-			switch_prio(engine, *execlists->pending);
-
 		WRITE_ONCE(execlists->yield, -1);
 		set_preempt_timeout(engine, *execlists->active);
 		execlists_submit_ports(engine);
 	} else {
-		start_timeslice(engine, execlists->queue_priority_hint);
-skip_submit:
 		ring_set_paused(engine, 0);
 		while (port-- != execlists->pending)
 			i915_request_put(*port);
@@ -1984,8 +1971,6 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 		}
 	} while (head != tail);
 
-	set_timeslice(engine);
-
 	/*
 	 * Gen11 has proven to fail wrt global observation point between
 	 * entry and tail update, failing on the ordering and thus
@@ -1998,6 +1983,7 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 	 * invalidation before.
 	 */
 	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
+	cancel_timer(&execlists->timer);
 
 	return inactive;
 }
@@ -2410,8 +2396,10 @@ static void execlists_submission_tasklet(unsigned long data)
 		execlists_reset(engine, msg);
 	}
 
-	if (!engine->execlists.pending[0])
+	if (!engine->execlists.pending[0]) {
 		execlists_dequeue_irq(engine);
+		start_timeslice(engine);
+	}
 
 	post_process_csb(post, inactive);
 	rcu_read_unlock();
@@ -3856,9 +3844,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 		show_request(m, last, "\t\t", 0);
 	}
 
-	if (execlists->switch_priority_hint != INT_MIN)
-		drm_printf(m, "\t\tSwitch priority hint: %d\n",
-			   READ_ONCE(execlists->switch_priority_hint));
 	if (execlists->queue_priority_hint != INT_MIN)
 		drm_printf(m, "\t\tQueue priority hint: %d\n",
 			   READ_ONCE(execlists->queue_priority_hint));
-- 
2.20.1
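
The structural effect is easiest to see in the tasklet hunk above:
rather than set_timeslice() inside process_csb() plus start_timeslice()
calls scattered through execlists_dequeue(), the timer is cancelled once
per CSB batch and re-armed at a single point after dequeue. Condensed
from the hunks above:

	/* one common (re)arm point at the end of the submission tasklet */
	if (!engine->execlists.pending[0]) {
		execlists_dequeue_irq(engine);	/* pick the next ELSP pair */
		start_timeslice(engine);	/* arms iff needs_timeslice() */
	}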


* [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking
  2021-01-06 16:36 [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking Chris Wilson
  2021-01-06 16:36 ` [Intel-gfx] [PATCH v2 2/2] drm/i915/gt: Remove timeslice suppression Chris Wilson
@ 2021-01-06 16:49 ` Patchwork
  2021-01-06 17:16 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2021-01-06 16:49 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking
URL   : https://patchwork.freedesktop.org/series/85551/
State : warning

== Summary ==

$ dim sparse --fast origin/drm-tip
Sparse version: v0.6.2
Fast mode used, each commit won't be checked separately.
+drivers/gpu/drm/i915/intel_wakeref.c:137:19: warning: context imbalance in 'wakeref_auto_timeout' - unexpected unlock
+drivers/gpu/drm/i915/selftests/i915_syncmap.c:80:54: warning: dubious: x | !y

* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking
  2021-01-06 16:36 [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking Chris Wilson
  2021-01-06 16:36 ` [Intel-gfx] [PATCH v2 2/2] drm/i915/gt: Remove timeslice suppression Chris Wilson
  2021-01-06 16:49 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking Patchwork
@ 2021-01-06 17:16 ` Patchwork
  2021-01-07  1:55 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2021-01-06 17:16 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx


== Series Details ==

Series: series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking
URL   : https://patchwork.freedesktop.org/series/85551/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_9551 -> Patchwork_19271
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/index.html

Known issues
------------

  Here are the changes found in Patchwork_19271 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@amdgpu/amd_basic@query-info:
    - fi-bsw-kefka:       NOTRUN -> [SKIP][1] ([fdo#109271]) +17 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/fi-bsw-kefka/igt@amdgpu/amd_basic@query-info.html

  * igt@gem_exec_gttfill@basic:
    - fi-tgl-y:           [PASS][2] -> [DMESG-WARN][3] ([i915#402]) +2 similar issues
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/fi-tgl-y/igt@gem_exec_gttfill@basic.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/fi-tgl-y/igt@gem_exec_gttfill@basic.html

  
#### Possible fixes ####

  * igt@gem_sync@basic-all:
    - fi-tgl-y:           [DMESG-WARN][4] ([i915#402]) -> [PASS][5] +1 similar issue
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/fi-tgl-y/igt@gem_sync@basic-all.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/fi-tgl-y/igt@gem_sync@basic-all.html

  * igt@i915_selftest@live@gem:
    - fi-bsw-kefka:       [DMESG-WARN][6] ([i915#2826]) -> [PASS][7]
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/fi-bsw-kefka/igt@i915_selftest@live@gem.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/fi-bsw-kefka/igt@i915_selftest@live@gem.html

  * igt@i915_selftest@live@gem_contexts:
    - fi-bsw-kefka:       [INCOMPLETE][8] ([i915#2369]) -> [PASS][9]
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/fi-bsw-kefka/igt@i915_selftest@live@gem_contexts.html
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/fi-bsw-kefka/igt@i915_selftest@live@gem_contexts.html

  * igt@i915_selftest@live@gt_heartbeat:
    - fi-tgl-y:           [DMESG-FAIL][10] ([i915#2601]) -> [PASS][11]
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/fi-tgl-y/igt@i915_selftest@live@gt_heartbeat.html
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/fi-tgl-y/igt@i915_selftest@live@gt_heartbeat.html

  
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [i915#2369]: https://gitlab.freedesktop.org/drm/intel/issues/2369
  [i915#2601]: https://gitlab.freedesktop.org/drm/intel/issues/2601
  [i915#2826]: https://gitlab.freedesktop.org/drm/intel/issues/2826
  [i915#402]: https://gitlab.freedesktop.org/drm/intel/issues/402


Participating hosts (42 -> 37)
------------------------------

  Missing    (5): fi-ilk-m540 fi-hsw-4200u fi-bsw-cyan fi-ctg-p8600 fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_9551 -> Patchwork_19271

  CI-20190529: 20190529
  CI_DRM_9551: 48e2246bf27ad73d7ba2c32d942d236c6bc692fa @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5945: f0ad1a564956d6796d9ff09182c48d78fb00eefe @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_19271: 272b92deec724bfb19cff708f63d16057c68ce5c @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

272b92deec72 drm/i915/gt: Remove timeslice suppression
ee097290f6f5 drm/i915: Wrap our timer_list.expires checking

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/index.html


* [Intel-gfx] ✓ Fi.CI.IGT: success for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking
  2021-01-06 16:36 [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking Chris Wilson
                   ` (2 preceding siblings ...)
  2021-01-06 17:16 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
@ 2021-01-07  1:55 ` Patchwork
  2021-01-07  9:25 ` [Intel-gfx] [PATCH v2 1/2] " Tvrtko Ursulin
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2021-01-07  1:55 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx


== Series Details ==

Series: series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking
URL   : https://patchwork.freedesktop.org/series/85551/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_9551_full -> Patchwork_19271_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_19271_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_reloc@basic-wide-active@vcs1:
    - shard-iclb:         NOTRUN -> [FAIL][1] ([i915#2389])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-iclb1/igt@gem_exec_reloc@basic-wide-active@vcs1.html

  * igt@kms_color@pipe-c-ctm-0-25:
    - shard-skl:          [PASS][2] -> [DMESG-WARN][3] ([i915#1982])
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl2/igt@kms_color@pipe-c-ctm-0-25.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl10/igt@kms_color@pipe-c-ctm-0-25.html

  * igt@kms_cursor_crc@pipe-a-cursor-256x85-onscreen:
    - shard-skl:          [PASS][4] -> [FAIL][5] ([i915#54]) +5 similar issues
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl5/igt@kms_cursor_crc@pipe-a-cursor-256x85-onscreen.html
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl3/igt@kms_cursor_crc@pipe-a-cursor-256x85-onscreen.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions:
    - shard-skl:          NOTRUN -> [FAIL][6] ([i915#2346])
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl5/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions.html

  * igt@kms_flip@2x-flip-vs-expired-vblank@ac-hdmi-a1-hdmi-a2:
    - shard-glk:          [PASS][7] -> [FAIL][8] ([i915#79])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-glk2/igt@kms_flip@2x-flip-vs-expired-vblank@ac-hdmi-a1-hdmi-a2.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-glk5/igt@kms_flip@2x-flip-vs-expired-vblank@ac-hdmi-a1-hdmi-a2.html

  * igt@kms_flip@plain-flip-ts-check-interruptible@b-dp1:
    - shard-apl:          [PASS][9] -> [FAIL][10] ([i915#2122])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-apl2/igt@kms_flip@plain-flip-ts-check-interruptible@b-dp1.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-apl3/igt@kms_flip@plain-flip-ts-check-interruptible@b-dp1.html

  * igt@kms_hdr@bpc-switch-dpms:
    - shard-skl:          [PASS][11] -> [FAIL][12] ([i915#1188])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl4/igt@kms_hdr@bpc-switch-dpms.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl1/igt@kms_hdr@bpc-switch-dpms.html

  * igt@kms_plane_alpha_blend@pipe-b-coverage-7efc:
    - shard-skl:          [PASS][13] -> [FAIL][14] ([fdo#108145] / [i915#265])
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl8/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl6/igt@kms_plane_alpha_blend@pipe-b-coverage-7efc.html

  * igt@kms_plane_lowres@pipe-c-tiling-x:
    - shard-kbl:          [PASS][15] -> [DMESG-WARN][16] ([i915#165] / [i915#180] / [i915#78]) +2 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-kbl1/igt@kms_plane_lowres@pipe-c-tiling-x.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-kbl2/igt@kms_plane_lowres@pipe-c-tiling-x.html

  * igt@kms_psr@psr2_primary_mmap_cpu:
    - shard-iclb:         [PASS][17] -> [SKIP][18] ([fdo#109441]) +2 similar issues
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-iclb2/igt@kms_psr@psr2_primary_mmap_cpu.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-iclb6/igt@kms_psr@psr2_primary_mmap_cpu.html

  * igt@kms_vblank@pipe-d-query-idle:
    - shard-iclb:         NOTRUN -> [SKIP][19] ([fdo#109278])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-iclb3/igt@kms_vblank@pipe-d-query-idle.html

  * igt@nouveau_crc@pipe-a-ctx-flip-skip-current-frame:
    - shard-skl:          NOTRUN -> [SKIP][20] ([fdo#109271])
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl5/igt@nouveau_crc@pipe-a-ctx-flip-skip-current-frame.html

  * igt@perf@polling-parameterized:
    - shard-skl:          [PASS][21] -> [FAIL][22] ([i915#1542])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl3/igt@perf@polling-parameterized.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl1/igt@perf@polling-parameterized.html

  
#### Possible fixes ####

  * {igt@gem_exec_balancer@fairslice}:
    - shard-tglb:         [FAIL][23] ([i915#2802]) -> [PASS][24]
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-tglb5/igt@gem_exec_balancer@fairslice.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-tglb6/igt@gem_exec_balancer@fairslice.html
    - shard-iclb:         [FAIL][25] ([i915#2802]) -> [PASS][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-iclb2/igt@gem_exec_balancer@fairslice.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-iclb5/igt@gem_exec_balancer@fairslice.html

  * {igt@gem_exec_fair@basic-none-share@rcs0}:
    - shard-apl:          [SKIP][27] ([fdo#109271]) -> [PASS][28]
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-apl2/igt@gem_exec_fair@basic-none-share@rcs0.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-apl3/igt@gem_exec_fair@basic-none-share@rcs0.html

  * {igt@gem_exec_fair@basic-none@vcs0}:
    - shard-kbl:          [FAIL][29] ([i915#2842]) -> [PASS][30] +1 similar issue
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-kbl7/igt@gem_exec_fair@basic-none@vcs0.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-kbl1/igt@gem_exec_fair@basic-none@vcs0.html

  * {igt@gem_exec_fair@basic-pace-share@rcs0}:
    - shard-tglb:         [FAIL][31] ([i915#2842]) -> [PASS][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-tglb1/igt@gem_exec_fair@basic-pace-share@rcs0.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-tglb2/igt@gem_exec_fair@basic-pace-share@rcs0.html

  * {igt@gem_exec_fair@basic-pace@vecs0}:
    - shard-iclb:         [FAIL][33] ([i915#2842]) -> [PASS][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-iclb2/igt@gem_exec_fair@basic-pace@vecs0.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-iclb6/igt@gem_exec_fair@basic-pace@vecs0.html

  * igt@gem_exec_whisper@basic-fds-forked:
    - shard-glk:          [DMESG-WARN][35] ([i915#118] / [i915#95]) -> [PASS][36] +1 similar issue
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-glk3/igt@gem_exec_whisper@basic-fds-forked.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-glk8/igt@gem_exec_whisper@basic-fds-forked.html

  * igt@kms_cursor_crc@pipe-a-cursor-128x42-random:
    - shard-skl:          [FAIL][37] ([i915#54]) -> [PASS][38] +4 similar issues
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl2/igt@kms_cursor_crc@pipe-a-cursor-128x42-random.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl10/igt@kms_cursor_crc@pipe-a-cursor-128x42-random.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size:
    - shard-skl:          [FAIL][39] ([i915#2346] / [i915#533]) -> [PASS][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl1/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl9/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html

  * igt@kms_flip@flip-vs-expired-vblank@a-edp1:
    - shard-skl:          [FAIL][41] ([i915#2122]) -> [PASS][42]
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl9/igt@kms_flip@flip-vs-expired-vblank@a-edp1.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl2/igt@kms_flip@flip-vs-expired-vblank@a-edp1.html

  * igt@kms_lease@lease_unleased_crtc:
    - shard-snb:          [SKIP][43] ([fdo#109271]) -> [PASS][44]
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-snb5/igt@kms_lease@lease_unleased_crtc.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-snb5/igt@kms_lease@lease_unleased_crtc.html

  * igt@kms_plane_cursor@pipe-c-viewport-size-256:
    - shard-kbl:          [DMESG-WARN][45] ([i915#165]) -> [PASS][46]
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-kbl2/igt@kms_plane_cursor@pipe-c-viewport-size-256.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-kbl3/igt@kms_plane_cursor@pipe-c-viewport-size-256.html

  * igt@kms_psr@psr2_primary_page_flip:
    - shard-iclb:         [SKIP][47] ([fdo#109441]) -> [PASS][48] +1 similar issue
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-iclb1/igt@kms_psr@psr2_primary_page_flip.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-iclb2/igt@kms_psr@psr2_primary_page_flip.html

  
#### Warnings ####

  * igt@kms_async_flips@test-time-stamp:
    - shard-tglb:         [FAIL][49] ([i915#2574]) -> [FAIL][50] ([i915#2597])
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-tglb3/igt@kms_async_flips@test-time-stamp.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-tglb7/igt@kms_async_flips@test-time-stamp.html

  * igt@runner@aborted:
    - shard-kbl:          [FAIL][51] ([i915#2295] / [i915#483]) -> ([FAIL][52], [FAIL][53]) ([i915#2295] / [i915#2426] / [i915#2505] / [i915#483])
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-kbl1/igt@runner@aborted.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-kbl3/igt@runner@aborted.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-kbl6/igt@runner@aborted.html
    - shard-iclb:         [FAIL][54] ([i915#2295] / [i915#2724]) -> [FAIL][55] ([i915#2295] / [i915#2724] / [i915#483])
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-iclb4/igt@runner@aborted.html
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-iclb8/igt@runner@aborted.html
    - shard-glk:          [FAIL][56] ([i915#2295] / [k.org#202321]) -> ([FAIL][57], [FAIL][58]) ([i915#2295] / [i915#2426] / [k.org#202321])
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-glk9/igt@runner@aborted.html
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-glk8/igt@runner@aborted.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-glk2/igt@runner@aborted.html
    - shard-skl:          ([FAIL][59], [FAIL][60]) ([i915#2295] / [i915#483]) -> ([FAIL][61], [FAIL][62], [FAIL][63]) ([i915#2295] / [i915#2426] / [i915#483])
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl7/igt@runner@aborted.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9551/shard-skl5/igt@runner@aborted.html
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl7/igt@runner@aborted.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl9/igt@runner@aborted.html
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/shard-skl4/igt@runner@aborted.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [i915#118]: https://gitlab.freedesktop.org/drm/intel/issues/118
  [i915#1188]: https://gitlab.freedesktop.org/drm/intel/issues/1188
  [i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
  [i915#1610]: https://gitlab.freedesktop.org/drm/intel/issues/1610
  [i915#165]: https://gitlab.freedesktop.org/drm/intel/issues/165
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
  [i915#2122]: https://gitlab.freedesktop.org/drm/intel/issues/2122
  [i915#2295]: https://gitlab.freedesktop.org/drm/intel/issues/2295
  [i915#2346]: https://gitlab.freedesktop.org/drm/intel/issues/2346
  [i915#2389]: https://gitlab.freedesktop.org/drm/intel/issues/2389
  [i915#2426]: https://gitlab.freedesktop.org/drm/intel/issues/2426
  [i915#2505]: https://gitlab.freedesktop.org/drm/intel/issues/2505
  [i915#2574]: https://gitlab.freedesktop.org/drm/intel/issues/2574
  [i915#2597]: https://gitlab.freedesktop.org/drm/intel/issues/2597
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#2724]: https://gitlab.freedesktop.org/drm/intel/issues/2724
  [i915#2802]: https://gitlab.freedesktop.org/drm/intel/issues/2802
  [i915#2803]: https://gitlab.freedesktop.org/drm/intel/issues/2803
  [i915#2842]: https://gitlab.freedesktop.org/drm/intel/issues/2842
  [i915#2849]: https://gitlab.freedesktop.org/drm/intel/issues/2849
  [i915#483]: https://gitlab.freedesktop.org/drm/intel/issues/483
  [i915#533]: https://gitlab.freedesktop.org/drm/intel/issues/533
  [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
  [i915#78]: https://gitlab.freedesktop.org/drm/intel/issues/78
  [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
  [i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95
  [k.org#202321]: https://bugzilla.kernel.org/show_bug.cgi?id=202321


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * Linux: CI_DRM_9551 -> Patchwork_19271

  CI-20190529: 20190529
  CI_DRM_9551: 48e2246bf27ad73d7ba2c32d942d236c6bc692fa @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5945: f0ad1a564956d6796d9ff09182c48d78fb00eefe @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_19271: 272b92deec724bfb19cff708f63d16057c68ce5c @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19271/index.html


* Re: [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking
  2021-01-06 16:36 [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking Chris Wilson
                   ` (3 preceding siblings ...)
  2021-01-07  1:55 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
@ 2021-01-07  9:25 ` Tvrtko Ursulin
  2021-01-07 16:47 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking (rev3) Patchwork
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 13+ messages in thread
From: Tvrtko Ursulin @ 2021-01-07  9:25 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 06/01/2021 16:36, Chris Wilson wrote:
> Refactor our timer_list.expires checking into its own timer_active()
> helper.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_utils.c | 2 +-
>   drivers/gpu/drm/i915/i915_utils.h | 7 ++++++-
>   2 files changed, 7 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
> index 4c305d838016..f9e780dee9de 100644
> --- a/drivers/gpu/drm/i915/i915_utils.c
> +++ b/drivers/gpu/drm/i915/i915_utils.c
> @@ -87,7 +87,7 @@ bool i915_error_injected(void)
>   
>   void cancel_timer(struct timer_list *t)
>   {
> -	if (!READ_ONCE(t->expires))
> +	if (!timer_active(t))
>   		return;
>   
>   	del_timer(t);
> diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
> index 54773371e6bd..abd4dcd9f79c 100644
> --- a/drivers/gpu/drm/i915/i915_utils.h
> +++ b/drivers/gpu/drm/i915/i915_utils.h
> @@ -438,9 +438,14 @@ static inline void __add_taint_for_CI(unsigned int taint)
>   void cancel_timer(struct timer_list *t);
>   void set_timer_ms(struct timer_list *t, unsigned long timeout);
>   
> +static inline bool timer_active(const struct timer_list *t)
> +{
> +	return READ_ONCE(t->expires);
> +}
> +
>   static inline bool timer_expired(const struct timer_list *t)
>   {
> -	return READ_ONCE(t->expires) && !timer_pending(t);
> +	return timer_active(t) && !timer_pending(t);
>   }
>   
>   /*
> 

I don't know, really; it's a bit dodgy to use the generic timer_ prefix
when the semantics are i915-specific and apply only to timers managed
with our set_timer_ms().

Have it for now, and perhaps we can revisit and tidy this up later.
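
The concern is concrete: for a timer armed with plain mod_timer(),
nothing ever writes expires back to zero, so the helper would misreport
it. A hypothetical illustration (not from the patch):

	struct timer_list t; /* hypothetical: armed without set_timer_ms() */

	mod_timer(&t, jiffies + HZ);
	/* ... the timer fires and its callback runs ... */

	timer_pending(&t);  /* false: it has fired */
	timer_active(&t);   /* still true: expires was never cleared */
	timer_expired(&t);  /* true indefinitely, despite being idle */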

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko

* [Intel-gfx] [PATCH v3] drm/i915/gt: Remove timeslice suppression
  2021-01-06 16:36 ` [Intel-gfx] [PATCH v2 2/2] drm/i915/gt: Remove timeslice suppression Chris Wilson
@ 2021-01-07 11:01   ` Chris Wilson
  2021-01-07 11:05   ` Chris Wilson
  1 sibling, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2021-01-07 11:01 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

In the next^W future patch, we remove the strict priority system and
continuously re-evaluate the relative priority of tasks. As such we need
to enable the timeslice whenever there is more than one context in the
pipeline. This simplifies the decision and removes some of the tweaks to
suppress timeslicing, allowing us to lift the timeslice enabling to a
common spot at the end of running the submission tasklet.

One consequence of the suppression is that it was reducing fairness
between virtual engines on an oversaturated system, undermining the
principle of timeslicing.

v2: Commentary
v3: Commentary for the right cancel_timer()

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2802
Testcase: igt/gem_exec_balancer/fairslice
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |  10 -
 .../drm/i915/gt/intel_execlists_submission.c  | 202 +++++++++---------
 2 files changed, 97 insertions(+), 115 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 430066e5884c..df62e793e747 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -238,16 +238,6 @@ struct intel_engine_execlists {
 	 */
 	unsigned int port_mask;
 
-	/**
-	 * @switch_priority_hint: Second context priority.
-	 *
-	 * We submit multiple contexts to the HW simultaneously and would
-	 * like to occasionally switch between them to emulate timeslicing.
-	 * To know when timeslicing is suitable, we track the priority of
-	 * the context submitted second.
-	 */
-	int switch_priority_hint;
-
 	/**
 	 * @queue_priority_hint: Highest pending priority.
 	 *
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index a5b442683c18..19bd1843e4b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1148,25 +1148,6 @@ static void defer_active(struct intel_engine_cs *engine)
 	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
 }
 
-static bool
-need_timeslice(const struct intel_engine_cs *engine,
-	       const struct i915_request *rq)
-{
-	int hint;
-
-	if (!intel_engine_has_timeslices(engine))
-		return false;
-
-	hint = max(engine->execlists.queue_priority_hint,
-		   virtual_prio(&engine->execlists));
-
-	if (!list_is_last(&rq->sched.link, &engine->active.requests))
-		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
-
-	GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
-	return hint >= effective_prio(rq);
-}
-
 static bool
 timeslice_yield(const struct intel_engine_execlists *el,
 		const struct i915_request *rq)
@@ -1186,76 +1167,74 @@ timeslice_yield(const struct intel_engine_execlists *el,
 	return rq->context->lrc.ccid == READ_ONCE(el->yield);
 }
 
-static bool
-timeslice_expired(const struct intel_engine_execlists *el,
-		  const struct i915_request *rq)
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+			    const struct i915_request *rq)
 {
+	if (!intel_engine_has_timeslices(engine))
+		return false;
+
+	/* If not currently active, or about to switch, wait for next event */
+	if (!rq || __i915_request_is_complete(rq))
+		return false;
+
+	/* We do not need to start the timeslice until after the ACK */
+	if (READ_ONCE(engine->execlists.pending[0]))
+		return false;
+
+	/* If ELSP[1] is occupied, always check to see if worth slicing */
+	if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests))
+		return true;
+
+	/* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
+		return true;
+
+	return !RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root);
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+	const struct intel_engine_execlists *el = &engine->execlists;
+
+	if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+		return false;
+
+	if (!needs_timeslice(engine, rq))
+		return false;
+
 	return timer_expired(&el->timer) || timeslice_yield(el, rq);
 }
 
-static int
-switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
-{
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return engine->execlists.queue_priority_hint;
-
-	return rq_prio(list_next_entry(rq, sched.link));
-}
-
-static inline unsigned long
-timeslice(const struct intel_engine_cs *engine)
+static unsigned long timeslice(const struct intel_engine_cs *engine)
 {
 	return READ_ONCE(engine->props.timeslice_duration_ms);
 }
 
-static unsigned long active_timeslice(const struct intel_engine_cs *engine)
-{
-	const struct intel_engine_execlists *execlists = &engine->execlists;
-	const struct i915_request *rq = *execlists->active;
-
-	if (!rq || __i915_request_is_complete(rq))
-		return 0;
-
-	if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
-		return 0;
-
-	return timeslice(engine);
-}
-
-static void set_timeslice(struct intel_engine_cs *engine)
+static void start_timeslice(struct intel_engine_cs *engine)
 {
+	struct intel_engine_execlists *el = &engine->execlists;
 	unsigned long duration;
 
-	if (!intel_engine_has_timeslices(engine))
-		return;
+	/* Disable the timer if there is nothing to switch to */
+	duration = 0;
+	if (needs_timeslice(engine, *el->active)) {
+		/* Avoid continually prolonging an active timeslice */
+		if (timer_active(&el->timer)) {
+			/*
+			 * If we just submitted a new ELSP after an old
+			 * context, that context may have already consumed
+			 * its timeslice, so recheck.
+			 */
+			if (!timer_pending(&el->timer))
+				tasklet_hi_schedule(&engine->execlists.tasklet);
+			return;
+		}
 
-	duration = active_timeslice(engine);
-	ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+		duration = timeslice(engine);
+	}
 
-	set_timer_ms(&engine->execlists.timer, duration);
-}
-
-static void start_timeslice(struct intel_engine_cs *engine, int prio)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-	unsigned long duration;
-
-	if (!intel_engine_has_timeslices(engine))
-		return;
-
-	WRITE_ONCE(execlists->switch_priority_hint, prio);
-	if (prio == INT_MIN)
-		return;
-
-	if (timer_pending(&execlists->timer))
-		return;
-
-	duration = timeslice(engine);
-	ENGINE_TRACE(engine,
-		     "start timeslicing, prio:%d, interval:%lu",
-		     prio, duration);
-
-	set_timer_ms(&execlists->timer, duration);
+	set_timer_ms(&el->timer, duration);
 }
 
 static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1368,16 +1347,32 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			__unwind_incomplete_requests(engine);
 
 			last = NULL;
-		} else if (need_timeslice(engine, last) &&
-			   timeslice_expired(execlists, last)) {
+		} else if (timeslice_expired(engine, last)) {
 			ENGINE_TRACE(engine,
-				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
-				     last->fence.context,
-				     last->fence.seqno,
-				     last->sched.attr.priority,
+				     "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+				     yesno(timer_expired(&execlists->timer)),
+				     last->fence.context, last->fence.seqno,
+				     rq_prio(last),
 				     execlists->queue_priority_hint,
 				     yesno(timeslice_yield(execlists, last)));
 
+			/*
+			 * Consume this timeslice; ensure we start a new one.
+			 *
+			 * The timeslice expired, and we will unwind the
+			 * running contexts and recompute the next ELSP.
+			 * If that submit will be the same pair of contexts
+			 * (due to dependency ordering), we will skip the
+			 * submission. If we don't cancel the timer now,
+			 * we will see that the timer has expired and
+			 * reschedule the tasklet; continually until the
+			 * next context switch or other preemption event.
+			 *
+			 * Since we have decided to reschedule based on
+			 * consumption of this timeslice, if we submit the
+			 * same context again, grant it a full timeslice.
+			 */
+			cancel_timer(&execlists->timer);
 			ring_set_paused(engine, 1);
 			defer_active(engine);
 
@@ -1413,7 +1408,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * of timeslices, our queue might be.
 				 */
 				spin_unlock(&engine->active.lock);
-				start_timeslice(engine, queue_prio(execlists));
 				return;
 			}
 		}
@@ -1440,7 +1434,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		if (last && !can_merge_rq(last, rq)) {
 			spin_unlock(&ve->base.active.lock);
 			spin_unlock(&engine->active.lock);
-			start_timeslice(engine, rq_prio(rq));
 			return; /* leave this for another sibling */
 		}
 
@@ -1604,29 +1597,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	execlists->queue_priority_hint = queue_prio(execlists);
 	spin_unlock(&engine->active.lock);
 
-	if (submit) {
-		/*
-		 * Skip if we ended up with exactly the same set of requests,
-		 * e.g. trying to timeslice a pair of ordered contexts
-		 */
-		if (!memcmp(execlists->active,
-			    execlists->pending,
-			    (port - execlists->pending) * sizeof(*port)))
-			goto skip_submit;
-
+	/*
+	 * We can skip poking the HW if we ended up with exactly the same set
+	 * of requests as currently running, e.g. trying to timeslice a pair
+	 * of ordered contexts.
+	 */
+	if (submit &&
+	    memcmp(execlists->active,
+		   execlists->pending,
+		   (port - execlists->pending) * sizeof(*port))) {
 		*port = NULL;
 		while (port-- != execlists->pending)
 			execlists_schedule_in(*port, port - execlists->pending);
 
-		execlists->switch_priority_hint =
-			switch_prio(engine, *execlists->pending);
-
 		WRITE_ONCE(execlists->yield, -1);
 		set_preempt_timeout(engine, *execlists->active);
 		execlists_submit_ports(engine);
 	} else {
-		start_timeslice(engine, execlists->queue_priority_hint);
-skip_submit:
 		ring_set_paused(engine, 0);
 		while (port-- != execlists->pending)
 			i915_request_put(*port);
@@ -1984,8 +1971,6 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 		}
 	} while (head != tail);
 
-	set_timeslice(engine);
-
 	/*
 	 * Gen11 has proven to fail wrt global observation point between
 	 * entry and tail update, failing on the ordering and thus
@@ -1999,6 +1984,14 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 	 */
 	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
 
+	/*
+	 * We assume that any event reflects a change in context flow
+	 * and merits a fresh timeslice. We reinstall the timer after
+	 * inspecting the queue to see if we need to resubmit.
+	 */
+	if (*inactive != *execlists->active) /* elide lite-restores */
+		cancel_timer(&execlists->timer);
+
 	return inactive;
 }
 
@@ -2410,8 +2403,10 @@ static void execlists_submission_tasklet(unsigned long data)
 		execlists_reset(engine, msg);
 	}
 
-	if (!engine->execlists.pending[0])
+	if (!engine->execlists.pending[0]) {
 		execlists_dequeue_irq(engine);
+		start_timeslice(engine);
+	}
 
 	post_process_csb(post, inactive);
 	rcu_read_unlock();
@@ -3856,9 +3851,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 		show_request(m, last, "\t\t", 0);
 	}
 
-	if (execlists->switch_priority_hint != INT_MIN)
-		drm_printf(m, "\t\tSwitch priority hint: %d\n",
-			   READ_ONCE(execlists->switch_priority_hint));
 	if (execlists->queue_priority_hint != INT_MIN)
 		drm_printf(m, "\t\tQueue priority hint: %d\n",
 			   READ_ONCE(execlists->queue_priority_hint));
-- 
2.20.1
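
Relative to v2, the functional delta in this v3 is the guarded cancel in
process_csb(): the timer is now cancelled only when the active context
actually changed, so a lite-restore (resubmitting the same context) no
longer consumes the slice. The key lines from the hunk above:

	/* v3: only a real context switch consumes the timeslice */
	if (*inactive != *execlists->active) /* elide lite-restores */
		cancel_timer(&execlists->timer);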


* [Intel-gfx] [PATCH v3] drm/i915/gt: Remove timeslice suppression
  2021-01-06 16:36 ` [Intel-gfx] [PATCH v2 2/2] drm/i915/gt: Remove timeslice suppression Chris Wilson
  2021-01-07 11:01   ` [Intel-gfx] [PATCH v3] " Chris Wilson
@ 2021-01-07 11:05   ` Chris Wilson
  2021-01-07 11:34     ` Andi Shyti
  2021-01-07 12:53     ` Tvrtko Ursulin
  1 sibling, 2 replies; 13+ messages in thread
From: Chris Wilson @ 2021-01-07 11:05 UTC (permalink / raw)
  To: intel-gfx; +Cc: Chris Wilson

In the next^W future patch, we remove the strict priority system and
continuously re-evaluate the relative priority of tasks. As such we need
to enable the timeslice whenever there is more than one context in the
pipeline. This simplifies the decision and removes some of the tweaks to
suppress timeslicing, allowing us to lift the timeslice enabling to a
common spot at the end of running the submission tasklet.

One consequence of the suppression is that it was reducing fairness
between virtual engines on an oversaturated system, undermining the
principle of timeslicing.

v2: Commentary
v3: Commentary for the right cancel_timer()

Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2802
Testcase: igt/gem_exec_balancer/fairslice
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |  10 -
 .../drm/i915/gt/intel_execlists_submission.c  | 204 +++++++++---------
 2 files changed, 99 insertions(+), 115 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 430066e5884c..df62e793e747 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -238,16 +238,6 @@ struct intel_engine_execlists {
 	 */
 	unsigned int port_mask;
 
-	/**
-	 * @switch_priority_hint: Second context priority.
-	 *
-	 * We submit multiple contexts to the HW simultaneously and would
-	 * like to occasionally switch between them to emulate timeslicing.
-	 * To know when timeslicing is suitable, we track the priority of
-	 * the context submitted second.
-	 */
-	int switch_priority_hint;
-
 	/**
 	 * @queue_priority_hint: Highest pending priority.
 	 *
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index a5b442683c18..13be71a81a38 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1148,25 +1148,6 @@ static void defer_active(struct intel_engine_cs *engine)
 	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
 }
 
-static bool
-need_timeslice(const struct intel_engine_cs *engine,
-	       const struct i915_request *rq)
-{
-	int hint;
-
-	if (!intel_engine_has_timeslices(engine))
-		return false;
-
-	hint = max(engine->execlists.queue_priority_hint,
-		   virtual_prio(&engine->execlists));
-
-	if (!list_is_last(&rq->sched.link, &engine->active.requests))
-		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
-
-	GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
-	return hint >= effective_prio(rq);
-}
-
 static bool
 timeslice_yield(const struct intel_engine_execlists *el,
 		const struct i915_request *rq)
@@ -1186,76 +1167,74 @@ timeslice_yield(const struct intel_engine_execlists *el,
 	return rq->context->lrc.ccid == READ_ONCE(el->yield);
 }
 
-static bool
-timeslice_expired(const struct intel_engine_execlists *el,
-		  const struct i915_request *rq)
+static bool needs_timeslice(const struct intel_engine_cs *engine,
+			    const struct i915_request *rq)
 {
+	if (!intel_engine_has_timeslices(engine))
+		return false;
+
+	/* If not currently active, or about to switch, wait for next event */
+	if (!rq || __i915_request_is_complete(rq))
+		return false;
+
+	/* We do not need to start the timeslice until after the ACK */
+	if (READ_ONCE(engine->execlists.pending[0]))
+		return false;
+
+	/* If ELSP[1] is occupied, always check to see if worth slicing */
+	if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests))
+		return true;
+
+	/* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
+	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
+		return true;
+
+	return !RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root);
+}
+
+static bool
+timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
+{
+	const struct intel_engine_execlists *el = &engine->execlists;
+
+	if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
+		return false;
+
+	if (!needs_timeslice(engine, rq))
+		return false;
+
 	return timer_expired(&el->timer) || timeslice_yield(el, rq);
 }
 
-static int
-switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
-{
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return engine->execlists.queue_priority_hint;
-
-	return rq_prio(list_next_entry(rq, sched.link));
-}
-
-static inline unsigned long
-timeslice(const struct intel_engine_cs *engine)
+static unsigned long timeslice(const struct intel_engine_cs *engine)
 {
 	return READ_ONCE(engine->props.timeslice_duration_ms);
 }
 
-static unsigned long active_timeslice(const struct intel_engine_cs *engine)
-{
-	const struct intel_engine_execlists *execlists = &engine->execlists;
-	const struct i915_request *rq = *execlists->active;
-
-	if (!rq || __i915_request_is_complete(rq))
-		return 0;
-
-	if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
-		return 0;
-
-	return timeslice(engine);
-}
-
-static void set_timeslice(struct intel_engine_cs *engine)
+static void start_timeslice(struct intel_engine_cs *engine)
 {
+	struct intel_engine_execlists *el = &engine->execlists;
 	unsigned long duration;
 
-	if (!intel_engine_has_timeslices(engine))
-		return;
+	/* Disable the timer if there is nothing to switch to */
+	duration = 0;
+	if (needs_timeslice(engine, *el->active)) {
+		/* Avoid continually prolonging an active timeslice */
+		if (timer_active(&el->timer)) {
+			/*
+			 * If we just submitted a new ELSP after an old
+			 * context, that context may have already consumed
+			 * its timeslice, so recheck.
+			 */
+			if (!timer_pending(&el->timer))
+				tasklet_hi_schedule(&engine->execlists.tasklet);
+			return;
+		}
 
-	duration = active_timeslice(engine);
-	ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
+		duration = timeslice(engine);
+	}
 
-	set_timer_ms(&engine->execlists.timer, duration);
-}
-
-static void start_timeslice(struct intel_engine_cs *engine, int prio)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-	unsigned long duration;
-
-	if (!intel_engine_has_timeslices(engine))
-		return;
-
-	WRITE_ONCE(execlists->switch_priority_hint, prio);
-	if (prio == INT_MIN)
-		return;
-
-	if (timer_pending(&execlists->timer))
-		return;
-
-	duration = timeslice(engine);
-	ENGINE_TRACE(engine,
-		     "start timeslicing, prio:%d, interval:%lu",
-		     prio, duration);
-
-	set_timer_ms(&execlists->timer, duration);
+	set_timer_ms(&el->timer, duration);
 }
 
 static void record_preemption(struct intel_engine_execlists *execlists)
@@ -1368,16 +1347,32 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			__unwind_incomplete_requests(engine);
 
 			last = NULL;
-		} else if (need_timeslice(engine, last) &&
-			   timeslice_expired(execlists, last)) {
+		} else if (timeslice_expired(engine, last)) {
 			ENGINE_TRACE(engine,
-				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
-				     last->fence.context,
-				     last->fence.seqno,
-				     last->sched.attr.priority,
+				     "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
+				     yesno(timer_expired(&execlists->timer)),
+				     last->fence.context, last->fence.seqno,
+				     rq_prio(last),
 				     execlists->queue_priority_hint,
 				     yesno(timeslice_yield(execlists, last)));
 
+			/*
+			 * Consume this timeslice; ensure we start a new one.
+			 *
+			 * The timeslice expired, and we will unwind the
+			 * running contexts and recompute the next ELSP.
+			 * If that submit will be the same pair of contexts
+			 * (due to dependency ordering), we will skip the
+			 * submission. If we don't cancel the timer now,
+			 * we will see that the timer has expired and
+			 * reschedule the tasklet; continually until the
+			 * next context switch or other preemption event.
+			 *
+			 * Since we have decided to reschedule based on
+			 * consumption of this timeslice, if we submit the
+			 * same context again, grant it a full timeslice.
+			 */
+			cancel_timer(&execlists->timer);
 			ring_set_paused(engine, 1);
 			defer_active(engine);
 
@@ -1413,7 +1408,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * of timeslices, our queue might be.
 				 */
 				spin_unlock(&engine->active.lock);
-				start_timeslice(engine, queue_prio(execlists));
 				return;
 			}
 		}
@@ -1440,7 +1434,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		if (last && !can_merge_rq(last, rq)) {
 			spin_unlock(&ve->base.active.lock);
 			spin_unlock(&engine->active.lock);
-			start_timeslice(engine, rq_prio(rq));
 			return; /* leave this for another sibling */
 		}
 
@@ -1604,29 +1597,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	execlists->queue_priority_hint = queue_prio(execlists);
 	spin_unlock(&engine->active.lock);
 
-	if (submit) {
-		/*
-		 * Skip if we ended up with exactly the same set of requests,
-		 * e.g. trying to timeslice a pair of ordered contexts
-		 */
-		if (!memcmp(execlists->active,
-			    execlists->pending,
-			    (port - execlists->pending) * sizeof(*port)))
-			goto skip_submit;
-
+	/*
+	 * We can skip poking the HW if we ended up with exactly the same set
+	 * of requests as currently running, e.g. trying to timeslice a pair
+	 * of ordered contexts.
+	 */
+	if (submit &&
+	    memcmp(execlists->active,
+		   execlists->pending,
+		   (port - execlists->pending) * sizeof(*port))) {
 		*port = NULL;
 		while (port-- != execlists->pending)
 			execlists_schedule_in(*port, port - execlists->pending);
 
-		execlists->switch_priority_hint =
-			switch_prio(engine, *execlists->pending);
-
 		WRITE_ONCE(execlists->yield, -1);
 		set_preempt_timeout(engine, *execlists->active);
 		execlists_submit_ports(engine);
 	} else {
-		start_timeslice(engine, execlists->queue_priority_hint);
-skip_submit:
 		ring_set_paused(engine, 0);
 		while (port-- != execlists->pending)
 			i915_request_put(*port);
@@ -1865,6 +1852,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 	 * we perform the READ_ONCE(*csb_write).
 	 */
 	rmb();
+
+	*inactive = NULL;
 	do {
 		bool promote;
 		u64 csb;
@@ -1984,8 +1973,6 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 		}
 	} while (head != tail);
 
-	set_timeslice(engine);
-
 	/*
 	 * Gen11 has proven to fail wrt global observation point between
 	 * entry and tail update, failing on the ordering and thus
@@ -1999,6 +1986,14 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
 	 */
 	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
 
+	/*
+	 * We assume that any event reflects a change in context flow
+	 * and merits a fresh timeslice. We reinstall the timer after
+	 * inspecting the queue to see if we need to resubmit.
+	 */
+	if (*inactive != *execlists->active) /* elide lite-restores */
+		cancel_timer(&execlists->timer);
+
 	return inactive;
 }
 
@@ -2410,8 +2405,10 @@ static void execlists_submission_tasklet(unsigned long data)
 		execlists_reset(engine, msg);
 	}
 
-	if (!engine->execlists.pending[0])
+	if (!engine->execlists.pending[0]) {
 		execlists_dequeue_irq(engine);
+		start_timeslice(engine);
+	}
 
 	post_process_csb(post, inactive);
 	rcu_read_unlock();
@@ -3856,9 +3853,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 		show_request(m, last, "\t\t", 0);
 	}
 
-	if (execlists->switch_priority_hint != INT_MIN)
-		drm_printf(m, "\t\tSwitch priority hint: %d\n",
-			   READ_ONCE(execlists->switch_priority_hint));
 	if (execlists->queue_priority_hint != INT_MIN)
 		drm_printf(m, "\t\tQueue priority hint: %d\n",
 			   READ_ONCE(execlists->queue_priority_hint));
-- 
2.20.1
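
For readers following the diff, the net effect of the rework is easiest to
see in isolation: the old priority-hint comparison is gone, and the timer is
armed whenever the running context has anything it could plausibly yield to.
The sketch below is a minimal userspace simulation of that arming rule,
using hypothetical stand-in types and fields rather than the driver's
structures.

	#include <stdbool.h>
	#include <stdio.h>

	struct engine_sim {
		bool has_timeslices;	/* engine supports timeslicing at all */
		int  inflight;		/* contexts currently submitted to ELSP */
		int  queued;		/* contexts waiting in the queue(s) */
		bool pending_ack;	/* new ELSP write not yet ACKed by HW */
	};

	static bool needs_timeslice_sim(const struct engine_sim *e)
	{
		if (!e->has_timeslices)
			return false;
		if (!e->inflight)	/* nothing running: wait for the next event */
			return false;
		if (e->pending_ack)	/* the timer only starts after the ACK */
			return false;
		/* Slice whenever there is anything to switch to. */
		return e->inflight > 1 || e->queued > 0;
	}

	int main(void)
	{
		struct engine_sim e = { .has_timeslices = true, .inflight = 1 };

		printf("lone context: %s\n",
		       needs_timeslice_sim(&e) ? "arm timer" : "timer off");
		e.queued = 1;
		printf("one running, one queued: %s\n",
		       needs_timeslice_sim(&e) ? "arm timer" : "timer off");
		return 0;
	}

The real start_timeslice() additionally avoids re-arming a timer that is
already running, and kicks the tasklet instead when that timer has already
expired; the simulation above only captures the arm/disarm decision.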

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH v3] drm/i915/gt: Remove timeslice suppression
  2021-01-07 11:05   ` Chris Wilson
@ 2021-01-07 11:34     ` Andi Shyti
  2021-01-07 12:53     ` Tvrtko Ursulin
  1 sibling, 0 replies; 13+ messages in thread
From: Andi Shyti @ 2021-01-07 11:34 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

Hi Chris,

On Thu, Jan 07, 2021 at 11:05:55AM +0000, Chris Wilson wrote:
> In the next^W future patch, we remove the strict priority system and
> continuously re-evaluate the relative priority of tasks. As such we need
> to enable the timeslice whenever there is more than one context in the
> pipeline. This simplifies the decision and removes some of the tweaks to
> suppress timeslicing, allowing us to lift the timeslice enabling to a
> common spot at the end of running the submission tasklet.
> 
> One consequence of the suppression is that it was reducing fairness
> between virtual engines on an over saturated system; undermining the
> principle for timeslicing.
> 
> v2: Commentary
> v3: Commentary for the right cancel_timer()
> 
> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2802
> Testcase: igt/gem_exec_balancer/fairslice
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

I don't know if you have answered all of Tvrtko's questions, but
to me this looks good.

Reviewed-by: Andi Shyti <andi.shyti@intel.com>

Thanks,
Andi
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [Intel-gfx] [PATCH v3] drm/i915/gt: Remove timeslice suppression
  2021-01-07 11:05   ` Chris Wilson
  2021-01-07 11:34     ` Andi Shyti
@ 2021-01-07 12:53     ` Tvrtko Ursulin
  1 sibling, 0 replies; 13+ messages in thread
From: Tvrtko Ursulin @ 2021-01-07 12:53 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 07/01/2021 11:05, Chris Wilson wrote:
> In the next^W future patch, we remove the strict priority system and
> continuously re-evaluate the relative priority of tasks. As such we need
> to enable the timeslice whenever there is more than one context in the
> pipeline. This simplifies the decision and removes some of the tweaks to
> suppress timeslicing, allowing us to lift the timeslice enabling to a
> common spot at the end of running the submission tasklet.
> 
> One consequence of the suppression is that it was reducing fairness
> between virtual engines on an over saturated system; undermining the
> principle for timeslicing.
> 
> v2: Commentary
> v3: Commentary for the right cancel_timer()
> 
> Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/2802
> Testcase: igt/gem_exec_balancer/fairslice
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_engine_types.h  |  10 -
>   .../drm/i915/gt/intel_execlists_submission.c  | 204 +++++++++---------
>   2 files changed, 99 insertions(+), 115 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index 430066e5884c..df62e793e747 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -238,16 +238,6 @@ struct intel_engine_execlists {
>   	 */
>   	unsigned int port_mask;
>   
> -	/**
> -	 * @switch_priority_hint: Second context priority.
> -	 *
> -	 * We submit multiple contexts to the HW simultaneously and would
> -	 * like to occasionally switch between them to emulate timeslicing.
> -	 * To know when timeslicing is suitable, we track the priority of
> -	 * the context submitted second.
> -	 */
> -	int switch_priority_hint;
> -
>   	/**
>   	 * @queue_priority_hint: Highest pending priority.
>   	 *
> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> index a5b442683c18..13be71a81a38 100644
> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> @@ -1148,25 +1148,6 @@ static void defer_active(struct intel_engine_cs *engine)
>   	defer_request(rq, i915_sched_lookup_priolist(engine, rq_prio(rq)));
>   }
>   
> -static bool
> -need_timeslice(const struct intel_engine_cs *engine,
> -	       const struct i915_request *rq)
> -{
> -	int hint;
> -
> -	if (!intel_engine_has_timeslices(engine))
> -		return false;
> -
> -	hint = max(engine->execlists.queue_priority_hint,
> -		   virtual_prio(&engine->execlists));
> -
> -	if (!list_is_last(&rq->sched.link, &engine->active.requests))
> -		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
> -
> -	GEM_BUG_ON(hint >= I915_PRIORITY_UNPREEMPTABLE);
> -	return hint >= effective_prio(rq);
> -}
> -
>   static bool
>   timeslice_yield(const struct intel_engine_execlists *el,
>   		const struct i915_request *rq)
> @@ -1186,76 +1167,74 @@ timeslice_yield(const struct intel_engine_execlists *el,
>   	return rq->context->lrc.ccid == READ_ONCE(el->yield);
>   }
>   
> -static bool
> -timeslice_expired(const struct intel_engine_execlists *el,
> -		  const struct i915_request *rq)
> +static bool needs_timeslice(const struct intel_engine_cs *engine,
> +			    const struct i915_request *rq)
>   {
> +	if (!intel_engine_has_timeslices(engine))
> +		return false;
> +
> +	/* If not currently active, or about to switch, wait for next event */
> +	if (!rq || __i915_request_is_complete(rq))
> +		return false;
> +
> +	/* We do not need to start the timeslice until after the ACK */
> +	if (READ_ONCE(engine->execlists.pending[0]))
> +		return false;
> +
> +	/* If ELSP[1] is occupied, always check to see if worth slicing */
> +	if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests))
> +		return true;
> +
> +	/* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
> +	if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
> +		return true;
> +
> +	return !RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root);
> +}
> +
> +static bool
> +timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq)
> +{
> +	const struct intel_engine_execlists *el = &engine->execlists;
> +
> +	if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq))
> +		return false;
> +
> +	if (!needs_timeslice(engine, rq))
> +		return false;
> +
>   	return timer_expired(&el->timer) || timeslice_yield(el, rq);
>   }
>   
> -static int
> -switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
> -{
> -	if (list_is_last(&rq->sched.link, &engine->active.requests))
> -		return engine->execlists.queue_priority_hint;
> -
> -	return rq_prio(list_next_entry(rq, sched.link));
> -}
> -
> -static inline unsigned long
> -timeslice(const struct intel_engine_cs *engine)
> +static unsigned long timeslice(const struct intel_engine_cs *engine)
>   {
>   	return READ_ONCE(engine->props.timeslice_duration_ms);
>   }
>   
> -static unsigned long active_timeslice(const struct intel_engine_cs *engine)
> -{
> -	const struct intel_engine_execlists *execlists = &engine->execlists;
> -	const struct i915_request *rq = *execlists->active;
> -
> -	if (!rq || __i915_request_is_complete(rq))
> -		return 0;
> -
> -	if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
> -		return 0;
> -
> -	return timeslice(engine);
> -}
> -
> -static void set_timeslice(struct intel_engine_cs *engine)
> +static void start_timeslice(struct intel_engine_cs *engine)
>   {
> +	struct intel_engine_execlists *el = &engine->execlists;
>   	unsigned long duration;
>   
> -	if (!intel_engine_has_timeslices(engine))
> -		return;
> +	/* Disable the timer if there is nothing to switch to */
> +	duration = 0;
> +	if (needs_timeslice(engine, *el->active)) {
> +		/* Avoid continually prolonging an active timeslice */
> +		if (timer_active(&el->timer)) {
> +			/*
> +			 * If we just submitted a new ELSP after an old
> +			 * context, that context may have already consumed
> +			 * its timeslice, so recheck.
> +			 */
> +			if (!timer_pending(&el->timer))
> +				tasklet_hi_schedule(&engine->execlists.tasklet);
> +			return;
> +		}
>   
> -	duration = active_timeslice(engine);
> -	ENGINE_TRACE(engine, "bump timeslicing, interval:%lu", duration);
> +		duration = timeslice(engine);
> +	}
>   
> -	set_timer_ms(&engine->execlists.timer, duration);
> -}
> -
> -static void start_timeslice(struct intel_engine_cs *engine, int prio)
> -{
> -	struct intel_engine_execlists *execlists = &engine->execlists;
> -	unsigned long duration;
> -
> -	if (!intel_engine_has_timeslices(engine))
> -		return;
> -
> -	WRITE_ONCE(execlists->switch_priority_hint, prio);
> -	if (prio == INT_MIN)
> -		return;
> -
> -	if (timer_pending(&execlists->timer))
> -		return;
> -
> -	duration = timeslice(engine);
> -	ENGINE_TRACE(engine,
> -		     "start timeslicing, prio:%d, interval:%lu",
> -		     prio, duration);
> -
> -	set_timer_ms(&execlists->timer, duration);
> +	set_timer_ms(&el->timer, duration);
>   }
>   
>   static void record_preemption(struct intel_engine_execlists *execlists)
> @@ -1368,16 +1347,32 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   			__unwind_incomplete_requests(engine);
>   
>   			last = NULL;
> -		} else if (need_timeslice(engine, last) &&
> -			   timeslice_expired(execlists, last)) {
> +		} else if (timeslice_expired(engine, last)) {
>   			ENGINE_TRACE(engine,
> -				     "expired last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
> -				     last->fence.context,
> -				     last->fence.seqno,
> -				     last->sched.attr.priority,
> +				     "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n",
> +				     yesno(timer_expired(&execlists->timer)),
> +				     last->fence.context, last->fence.seqno,
> +				     rq_prio(last),
>   				     execlists->queue_priority_hint,
>   				     yesno(timeslice_yield(execlists, last)));
>   
> +			/*
> +			 * Consume this timeslice; ensure we start a new one.
> +			 *
> +			 * The timeslice expired, and we will unwind the
> +			 * running contexts and recompute the next ELSP.
> +			 * If that submit will be the same pair of contexts
> +			 * (due to dependency ordering), we will skip the
> +			 * submission. If we don't cancel the timer now,
> +			 * we will see that the timer has expired and
> +			 * reschedule the tasklet; continually until the
> +			 * next context switch or other preemption event.
> +			 *
> +			 * Since we have decided to reschedule based on
> +			 * consumption of this timeslice, if we submit the
> +			 * same context again, grant it a full timeslice.
> +			 */
> +			cancel_timer(&execlists->timer);
>   			ring_set_paused(engine, 1);
>   			defer_active(engine);
>   
> @@ -1413,7 +1408,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   				 * of timeslices, our queue might be.
>   				 */
>   				spin_unlock(&engine->active.lock);
> -				start_timeslice(engine, queue_prio(execlists));
>   				return;
>   			}
>   		}
> @@ -1440,7 +1434,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   		if (last && !can_merge_rq(last, rq)) {
>   			spin_unlock(&ve->base.active.lock);
>   			spin_unlock(&engine->active.lock);
> -			start_timeslice(engine, rq_prio(rq));
>   			return; /* leave this for another sibling */
>   		}
>   
> @@ -1604,29 +1597,23 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   	execlists->queue_priority_hint = queue_prio(execlists);
>   	spin_unlock(&engine->active.lock);
>   
> -	if (submit) {
> -		/*
> -		 * Skip if we ended up with exactly the same set of requests,
> -		 * e.g. trying to timeslice a pair of ordered contexts
> -		 */
> -		if (!memcmp(execlists->active,
> -			    execlists->pending,
> -			    (port - execlists->pending) * sizeof(*port)))
> -			goto skip_submit;
> -
> +	/*
> +	 * We can skip poking the HW if we ended up with exactly the same set
> +	 * of requests as currently running, e.g. trying to timeslice a pair
> +	 * of ordered contexts.
> +	 */
> +	if (submit &&
> +	    memcmp(execlists->active,
> +		   execlists->pending,
> +		   (port - execlists->pending) * sizeof(*port))) {
>   		*port = NULL;
>   		while (port-- != execlists->pending)
>   			execlists_schedule_in(*port, port - execlists->pending);
>   
> -		execlists->switch_priority_hint =
> -			switch_prio(engine, *execlists->pending);
> -
>   		WRITE_ONCE(execlists->yield, -1);
>   		set_preempt_timeout(engine, *execlists->active);
>   		execlists_submit_ports(engine);
>   	} else {
> -		start_timeslice(engine, execlists->queue_priority_hint);
> -skip_submit:
>   		ring_set_paused(engine, 0);
>   		while (port-- != execlists->pending)
>   			i915_request_put(*port);
> @@ -1865,6 +1852,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
>   	 * we perform the READ_ONCE(*csb_write).
>   	 */
>   	rmb();
> +
> +	*inactive = NULL;
>   	do {
>   		bool promote;
>   		u64 csb;
> @@ -1984,8 +1973,6 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
>   		}
>   	} while (head != tail);
>   
> -	set_timeslice(engine);
> -
>   	/*
>   	 * Gen11 has proven to fail wrt global observation point between
>   	 * entry and tail update, failing on the ordering and thus
> @@ -1999,6 +1986,14 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
>   	 */
>   	invalidate_csb_entries(&buf[0], &buf[num_entries - 1]);
>   
> +	/*
> +	 * We assume that any event reflects a change in context flow
> +	 * and merits a fresh timeslice. We reinstall the timer after
> +	 * inspecting the queue to see if we need to resubmit.
> +	 */
> +	if (*inactive != *execlists->active) /* elide lite-restores */
> +		cancel_timer(&execlists->timer);
> +
>   	return inactive;
>   }
>   
> @@ -2410,8 +2405,10 @@ static void execlists_submission_tasklet(unsigned long data)
>   		execlists_reset(engine, msg);
>   	}
>   
> -	if (!engine->execlists.pending[0])
> +	if (!engine->execlists.pending[0]) {
>   		execlists_dequeue_irq(engine);
> +		start_timeslice(engine);
> +	}
>   
>   	post_process_csb(post, inactive);
>   	rcu_read_unlock();
> @@ -3856,9 +3853,6 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
>   		show_request(m, last, "\t\t", 0);
>   	}
>   
> -	if (execlists->switch_priority_hint != INT_MIN)
> -		drm_printf(m, "\t\tSwitch priority hint: %d\n",
> -			   READ_ONCE(execlists->switch_priority_hint));
>   	if (execlists->queue_priority_hint != INT_MIN)
>   		drm_printf(m, "\t\tQueue priority hint: %d\n",
>   			   READ_ONCE(execlists->queue_priority_hint));
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread
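
A side note on the skip-submit rework quoted in the review above: the old
goto skip_submit path is folded into a single condition, so the hardware is
only poked when the freshly built pending[] ports differ from what is
already executing. A minimal sketch of that comparison follows, with a
hypothetical request type standing in for the driver's port contents.

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	struct request { int id; };

	/* The ports hold pointers, so memcmp() over the arrays asks:
	 * are these the exact same requests in the exact same order? */
	static bool same_submission(struct request **active,
				    struct request **pending,
				    size_t nports)
	{
		return !memcmp(active, pending, nports * sizeof(*pending));
	}

	int main(void)
	{
		struct request a = { 1 }, b = { 2 };
		struct request *active[]  = { &a, &b };
		struct request *pending[] = { &a, &b };

		if (same_submission(active, pending, 2))
			printf("identical ELSP, elide the submission\n");
		return 0;
	}

Because raw pointer values are compared, timeslicing a pair of ordered
contexts back onto themselves counts as "the same" and skips the ELSP
write, which is exactly the case the new comment in the patch describes.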

* [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking (rev3)
  2021-01-06 16:36 [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking Chris Wilson
                   ` (4 preceding siblings ...)
  2021-01-07  9:25 ` [Intel-gfx] [PATCH v2 1/2] " Tvrtko Ursulin
@ 2021-01-07 16:47 ` Patchwork
  2021-01-07 17:15 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
  2021-01-07 21:37 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
  7 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2021-01-07 16:47 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking (rev3)
URL   : https://patchwork.freedesktop.org/series/85551/
State : warning

== Summary ==

$ dim sparse --fast origin/drm-tip
Sparse version: v0.6.2
Fast mode used, each commit won't be checked separately.
+drivers/gpu/drm/i915/intel_wakeref.c:137:19: warning: context imbalance in 'wakeref_auto_timeout' - unexpected unlock
+drivers/gpu/drm/i915/selftests/i915_syncmap.c:80:54: warning: dubious: x | !y
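
Neither flagged file is touched by the two patches in this series, so the
warnings appear to be pre-existing. For reference, sparse's "dubious:
x | !y" complains about mixing a bitwise operator with a logical operand; a
hypothetical illustration of the flagged pattern:

	/* !y evaluates to 0 or 1, so bitwise OR only ever affects bit 0,
	 * which is often a typo for the logical form. */
	static unsigned dubious(unsigned x, unsigned y)
	{
		return x | !y;		/* what sparse flags */
	}

	static unsigned logical(unsigned x, unsigned y)
	{
		return x || !y;		/* 0 or 1: x nonzero, or y zero */
	}

In a selftest the construct may well be intentional; the warning merely
asks the author to double-check the operator choice.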


_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking (rev3)
  2021-01-06 16:36 [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking Chris Wilson
                   ` (5 preceding siblings ...)
  2021-01-07 16:47 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking (rev3) Patchwork
@ 2021-01-07 17:15 ` Patchwork
  2021-01-07 21:37 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
  7 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2021-01-07 17:15 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx


== Series Details ==

Series: series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking (rev3)
URL   : https://patchwork.freedesktop.org/series/85551/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_9562 -> Patchwork_19279
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/index.html

Known issues
------------

  Here are the changes found in Patchwork_19279 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_suspend@basic-s3:
    - fi-tgl-u2:          [PASS][1] -> [FAIL][2] ([i915#1888])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/fi-tgl-u2/igt@gem_exec_suspend@basic-s3.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/fi-tgl-u2/igt@gem_exec_suspend@basic-s3.html

  * igt@gem_mmap_gtt@basic:
    - fi-tgl-y:           [PASS][3] -> [DMESG-WARN][4] ([i915#402]) +1 similar issue
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/fi-tgl-y/igt@gem_mmap_gtt@basic.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/fi-tgl-y/igt@gem_mmap_gtt@basic.html

  * igt@i915_selftest@live@gem:
    - fi-bsw-n3050:       [PASS][5] -> [DMESG-WARN][6] ([i915#2826])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/fi-bsw-n3050/igt@i915_selftest@live@gem.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/fi-bsw-n3050/igt@i915_selftest@live@gem.html

  * igt@i915_selftest@live@gem_contexts:
    - fi-bsw-n3050:       [PASS][7] -> [INCOMPLETE][8] ([i915#2369])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/fi-bsw-n3050/igt@i915_selftest@live@gem_contexts.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/fi-bsw-n3050/igt@i915_selftest@live@gem_contexts.html

  * igt@runner@aborted:
    - fi-bsw-n3050:       NOTRUN -> [FAIL][9] ([i915#1436] / [i915#483])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/fi-bsw-n3050/igt@runner@aborted.html

  
#### Possible fixes ####

  * igt@debugfs_test@read_all_entries:
    - fi-tgl-y:           [DMESG-WARN][10] ([i915#402]) -> [PASS][11] +2 similar issues
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/fi-tgl-y/igt@debugfs_test@read_all_entries.html
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/fi-tgl-y/igt@debugfs_test@read_all_entries.html

  * igt@gem_exec_suspend@basic-s3:
    - fi-tgl-y:           [DMESG-WARN][12] ([i915#2411] / [i915#402]) -> [PASS][13]
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/fi-tgl-y/igt@gem_exec_suspend@basic-s3.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/fi-tgl-y/igt@gem_exec_suspend@basic-s3.html

  
  [i915#1436]: https://gitlab.freedesktop.org/drm/intel/issues/1436
  [i915#1888]: https://gitlab.freedesktop.org/drm/intel/issues/1888
  [i915#2369]: https://gitlab.freedesktop.org/drm/intel/issues/2369
  [i915#2411]: https://gitlab.freedesktop.org/drm/intel/issues/2411
  [i915#2826]: https://gitlab.freedesktop.org/drm/intel/issues/2826
  [i915#402]: https://gitlab.freedesktop.org/drm/intel/issues/402
  [i915#483]: https://gitlab.freedesktop.org/drm/intel/issues/483


Participating hosts (43 -> 38)
------------------------------

  Missing    (5): fi-ilk-m540 fi-hsw-4200u fi-bsw-cyan fi-ctg-p8600 fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_9562 -> Patchwork_19279

  CI-20190529: 20190529
  CI_DRM_9562: fc8d32007355b4babc37b621b3c9a4e0fe998d27 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5946: 641e5545213dd9a82d80a4e065013a138afb58ff @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_19279: 00408bfe5158970f1350607258989d54d4845eaa @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

00408bfe5158 drm/i915/gt: Remove timeslice suppression
48f5aaacb2c8 drm/i915: Wrap our timer_list.expires checking

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/index.html

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

* [Intel-gfx] ✓ Fi.CI.IGT: success for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking (rev3)
  2021-01-06 16:36 [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking Chris Wilson
                   ` (6 preceding siblings ...)
  2021-01-07 17:15 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
@ 2021-01-07 21:37 ` Patchwork
  7 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2021-01-07 21:37 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx


== Series Details ==

Series: series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking (rev3)
URL   : https://patchwork.freedesktop.org/series/85551/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_9562_full -> Patchwork_19279_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_19279_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_ctx_isolation@preservation-s3@rcs0:
    - shard-skl:          [PASS][1] -> [INCOMPLETE][2] ([i915#198]) +1 similar issue
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl3/igt@gem_ctx_isolation@preservation-s3@rcs0.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl1/igt@gem_ctx_isolation@preservation-s3@rcs0.html

  * igt@gem_ctx_persistence@idempotent:
    - shard-hsw:          NOTRUN -> [SKIP][3] ([fdo#109271] / [i915#1099])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-hsw6/igt@gem_ctx_persistence@idempotent.html

  * igt@gem_ctx_persistence@legacy-engines-mixed:
    - shard-snb:          NOTRUN -> [SKIP][4] ([fdo#109271] / [i915#1099])
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-snb6/igt@gem_ctx_persistence@legacy-engines-mixed.html

  * igt@gem_exec_whisper@basic-forked-all:
    - shard-glk:          [PASS][5] -> [DMESG-WARN][6] ([i915#118] / [i915#95])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-glk9/igt@gem_exec_whisper@basic-forked-all.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-glk8/igt@gem_exec_whisper@basic-forked-all.html

  * igt@gem_userptr_blits@mmap-offset-invalidate-active@wb:
    - shard-snb:          NOTRUN -> [SKIP][7] ([fdo#109271]) +48 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-snb6/igt@gem_userptr_blits@mmap-offset-invalidate-active@wb.html

  * igt@gem_userptr_blits@process-exit-mmap@wc:
    - shard-hsw:          NOTRUN -> [SKIP][8] ([fdo#109271]) +111 similar issues
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-hsw4/igt@gem_userptr_blits@process-exit-mmap@wc.html

  * igt@kms_async_flips@alternate-sync-async-flip:
    - shard-skl:          [PASS][9] -> [FAIL][10] ([i915#2521])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl6/igt@kms_async_flips@alternate-sync-async-flip.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl5/igt@kms_async_flips@alternate-sync-async-flip.html

  * igt@kms_color_chamelium@pipe-a-ctm-0-5:
    - shard-hsw:          NOTRUN -> [SKIP][11] ([fdo#109271] / [fdo#111827]) +4 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-hsw6/igt@kms_color_chamelium@pipe-a-ctm-0-5.html

  * igt@kms_color_chamelium@pipe-a-gamma:
    - shard-skl:          NOTRUN -> [SKIP][12] ([fdo#109271] / [fdo#111827]) +2 similar issues
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl10/igt@kms_color_chamelium@pipe-a-gamma.html

  * igt@kms_color_chamelium@pipe-c-ctm-red-to-blue:
    - shard-snb:          NOTRUN -> [SKIP][13] ([fdo#109271] / [fdo#111827]) +3 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-snb6/igt@kms_color_chamelium@pipe-c-ctm-red-to-blue.html

  * igt@kms_cursor_crc@pipe-a-cursor-128x128-random:
    - shard-skl:          [PASS][14] -> [FAIL][15] ([i915#54]) +5 similar issues
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl9/igt@kms_cursor_crc@pipe-a-cursor-128x128-random.html
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl9/igt@kms_cursor_crc@pipe-a-cursor-128x128-random.html

  * igt@kms_cursor_crc@pipe-b-cursor-64x64-onscreen:
    - shard-skl:          NOTRUN -> [FAIL][16] ([i915#54])
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl10/igt@kms_cursor_crc@pipe-b-cursor-64x64-onscreen.html

  * igt@kms_cursor_legacy@pipe-d-torture-move:
    - shard-skl:          NOTRUN -> [SKIP][17] ([fdo#109271]) +38 similar issues
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl10/igt@kms_cursor_legacy@pipe-d-torture-move.html

  * igt@kms_flip@plain-flip-fb-recreate-interruptible@a-edp1:
    - shard-skl:          [PASS][18] -> [FAIL][19] ([i915#2122]) +1 similar issue
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl5/igt@kms_flip@plain-flip-fb-recreate-interruptible@a-edp1.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl5/igt@kms_flip@plain-flip-fb-recreate-interruptible@a-edp1.html

  * igt@kms_psr@psr2_sprite_plane_move:
    - shard-iclb:         [PASS][20] -> [SKIP][21] ([fdo#109441]) +2 similar issues
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-iclb2/igt@kms_psr@psr2_sprite_plane_move.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-iclb4/igt@kms_psr@psr2_sprite_plane_move.html

  * igt@perf@polling-parameterized:
    - shard-skl:          [PASS][22] -> [FAIL][23] ([i915#1542])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl6/igt@perf@polling-parameterized.html
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl2/igt@perf@polling-parameterized.html

  * igt@runner@aborted:
    - shard-hsw:          NOTRUN -> [FAIL][24] ([i915#142] / [i915#2292] / [i915#2505])
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-hsw4/igt@runner@aborted.html

  
#### Possible fixes ####

  * {igt@gem_exec_balancer@fairslice}:
    - shard-tglb:         [FAIL][25] ([i915#2802]) -> [PASS][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-tglb3/igt@gem_exec_balancer@fairslice.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-tglb6/igt@gem_exec_balancer@fairslice.html

  * {igt@gem_exec_fair@basic-deadline}:
    - shard-kbl:          [FAIL][27] ([i915#2846]) -> [PASS][28]
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-kbl6/igt@gem_exec_fair@basic-deadline.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-kbl4/igt@gem_exec_fair@basic-deadline.html

  * {igt@gem_exec_fair@basic-flow@rcs0}:
    - shard-tglb:         [FAIL][29] ([i915#2842]) -> [PASS][30] +1 similar issue
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-tglb5/igt@gem_exec_fair@basic-flow@rcs0.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-tglb7/igt@gem_exec_fair@basic-flow@rcs0.html

  * {igt@gem_exec_fair@basic-none-share@rcs0}:
    - shard-iclb:         [FAIL][31] ([i915#2842]) -> [PASS][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-iclb7/igt@gem_exec_fair@basic-none-share@rcs0.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-iclb8/igt@gem_exec_fair@basic-none-share@rcs0.html

  * {igt@gem_exec_fair@basic-pace-share@rcs0}:
    - shard-glk:          [FAIL][33] ([i915#2842]) -> [PASS][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-glk5/igt@gem_exec_fair@basic-pace-share@rcs0.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-glk1/igt@gem_exec_fair@basic-pace-share@rcs0.html

  * {igt@gem_exec_fair@basic-pace@vcs0}:
    - shard-kbl:          [SKIP][35] ([fdo#109271]) -> [PASS][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-kbl1/igt@gem_exec_fair@basic-pace@vcs0.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-kbl1/igt@gem_exec_fair@basic-pace@vcs0.html

  * igt@gem_userptr_blits@map-fixed-invalidate-busy@uc:
    - shard-snb:          [INCOMPLETE][37] -> [PASS][38]
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-snb6/igt@gem_userptr_blits@map-fixed-invalidate-busy@uc.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-snb6/igt@gem_userptr_blits@map-fixed-invalidate-busy@uc.html

  * igt@kms_async_flips@test-time-stamp:
    - shard-tglb:         [FAIL][39] ([i915#2574]) -> [PASS][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-tglb6/igt@kms_async_flips@test-time-stamp.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-tglb7/igt@kms_async_flips@test-time-stamp.html

  * igt@kms_cursor_crc@pipe-c-cursor-128x42-offscreen:
    - shard-skl:          [FAIL][41] ([i915#54]) -> [PASS][42] +4 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl7/igt@kms_cursor_crc@pipe-c-cursor-128x42-offscreen.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl7/igt@kms_cursor_crc@pipe-c-cursor-128x42-offscreen.html

  * igt@kms_cursor_edge_walk@pipe-a-128x128-top-edge:
    - shard-snb:          [SKIP][43] ([fdo#109271]) -> [PASS][44]
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-snb4/igt@kms_cursor_edge_walk@pipe-a-128x128-top-edge.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-snb7/igt@kms_cursor_edge_walk@pipe-a-128x128-top-edge.html

  * igt@kms_cursor_edge_walk@pipe-c-256x256-right-edge:
    - shard-skl:          [DMESG-WARN][45] ([i915#1982]) -> [PASS][46] +1 similar issue
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl2/igt@kms_cursor_edge_walk@pipe-c-256x256-right-edge.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl3/igt@kms_cursor_edge_walk@pipe-c-256x256-right-edge.html

  * igt@kms_cursor_legacy@flip-vs-cursor-toggle:
    - shard-tglb:         [FAIL][47] ([i915#2346]) -> [PASS][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-tglb7/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-tglb1/igt@kms_cursor_legacy@flip-vs-cursor-toggle.html

  * igt@kms_flip@plain-flip-fb-recreate-interruptible@c-edp1:
    - shard-skl:          [FAIL][49] ([i915#2122]) -> [PASS][50]
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl5/igt@kms_flip@plain-flip-fb-recreate-interruptible@c-edp1.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl5/igt@kms_flip@plain-flip-fb-recreate-interruptible@c-edp1.html

  * igt@kms_hdr@bpc-switch:
    - shard-skl:          [FAIL][51] ([i915#1188]) -> [PASS][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl2/igt@kms_hdr@bpc-switch.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl10/igt@kms_hdr@bpc-switch.html

  * igt@kms_plane@plane-position-hole-pipe-a-planes:
    - shard-skl:          [FAIL][53] ([i915#2472]) -> [PASS][54]
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl1/igt@kms_plane@plane-position-hole-pipe-a-planes.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl7/igt@kms_plane@plane-position-hole-pipe-a-planes.html

  * igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min:
    - shard-skl:          [FAIL][55] ([fdo#108145] / [i915#265]) -> [PASS][56] +1 similar issue
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl1/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl7/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html

  * igt@kms_psr@psr2_no_drrs:
    - shard-iclb:         [SKIP][57] ([fdo#109441]) -> [PASS][58] +2 similar issues
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-iclb3/igt@kms_psr@psr2_no_drrs.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-iclb2/igt@kms_psr@psr2_no_drrs.html

  * igt@perf@polling-parameterized:
    - shard-glk:          [FAIL][59] ([i915#1542]) -> [PASS][60]
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-glk8/igt@perf@polling-parameterized.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-glk6/igt@perf@polling-parameterized.html

  
#### Warnings ####

  * igt@i915_pm_dc@dc3co-vpb-simulation:
    - shard-iclb:         [SKIP][61] ([i915#658]) -> [SKIP][62] ([i915#588])
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-iclb3/igt@i915_pm_dc@dc3co-vpb-simulation.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-iclb2/igt@i915_pm_dc@dc3co-vpb-simulation.html

  * igt@i915_pm_dc@dc6-dpms:
    - shard-skl:          [FAIL][63] ([i915#454]) -> [INCOMPLETE][64] ([i915#198])
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-skl4/igt@i915_pm_dc@dc6-dpms.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-skl2/igt@i915_pm_dc@dc6-dpms.html

  * igt@i915_pm_rc6_residency@rc6-fence:
    - shard-iclb:         [WARN][65] ([i915#1804] / [i915#2684]) -> [WARN][66] ([i915#2684])
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-iclb3/igt@i915_pm_rc6_residency@rc6-fence.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-iclb2/igt@i915_pm_rc6_residency@rc6-fence.html

  * igt@runner@aborted:
    - shard-kbl:          [FAIL][67] ([i915#2295]) -> [FAIL][68] ([i915#2295] / [i915#483])
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-kbl7/igt@runner@aborted.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-kbl3/igt@runner@aborted.html
    - shard-iclb:         [FAIL][69] ([i915#2295] / [i915#2724] / [i915#483]) -> [FAIL][70] ([i915#2295] / [i915#2724])
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-iclb5/igt@runner@aborted.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-iclb4/igt@runner@aborted.html
    - shard-tglb:         [FAIL][71] ([i915#2295] / [i915#2667]) -> ([FAIL][72], [FAIL][73]) ([i915#2295] / [i915#2426] / [i915#2667] / [i915#2803])
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9562/shard-tglb1/igt@runner@aborted.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-tglb6/igt@runner@aborted.html
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/shard-tglb1/igt@runner@aborted.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [i915#1099]: https://gitlab.freedesktop.org/drm/intel/issues/1099
  [i915#118]: https://gitlab.freedesktop.org/drm/intel/issues/118
  [i915#1188]: https://gitlab.freedesktop.org/drm/intel/issues/1188
  [i915#142]: https://gitlab.freedesktop.org/drm/intel/issues/142
  [i915#1542]: https://gitlab.freedesktop.org/drm/intel/issues/1542
  [i915#1804]: https://gitlab.freedesktop.org/drm/intel/issues/1804
  [i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198
  [i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
  [i915#2122]: https://gitlab.freedesktop.org/drm/intel/issues/2122
  [i915#2292]: https://gitlab.freedesktop.org/drm/intel/issues/2292
  [i915#2295]: https://gitlab.freedesktop.org/drm/intel/issues/2295
  [i915#2346]: https://gitlab.freedesktop.org/drm/intel/issues/2346
  [i915#2426]: https://gitlab.freedesktop.org/drm/intel/issues/2426
  [i915#2472]: https://gitlab.freedesktop.org/drm/intel/issues/2472
  [i915#2505]: https://gitlab.freedesktop.org/drm/intel/issues/2505
  [i915#2521]: https://gitlab.freedesktop.org/drm/intel/issues/2521
  [i915#2574]: https://gitlab.freedesktop.org/drm/intel/issues/2574
  [i915#265]: https://gitlab.freedesktop.org/drm/intel/issues/265
  [i915#2667]: https://gitlab.freedesktop.org/drm/intel/issues/2667
  [i915#2684]: https://gitlab.freedesktop.org/drm/intel/issues/2684
  [i915#2724]: https://gitlab.freedesktop.org/drm/intel/issues/2724
  [i915#2802]: https://gitlab.freedesktop.org/drm/intel/issues/2802
  [i915#2803]: https://gitlab.freedesktop.org/drm/intel/issues/2803
  [i915#2842]: https://gitlab.freedesktop.org/drm/intel/issues/2842
  [i915#2846]: https://gitlab.freedesktop.org/drm/intel/issues/2846
  [i915#2852]: https://gitlab.freedesktop.org/drm/intel/issues/2852
  [i915#454]: https://gitlab.freedesktop.org/drm/intel/issues/454
  [i915#483]: https://gitlab.freedesktop.org/drm/intel/issues/483
  [i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
  [i915#588]: https://gitlab.freedesktop.org/drm/intel/issues/588
  [i915#658]: https://gitlab.freedesktop.org/drm/intel/issues/658
  [i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * Linux: CI_DRM_9562 -> Patchwork_19279

  CI-20190529: 20190529
  CI_DRM_9562: fc8d32007355b4babc37b621b3c9a4e0fe998d27 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5946: 641e5545213dd9a82d80a4e065013a138afb58ff @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_19279: 00408bfe5158970f1350607258989d54d4845eaa @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19279/index.html

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2021-01-07 21:37 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-01-06 16:36 [Intel-gfx] [PATCH v2 1/2] drm/i915: Wrap our timer_list.expires checking Chris Wilson
2021-01-06 16:36 ` [Intel-gfx] [PATCH v2 2/2] drm/i915/gt: Remove timeslice suppression Chris Wilson
2021-01-07 11:01   ` [Intel-gfx] [PATCH v3] " Chris Wilson
2021-01-07 11:05   ` Chris Wilson
2021-01-07 11:34     ` Andi Shyti
2021-01-07 12:53     ` Tvrtko Ursulin
2021-01-06 16:49 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking Patchwork
2021-01-06 17:16 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2021-01-07  1:55 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
2021-01-07  9:25 ` [Intel-gfx] [PATCH v2 1/2] " Tvrtko Ursulin
2021-01-07 16:47 ` [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/2] drm/i915: Wrap our timer_list.expires checking (rev3) Patchwork
2021-01-07 17:15 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2021-01-07 21:37 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).