* [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative
@ 2019-06-25 13:01 Chris Wilson
  2019-06-25 13:01 ` [PATCH 02/20] drm/i915/gt: Pass intel_gt to pm routines Chris Wilson
                   ` (28 more replies)
  0 siblings, 29 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

As this engine owns the lock around rq->sched.link (for those waiters
submitted to this engine), we can use that link as an element in a local
list. We can thus replace the recursive algorithm with an iterative walk
over the ordered list of waiters.
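
As a rough sketch of the conversion (using hypothetical node/link names
rather than the driver's real i915_request/i915_dependency types), the
recursion collapses into a worklist walk along these lines:

  #include <linux/list.h>

  struct node {
  	struct list_head link;		/* position in the submission queue */
  	struct list_head waiters;	/* nodes waiting on this one */
  	struct list_head wait_link;	/* our entry in a signaler's waiters */
  };

  static void defer_all(struct node *n, struct list_head *pl)
  {
  	LIST_HEAD(todo);

  	do {
  		struct node *w;

  		/* Push the current node to the back of the queue... */
  		list_move_tail(&n->link, pl);

  		/* ...and gather its waiters onto a local worklist. */
  		list_for_each_entry(w, &n->waiters, wait_link)
  			list_move_tail(&w->link, &todo);

  		/* The local worklist replaces the old recursion stack. */
  		n = list_first_entry_or_null(&todo, struct node, link);
  	} while (n);
  }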

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 52 +++++++++++++++--------------
 1 file changed, 27 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 28685ba91a2c..22afd2616d7f 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -833,10 +833,9 @@ last_active(const struct intel_engine_execlists *execlists)
 	return *last;
 }
 
-static void
-defer_request(struct i915_request * const rq, struct list_head * const pl)
+static void defer_request(struct i915_request *rq, struct list_head * const pl)
 {
-	struct i915_dependency *p;
+	LIST_HEAD(list);
 
 	/*
 	 * We want to move the interrupted request to the back of
@@ -845,34 +844,37 @@ defer_request(struct i915_request * const rq, struct list_head * const pl)
 	 * flight and were waiting for the interrupted request to
 	 * be run after it again.
 	 */
-	list_move_tail(&rq->sched.link, pl);
+	do {
+		struct i915_dependency *p;
 
-	list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
-		struct i915_request *w =
-			container_of(p->waiter, typeof(*w), sched);
+		GEM_BUG_ON(i915_request_is_active(rq));
+		list_move_tail(&rq->sched.link, pl);
 
-		/* Leave semaphores spinning on the other engines */
-		if (w->engine != rq->engine)
-			continue;
+		list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+			struct i915_request *w =
+				container_of(p->waiter, typeof(*w), sched);
 
-		/* No waiter should start before the active request completed */
-		GEM_BUG_ON(i915_request_started(w));
+			/* Leave semaphores spinning on the other engines */
+			if (w->engine != rq->engine)
+				continue;
 
-		GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
-		if (rq_prio(w) < rq_prio(rq))
-			continue;
+			/* No waiter should start before its signaler */
+			GEM_BUG_ON(i915_request_started(w) &&
+				   !i915_request_completed(rq));
 
-		if (list_empty(&w->sched.link))
-			continue; /* Not yet submitted; unready */
+			GEM_BUG_ON(i915_request_is_active(w));
+			if (list_empty(&w->sched.link))
+				continue; /* Not yet submitted; unready */
 
-		/*
-		 * This should be very shallow as it is limited by the
-		 * number of requests that can fit in a ring (<64) and
-		 * the number of contexts that can be in flight on this
-		 * engine.
-		 */
-		defer_request(w, pl);
-	}
+			if (rq_prio(w) < rq_prio(rq))
+				continue;
+
+			GEM_BUG_ON(rq_prio(w) > rq_prio(rq));
+			list_move_tail(&w->sched.link, &list);
+		}
+
+		rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+	} while (rq);
 }
 
 static void defer_active(struct intel_engine_cs *engine)
-- 
2.20.1


* [PATCH 02/20] drm/i915/gt: Pass intel_gt to pm routines
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 18:07   ` Matthew Auld
  2019-06-25 13:01 ` [PATCH 03/20] drm/i915/selftests: Serialise nop reset with retirement Chris Wilson
                   ` (27 subsequent siblings)
  28 siblings, 1 reply; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

Switch from passing the i915 container to the newly named struct intel_gt.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  4 ++--
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |  2 +-
 .../drm/i915/gem/selftests/i915_gem_mman.c    |  4 ++--
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |  8 +++----
 drivers/gpu/drm/i915/gt/intel_gt_pm.c         | 24 +++++++++++--------
 drivers/gpu/drm/i915/gt/intel_gt_pm.h         |  9 ++++---
 drivers/gpu/drm/i915/gt/intel_reset.c         |  6 ++---
 drivers/gpu/drm/i915/i915_drv.c               |  2 +-
 drivers/gpu/drm/i915/i915_gem.c               |  2 +-
 drivers/gpu/drm/i915/selftests/i915_gem.c     |  2 +-
 10 files changed, 33 insertions(+), 30 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index cf8edb6822ee..1c5dfbfad71b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2437,7 +2437,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * wakeref that we hold until the GPU has been idle for at least
 	 * 100ms.
 	 */
-	intel_gt_pm_get(eb.i915);
+	intel_gt_pm_get(&eb.i915->gt);
 
 	err = i915_mutex_lock_interruptible(dev);
 	if (err)
@@ -2607,7 +2607,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
-	intel_gt_pm_put(eb.i915);
+	intel_gt_pm_put(&eb.i915->gt);
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 8f721cf0ab99..ee1f66594a35 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -258,7 +258,7 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	intel_gt_resume(i915);
+	intel_gt_resume(&i915->gt);
 
 	if (i915_gem_init_hw(i915))
 		goto err_wedged;
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 24a3c677ccd5..a1f0b235f56b 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -379,7 +379,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 {
 	i915_gem_shrinker_unregister(i915);
 
-	intel_gt_pm_get(i915);
+	intel_gt_pm_get(&i915->gt);
 
 	cancel_delayed_work_sync(&i915->gem.retire_work);
 	flush_work(&i915->gem.idle_work);
@@ -387,7 +387,7 @@ static void disable_retire_worker(struct drm_i915_private *i915)
 
 static void restore_retire_worker(struct drm_i915_private *i915)
 {
-	intel_gt_pm_put(i915);
+	intel_gt_pm_put(&i915->gt);
 
 	mutex_lock(&i915->drm.struct_mutex);
 	igt_flush_test(i915, I915_WAIT_LOCKED);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 2ce00d3dc42a..5253c382034d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -18,7 +18,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
 
 	GEM_TRACE("%s\n", engine->name);
 
-	intel_gt_pm_get(engine->i915);
+	intel_gt_pm_get(engine->gt);
 
 	/* Pin the default state for fast resets from atomic context. */
 	map = NULL;
@@ -129,7 +129,7 @@ static int __engine_park(struct intel_wakeref *wf)
 
 	engine->execlists.no_priolist = false;
 
-	intel_gt_pm_put(engine->i915);
+	intel_gt_pm_put(engine->gt);
 	return 0;
 }
 
@@ -149,7 +149,7 @@ int intel_engines_resume(struct drm_i915_private *i915)
 	enum intel_engine_id id;
 	int err = 0;
 
-	intel_gt_pm_get(i915);
+	intel_gt_pm_get(&i915->gt);
 	for_each_engine(engine, i915, id) {
 		intel_engine_pm_get(engine);
 		engine->serial++; /* kernel context lost */
@@ -162,7 +162,7 @@ int intel_engines_resume(struct drm_i915_private *i915)
 			break;
 		}
 	}
-	intel_gt_pm_put(i915);
+	intel_gt_pm_put(&i915->gt);
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 6062840b5b46..ec6b69d014b6 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -50,9 +50,11 @@ static int intel_gt_unpark(struct intel_wakeref *wf)
 	return 0;
 }
 
-void intel_gt_pm_get(struct drm_i915_private *i915)
+void intel_gt_pm_get(struct intel_gt *gt)
 {
-	intel_wakeref_get(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_unpark);
+	struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+	intel_wakeref_get(rpm, &gt->wakeref, intel_gt_unpark);
 }
 
 static int intel_gt_park(struct intel_wakeref *wf)
@@ -75,9 +77,11 @@ static int intel_gt_park(struct intel_wakeref *wf)
 	return 0;
 }
 
-void intel_gt_pm_put(struct drm_i915_private *i915)
+void intel_gt_pm_put(struct intel_gt *gt)
 {
-	intel_wakeref_put(&i915->runtime_pm, &i915->gt.wakeref, intel_gt_park);
+	struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
+
+	intel_wakeref_put(rpm, &gt->wakeref, intel_gt_park);
 }
 
 void intel_gt_pm_init_early(struct intel_gt *gt)
@@ -96,7 +100,7 @@ static bool reset_engines(struct drm_i915_private *i915)
 
 /**
  * intel_gt_sanitize: called after the GPU has lost power
- * @i915: the i915 device
+ * @gt: the i915 GT container
  * @force: ignore a failed reset and sanitize engine state anyway
  *
  * Anytime we reset the GPU, either with an explicit GPU reset or through a
@@ -104,21 +108,21 @@ static bool reset_engines(struct drm_i915_private *i915)
  * to match. Note that calling intel_gt_sanitize() if the GPU has not
  * been reset results in much confusion!
  */
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force)
+void intel_gt_sanitize(struct intel_gt *gt, bool force)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
 	GEM_TRACE("\n");
 
-	if (!reset_engines(i915) && !force)
+	if (!reset_engines(gt->i915) && !force)
 		return;
 
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, gt->i915, id)
 		intel_engine_reset(engine, false);
 }
 
-void intel_gt_resume(struct drm_i915_private *i915)
+void intel_gt_resume(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
@@ -129,7 +133,7 @@ void intel_gt_resume(struct drm_i915_private *i915)
 	 * Only the kernel contexts should remain pinned over suspend,
 	 * allowing us to fixup the user contexts on their first pin.
 	 */
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, gt->i915, id) {
 		struct intel_context *ce;
 
 		ce = engine->kernel_context;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index b6049a907890..4dbb92cf58d7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -9,7 +9,6 @@
 
 #include <linux/types.h>
 
-struct drm_i915_private;
 struct intel_gt;
 
 enum {
@@ -17,12 +16,12 @@ enum {
 	INTEL_GT_PARK,
 };
 
-void intel_gt_pm_get(struct drm_i915_private *i915);
-void intel_gt_pm_put(struct drm_i915_private *i915);
+void intel_gt_pm_get(struct intel_gt *gt);
+void intel_gt_pm_put(struct intel_gt *gt);
 
 void intel_gt_pm_init_early(struct intel_gt *gt);
 
-void intel_gt_sanitize(struct drm_i915_private *i915, bool force);
-void intel_gt_resume(struct drm_i915_private *i915);
+void intel_gt_sanitize(struct intel_gt *gt, bool force);
+void intel_gt_resume(struct intel_gt *gt);
 
 #endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 3c925af64793..e92054e118cc 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -714,7 +714,7 @@ static void reset_prepare(struct drm_i915_private *i915)
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
-	intel_gt_pm_get(i915);
+	intel_gt_pm_get(&i915->gt);
 	for_each_engine(engine, i915, id)
 		reset_prepare_engine(engine);
 
@@ -765,7 +765,7 @@ static void reset_finish(struct drm_i915_private *i915)
 		reset_finish_engine(engine);
 		intel_engine_signal_breadcrumbs(engine);
 	}
-	intel_gt_pm_put(i915);
+	intel_gt_pm_put(&i915->gt);
 }
 
 static void nop_submit_request(struct i915_request *request)
@@ -891,7 +891,7 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
 	}
 	mutex_unlock(&i915->gt.timelines.mutex);
 
-	intel_gt_sanitize(i915, false);
+	intel_gt_sanitize(&i915->gt, false);
 
 	/*
 	 * Undo nop_submit_request. We prevent all new i915 requests from
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e1817f89f5d5..07240edd75f0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2376,7 +2376,7 @@ static int i915_drm_resume_early(struct drm_device *dev)
 
 	intel_power_domains_resume(dev_priv);
 
-	intel_gt_sanitize(dev_priv, true);
+	intel_gt_sanitize(&dev_priv->gt, true);
 
 	enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e59be5c05e1b..deecbe128e5b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1157,7 +1157,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	 * it may impact the display and we are uncertain about the stability
 	 * of the reset, so this could be applied to even earlier gen.
 	 */
-	intel_gt_sanitize(i915, false);
+	intel_gt_sanitize(&i915->gt, false);
 
 	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
 	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index c6a01a6e87f1..ed0c17bf6613 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -115,7 +115,7 @@ static void pm_resume(struct drm_i915_private *i915)
 	 * that runtime-pm just works.
 	 */
 	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		intel_gt_sanitize(i915, false);
+		intel_gt_sanitize(&i915->gt, false);
 		i915_gem_sanitize(i915);
 		i915_gem_resume(i915);
 	}
-- 
2.20.1


* [PATCH 03/20] drm/i915/selftests: Serialise nop reset with retirement
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
  2019-06-25 13:01 ` [PATCH 02/20] drm/i915/gt: Pass intel_gt to pm routines Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 04/20] drm/i915/selftests: Drop manual request wakerefs around hangcheck Chris Wilson
                   ` (26 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

In order for the reset count to be accurate across our selftest, we need
to prevent the background retire worker from modifying our expected
state.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 3ceb397c8645..0e0b6c572ae9 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -398,6 +398,7 @@ static int igt_reset_nop(void *arg)
 	count = 0;
 	do {
 		mutex_lock(&i915->drm.struct_mutex);
+
 		for_each_engine(engine, i915, id) {
 			int i;
 
@@ -413,11 +414,12 @@ static int igt_reset_nop(void *arg)
 				i915_request_add(rq);
 			}
 		}
-		mutex_unlock(&i915->drm.struct_mutex);
 
 		igt_global_reset_lock(i915);
 		i915_reset(i915, ALL_ENGINES, NULL);
 		igt_global_reset_unlock(i915);
+
+		mutex_unlock(&i915->drm.struct_mutex);
 		if (i915_reset_failed(i915)) {
 			err = -EIO;
 			break;
@@ -511,9 +513,8 @@ static int igt_reset_nop_engine(void *arg)
 
 				i915_request_add(rq);
 			}
-			mutex_unlock(&i915->drm.struct_mutex);
-
 			err = i915_reset_engine(engine, NULL);
+			mutex_unlock(&i915->drm.struct_mutex);
 			if (err) {
 				pr_err("i915_reset_engine failed\n");
 				break;
-- 
2.20.1


* [PATCH 04/20] drm/i915/selftests: Drop manual request wakerefs around hangcheck
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
  2019-06-25 13:01 ` [PATCH 02/20] drm/i915/gt: Pass intel_gt to pm routines Chris Wilson
  2019-06-25 13:01 ` [PATCH 03/20] drm/i915/selftests: Serialise nop reset with retirement Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 05/20] drm/i915/selftests: Fixup atomic reset checking Chris Wilson
                   ` (25 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

We no longer need to manually acquire a wakeref for request emission, so
drop the redundant wakerefs, letting us test our wakeref handling more
precisely.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 7 -------
 drivers/gpu/drm/i915/gt/selftest_reset.c     | 4 ++--
 2 files changed, 2 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 0e0b6c572ae9..cf592a049a71 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -373,7 +373,6 @@ static int igt_reset_nop(void *arg)
 	struct i915_gem_context *ctx;
 	unsigned int reset_count, count;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	struct drm_file *file;
 	IGT_TIMEOUT(end_time);
 	int err = 0;
@@ -393,7 +392,6 @@ static int igt_reset_nop(void *arg)
 	}
 
 	i915_gem_context_clear_bannable(ctx);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	reset_count = i915_reset_count(&i915->gpu_error);
 	count = 0;
 	do {
@@ -442,8 +440,6 @@ static int igt_reset_nop(void *arg)
 	err = igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
-
 out:
 	mock_file_free(i915, file);
 	if (i915_reset_failed(i915))
@@ -457,7 +453,6 @@ static int igt_reset_nop_engine(void *arg)
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
 	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
 	struct drm_file *file;
 	int err = 0;
 
@@ -479,7 +474,6 @@ static int igt_reset_nop_engine(void *arg)
 	}
 
 	i915_gem_context_clear_bannable(ctx);
-	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 	for_each_engine(engine, i915, id) {
 		unsigned int reset_count, reset_engine_count;
 		unsigned int count;
@@ -549,7 +543,6 @@ static int igt_reset_nop_engine(void *arg)
 	err = igt_flush_test(i915, I915_WAIT_LOCKED);
 	mutex_unlock(&i915->drm.struct_mutex);
 
-	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
 out:
 	mock_file_free(i915, file);
 	if (i915_reset_failed(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 89da9e7cc1ba..64c2c8ab64ec 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -63,8 +63,8 @@ static int igt_atomic_reset(void *arg)
 
 	/* Check that the resets are usable from atomic context */
 
+	intel_gt_pm_get(&i915->gt);
 	igt_global_reset_lock(i915);
-	mutex_lock(&i915->drm.struct_mutex);
 
 	/* Flush any requests before we get started and check basics */
 	if (!igt_force_reset(i915))
@@ -89,8 +89,8 @@ static int igt_atomic_reset(void *arg)
 	igt_force_reset(i915);
 
 unlock:
-	mutex_unlock(&i915->drm.struct_mutex);
 	igt_global_reset_unlock(i915);
+	intel_gt_pm_put(&i915->gt);
 
 	return err;
 }
-- 
2.20.1


* [PATCH 05/20] drm/i915/selftests: Fixup atomic reset checking
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (2 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 04/20] drm/i915/selftests: Drop manual request wakerefs around hangcheck Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 06/20] drm/i915: Rename intel_wakeref_[is]_active Chris Wilson
                   ` (24 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

We only require that intel_gpu_reset() be atomic, not the whole of
i915_reset(), which is guarded by a mutex. However, we do require that
i915_reset_engine() be atomic, as it is used from within the submission
tasklet.
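
For reference, each entry in igt_atomic_phases brackets the reset with a
different way of entering atomic context. A paraphrased sketch of such a
phase table (the helper names below are illustrative; only the name and
critical_section_begin/end fields are relied upon by this patch):

  static void __preempt_begin(void) { preempt_disable(); }
  static void __preempt_end(void) { preempt_enable(); }
  static void __softirq_begin(void) { local_bh_disable(); }
  static void __softirq_end(void) { local_bh_enable(); }
  static void __hardirq_begin(void) { local_irq_disable(); }
  static void __hardirq_end(void) { local_irq_enable(); }

  static const struct {
  	const char *name;
  	void (*critical_section_begin)(void);
  	void (*critical_section_end)(void);
  } igt_atomic_phases[] = {
  	{ "preempt", __preempt_begin, __preempt_end },
  	{ "softirq", __softirq_begin, __softirq_end },
  	{ "hardirq", __hardirq_begin, __hardirq_end },
  	{ }
  };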

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/selftest_reset.c | 65 +++++++++++++++++++++++-
 1 file changed, 63 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 64c2c8ab64ec..641cf3aee8d5 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -73,11 +73,13 @@ static int igt_atomic_reset(void *arg)
 	for (p = igt_atomic_phases; p->name; p++) {
 		GEM_TRACE("intel_gpu_reset under %s\n", p->name);
 
-		p->critical_section_begin();
 		reset_prepare(i915);
+		p->critical_section_begin();
+
 		err = intel_gpu_reset(i915, ALL_ENGINES);
-		reset_finish(i915);
+
 		p->critical_section_end();
+		reset_finish(i915);
 
 		if (err) {
 			pr_err("intel_gpu_reset failed under %s\n", p->name);
@@ -95,12 +97,71 @@ static int igt_atomic_reset(void *arg)
 	return err;
 }
 
+static int igt_atomic_engine_reset(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	const typeof(*igt_atomic_phases) *p;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = 0;
+
+	/* Check that the resets are usable from atomic context */
+
+	if (!intel_has_reset_engine(i915))
+		return 0;
+
+	if (USES_GUC_SUBMISSION(i915))
+		return 0;
+
+	intel_gt_pm_get(&i915->gt);
+	igt_global_reset_lock(i915);
+
+	/* Flush any requests before we get started and check basics */
+	if (!igt_force_reset(i915))
+		goto out_unlock;
+
+	for_each_engine(engine, i915, id) {
+		tasklet_disable_nosync(&engine->execlists.tasklet);
+		intel_engine_pm_get(engine);
+
+		for (p = igt_atomic_phases; p->name; p++) {
+			GEM_TRACE("i915_reset_engine(%s) under %s\n",
+				  engine->name, p->name);
+
+			p->critical_section_begin();
+			err = i915_reset_engine(engine, NULL);
+			p->critical_section_end();
+
+			if (err) {
+				pr_err("i915_reset_engine(%s) failed under %s\n",
+				       engine->name, p->name);
+				break;
+			}
+		}
+
+		intel_engine_pm_put(engine);
+		tasklet_enable(&engine->execlists.tasklet);
+		if (err)
+			break;
+	}
+
+	/* As we poke around the guts, do a full reset before continuing. */
+	igt_force_reset(i915);
+
+out_unlock:
+	igt_global_reset_unlock(i915);
+	intel_gt_pm_put(&i915->gt);
+
+	return err;
+}
+
 int intel_reset_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_global_reset), /* attempt to recover GPU first */
 		SUBTEST(igt_wedged_reset),
 		SUBTEST(igt_atomic_reset),
+		SUBTEST(igt_atomic_engine_reset),
 	};
 	intel_wakeref_t wakeref;
 	int err = 0;
-- 
2.20.1


* [PATCH 06/20] drm/i915: Rename intel_wakeref_[is]_active
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (3 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 05/20] drm/i915/selftests: Fixup atomic reset checking Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 07/20] drm/i915: Add a wakeref getter for iff the wakeref is already active Chris Wilson
                   ` (23 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

Our general rule is to use is/has as the verb for boolean functions, so
rename intel_wakeref_active to intel_wakeref_is_active to make the
question being asked clear.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c    | 3 ++-
 drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.h | 9 +++++++++
 drivers/gpu/drm/i915/gt/intel_lrc.c       | 2 +-
 drivers/gpu/drm/i915/gt/intel_reset.c     | 2 +-
 drivers/gpu/drm/i915/intel_wakeref.h      | 4 ++--
 6 files changed, 16 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index ee1f66594a35..6b730bd4d72f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -54,7 +54,8 @@ static void idle_work_handler(struct work_struct *work)
 	mutex_lock(&i915->drm.struct_mutex);
 
 	intel_wakeref_lock(&i915->gt.wakeref);
-	park = !intel_wakeref_active(&i915->gt.wakeref) && !work_pending(work);
+	park = (!intel_wakeref_is_active(&i915->gt.wakeref) &&
+		!work_pending(work));
 	intel_wakeref_unlock(&i915->gt.wakeref);
 	if (park)
 		i915_gem_park(i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 4961f74fd902..d1508f0b4c84 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1155,7 +1155,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
 	if (i915_reset_failed(engine->i915))
 		return true;
 
-	if (!intel_wakeref_active(&engine->wakeref))
+	if (!intel_engine_pm_is_awake(engine))
 		return true;
 
 	/* Waiting to drain ELSP? */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index b326cd993d60..f3f5b031b4a1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -7,12 +7,21 @@
 #ifndef INTEL_ENGINE_PM_H
 #define INTEL_ENGINE_PM_H
 
+#include "intel_engine_types.h"
+#include "intel_wakeref.h"
+
 struct drm_i915_private;
 struct intel_engine_cs;
 
 void intel_engine_pm_get(struct intel_engine_cs *engine);
 void intel_engine_pm_put(struct intel_engine_cs *engine);
 
+static inline bool
+intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
+{
+	return intel_wakeref_is_active(&engine->wakeref);
+}
+
 void intel_engine_park(struct intel_engine_cs *engine);
 
 void intel_engine_init__pm(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 22afd2616d7f..471e134de186 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -684,7 +684,7 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 	 * that all ELSP are drained i.e. we have processed the CSB,
 	 * before allowing ourselves to idle and calling intel_runtime_pm_put().
 	 */
-	GEM_BUG_ON(!intel_wakeref_active(&engine->wakeref));
+	GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
 
 	/*
 	 * ELSQ note: the submit queue is not cleared after being submitted
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index e92054e118cc..8ce92c51564e 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1072,7 +1072,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
 
-	if (!intel_wakeref_active(&engine->wakeref))
+	if (!intel_engine_pm_is_awake(engine))
 		return 0;
 
 	reset_prepare_engine(engine);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index d45e78639dc4..f74272770a5c 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -128,13 +128,13 @@ intel_wakeref_unlock(struct intel_wakeref *wf)
 }
 
 /**
- * intel_wakeref_active: Query whether the wakeref is currently held
+ * intel_wakeref_is_active: Query whether the wakeref is currently held
  * @wf: the wakeref
  *
  * Returns: true if the wakeref is currently held.
  */
 static inline bool
-intel_wakeref_active(struct intel_wakeref *wf)
+intel_wakeref_is_active(const struct intel_wakeref *wf)
 {
 	return READ_ONCE(wf->wakeref);
 }
-- 
2.20.1


* [PATCH 07/20] drm/i915: Add a wakeref getter for iff the wakeref is already active
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (4 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 06/20] drm/i915: Rename intel_wakeref_[is]_active Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 08/20] drm/i915: Only recover active engines Chris Wilson
                   ` (22 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

For use in the next patch, we want to acquire a wakeref without having
to wake the device up -- i.e. only acquire the engine wakeref if the
engine is already active.
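
A sketch of the intended caller pattern (poke_if_awake() is a made-up
name for illustration; the real user arrives in the following patches):

  static int poke_if_awake(struct intel_engine_cs *engine)
  {
  	/* Take a reference only if the engine is already awake... */
  	if (!intel_engine_pm_get_if_awake(engine))
  		return 0; /* parked: skip without waking the device */

  	/* ...so nothing in here ever runs the sleeping unpark path. */
  	/* do the work that requires an awake engine */

  	intel_engine_pm_put(engine);
  	return 0;
  }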

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_engine_pm.h |  7 ++++++-
 drivers/gpu/drm/i915/intel_wakeref.h      | 15 +++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index f3f5b031b4a1..7d057cdcd919 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -11,7 +11,6 @@
 #include "intel_wakeref.h"
 
 struct drm_i915_private;
-struct intel_engine_cs;
 
 void intel_engine_pm_get(struct intel_engine_cs *engine);
 void intel_engine_pm_put(struct intel_engine_cs *engine);
@@ -22,6 +21,12 @@ intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
 	return intel_wakeref_is_active(&engine->wakeref);
 }
 
+static inline bool
+intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+{
+	return intel_wakeref_get_if_active(&engine->wakeref);
+}
+
 void intel_engine_park(struct intel_engine_cs *engine);
 
 void intel_engine_init__pm(struct intel_engine_cs *engine);
diff --git a/drivers/gpu/drm/i915/intel_wakeref.h b/drivers/gpu/drm/i915/intel_wakeref.h
index f74272770a5c..1d6f5986e4e5 100644
--- a/drivers/gpu/drm/i915/intel_wakeref.h
+++ b/drivers/gpu/drm/i915/intel_wakeref.h
@@ -71,6 +71,21 @@ intel_wakeref_get(struct intel_runtime_pm *rpm,
 	return 0;
 }
 
+/**
+ * intel_wakeref_get_if_active: Acquire the wakeref
+ * @wf: the wakeref
+ *
+ * Acquire a hold on the wakeref, but only if the wakeref is already
+ * active.
+ *
+ * Returns: true if the wakeref was acquired, false otherwise.
+ */
+static inline bool
+intel_wakeref_get_if_active(struct intel_wakeref *wf)
+{
+	return atomic_inc_not_zero(&wf->count);
+}
+
 /**
  * intel_wakeref_put: Release the wakeref
  * @i915: the drm_i915_private device
-- 
2.20.1


* [PATCH 08/20] drm/i915: Only recover active engines
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (5 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 07/20] drm/i915: Add a wakeref getter for iff the wakeref is already active Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 09/20] drm/i915: Lift intel_engines_resume() to callers Chris Wilson
                   ` (21 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

If we issue a reset to a currently idle engine, leave it idle
afterwards. This is useful to excise a linkage between reset and the
shrinker. When waking the engine, we need to pin the default context
image which we use for overwriting a guilty context -- if the engine is
idle we do not need this pinned image! However, this pinning means that
waking the engine acquires FS_RECLAIM, and so may trigger the
shrinker. The shrinker itself may need to wait upon the GPU to unbind
an object and so may require the services of reset; ergo we should avoid
the engine wake-up path.

The danger in skipping the recovery for idle engines is that we leave the
engine with no context defined, which may interfere with the operation of
the power context on some older platforms. In practice, we should only
be resetting an active GPU, but it is something to look out for on Ironlake
(if memory serves).
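
Condensed from the diff below, the reset bracket then becomes: pin only
the engines that were already awake in reset_prepare(), hand that set to
reset_finish() as a mask, and release only those engines afterwards:

  static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
  {
  	struct intel_engine_cs *engine;
  	intel_engine_mask_t awake = 0;
  	enum intel_engine_id id;

  	for_each_engine(engine, i915, id) {
  		/* Only engines already in use are pinned for recovery */
  		if (intel_engine_pm_get_if_awake(engine))
  			awake |= engine->mask;
  		reset_prepare_engine(engine);
  	}

  	intel_uc_reset_prepare(i915);
  	return awake;
  }

  static void reset_finish(struct drm_i915_private *i915,
  			 intel_engine_mask_t awake)
  {
  	struct intel_engine_cs *engine;
  	enum intel_engine_id id;

  	for_each_engine(engine, i915, id) {
  		reset_finish_engine(engine);
  		if (awake & engine->mask)
  			intel_engine_pm_put(engine);
  	}
  }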

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_reset.c    | 37 ++++++++++++++----------
 drivers/gpu/drm/i915/gt/selftest_reset.c |  6 ++--
 2 files changed, 26 insertions(+), 17 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 8ce92c51564e..e7cbd9cf85c1 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -678,7 +678,6 @@ static void reset_prepare_engine(struct intel_engine_cs *engine)
 	 * written to the powercontext is undefined and so we may lose
 	 * GPU state upon resume, i.e. fail to restart after a reset.
 	 */
-	intel_engine_pm_get(engine);
 	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
 	engine->reset.prepare(engine);
 }
@@ -709,16 +708,21 @@ static void revoke_mmaps(struct drm_i915_private *i915)
 	}
 }
 
-static void reset_prepare(struct drm_i915_private *i915)
+static intel_engine_mask_t reset_prepare(struct drm_i915_private *i915)
 {
 	struct intel_engine_cs *engine;
+	intel_engine_mask_t awake = 0;
 	enum intel_engine_id id;
 
-	intel_gt_pm_get(&i915->gt);
-	for_each_engine(engine, i915, id)
+	for_each_engine(engine, i915, id) {
+		if (intel_engine_pm_get_if_awake(engine))
+			awake |= engine->mask;
 		reset_prepare_engine(engine);
+	}
 
 	intel_uc_reset_prepare(i915);
+
+	return awake;
 }
 
 static void gt_revoke(struct drm_i915_private *i915)
@@ -752,20 +756,22 @@ static int gt_reset(struct drm_i915_private *i915,
 static void reset_finish_engine(struct intel_engine_cs *engine)
 {
 	engine->reset.finish(engine);
-	intel_engine_pm_put(engine);
 	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
+
+	intel_engine_signal_breadcrumbs(engine);
 }
 
-static void reset_finish(struct drm_i915_private *i915)
+static void reset_finish(struct drm_i915_private *i915,
+			 intel_engine_mask_t awake)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
 	for_each_engine(engine, i915, id) {
 		reset_finish_engine(engine);
-		intel_engine_signal_breadcrumbs(engine);
+		if (awake & engine->mask)
+			intel_engine_pm_put(engine);
 	}
-	intel_gt_pm_put(&i915->gt);
 }
 
 static void nop_submit_request(struct i915_request *request)
@@ -789,6 +795,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
 	struct intel_engine_cs *engine;
+	intel_engine_mask_t awake;
 	enum intel_engine_id id;
 
 	if (test_bit(I915_WEDGED, &error->flags))
@@ -808,7 +815,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 	 * rolling the global seqno forward (since this would complete requests
 	 * for which we haven't set the fence error to EIO yet).
 	 */
-	reset_prepare(i915);
+	awake = reset_prepare(i915);
 
 	/* Even if the GPU reset fails, it should still stop the engines */
 	if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -832,7 +839,7 @@ static void __i915_gem_set_wedged(struct drm_i915_private *i915)
 	for_each_engine(engine, i915, id)
 		engine->cancel_requests(engine);
 
-	reset_finish(i915);
+	reset_finish(i915, awake);
 
 	GEM_TRACE("end\n");
 }
@@ -964,6 +971,7 @@ void i915_reset(struct drm_i915_private *i915,
 		const char *reason)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
+	intel_engine_mask_t awake;
 	int ret;
 
 	GEM_TRACE("flags=%lx\n", error->flags);
@@ -980,7 +988,7 @@ void i915_reset(struct drm_i915_private *i915,
 		dev_notice(i915->drm.dev, "Resetting chip for %s\n", reason);
 	error->reset_count++;
 
-	reset_prepare(i915);
+	awake = reset_prepare(i915);
 
 	if (!intel_has_gpu_reset(i915)) {
 		if (i915_modparams.reset)
@@ -1021,7 +1029,7 @@ void i915_reset(struct drm_i915_private *i915,
 	i915_queue_hangcheck(i915);
 
 finish:
-	reset_finish(i915);
+	reset_finish(i915, awake);
 unlock:
 	mutex_unlock(&error->wedge_mutex);
 	return;
@@ -1072,7 +1080,7 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	GEM_TRACE("%s flags=%lx\n", engine->name, error->flags);
 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
 
-	if (!intel_engine_pm_is_awake(engine))
+	if (!intel_engine_pm_get_if_awake(engine))
 		return 0;
 
 	reset_prepare_engine(engine);
@@ -1107,12 +1115,11 @@ int i915_reset_engine(struct intel_engine_cs *engine, const char *msg)
 	 * process to program RING_MODE, HWSP and re-enable submission.
 	 */
 	ret = engine->resume(engine);
-	if (ret)
-		goto out;
 
 out:
 	intel_engine_cancel_stop_cs(engine);
 	reset_finish_engine(engine);
+	intel_engine_pm_put(engine);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 641cf3aee8d5..672e32e1ef95 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -71,15 +71,17 @@ static int igt_atomic_reset(void *arg)
 		goto unlock;
 
 	for (p = igt_atomic_phases; p->name; p++) {
+		intel_engine_mask_t awake;
+
 		GEM_TRACE("intel_gpu_reset under %s\n", p->name);
 
-		reset_prepare(i915);
+		awake = reset_prepare(i915);
 		p->critical_section_begin();
 
 		err = intel_gpu_reset(i915, ALL_ENGINES);
 
 		p->critical_section_end();
-		reset_finish(i915);
+		reset_finish(i915, awake);
 
 		if (err) {
 			pr_err("intel_gpu_reset failed under %s\n", p->name);
-- 
2.20.1


* [PATCH 09/20] drm/i915: Lift intel_engines_resume() to callers
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (6 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 08/20] drm/i915: Only recover active engines Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 10/20] drm/i915: Teach execbuffer to take the engine wakeref not GT Chris Wilson
                   ` (20 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

Since the reset path wants to recover the engines itself, it only wants
to reinitialise the hardware using i915_gem_init_hw(). Pull the call to
intel_engines_resume() to the module init/resume path so we can avoid it
during reset.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c    |   7 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c |  24 ---
 drivers/gpu/drm/i915/gt/intel_engine_pm.h |   2 -
 drivers/gpu/drm/i915/gt/intel_gt_pm.c     |  21 ++-
 drivers/gpu/drm/i915/gt/intel_gt_pm.h     |   2 +-
 drivers/gpu/drm/i915/gt/intel_reset.c     |  21 ++-
 drivers/gpu/drm/i915/i915_gem.c           | 173 +++++++++-------------
 7 files changed, 116 insertions(+), 134 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 6b730bd4d72f..4d774376f5b8 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -254,14 +254,15 @@ void i915_gem_resume(struct drm_i915_private *i915)
 	i915_gem_restore_gtt_mappings(i915);
 	i915_gem_restore_fences(i915);
 
+	if (i915_gem_init_hw(i915))
+		goto err_wedged;
+
 	/*
 	 * As we didn't flush the kernel context before suspend, we cannot
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	intel_gt_resume(&i915->gt);
-
-	if (i915_gem_init_hw(i915))
+	if (intel_gt_resume(&i915->gt))
 		goto err_wedged;
 
 	intel_uc_resume(i915);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 5253c382034d..84e432abe8e0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -142,27 +142,3 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
 {
 	intel_wakeref_init(&engine->wakeref);
 }
-
-int intel_engines_resume(struct drm_i915_private *i915)
-{
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int err = 0;
-
-	intel_gt_pm_get(&i915->gt);
-	for_each_engine(engine, i915, id) {
-		intel_engine_pm_get(engine);
-		engine->serial++; /* kernel context lost */
-		err = engine->resume(engine);
-		intel_engine_pm_put(engine);
-		if (err) {
-			dev_err(i915->drm.dev,
-				"Failed to restart %s (%d)\n",
-				engine->name, err);
-			break;
-		}
-	}
-	intel_gt_pm_put(&i915->gt);
-
-	return err;
-}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.h b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
index 7d057cdcd919..015ac72d7ad0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.h
@@ -31,6 +31,4 @@ void intel_engine_park(struct intel_engine_cs *engine);
 
 void intel_engine_init__pm(struct intel_engine_cs *engine);
 
-int intel_engines_resume(struct drm_i915_private *i915);
-
 #endif /* INTEL_ENGINE_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index ec6b69d014b6..36ba80e6a0b7 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -5,6 +5,7 @@
  */
 
 #include "i915_drv.h"
+#include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 #include "intel_pm.h"
 #include "intel_wakeref.h"
@@ -122,10 +123,11 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force)
 		intel_engine_reset(engine, false);
 }
 
-void intel_gt_resume(struct intel_gt *gt)
+int intel_gt_resume(struct intel_gt *gt)
 {
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
+	int err = 0;
 
 	/*
 	 * After resume, we may need to poke into the pinned kernel
@@ -133,9 +135,12 @@ void intel_gt_resume(struct intel_gt *gt)
 	 * Only the kernel contexts should remain pinned over suspend,
 	 * allowing us to fixup the user contexts on their first pin.
 	 */
+	intel_gt_pm_get(gt);
 	for_each_engine(engine, gt->i915, id) {
 		struct intel_context *ce;
 
+		intel_engine_pm_get(engine);
+
 		ce = engine->kernel_context;
 		if (ce)
 			ce->ops->reset(ce);
@@ -143,5 +148,19 @@ void intel_gt_resume(struct intel_gt *gt)
 		ce = engine->preempt_context;
 		if (ce)
 			ce->ops->reset(ce);
+
+		engine->serial++; /* kernel context lost */
+		err = engine->resume(engine);
+
+		intel_engine_pm_put(engine);
+		if (err) {
+			dev_err(gt->i915->drm.dev,
+				"Failed to restart %s (%d)\n",
+				engine->name, err);
+			break;
+		}
 	}
+	intel_gt_pm_put(gt);
+
+	return err;
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
index 4dbb92cf58d7..ba960e1fc209 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h
@@ -22,6 +22,6 @@ void intel_gt_pm_put(struct intel_gt *gt);
 void intel_gt_pm_init_early(struct intel_gt *gt);
 
 void intel_gt_sanitize(struct intel_gt *gt, bool force);
-void intel_gt_resume(struct intel_gt *gt);
+int intel_gt_resume(struct intel_gt *gt);
 
 #endif /* INTEL_GT_PM_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index e7cbd9cf85c1..adfdb908587f 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -949,6 +949,21 @@ static int do_reset(struct drm_i915_private *i915,
 	return gt_reset(i915, stalled_mask);
 }
 
+static int resume(struct drm_i915_private *i915)
+{
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int ret;
+
+	for_each_engine(engine, i915, id) {
+		ret = engine->resume(engine);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
 /**
  * i915_reset - reset chip after a hang
  * @i915: #drm_i915_private to reset
@@ -1023,9 +1038,13 @@ void i915_reset(struct drm_i915_private *i915,
 	if (ret) {
 		DRM_ERROR("Failed to initialise HW following reset (%d)\n",
 			  ret);
-		goto error;
+		goto taint;
 	}
 
+	ret = resume(i915);
+	if (ret)
+		goto taint;
+
 	i915_queue_hangcheck(i915);
 
 finish:
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index deecbe128e5b..5cc3a75d521a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,6 @@
 #include "gem/i915_gem_ioctls.h"
 #include "gem/i915_gem_pm.h"
 #include "gem/i915_gemfs.h"
-#include "gt/intel_engine_pm.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 #include "gt/intel_mocs.h"
@@ -1192,12 +1191,17 @@ static void init_unused_rings(struct intel_gt *gt)
 	}
 }
 
-static int init_hw(struct intel_gt *gt)
+int i915_gem_init_hw(struct drm_i915_private *i915)
 {
-	struct drm_i915_private *i915 = gt->i915;
-	struct intel_uncore *uncore = gt->uncore;
+	struct intel_uncore *uncore = &i915->uncore;
+	struct intel_gt *gt = &i915->gt;
 	int ret;
 
+	BUG_ON(!i915->kernel_context);
+	ret = i915_terminally_wedged(i915);
+	if (ret)
+		return ret;
+
 	gt->last_init_time = ktime_get();
 
 	/* Double layer security blanket, see i915_gem_init() */
@@ -1248,51 +1252,10 @@ static int init_hw(struct intel_gt *gt)
 
 	intel_mocs_init_l3cc_table(gt);
 
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	return 0;
-
-out:
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	return ret;
-}
-
-int i915_gem_init_hw(struct drm_i915_private *i915)
-{
-	struct intel_uncore *uncore = &i915->uncore;
-	int ret;
-
-	BUG_ON(!i915->kernel_context);
-	ret = i915_terminally_wedged(i915);
-	if (ret)
-		return ret;
-
-	/* Double layer security blanket, see i915_gem_init() */
-	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
-
-	ret = init_hw(&i915->gt);
-	if (ret)
-		goto err_init;
-
-	/* Only when the HW is re-initialised, can we replay the requests */
-	ret = intel_engines_resume(i915);
-	if (ret)
-		goto err_engines;
-
-	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
 	intel_engines_set_scheduler_caps(i915);
 
-	return 0;
-
-err_engines:
-	intel_uc_fini_hw(i915);
-err_init:
+out:
 	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
-
-	intel_engines_set_scheduler_caps(i915);
-
 	return ret;
 }
 
@@ -1449,28 +1412,28 @@ static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
 	return err;
 }
 
-int i915_gem_init(struct drm_i915_private *dev_priv)
+int i915_gem_init(struct drm_i915_private *i915)
 {
 	int ret;
 
 	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
-	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
-		mkwrite_device_info(dev_priv)->page_sizes =
+	if (intel_vgpu_active(i915) && !intel_vgpu_has_huge_gtt(i915))
+		mkwrite_device_info(i915)->page_sizes =
 			I915_GTT_PAGE_SIZE_4K;
 
-	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
+	i915->mm.unordered_timeline = dma_fence_context_alloc(1);
 
-	intel_timelines_init(dev_priv);
+	intel_timelines_init(i915);
 
-	ret = i915_gem_init_userptr(dev_priv);
+	ret = i915_gem_init_userptr(i915);
 	if (ret)
 		return ret;
 
-	ret = intel_uc_init_misc(dev_priv);
+	ret = intel_uc_init_misc(i915);
 	if (ret)
 		return ret;
 
-	ret = intel_wopcm_init(&dev_priv->wopcm);
+	ret = intel_wopcm_init(&i915->wopcm);
 	if (ret)
 		goto err_uc_misc;
 
@@ -1480,50 +1443,55 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * we hold the forcewake during initialisation these problems
 	 * just magically go away.
 	 */
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
 
-	ret = i915_init_ggtt(dev_priv);
+	ret = i915_init_ggtt(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_unlock;
 	}
 
-	ret = i915_gem_init_scratch(dev_priv,
-				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
+	ret = i915_gem_init_scratch(i915,
+				    IS_GEN(i915, 2) ? SZ_256K : PAGE_SIZE);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_ggtt;
 	}
 
-	ret = intel_engines_setup(dev_priv);
+	ret = intel_engines_setup(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_unlock;
 	}
 
-	ret = i915_gem_contexts_init(dev_priv);
+	ret = i915_gem_contexts_init(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_scratch;
 	}
 
-	ret = intel_engines_init(dev_priv);
+	ret = intel_engines_init(i915);
 	if (ret) {
 		GEM_BUG_ON(ret == -EIO);
 		goto err_context;
 	}
 
-	intel_init_gt_powersave(dev_priv);
+	intel_init_gt_powersave(i915);
 
-	ret = intel_uc_init(dev_priv);
+	ret = intel_uc_init(i915);
 	if (ret)
 		goto err_pm;
 
-	ret = i915_gem_init_hw(dev_priv);
+	ret = i915_gem_init_hw(i915);
 	if (ret)
 		goto err_uc_init;
 
+	/* Only when the HW is re-initialised, can we replay the requests */
+	ret = intel_gt_resume(&i915->gt);
+	if (ret)
+		goto err_init_hw;
+
 	/*
 	 * Despite its name intel_init_clock_gating applies both display
 	 * clock gating workarounds; GT mmio workarounds and the occasional
@@ -1533,28 +1501,28 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 *
 	 * FIXME: break up the workarounds and apply them at the right time!
 	 */
-	intel_init_clock_gating(dev_priv);
+	intel_init_clock_gating(i915);
 
-	ret = intel_engines_verify_workarounds(dev_priv);
+	ret = intel_engines_verify_workarounds(i915);
 	if (ret)
-		goto err_init_hw;
+		goto err_gt;
 
-	ret = __intel_engines_record_defaults(dev_priv);
+	ret = __intel_engines_record_defaults(i915);
 	if (ret)
-		goto err_init_hw;
+		goto err_gt;
 
 	if (i915_inject_load_failure()) {
 		ret = -ENODEV;
-		goto err_init_hw;
+		goto err_gt;
 	}
 
 	if (i915_inject_load_failure()) {
 		ret = -EIO;
-		goto err_init_hw;
+		goto err_gt;
 	}
 
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 	return 0;
 
@@ -1564,66 +1532,67 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 	 * HW as irrevisibly wedged, but keep enough state around that the
 	 * driver doesn't explode during runtime.
 	 */
-err_init_hw:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+err_gt:
+	mutex_unlock(&i915->drm.struct_mutex);
 
-	i915_gem_set_wedged(dev_priv);
-	i915_gem_suspend(dev_priv);
-	i915_gem_suspend_late(dev_priv);
+	i915_gem_set_wedged(i915);
+	i915_gem_suspend(i915);
+	i915_gem_suspend_late(i915);
 
-	i915_gem_drain_workqueue(dev_priv);
+	i915_gem_drain_workqueue(i915);
 
-	mutex_lock(&dev_priv->drm.struct_mutex);
-	intel_uc_fini_hw(dev_priv);
+	mutex_lock(&i915->drm.struct_mutex);
+err_init_hw:
+	intel_uc_fini_hw(i915);
 err_uc_init:
-	intel_uc_fini(dev_priv);
+	intel_uc_fini(i915);
 err_pm:
 	if (ret != -EIO) {
-		intel_cleanup_gt_powersave(dev_priv);
-		intel_engines_cleanup(dev_priv);
+		intel_cleanup_gt_powersave(i915);
+		intel_engines_cleanup(i915);
 	}
 err_context:
 	if (ret != -EIO)
-		i915_gem_contexts_fini(dev_priv);
+		i915_gem_contexts_fini(i915);
 err_scratch:
-	i915_gem_fini_scratch(dev_priv);
+	i915_gem_fini_scratch(i915);
 err_ggtt:
 err_unlock:
-	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
-	mutex_unlock(&dev_priv->drm.struct_mutex);
+	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
+	mutex_unlock(&i915->drm.struct_mutex);
 
 err_uc_misc:
-	intel_uc_fini_misc(dev_priv);
+	intel_uc_fini_misc(i915);
 
 	if (ret != -EIO) {
-		i915_gem_cleanup_userptr(dev_priv);
-		intel_timelines_fini(dev_priv);
+		i915_gem_cleanup_userptr(i915);
+		intel_timelines_fini(i915);
 	}
 
 	if (ret == -EIO) {
-		mutex_lock(&dev_priv->drm.struct_mutex);
+		mutex_lock(&i915->drm.struct_mutex);
 
 		/*
 		 * Allow engine initialisation to fail by marking the GPU as
 		 * wedged. But we only want to do this where the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
-		if (!i915_reset_failed(dev_priv)) {
-			i915_load_error(dev_priv,
+		if (!i915_reset_failed(i915)) {
+			i915_load_error(i915,
 					"Failed to initialize GPU, declaring it wedged!\n");
-			i915_gem_set_wedged(dev_priv);
+			i915_gem_set_wedged(i915);
 		}
 
 		/* Minimal basic recovery for KMS */
-		ret = i915_ggtt_enable_hw(dev_priv);
-		i915_gem_restore_gtt_mappings(dev_priv);
-		i915_gem_restore_fences(dev_priv);
-		intel_init_clock_gating(dev_priv);
+		ret = i915_ggtt_enable_hw(i915);
+		i915_gem_restore_gtt_mappings(i915);
+		i915_gem_restore_fences(i915);
+		intel_init_clock_gating(i915);
 
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_unlock(&i915->drm.struct_mutex);
 	}
 
-	i915_gem_drain_freed_objects(dev_priv);
+	i915_gem_drain_freed_objects(i915);
 	return ret;
 }
 
-- 
2.20.1


* [PATCH 10/20] drm/i915: Teach execbuffer to take the engine wakeref not GT
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (7 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 09/20] drm/i915: Lift intel_engines_resume() to callers Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 11/20] drm/i915/gt: Track timeline activeness in enter/exit Chris Wilson
                   ` (19 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

In the next patch, we would like to couple into the engine wakeref to
free the batch pool on idling. The caveat here is that we therefore want
to track the engine wakeref more precisely and to hold it instead of the
broader GT wakeref as we process the ioctl.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 36 ++++++++++++-------
 drivers/gpu/drm/i915/gt/intel_context.h       |  7 ++++
 2 files changed, 31 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 1c5dfbfad71b..f43eaaa5db5f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -2143,13 +2143,35 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 	if (err)
 		return err;
 
+	/*
+	 * Take a local wakeref for preparing to dispatch the execbuf as
+	 * we expect to access the hardware fairly frequently in the
+	 * process. Upon first dispatch, we acquire another prolonged
+	 * wakeref that we hold until the GPU has been idle for at least
+	 * 100ms.
+	 */
+	err = intel_context_timeline_lock(ce);
+	if (err)
+		goto err_unpin;
+
+	intel_context_enter(ce);
+	intel_context_timeline_unlock(ce);
+
 	eb->engine = ce->engine;
 	eb->context = ce;
 	return 0;
+
+err_unpin:
+	intel_context_unpin(ce);
+	return err;
 }
 
 static void eb_unpin_context(struct i915_execbuffer *eb)
 {
+	__intel_context_timeline_lock(eb->context);
+	intel_context_exit(eb->context);
+	intel_context_timeline_unlock(eb->context);
+
 	intel_context_unpin(eb->context);
 }
 
@@ -2430,18 +2452,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_destroy;
 
-	/*
-	 * Take a local wakeref for preparing to dispatch the execbuf as
-	 * we expect to access the hardware fairly frequently in the
-	 * process. Upon first dispatch, we acquire another prolonged
-	 * wakeref that we hold until the GPU has been idle for at least
-	 * 100ms.
-	 */
-	intel_gt_pm_get(&eb.i915->gt);
-
 	err = i915_mutex_lock_interruptible(dev);
 	if (err)
-		goto err_rpm;
+		goto err_context;
 
 	err = eb_select_engine(&eb, file, args);
 	if (unlikely(err))
@@ -2606,8 +2619,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	eb_unpin_context(&eb);
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
-err_rpm:
-	intel_gt_pm_put(&eb.i915->gt);
+err_context:
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 40cd8320fcc3..065ba4ac4e87 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -126,6 +126,13 @@ static inline void intel_context_put(struct intel_context *ce)
 	kref_put(&ce->ref, ce->ops->destroy);
 }
 
+static inline void
+__intel_context_timeline_lock(struct intel_context *ce)
+	__acquires(&ce->ring->timeline->mutex)
+{
+	mutex_lock(&ce->ring->timeline->mutex);
+}
+
 static inline int __must_check
 intel_context_timeline_lock(struct intel_context *ce)
 	__acquires(&ce->ring->timeline->mutex)
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 11/20] drm/i915/gt: Track timeline activeness in enter/exit
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (8 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 10/20] drm/i915: Teach execbuffer to take the engine wakeref not GT Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 12/20] drm/i915/gt: Convert timeline tracking to spinlock Chris Wilson
                   ` (18 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

Lift the addition/removal of the timeline to/from the active_list into
the enter/exit hooks, in order to shorten the active tracking span in
comparison to the existing pin/unpin.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |  1 -
 drivers/gpu/drm/i915/gt/intel_context.c       |  2 +
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |  1 +
 drivers/gpu/drm/i915/gt/intel_lrc.c           |  4 +
 drivers/gpu/drm/i915/gt/intel_timeline.c      | 98 +++++++------------
 drivers/gpu/drm/i915/gt/intel_timeline.h      |  3 +-
 .../gpu/drm/i915/gt/intel_timeline_types.h    |  1 +
 drivers/gpu/drm/i915/gt/selftest_timeline.c   |  2 -
 8 files changed, 46 insertions(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 4d774376f5b8..93d188526457 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -38,7 +38,6 @@ static void i915_gem_park(struct drm_i915_private *i915)
 		i915_gem_batch_pool_fini(&engine->batch_pool);
 	}
 
-	intel_timelines_park(i915);
 	i915_vma_parked(i915);
 
 	i915_globals_park();
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 938dd032b820..bc59f57450a7 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -222,10 +222,12 @@ int __init i915_global_context_init(void)
 void intel_context_enter_engine(struct intel_context *ce)
 {
 	intel_engine_pm_get(ce->engine);
+	intel_timeline_enter(ce->ring->timeline);
 }
 
 void intel_context_exit_engine(struct intel_context *ce)
 {
+	intel_timeline_exit(ce->ring->timeline);
 	intel_engine_pm_put(ce->engine);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 84e432abe8e0..9751a02d86bc 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -88,6 +88,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 
 	/* Check again on the next retirement. */
 	engine->wakeref_serial = engine->serial + 1;
+	intel_timeline_enter(rq->timeline);
 
 	i915_request_add_barriers(rq);
 	__i915_request_commit(rq);
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 471e134de186..2bc25e3f83e6 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -3166,6 +3166,8 @@ static void virtual_context_enter(struct intel_context *ce)
 
 	for (n = 0; n < ve->num_siblings; n++)
 		intel_engine_pm_get(ve->siblings[n]);
+
+	intel_timeline_enter(ce->ring->timeline);
 }
 
 static void virtual_context_exit(struct intel_context *ce)
@@ -3173,6 +3175,8 @@ static void virtual_context_exit(struct intel_context *ce)
 	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
 	unsigned int n;
 
+	intel_timeline_exit(ce->ring->timeline);
+
 	for (n = 0; n < ve->num_siblings; n++)
 		intel_engine_pm_put(ve->siblings[n]);
 }
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 478258274986..b6bfbdefaf7c 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -285,64 +285,11 @@ void intel_timelines_init(struct drm_i915_private *i915)
 	timelines_init(&i915->gt);
 }
 
-static void timeline_add_to_active(struct intel_timeline *tl)
-{
-	struct intel_gt_timelines *gt = &tl->gt->timelines;
-
-	mutex_lock(&gt->mutex);
-	list_add(&tl->link, &gt->active_list);
-	mutex_unlock(&gt->mutex);
-}
-
-static void timeline_remove_from_active(struct intel_timeline *tl)
-{
-	struct intel_gt_timelines *gt = &tl->gt->timelines;
-
-	mutex_lock(&gt->mutex);
-	list_del(&tl->link);
-	mutex_unlock(&gt->mutex);
-}
-
-static void timelines_park(struct intel_gt *gt)
-{
-	struct intel_gt_timelines *timelines = &gt->timelines;
-	struct intel_timeline *timeline;
-
-	mutex_lock(&timelines->mutex);
-	list_for_each_entry(timeline, &timelines->active_list, link) {
-		/*
-		 * All known fences are completed so we can scrap
-		 * the current sync point tracking and start afresh,
-		 * any attempt to wait upon a previous sync point
-		 * will be skipped as the fence was signaled.
-		 */
-		i915_syncmap_free(&timeline->sync);
-	}
-	mutex_unlock(&timelines->mutex);
-}
-
-/**
- * intel_timelines_park - called when the driver idles
- * @i915: the drm_i915_private device
- *
- * When the driver is completely idle, we know that all of our sync points
- * have been signaled and our tracking is then entirely redundant. Any request
- * to wait upon an older sync point will be completed instantly as we know
- * the fence is signaled and therefore we will not even look them up in the
- * sync point map.
- */
-void intel_timelines_park(struct drm_i915_private *i915)
-{
-	timelines_park(&i915->gt);
-}
-
 void intel_timeline_fini(struct intel_timeline *timeline)
 {
 	GEM_BUG_ON(timeline->pin_count);
 	GEM_BUG_ON(!list_empty(&timeline->requests));
 
-	i915_syncmap_free(&timeline->sync);
-
 	if (timeline->hwsp_cacheline)
 		cacheline_free(timeline->hwsp_cacheline);
 	else
@@ -379,6 +326,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
 	if (tl->pin_count++)
 		return 0;
 	GEM_BUG_ON(!tl->pin_count);
+	GEM_BUG_ON(tl->active_count);
 
 	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (err)
@@ -389,7 +337,6 @@ int intel_timeline_pin(struct intel_timeline *tl)
 		offset_in_page(tl->hwsp_offset);
 
 	cacheline_acquire(tl->hwsp_cacheline);
-	timeline_add_to_active(tl);
 
 	return 0;
 
@@ -398,6 +345,40 @@ int intel_timeline_pin(struct intel_timeline *tl)
 	return err;
 }
 
+void intel_timeline_enter(struct intel_timeline *tl)
+{
+	struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+	GEM_BUG_ON(!tl->pin_count);
+	if (tl->active_count++)
+		return;
+	GEM_BUG_ON(!tl->active_count); /* overflow? */
+
+	mutex_lock(&timelines->mutex);
+	list_add(&tl->link, &timelines->active_list);
+	mutex_unlock(&timelines->mutex);
+}
+
+void intel_timeline_exit(struct intel_timeline *tl)
+{
+	struct intel_gt_timelines *timelines = &tl->gt->timelines;
+
+	GEM_BUG_ON(!tl->active_count);
+	if (--tl->active_count)
+		return;
+
+	mutex_lock(&timelines->mutex);
+	list_del(&tl->link);
+	mutex_unlock(&timelines->mutex);
+
+	/*
+	 * Since this timeline is idle, all barriers upon which we were waiting
+	 * must also be complete and so we can discard the last used barriers
+	 * without loss of information.
+	 */
+	i915_syncmap_free(&tl->sync);
+}
+
 static u32 timeline_advance(struct intel_timeline *tl)
 {
 	GEM_BUG_ON(!tl->pin_count);
@@ -555,16 +536,9 @@ void intel_timeline_unpin(struct intel_timeline *tl)
 	if (--tl->pin_count)
 		return;
 
-	timeline_remove_from_active(tl);
+	GEM_BUG_ON(tl->active_count);
 	cacheline_release(tl->hwsp_cacheline);
 
-	/*
-	 * Since this timeline is idle, all bariers upon which we were waiting
-	 * must also be complete and so we can discard the last used barriers
-	 * without loss of information.
-	 */
-	i915_syncmap_free(&tl->sync);
-
 	__i915_vma_unpin(tl->hwsp_ggtt);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.h b/drivers/gpu/drm/i915/gt/intel_timeline.h
index e08cebf64833..f583af1ba18d 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.h
@@ -77,9 +77,11 @@ static inline bool intel_timeline_sync_is_later(struct intel_timeline *tl,
 }
 
 int intel_timeline_pin(struct intel_timeline *tl);
+void intel_timeline_enter(struct intel_timeline *tl);
 int intel_timeline_get_seqno(struct intel_timeline *tl,
 			     struct i915_request *rq,
 			     u32 *seqno);
+void intel_timeline_exit(struct intel_timeline *tl);
 void intel_timeline_unpin(struct intel_timeline *tl);
 
 int intel_timeline_read_hwsp(struct i915_request *from,
@@ -87,7 +89,6 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 			     u32 *hwsp_offset);
 
 void intel_timelines_init(struct drm_i915_private *i915);
-void intel_timelines_park(struct drm_i915_private *i915);
 void intel_timelines_fini(struct drm_i915_private *i915);
 
 #endif
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index 9a71aea7a338..b820ee76b7f5 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -58,6 +58,7 @@ struct intel_timeline {
 	 */
 	struct i915_syncmap *sync;
 
+	unsigned int active_count;
 	struct list_head link;
 	struct intel_gt *gt;
 
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index 193cc564ade2..bc7ed242db3e 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -815,8 +815,6 @@ static int live_hwsp_recycle(void *arg)
 
 			if (err)
 				goto out;
-
-			intel_timelines_park(i915); /* Encourage recycling! */
 		} while (!__igt_timeout(end_time, NULL));
 	}
 
-- 
2.20.1
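
For readers following along, a minimal userspace sketch of the enter/exit
pattern above (illustrative names only, not the driver's API): only the
0 -> 1 transition publishes the object on a global active list, and only
the 1 -> 0 transition unlinks it and discards the now-redundant cached
state. As in the driver, it assumes enter/exit for a given object are
already serialised by that object's owner.

#include <pthread.h>
#include <stddef.h>

struct item {
	unsigned int active_count;	/* serialised by the item's owner */
	struct item *next;		/* link on the global active list */
	int cached_sync;		/* stands in for the timeline syncmap */
};

struct active_list {
	pthread_mutex_t lock;		/* protects only the list itself */
	struct item *head;
};

static void item_enter(struct active_list *al, struct item *it)
{
	if (it->active_count++)		/* already active, nothing more to do */
		return;

	pthread_mutex_lock(&al->lock);
	it->next = al->head;
	al->head = it;
	pthread_mutex_unlock(&al->lock);
}

static void item_exit(struct active_list *al, struct item *it)
{
	struct item **p;

	if (--it->active_count)		/* still active elsewhere */
		return;

	pthread_mutex_lock(&al->lock);
	for (p = &al->head; *p; p = &(*p)->next) {
		if (*p == it) {
			*p = it->next;
			break;
		}
	}
	pthread_mutex_unlock(&al->lock);

	/*
	 * Idle now: any cached synchronisation state is redundant, as every
	 * point it recorded must already have been signalled.
	 */
	it->cached_sync = 0;
}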

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 12/20] drm/i915/gt: Convert timeline tracking to spinlock
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (9 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 11/20] drm/i915/gt: Track timeline activeness in enter/exit Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 13/20] drm/i915/gt: Guard timeline pinning with its own mutex Chris Wilson
                   ` (17 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

Convert the active_list manipulation to use a spinlock so that we can
perform the updates from underneath a quick interrupt callback.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_gt_types.h |  2 +-
 drivers/gpu/drm/i915/gt/intel_reset.c    | 13 ++++++++++---
 drivers/gpu/drm/i915/gt/intel_timeline.c | 12 +++++-------
 drivers/gpu/drm/i915/i915_gem.c          | 20 ++++++++++----------
 4 files changed, 26 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index c03e56628ee2..cfd41e6c54e1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -26,7 +26,7 @@ struct intel_gt {
 	struct i915_ggtt *ggtt;
 
 	struct intel_gt_timelines {
-		struct mutex mutex; /* protects list */
+		spinlock_t lock; /* protects active_list */
 		struct list_head active_list;
 
 		/* Pack multiple timelines' seqnos into the same page */
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index adfdb908587f..72002c0f9698 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -858,6 +858,7 @@ void i915_gem_set_wedged(struct drm_i915_private *i915)
 static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
 {
 	struct i915_gpu_error *error = &i915->gpu_error;
+	struct intel_gt_timelines *timelines = &i915->gt.timelines;
 	struct intel_timeline *tl;
 
 	if (!test_bit(I915_WEDGED, &error->flags))
@@ -878,14 +879,16 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
 	 *
 	 * No more can be submitted until we reset the wedged bit.
 	 */
-	mutex_lock(&i915->gt.timelines.mutex);
-	list_for_each_entry(tl, &i915->gt.timelines.active_list, link) {
+	spin_lock(&timelines->lock);
+	list_for_each_entry(tl, &timelines->active_list, link) {
 		struct i915_request *rq;
 
 		rq = i915_active_request_get_unlocked(&tl->last_request);
 		if (!rq)
 			continue;
 
+		spin_unlock(&timelines->lock);
+
 		/*
 		 * All internal dependencies (i915_requests) will have
 		 * been flushed by the set-wedge, but we may be stuck waiting
@@ -895,8 +898,12 @@ static bool __i915_gem_unset_wedged(struct drm_i915_private *i915)
 		 */
 		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
 		i915_request_put(rq);
+
+		/* Restart iteration after dropping the lock */
+		spin_lock(&timelines->lock);
+		tl = list_entry(&timelines->active_list, typeof(*tl), link);
 	}
-	mutex_unlock(&i915->gt.timelines.mutex);
+	spin_unlock(&timelines->lock);
 
 	intel_gt_sanitize(&i915->gt, false);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index b6bfbdefaf7c..672bccbfd797 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -273,7 +273,7 @@ static void timelines_init(struct intel_gt *gt)
 {
 	struct intel_gt_timelines *timelines = &gt->timelines;
 
-	mutex_init(&timelines->mutex);
+	spin_lock_init(&timelines->lock);
 	INIT_LIST_HEAD(&timelines->active_list);
 
 	spin_lock_init(&timelines->hwsp_lock);
@@ -354,9 +354,9 @@ void intel_timeline_enter(struct intel_timeline *tl)
 		return;
 	GEM_BUG_ON(!tl->active_count); /* overflow? */
 
-	mutex_lock(&timelines->mutex);
+	spin_lock(&timelines->lock);
 	list_add(&tl->link, &timelines->active_list);
-	mutex_unlock(&timelines->mutex);
+	spin_unlock(&timelines->lock);
 }
 
 void intel_timeline_exit(struct intel_timeline *tl)
@@ -367,9 +367,9 @@ void intel_timeline_exit(struct intel_timeline *tl)
 	if (--tl->active_count)
 		return;
 
-	mutex_lock(&timelines->mutex);
+	spin_lock(&timelines->lock);
 	list_del(&tl->link);
-	mutex_unlock(&timelines->mutex);
+	spin_unlock(&timelines->lock);
 
 	/*
 	 * Since this timeline is idle, all barriers upon which we were waiting
@@ -557,8 +557,6 @@ static void timelines_fini(struct intel_gt *gt)
 
 	GEM_BUG_ON(!list_empty(&timelines->active_list));
 	GEM_BUG_ON(!list_empty(&timelines->hwsp_free_list));
-
-	mutex_destroy(&timelines->mutex);
 }
 
 void intel_timelines_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cc3a75d521a..7e390d46ad2e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -905,20 +905,20 @@ static int wait_for_engines(struct drm_i915_private *i915)
 
 static long
 wait_for_timelines(struct drm_i915_private *i915,
-		   unsigned int flags, long timeout)
+		   unsigned int wait, long timeout)
 {
-	struct intel_gt_timelines *gt = &i915->gt.timelines;
+	struct intel_gt_timelines *timelines = &i915->gt.timelines;
 	struct intel_timeline *tl;
 
-	mutex_lock(&gt->mutex);
-	list_for_each_entry(tl, &gt->active_list, link) {
+	spin_lock(&timelines->lock);
+	list_for_each_entry(tl, &timelines->active_list, link) {
 		struct i915_request *rq;
 
 		rq = i915_active_request_get_unlocked(&tl->last_request);
 		if (!rq)
 			continue;
 
-		mutex_unlock(&gt->mutex);
+		spin_unlock(&timelines->lock);
 
 		/*
 		 * "Race-to-idle".
@@ -929,19 +929,19 @@ wait_for_timelines(struct drm_i915_private *i915,
 		 * want to complete as quickly as possible to avoid prolonged
 		 * stalls, so allow the gpu to boost to maximum clocks.
 		 */
-		if (flags & I915_WAIT_FOR_IDLE_BOOST)
+		if (wait & I915_WAIT_FOR_IDLE_BOOST)
 			gen6_rps_boost(rq);
 
-		timeout = i915_request_wait(rq, flags, timeout);
+		timeout = i915_request_wait(rq, wait, timeout);
 		i915_request_put(rq);
 		if (timeout < 0)
 			return timeout;
 
 		/* restart after reacquiring the lock */
-		mutex_lock(&gt->mutex);
-		tl = list_entry(&gt->active_list, typeof(*tl), link);
+		spin_lock(&timelines->lock);
+		tl = list_entry(&timelines->active_list, typeof(*tl), link);
 	}
-	mutex_unlock(&gt->mutex);
+	spin_unlock(&timelines->lock);
 
 	return timeout;
 }
-- 
2.20.1
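
A minimal userspace sketch of the lock-dropping idiom the patch relies on
(illustrative names, not the driver's API): a spinlock must never be held
across a blocking wait, so each element is reference-counted, the lock is
released around the wait, and iteration restarts from the list head
because the list may have changed in the meantime. In the sketch the
waiter also retires the element itself so the loop terminates; in the
driver that is done by request retirement.

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct node {
	struct node *next;
	atomic_int refcount;
};

struct tracker {
	pthread_spinlock_t lock;	/* protects the list */
	struct node *head;
};

static void node_get(struct node *n) { atomic_fetch_add(&n->refcount, 1); }
static void node_put(struct node *n) { atomic_fetch_sub(&n->refcount, 1); }

static void wait_on(struct node *n)	/* stands in for i915_request_wait() */
{
	(void)n;
}

static void drain(struct tracker *t)
{
	pthread_spin_lock(&t->lock);
	while (t->head) {
		struct node *n = t->head;

		node_get(n);			/* keep it alive across the wait */
		pthread_spin_unlock(&t->lock);	/* never sleep under a spinlock */

		wait_on(n);			/* may block */

		/*
		 * Re-take the lock and restart from the head: the list may
		 * have been rebuilt while we slept, so the old cursor cannot
		 * be trusted.
		 */
		pthread_spin_lock(&t->lock);
		if (t->head == n)
			t->head = n->next;	/* retire it so we make progress */
		node_put(n);
	}
	pthread_spin_unlock(&t->lock);
}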

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 13/20] drm/i915/gt: Guard timeline pinning with its own mutex
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (10 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 12/20] drm/i915/gt: Convert timeline tracking to spinlock Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 14/20] drm/i915/selftests: Hold ref on request across waits Chris Wilson
                   ` (16 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

In preparation for removing struct_mutex from around context retirement,
we need to make timeline pinning safe. Since multiple engines/contexts
can share a single timeline, it needs to be protected by a mutex.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_timeline.c      | 29 +++++++++----------
 .../gpu/drm/i915/gt/intel_timeline_types.h    |  2 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |  6 ++--
 3 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 672bccbfd797..d7dcf54b18c5 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -221,7 +221,9 @@ int intel_timeline_init(struct intel_timeline *timeline,
 	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
 
 	timeline->gt = gt;
-	timeline->pin_count = 0;
+
+	atomic_set(&timeline->pin_count, 0);
+
 	timeline->has_initial_breadcrumb = !hwsp;
 	timeline->hwsp_cacheline = NULL;
 
@@ -287,7 +289,7 @@ void intel_timelines_init(struct drm_i915_private *i915)
 
 void intel_timeline_fini(struct intel_timeline *timeline)
 {
-	GEM_BUG_ON(timeline->pin_count);
+	GEM_BUG_ON(atomic_read(&timeline->pin_count));
 	GEM_BUG_ON(!list_empty(&timeline->requests));
 
 	if (timeline->hwsp_cacheline)
@@ -323,33 +325,31 @@ int intel_timeline_pin(struct intel_timeline *tl)
 {
 	int err;
 
-	if (tl->pin_count++)
+	if (atomic_add_unless(&tl->pin_count, 1, 0))
 		return 0;
-	GEM_BUG_ON(!tl->pin_count);
-	GEM_BUG_ON(tl->active_count);
 
 	err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (err)
-		goto unpin;
+		return err;
 
 	tl->hwsp_offset =
 		i915_ggtt_offset(tl->hwsp_ggtt) +
 		offset_in_page(tl->hwsp_offset);
 
 	cacheline_acquire(tl->hwsp_cacheline);
+	if (atomic_fetch_inc(&tl->pin_count)) {
+		cacheline_release(tl->hwsp_cacheline);
+		__i915_vma_unpin(tl->hwsp_ggtt);
+	}
 
 	return 0;
-
-unpin:
-	tl->pin_count = 0;
-	return err;
 }
 
 void intel_timeline_enter(struct intel_timeline *tl)
 {
 	struct intel_gt_timelines *timelines = &tl->gt->timelines;
 
-	GEM_BUG_ON(!tl->pin_count);
+	GEM_BUG_ON(!atomic_read(&tl->pin_count));
 	if (tl->active_count++)
 		return;
 	GEM_BUG_ON(!tl->active_count); /* overflow? */
@@ -381,7 +381,7 @@ void intel_timeline_exit(struct intel_timeline *tl)
 
 static u32 timeline_advance(struct intel_timeline *tl)
 {
-	GEM_BUG_ON(!tl->pin_count);
+	GEM_BUG_ON(!atomic_read(&tl->pin_count));
 	GEM_BUG_ON(tl->seqno & tl->has_initial_breadcrumb);
 
 	return tl->seqno += 1 + tl->has_initial_breadcrumb;
@@ -532,11 +532,10 @@ int intel_timeline_read_hwsp(struct i915_request *from,
 
 void intel_timeline_unpin(struct intel_timeline *tl)
 {
-	GEM_BUG_ON(!tl->pin_count);
-	if (--tl->pin_count)
+	GEM_BUG_ON(!atomic_read(&tl->pin_count));
+	if (!atomic_dec_and_test(&tl->pin_count))
 		return;
 
-	GEM_BUG_ON(tl->active_count);
 	cacheline_release(tl->hwsp_cacheline);
 
 	__i915_vma_unpin(tl->hwsp_ggtt);
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
index b820ee76b7f5..8dd14a2b8781 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h
@@ -25,7 +25,7 @@ struct intel_timeline {
 
 	struct mutex mutex; /* protects the flow of requests */
 
-	unsigned int pin_count;
+	atomic_t pin_count;
 	const u32 *hwsp_seqno;
 	struct i915_vma *hwsp_ggtt;
 	u32 hwsp_offset;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 490ebd121f4c..a48b36d31e65 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -38,13 +38,13 @@ struct mock_ring {
 
 static void mock_timeline_pin(struct intel_timeline *tl)
 {
-	tl->pin_count++;
+	atomic_inc(&tl->pin_count);
 }
 
 static void mock_timeline_unpin(struct intel_timeline *tl)
 {
-	GEM_BUG_ON(!tl->pin_count);
-	tl->pin_count--;
+	GEM_BUG_ON(!atomic_read(&tl->pin_count));
+	atomic_dec(&tl->pin_count);
 }
 
 static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
-- 
2.20.1
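
A minimal userspace sketch of the pin-count scheme above, using C11
atomics (illustrative only, not the driver's API): a lock-free fast path
that only increments when the count is already non-zero, and a slow path
that performs the first-pin setup and then backs it out again if another
thread won the race to publish the first pin.

#include <stdatomic.h>
#include <stdbool.h>

struct pinnable {
	atomic_int pin_count;		/* 0 == unpinned */
	int backing;			/* stands in for the pinned backing store */
};

/* Equivalent of the kernel's atomic_add_unless(v, 1, 0). */
static bool add_unless_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0)
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;

	return false;
}

/* Stand-ins for the expensive setup/teardown of the backing store. */
static int acquire_backing(struct pinnable *p) { p->backing++; return 0; }
static void release_backing(struct pinnable *p) { p->backing--; }

static int pinnable_pin(struct pinnable *p)
{
	int err;

	if (add_unless_zero(&p->pin_count))
		return 0;			/* fast path: already pinned */

	err = acquire_backing(p);		/* first-pin setup */
	if (err)
		return err;

	/* Publish our pin; if someone beat us to it, drop the duplicate. */
	if (atomic_fetch_add(&p->pin_count, 1) != 0)
		release_backing(p);

	return 0;
}

static void pinnable_unpin(struct pinnable *p)
{
	if (atomic_fetch_sub(&p->pin_count, 1) == 1)
		release_backing(p);		/* last unpin releases */
}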

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 14/20] drm/i915/selftests: Hold ref on request across waits
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (11 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 13/20] drm/i915/gt: Guard timeline pinning with its own mutex Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 18:39   ` Matthew Auld
  2019-06-25 13:01 ` [PATCH 15/20] drm/i915/gt: Always call kref_init for the timeline Chris Wilson
                   ` (15 subsequent siblings)
  28 siblings, 1 reply; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

As we wait upon the request, we should be sure to hold our own reference
for our checks.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/i915_request.c | 21 +++++++++++--------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 0fdf948a93a0..1bbfc43d4a9e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -75,55 +75,58 @@ static int igt_wait_request(void *arg)
 		err = -ENOMEM;
 		goto out_unlock;
 	}
+	i915_request_get(request);
 
 	if (i915_request_wait(request, 0, 0) != -ETIME) {
 		pr_err("request wait (busy query) succeeded (expected timeout before submit!)\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_wait(request, 0, T) != -ETIME) {
 		pr_err("request wait succeeded (expected timeout before submit!)\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_completed(request)) {
 		pr_err("request completed before submit!!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	i915_request_add(request);
 
 	if (i915_request_wait(request, 0, 0) != -ETIME) {
 		pr_err("request wait (busy query) succeeded (expected timeout after submit!)\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_completed(request)) {
 		pr_err("request completed immediately!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_wait(request, 0, T / 2) != -ETIME) {
 		pr_err("request wait succeeded (expected timeout!)\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_wait(request, 0, T) == -ETIME) {
 		pr_err("request wait timed out!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (!i915_request_completed(request)) {
 		pr_err("request not complete after waiting!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	if (i915_request_wait(request, 0, T) == -ETIME) {
 		pr_err("request wait timed out when already complete!\n");
-		goto out_unlock;
+		goto out_request;
 	}
 
 	err = 0;
+out_request:
+	i915_request_put(request);
 out_unlock:
 	mock_device_flush(i915);
 	mutex_unlock(&i915->drm.struct_mutex);
-- 
2.20.1
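
A minimal userspace sketch of why the test takes its own reference
(illustrative names only): once the request has been handed to the
backend, the backend may drop the final reference the moment the request
completes, so every check performed after a wait must be backed by a
reference the caller itself holds.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct request {
	atomic_int refcount;
	atomic_bool completed;
};

static struct request *request_create(void)
{
	struct request *rq = calloc(1, sizeof(*rq));

	if (rq)
		atomic_store(&rq->refcount, 1);	/* the submitter's reference */

	return rq;
}

static void request_get(struct request *rq) { atomic_fetch_add(&rq->refcount, 1); }

static void request_put(struct request *rq)
{
	if (atomic_fetch_sub(&rq->refcount, 1) == 1)
		free(rq);			/* last reference frees it */
}

/* Stand-in backend: completes the request and drops its reference. */
static void submit(struct request *rq)
{
	atomic_store(&rq->completed, true);
	request_put(rq);
}

static int check_request(struct request *rq)
{
	int err;

	request_get(rq);	/* our own reference, independent of the backend */
	submit(rq);

	/* Without the extra reference, rq could already have been freed here. */
	err = atomic_load(&rq->completed) ? 0 : -1;

	request_put(rq);
	return err;
}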

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 15/20] drm/i915/gt: Always call kref_init for the timeline
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (12 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 14/20] drm/i915/selftests: Hold ref on request across waits Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 18:42   ` Matthew Auld
  2019-06-25 13:01 ` [PATCH 16/20] drm/i915/gt: Drop stale commentary for timeline density Chris Wilson
                   ` (14 subsequent siblings)
  28 siblings, 1 reply; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

Always initialise the refcount, even for the embedded timelines inside
mock devices.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_timeline.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index d7dcf54b18c5..7fb5defd9e71 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -222,6 +222,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
 
 	timeline->gt = gt;
 
+	kref_init(&timeline->kref);
 	atomic_set(&timeline->pin_count, 0);
 
 	timeline->has_initial_breadcrumb = !hwsp;
@@ -316,8 +317,6 @@ intel_timeline_create(struct intel_gt *gt, struct i915_vma *global_hwsp)
 		return ERR_PTR(err);
 	}
 
-	kref_init(&timeline->kref);
-
 	return timeline;
 }
 
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 16/20] drm/i915/gt: Drop stale commentary for timeline density
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (13 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 15/20] drm/i915/gt: Always call kref_init for the timeline Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 23:28   ` Daniele Ceraolo Spurio
  2019-06-25 13:01 ` [PATCH 17/20] drm/i915/gt: Add some debug tracing for context pinning Chris Wilson
                   ` (13 subsequent siblings)
  28 siblings, 1 reply; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

We no longer allocate a contiguous set of timeline ids for all engines
upon creation, so we should no longer assume that the timelines are
densely allocated within a context. Hopefully, they are still dense
enough for us to take advantage of the compressed radix tree.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_timeline.c | 14 ++------------
 1 file changed, 2 insertions(+), 12 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 7fb5defd9e71..7b476cd55dac 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -210,21 +210,11 @@ int intel_timeline_init(struct intel_timeline *timeline,
 {
 	void *vaddr;
 
-	/*
-	 * Ideally we want a set of engines on a single leaf as we expect
-	 * to mostly be tracking synchronisation between engines. It is not
-	 * a huge issue if this is not the case, but we may want to mitigate
-	 * any page crossing penalties if they become an issue.
-	 *
-	 * Called during early_init before we know how many engines there are.
-	 */
-	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
-
-	timeline->gt = gt;
-
 	kref_init(&timeline->kref);
 	atomic_set(&timeline->pin_count, 0);
 
+	timeline->gt = gt;
+
 	timeline->has_initial_breadcrumb = !hwsp;
 	timeline->hwsp_cacheline = NULL;
 
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 17/20] drm/i915/gt: Add some debug tracing for context pinning
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (14 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 16/20] drm/i915/gt: Drop stale commentary for timeline density Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 18:47   ` Matthew Auld
  2019-06-25 13:01 ` [PATCH 18/20] drm/i915: Include the breadcrumb when asserting request completion Chris Wilson
                   ` (12 subsequent siblings)
  28 siblings, 1 reply; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

Add the context pin/unpin events to the trace for post-mortem debugging.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c        | 5 +++++
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c | 3 +++
 2 files changed, 8 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 2bc25e3f83e6..ce1aba3604b3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1509,6 +1509,8 @@ static void execlists_context_destroy(struct kref *kref)
 
 static void execlists_context_unpin(struct intel_context *ce)
 {
+	GEM_TRACE("%s context:%llx unpin\n",
+		  ce->engine->name, ce->ring->timeline->fence_context);
 	i915_gem_context_unpin_hw_id(ce->gem_context);
 	i915_gem_object_unpin_map(ce->state->obj);
 }
@@ -1567,6 +1569,9 @@ __execlists_context_pin(struct intel_context *ce,
 	ce->lrc_desc = lrc_descriptor(ce, engine);
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	__execlists_update_reg_state(ce, engine);
+	GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
+		  engine->name, ce->ring->timeline->fence_context,
+		  ce->ring->head, ce->ring->tail);
 
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index f094406dcc56..81f9b0422e6a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1197,6 +1197,7 @@ int intel_ring_pin(struct intel_ring *ring)
 	GEM_BUG_ON(ring->vaddr);
 	ring->vaddr = addr;
 
+	GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context);
 	return 0;
 
 err_ring:
@@ -1223,6 +1224,8 @@ void intel_ring_unpin(struct intel_ring *ring)
 	if (!atomic_dec_and_test(&ring->pin_count))
 		return;
 
+	GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
+
 	/* Discard any unused bytes beyond that submitted to hw. */
 	intel_ring_reset(ring, ring->tail);
 
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 18/20] drm/i915: Include the breadcrumb when asserting request completion
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (15 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 17/20] drm/i915/gt: Add some debug tracing for context pinning Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 14:42   ` [PATCH] " Chris Wilson
  2019-06-25 13:01 ` [PATCH 19/20] drm/i915: Protect request retirement with timeline->mutex Chris Wilson
                   ` (11 subsequent siblings)
  28 siblings, 1 reply; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

As we explode, include the breadcrumb value in the trace so we can see
why the request completion assertion decided to fire.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c   |  2 +-
 drivers/gpu/drm/i915/gt/intel_reset.c |  2 +-
 drivers/gpu/drm/i915/i915_request.c   |  2 +-
 drivers/gpu/drm/i915/i915_request.h   | 12 ++++++++++++
 4 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index ce1aba3604b3..18b94fc952d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1381,7 +1381,7 @@ static void process_csb(struct intel_engine_cs *engine)
 			 * coherent (visible from the CPU) before the
 			 * user interrupt and CSB is processed.
 			 */
-			GEM_BUG_ON(!i915_request_completed(rq));
+			assert_request_complete(rq);
 			execlists_schedule_out(rq);
 
 			GEM_BUG_ON(execlists->active - execlists->inflight >
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 72002c0f9698..ebddcfae212b 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -123,7 +123,7 @@ void i915_reset_request(struct i915_request *rq, bool guilty)
 		  yesno(guilty));
 
 	lockdep_assert_held(&rq->engine->active.lock);
-	GEM_BUG_ON(i915_request_completed(rq));
+	assert_request_complete(rq);
 
 	if (guilty) {
 		i915_request_skip(rq, -EIO);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5ff87c4a0cd5..5b1c638d3754 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -329,7 +329,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 		  hwsp_seqno(rq));
 
 	lockdep_assert_held(&rq->i915->drm.struct_mutex);
-	GEM_BUG_ON(!i915_request_completed(rq));
+	assert_request_complete(rq);
 
 	if (list_empty(&rq->ring_link))
 		return;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index b58ceef92e20..a41791637076 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -432,4 +432,16 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
 
 bool i915_retire_requests(struct drm_i915_private *i915);
 
+static inline void assert_request_complete(const struct i915_request *rq)
+{
+	u32 hwsp = hwsp_seqno(rq);
+
+	if (i915_seqno_passed(hwsp, rq->fence.seqno))
+		return;
+
+	GEM_TRACE("fence %llx:%llu incomplete, current %u\n",
+		  rq->fence.context, rq->fence.seqno, hwsp);
+	GEM_BUG_ON(!i915_seqno_passed(hwsp, rq->fence.seqno));
+}
+
 #endif /* I915_REQUEST_H */
-- 
2.20.1
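
A minimal userspace analogue of the helper above (illustrative, not the
driver's API): print the interesting values before the assertion fires,
using the same wrap-safe seqno comparison, so a post-mortem log shows why
it blew up.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Wrap-safe "a has passed b" comparison, as i915_seqno_passed() does. */
static bool seqno_passed(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) >= 0;
}

static void assert_seqno_passed(uint32_t current, uint32_t expected)
{
	if (seqno_passed(current, expected))
		return;

	/* Log the state first so the failure is debuggable post-mortem. */
	fprintf(stderr, "seqno %u has not passed %u\n", current, expected);
	abort();
}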

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 19/20] drm/i915: Protect request retirement with timeline->mutex
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (16 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 18/20] drm/i915: Include the breadcrumb when asserting request completion Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 13:01 ` [PATCH 20/20] drm/i915: Replace struct_mutex for batch pool serialisation Chris Wilson
                   ` (10 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx

Forgo the struct_mutex requirement for request retirement as we have
been transitioning over to only using the timeline->mutex for
controlling the lifetime of a request on that timeline.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 192 ++++++++++--------
 drivers/gpu/drm/i915/gt/intel_context.h       |  25 +--
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |   1 -
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   2 -
 drivers/gpu/drm/i915/gt/intel_gt.c            |   1 -
 drivers/gpu/drm/i915/gt/intel_gt_types.h      |   2 -
 drivers/gpu/drm/i915/gt/intel_lrc.c           |   1 +
 drivers/gpu/drm/i915/gt/intel_ringbuffer.c    |  13 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |   1 -
 drivers/gpu/drm/i915/i915_request.c           | 151 +++++++-------
 drivers/gpu/drm/i915/i915_request.h           |   3 -
 11 files changed, 203 insertions(+), 189 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index f43eaaa5db5f..80c9c57a302f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -739,63 +739,6 @@ static int eb_select_context(struct i915_execbuffer *eb)
 	return 0;
 }
 
-static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
-{
-	struct i915_request *rq;
-
-	/*
-	 * Completely unscientific finger-in-the-air estimates for suitable
-	 * maximum user request size (to avoid blocking) and then backoff.
-	 */
-	if (intel_ring_update_space(ring) >= PAGE_SIZE)
-		return NULL;
-
-	/*
-	 * Find a request that after waiting upon, there will be at least half
-	 * the ring available. The hysteresis allows us to compete for the
-	 * shared ring and should mean that we sleep less often prior to
-	 * claiming our resources, but not so long that the ring completely
-	 * drains before we can submit our next request.
-	 */
-	list_for_each_entry(rq, &ring->request_list, ring_link) {
-		if (__intel_ring_space(rq->postfix,
-				       ring->emit, ring->size) > ring->size / 2)
-			break;
-	}
-	if (&rq->ring_link == &ring->request_list)
-		return NULL; /* weird, we will check again later for real */
-
-	return i915_request_get(rq);
-}
-
-static int eb_wait_for_ring(const struct i915_execbuffer *eb)
-{
-	struct i915_request *rq;
-	int ret = 0;
-
-	/*
-	 * Apply a light amount of backpressure to prevent excessive hogs
-	 * from blocking waiting for space whilst holding struct_mutex and
-	 * keeping all of their resources pinned.
-	 */
-
-	rq = __eb_wait_for_ring(eb->context->ring);
-	if (rq) {
-		mutex_unlock(&eb->i915->drm.struct_mutex);
-
-		if (i915_request_wait(rq,
-				      I915_WAIT_INTERRUPTIBLE,
-				      MAX_SCHEDULE_TIMEOUT) < 0)
-			ret = -EINTR;
-
-		i915_request_put(rq);
-
-		mutex_lock(&eb->i915->drm.struct_mutex);
-	}
-
-	return ret;
-}
-
 static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
 	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
@@ -2122,10 +2065,75 @@ static const enum intel_engine_id user_ring_map[] = {
 	[I915_EXEC_VEBOX]	= VECS0
 };
 
-static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+static struct i915_request *eb_throttle(struct intel_context *ce)
+{
+	struct intel_ring *ring = ce->ring;
+	struct intel_timeline *tl = ring->timeline;
+	struct i915_request *rq;
+
+	/*
+	 * Completely unscientific finger-in-the-air estimates for suitable
+	 * maximum user request size (to avoid blocking) and then backoff.
+	 */
+	if (intel_ring_update_space(ring) >= PAGE_SIZE)
+		return NULL;
+
+	/*
+	 * Find a request that after waiting upon, there will be at least half
+	 * the ring available. The hysteresis allows us to compete for the
+	 * shared ring and should mean that we sleep less often prior to
+	 * claiming our resources, but not so long that the ring completely
+	 * drains before we can submit our next request.
+	 */
+	list_for_each_entry(rq, &tl->requests, link) {
+		if (rq->ring != ring)
+			continue;
+
+		if (__intel_ring_space(rq->postfix,
+				       ring->emit, ring->size) > ring->size / 2)
+			break;
+	}
+	if (&rq->link == &tl->requests)
+		return NULL; /* weird, we will check again later for real */
+
+	return i915_request_get(rq);
+}
+
+static int
+__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 {
 	int err;
 
+	if (likely(atomic_inc_not_zero(&ce->pin_count)))
+		return 0;
+
+	err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex);
+	if (err)
+		return err;
+
+	err = __intel_context_do_pin(ce);
+	mutex_unlock(&eb->i915->drm.struct_mutex);
+
+	return err;
+}
+
+static void
+__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+	if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
+		return;
+
+	mutex_lock(&eb->i915->drm.struct_mutex);
+	intel_context_unpin(ce);
+	mutex_unlock(&eb->i915->drm.struct_mutex);
+}
+
+static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
+{
+	struct intel_timeline *tl;
+	struct i915_request *rq;
+	int err;
+
 	/*
 	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 	 * EIO if the GPU is already wedged.
@@ -2139,7 +2147,7 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	err = intel_context_pin(ce);
+	err = __eb_pin_context(eb, ce);
 	if (err)
 		return err;
 
@@ -2150,29 +2158,52 @@ static int eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce)
 	 * wakeref that we hold until the GPU has been idle for at least
 	 * 100ms.
 	 */
-	err = intel_context_timeline_lock(ce);
-	if (err)
+	tl = intel_context_timeline_lock(ce);
+	if (IS_ERR(tl)) {
+		err = PTR_ERR(tl);
 		goto err_unpin;
+	}
 
 	intel_context_enter(ce);
-	intel_context_timeline_unlock(ce);
+	rq = eb_throttle(ce);
+
+	intel_context_timeline_unlock(tl);
+
+	if (rq) {
+		if (i915_request_wait(rq,
+				      I915_WAIT_INTERRUPTIBLE,
+				      MAX_SCHEDULE_TIMEOUT) < 0) {
+			i915_request_put(rq);
+			err = -EINTR;
+			goto err_exit;
+		}
+
+		i915_request_put(rq);
+	}
 
 	eb->engine = ce->engine;
 	eb->context = ce;
 	return 0;
 
+err_exit:
+	mutex_lock(&tl->mutex);
+	intel_context_exit(ce);
+	intel_context_timeline_unlock(tl);
 err_unpin:
-	intel_context_unpin(ce);
+	__eb_unpin_context(eb, ce);
 	return err;
 }
 
-static void eb_unpin_context(struct i915_execbuffer *eb)
+static void eb_unpin_engine(struct i915_execbuffer *eb)
 {
-	__intel_context_timeline_lock(eb->context);
-	intel_context_exit(eb->context);
-	intel_context_timeline_unlock(eb->context);
+	struct intel_context *ce = eb->context;
+	struct intel_timeline *tl = ce->ring->timeline;
+
+	mutex_lock(&tl->mutex);
+	intel_context_exit(ce);
+	intel_context_timeline_unlock(tl);
 
-	intel_context_unpin(eb->context);
+	__eb_unpin_context(eb, ce);
 }
 
 static unsigned int
@@ -2217,9 +2248,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
 }
 
 static int
-eb_select_engine(struct i915_execbuffer *eb,
-		 struct drm_file *file,
-		 struct drm_i915_gem_execbuffer2 *args)
+eb_pin_engine(struct i915_execbuffer *eb,
+	      struct drm_file *file,
+	      struct drm_i915_gem_execbuffer2 *args)
 {
 	struct intel_context *ce;
 	unsigned int idx;
@@ -2234,7 +2265,7 @@ eb_select_engine(struct i915_execbuffer *eb,
 	if (IS_ERR(ce))
 		return PTR_ERR(ce);
 
-	err = eb_pin_context(eb, ce);
+	err = __eb_pin_engine(eb, ce);
 	intel_context_put(ce);
 
 	return err;
@@ -2452,16 +2483,12 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_destroy;
 
-	err = i915_mutex_lock_interruptible(dev);
-	if (err)
-		goto err_context;
-
-	err = eb_select_engine(&eb, file, args);
+	err = eb_pin_engine(&eb, file, args);
 	if (unlikely(err))
-		goto err_unlock;
+		goto err_context;
 
-	err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
-	if (unlikely(err))
+	err = i915_mutex_lock_interruptible(dev);
+	if (err)
 		goto err_engine;
 
 	err = eb_relocate(&eb);
@@ -2615,10 +2642,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
-err_engine:
-	eb_unpin_context(&eb);
-err_unlock:
 	mutex_unlock(&dev->struct_mutex);
+err_engine:
+	eb_unpin_engine(&eb);
 err_context:
 	i915_gem_context_put(eb.gem_context);
 err_destroy:
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 065ba4ac4e87..38b60cbf2592 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
 #include "i915_active.h"
 #include "intel_context_types.h"
 #include "intel_engine_types.h"
+#include "intel_timeline_types.h"
 
 void intel_context_init(struct intel_context *ce,
 			struct i915_gem_context *ctx,
@@ -126,24 +127,24 @@ static inline void intel_context_put(struct intel_context *ce)
 	kref_put(&ce->ref, ce->ops->destroy);
 }
 
-static inline void
-__intel_context_timeline_lock(struct intel_context *ce)
-	__acquires(&ce->ring->timeline->mutex)
-{
-	mutex_lock(&ce->ring->timeline->mutex);
-}
-
-static inline int __must_check
+static inline struct intel_timeline *__must_check
 intel_context_timeline_lock(struct intel_context *ce)
 	__acquires(&ce->ring->timeline->mutex)
 {
-	return mutex_lock_interruptible(&ce->ring->timeline->mutex);
+	struct intel_timeline *tl = ce->ring->timeline;
+	int err;
+
+	err = mutex_lock_interruptible(&tl->mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	return tl;
 }
 
-static inline void intel_context_timeline_unlock(struct intel_context *ce)
-	__releases(&ce->ring->timeline->mutex)
+static inline void intel_context_timeline_unlock(struct intel_timeline *tl)
+	__releases(&tl->mutex)
 {
-	mutex_unlock(&ce->ring->timeline->mutex);
+	mutex_unlock(&tl->mutex);
 }
 
 struct i915_request *intel_context_create_request(struct intel_context *ce);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d1508f0b4c84..b27fc555fe09 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -745,7 +745,6 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
 				engine->status_page.vma))
 		goto out_frame;
 
-	INIT_LIST_HEAD(&frame->ring.request_list);
 	frame->ring.timeline = &frame->timeline;
 	frame->ring.vaddr = frame->cs;
 	frame->ring.size = sizeof(frame->cs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 7e056114344e..0dde7e04b102 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -69,8 +69,6 @@ struct intel_ring {
 	void *vaddr;
 
 	struct intel_timeline *timeline;
-	struct list_head request_list;
-	struct list_head active_link;
 
 	/*
 	 * As we have two types of rings, one global to the engine used
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index 8cca6b22b386..46d24d9d62ac 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -14,7 +14,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 	gt->i915 = i915;
 	gt->uncore = &i915->uncore;
 
-	INIT_LIST_HEAD(&gt->active_rings);
 	INIT_LIST_HEAD(&gt->closed_vma);
 
 	spin_lock_init(&gt->closed_lock);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index cfd41e6c54e1..f43ea830b1e8 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -34,8 +34,6 @@ struct intel_gt {
 		struct list_head hwsp_free_list;
 	} timelines;
 
-	struct list_head active_rings;
-
 	struct intel_wakeref wakeref;
 
 	struct list_head closed_vma;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 18b94fc952d8..2965c50f29c3 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1513,6 +1513,7 @@ static void execlists_context_unpin(struct intel_context *ce)
 		  ce->engine->name, ce->ring->timeline->fence_context);
 	i915_gem_context_unpin_hw_id(ce->gem_context);
 	i915_gem_object_unpin_map(ce->state->obj);
+	intel_ring_reset(ce->ring, ce->ring->tail);
 }
 
 static void
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index 81f9b0422e6a..b771170eb56a 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1227,7 +1227,7 @@ void intel_ring_unpin(struct intel_ring *ring)
 	GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
 
 	/* Discard any unused bytes beyond that submitted to hw. */
-	intel_ring_reset(ring, ring->tail);
+	intel_ring_reset(ring, ring->emit);
 
 	GEM_BUG_ON(!ring->vma);
 	i915_vma_unset_ggtt_write(ring->vma);
@@ -1293,7 +1293,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ring->ref);
-	INIT_LIST_HEAD(&ring->request_list);
 	ring->timeline = intel_timeline_get(timeline);
 
 	ring->size = size;
@@ -1817,21 +1816,25 @@ static int ring_request_alloc(struct i915_request *request)
 
 static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 {
+	struct intel_timeline *tl = ring->timeline;
 	struct i915_request *target;
 	long timeout;
 
 	if (intel_ring_update_space(ring) >= bytes)
 		return 0;
 
-	GEM_BUG_ON(list_empty(&ring->request_list));
-	list_for_each_entry(target, &ring->request_list, ring_link) {
+	GEM_BUG_ON(list_empty(&tl->requests));
+	list_for_each_entry(target, &tl->requests, link) {
+		if (target->ring != ring)
+			continue;
+
 		/* Would completion of this request free enough space? */
 		if (bytes <= __intel_ring_space(target->postfix,
 						ring->emit, ring->size))
 			break;
 	}
 
-	if (WARN_ON(&target->ring_link == &ring->request_list))
+	if (GEM_WARN_ON(&target->link == &tl->requests))
 		return -ENOSPC;
 
 	timeout = i915_request_wait(target,
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index a48b36d31e65..5bcb461b8372 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -68,7 +68,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	ring->base.timeline = &ring->timeline;
 	atomic_set(&ring->base.pin_count, 1);
 
-	INIT_LIST_HEAD(&ring->base.request_list);
 	intel_ring_update_space(&ring->base);
 
 	return &ring->base;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5b1c638d3754..3f33a72085f6 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -180,40 +180,6 @@ i915_request_remove_from_client(struct i915_request *request)
 	spin_unlock(&file_priv->mm.lock);
 }
 
-static void advance_ring(struct i915_request *request)
-{
-	struct intel_ring *ring = request->ring;
-	unsigned int tail;
-
-	/*
-	 * We know the GPU must have read the request to have
-	 * sent us the seqno + interrupt, so use the position
-	 * of tail of the request to update the last known position
-	 * of the GPU head.
-	 *
-	 * Note this requires that we are always called in request
-	 * completion order.
-	 */
-	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
-	if (list_is_last(&request->ring_link, &ring->request_list)) {
-		/*
-		 * We may race here with execlists resubmitting this request
-		 * as we retire it. The resubmission will move the ring->tail
-		 * forwards (to request->wa_tail). We either read the
-		 * current value that was written to hw, or the value that
-		 * is just about to be. Either works, if we miss the last two
-		 * noops - they are safe to be replayed on a reset.
-		 */
-		tail = READ_ONCE(request->tail);
-		list_del(&ring->active_link);
-	} else {
-		tail = request->postfix;
-	}
-	list_del_init(&request->ring_link);
-
-	ring->head = tail;
-}
-
 static void free_capture_list(struct i915_request *request)
 {
 	struct i915_capture_list *capture;
@@ -231,7 +197,7 @@ static bool i915_request_retire(struct i915_request *rq)
 {
 	struct i915_active_request *active, *next;
 
-	lockdep_assert_held(&rq->i915->drm.struct_mutex);
+	lockdep_assert_held(&rq->timeline->mutex);
 	if (!i915_request_completed(rq))
 		return false;
 
@@ -243,7 +209,17 @@ static bool i915_request_retire(struct i915_request *rq)
 	GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
 	trace_i915_request_retire(rq);
 
-	advance_ring(rq);
+	/*
+	 * We know the GPU must have read the request to have
+	 * sent us the seqno + interrupt, so use the position
+	 * of tail of the request to update the last known position
+	 * of the GPU head.
+	 *
+	 * Note this requires that we are always called in request
+	 * completion order.
+	 */
+	GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests));
+	rq->ring->head = rq->postfix;
 
 	/*
 	 * Walk through the active list, calling retire on each. This allows
@@ -320,7 +296,7 @@ static bool i915_request_retire(struct i915_request *rq)
 
 void i915_request_retire_upto(struct i915_request *rq)
 {
-	struct intel_ring *ring = rq->ring;
+	struct intel_timeline * const tl = rq->timeline;
 	struct i915_request *tmp;
 
 	GEM_TRACE("%s fence %llx:%lld, current %d\n",
@@ -328,15 +304,11 @@ void i915_request_retire_upto(struct i915_request *rq)
 		  rq->fence.context, rq->fence.seqno,
 		  hwsp_seqno(rq));
 
-	lockdep_assert_held(&rq->i915->drm.struct_mutex);
+	lockdep_assert_held(&tl->mutex);
 	assert_request_complete(rq);
 
-	if (list_empty(&rq->ring_link))
-		return;
-
 	do {
-		tmp = list_first_entry(&ring->request_list,
-				       typeof(*tmp), ring_link);
+		tmp = list_first_entry(&tl->requests, typeof(*tmp), link);
 	} while (i915_request_retire(tmp) && tmp != rq);
 }
 
@@ -563,29 +535,28 @@ semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
 
-static void ring_retire_requests(struct intel_ring *ring)
+static void retire_requests(struct intel_timeline *tl)
 {
 	struct i915_request *rq, *rn;
 
-	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link)
+	list_for_each_entry_safe(rq, rn, &tl->requests, link)
 		if (!i915_request_retire(rq))
 			break;
 }
 
 static noinline struct i915_request *
-request_alloc_slow(struct intel_context *ce, gfp_t gfp)
+request_alloc_slow(struct intel_timeline *tl, gfp_t gfp)
 {
-	struct intel_ring *ring = ce->ring;
 	struct i915_request *rq;
 
-	if (list_empty(&ring->request_list))
+	if (list_empty(&tl->requests))
 		goto out;
 
 	if (!gfpflags_allow_blocking(gfp))
 		goto out;
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
-	rq = list_first_entry(&ring->request_list, typeof(*rq), ring_link);
+	rq = list_first_entry(&tl->requests, typeof(*rq), link);
 	i915_request_retire(rq);
 
 	rq = kmem_cache_alloc(global.slab_requests,
@@ -594,11 +565,11 @@ request_alloc_slow(struct intel_context *ce, gfp_t gfp)
 		return rq;
 
 	/* Ratelimit ourselves to prevent oom from malicious clients */
-	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
+	rq = list_last_entry(&tl->requests, typeof(*rq), link);
 	cond_synchronize_rcu(rq->rcustate);
 
 	/* Retire our old requests in the hope that we free some */
-	ring_retire_requests(ring);
+	retire_requests(tl);
 
 out:
 	return kmem_cache_alloc(global.slab_requests, gfp);
@@ -649,7 +620,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
 	rq = kmem_cache_alloc(global.slab_requests,
 			      gfp | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
 	if (unlikely(!rq)) {
-		rq = request_alloc_slow(ce, gfp);
+		rq = request_alloc_slow(tl, gfp);
 		if (!rq) {
 			ret = -ENOMEM;
 			goto err_unreserve;
@@ -741,15 +712,15 @@ struct i915_request *
 i915_request_create(struct intel_context *ce)
 {
 	struct i915_request *rq;
-	int err;
+	struct intel_timeline *tl;
 
-	err = intel_context_timeline_lock(ce);
-	if (err)
-		return ERR_PTR(err);
+	tl = intel_context_timeline_lock(ce);
+	if (IS_ERR(tl))
+		return ERR_CAST(tl);
 
 	/* Move our oldest request to the slab-cache (if not in use!) */
-	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
-	if (!list_is_last(&rq->ring_link, &ce->ring->request_list))
+	rq = list_first_entry(&tl->requests, typeof(*rq), link);
+	if (!list_is_last(&rq->link, &tl->requests))
 		i915_request_retire(rq);
 
 	intel_context_enter(ce);
@@ -759,22 +730,22 @@ i915_request_create(struct intel_context *ce)
 		goto err_unlock;
 
 	/* Check that we do not interrupt ourselves with a new request */
-	rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+	rq->cookie = lockdep_pin_lock(&tl->mutex);
 
 	return rq;
 
 err_unlock:
-	intel_context_timeline_unlock(ce);
+	intel_context_timeline_unlock(tl);
 	return rq;
 }
 
 static int
 i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 {
-	if (list_is_first(&signal->ring_link, &signal->ring->request_list))
+	if (list_is_first(&signal->link, &signal->ring->timeline->requests))
 		return 0;
 
-	signal = list_prev_entry(signal, ring_link);
+	signal = list_prev_entry(signal, link);
 	if (intel_timeline_sync_is_later(rq->timeline, &signal->fence))
 		return 0;
 
@@ -1167,6 +1138,7 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
 	 */
 	GEM_BUG_ON(rq->reserved_space > ring->space);
 	rq->reserved_space = 0;
+	rq->emitted_jiffies = jiffies;
 
 	/*
 	 * Record the position of the start of the breadcrumb so that
@@ -1180,11 +1152,6 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
 
 	prev = __i915_request_add_to_timeline(rq);
 
-	list_add_tail(&rq->ring_link, &ring->request_list);
-	if (list_is_first(&rq->ring_link, &ring->request_list))
-		list_add(&ring->active_link, &rq->i915->gt.active_rings);
-	rq->emitted_jiffies = jiffies;
-
 	/*
 	 * Let the backend know a new request has arrived that may need
 	 * to adjust the existing execution schedule due to a high priority
@@ -1237,10 +1204,11 @@ struct i915_request *__i915_request_commit(struct i915_request *rq)
 
 void i915_request_add(struct i915_request *rq)
 {
+	struct intel_timeline * const tl = rq->timeline;
 	struct i915_request *prev;
 
-	lockdep_assert_held(&rq->timeline->mutex);
-	lockdep_unpin_lock(&rq->timeline->mutex, rq->cookie);
+	lockdep_assert_held(&tl->mutex);
+	lockdep_unpin_lock(&tl->mutex, rq->cookie);
 
 	trace_i915_request_add(rq);
 
@@ -1263,10 +1231,10 @@ void i915_request_add(struct i915_request *rq)
 	 * work on behalf of others -- but instead we should benefit from
 	 * improved resource management. (Well, that's the theory at least.)
 	 */
-	if (prev && i915_request_completed(prev))
+	if (prev && i915_request_completed(prev) && prev->timeline == tl)
 		i915_request_retire_upto(prev);
 
-	mutex_unlock(&rq->timeline->mutex);
+	mutex_unlock(&tl->mutex);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
@@ -1487,18 +1455,43 @@ long i915_request_wait(struct i915_request *rq,
 
 bool i915_retire_requests(struct drm_i915_private *i915)
 {
-	struct intel_ring *ring, *tmp;
+	struct intel_gt_timelines *timelines = &i915->gt.timelines;
+	struct intel_timeline *tl, *tn;
+	LIST_HEAD(free);
+
+	spin_lock(&timelines->lock);
+	list_for_each_entry_safe(tl, tn, &timelines->active_list, link) {
+		if (!mutex_trylock(&tl->mutex))
+			continue;
+
+		intel_timeline_get(tl);
+		GEM_BUG_ON(!tl->active_count);
+		tl->active_count++; /* pin the list element */
+		spin_unlock(&timelines->lock);
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
+		retire_requests(tl);
 
-	list_for_each_entry_safe(ring, tmp,
-				 &i915->gt.active_rings, active_link) {
-		intel_ring_get(ring); /* last rq holds reference! */
-		ring_retire_requests(ring);
-		intel_ring_put(ring);
+		spin_lock(&timelines->lock);
+
+		/* Restart iteration after dropping lock */
+		list_safe_reset_next(tl, tn, link);
+		if (!--tl->active_count)
+			list_del(&tl->link);
+
+		mutex_unlock(&tl->mutex);
+
+		/* Defer the final release to after the spinlock */
+		if (refcount_dec_and_test(&tl->kref.refcount)) {
+			GEM_BUG_ON(tl->active_count);
+			list_add(&tl->link, &free);
+		}
 	}
+	spin_unlock(&timelines->lock);
+
+	list_for_each_entry_safe(tl, tn, &free, link)
+		__intel_timeline_free(&tl->kref);
 
-	return !list_empty(&i915->gt.active_rings);
+	return !list_empty(&timelines->active_list);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index a41791637076..75453474dddb 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -221,9 +221,6 @@ struct i915_request {
 	/** timeline->request entry for this request */
 	struct list_head link;
 
-	/** ring->request_list entry for this request */
-	struct list_head ring_link;
-
 	struct drm_i915_file_private *file_priv;
 	/** file_priv list entry for this request */
 	struct list_head client_link;
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* [PATCH 20/20] drm/i915: Replace struct_mutex for batch pool serialisation
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (17 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 19/20] drm/i915: Protect request retirement with timeline->mutex Chris Wilson
@ 2019-06-25 13:01 ` Chris Wilson
  2019-06-25 14:09 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Patchwork
                   ` (9 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 13:01 UTC (permalink / raw)
  To: intel-gfx; +Cc: Matthew Auld

Switch to tracking activity via i915_active on individual nodes, only
keeping a list of retired objects in the cache, and reaping the cache
when the engine itself idles.

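For reviewers, a minimal sketch of the consumer-side flow the new pool API
expects (names are taken from the diff below; "engine", "rq" and "size" are
placeholders, and the object-mapping details are elided, so treat this as an
illustration rather than a drop-in):

	struct intel_engine_pool_node *pool;
	int err = 0;

	/* Grab (or create) a buffer of at least size bytes from the engine's pool */
	pool = intel_engine_pool_get(&engine->pool, size);
	if (IS_ERR(pool))
		return PTR_ERR(pool);

	/* Tie the node to rq; it stays out of the cache until rq retires */
	err = intel_engine_pool_mark_active(pool, rq);
	if (err)
		goto out;

	/* ... emit commands for rq that read/write pool->obj ... */

out:
	/* Drop our acquire; on retirement the node returns to the cache */
	intel_engine_pool_put(pool);
	return err;
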
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/Makefile                 |   2 +-
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    |  58 +++---
 drivers/gpu/drm/i915/gem/i915_gem_object.c    |   1 -
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   1 -
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |   4 +-
 drivers/gpu/drm/i915/gt/intel_engine.h        |   1 -
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  11 +-
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |   2 +
 drivers/gpu/drm/i915/gt/intel_engine_pool.c   | 166 ++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_engine_pool.h   |  34 ++++
 .../gpu/drm/i915/gt/intel_engine_pool_types.h |  29 +++
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |   6 +-
 drivers/gpu/drm/i915/gt/mock_engine.c         |   3 +
 drivers/gpu/drm/i915/i915_debugfs.c           |  68 -------
 drivers/gpu/drm/i915/i915_gem_batch_pool.c    | 132 --------------
 drivers/gpu/drm/i915/i915_gem_batch_pool.h    |  26 ---
 16 files changed, 279 insertions(+), 265 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool.h
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
 delete mode 100644 drivers/gpu/drm/i915/i915_gem_batch_pool.c
 delete mode 100644 drivers/gpu/drm/i915/i915_gem_batch_pool.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 84ac0fd1b8d0..ad8b9f1887a0 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -72,6 +72,7 @@ obj-y += gt/
 gt-y += \
 	gt/intel_breadcrumbs.o \
 	gt/intel_context.o \
+	gt/intel_engine_pool.o \
 	gt/intel_engine_cs.o \
 	gt/intel_engine_pm.o \
 	gt/intel_gt.o \
@@ -118,7 +119,6 @@ i915-y += \
 	  $(gem-y) \
 	  i915_active.o \
 	  i915_cmd_parser.o \
-	  i915_gem_batch_pool.o \
 	  i915_gem_evict.o \
 	  i915_gem_fence_reg.o \
 	  i915_gem_gtt.o \
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 80c9c57a302f..0ea2d49bc8b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -16,6 +16,7 @@
 
 #include "gem/i915_gem_ioctls.h"
 #include "gt/intel_context.h"
+#include "gt/intel_engine_pool.h"
 #include "gt/intel_gt.h"
 #include "gt/intel_gt_pm.h"
 
@@ -1145,25 +1146,26 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 			     unsigned int len)
 {
 	struct reloc_cache *cache = &eb->reloc_cache;
-	struct drm_i915_gem_object *obj;
+	struct intel_engine_pool_node *pool;
 	struct i915_request *rq;
 	struct i915_vma *batch;
 	u32 *cmd;
 	int err;
 
-	obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE);
+	if (IS_ERR(pool))
+		return PTR_ERR(pool);
 
-	cmd = i915_gem_object_pin_map(obj,
+	cmd = i915_gem_object_pin_map(pool->obj,
 				      cache->has_llc ?
 				      I915_MAP_FORCE_WB :
 				      I915_MAP_FORCE_WC);
-	i915_gem_object_unpin_pages(obj);
-	if (IS_ERR(cmd))
-		return PTR_ERR(cmd);
+	if (IS_ERR(cmd)) {
+		err = PTR_ERR(cmd);
+		goto out_pool;
+	}
 
-	batch = i915_vma_instance(obj, vma->vm, NULL);
+	batch = i915_vma_instance(pool->obj, vma->vm, NULL);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		goto err_unmap;
@@ -1179,6 +1181,10 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 		goto err_unpin;
 	}
 
+	err = intel_engine_pool_mark_active(pool, rq);
+	if (err)
+		goto err_request;
+
 	err = reloc_move_to_gpu(rq, vma);
 	if (err)
 		goto err_request;
@@ -1204,7 +1210,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	cache->rq_size = 0;
 
 	/* Return with batch mapping (cmd) still pinned */
-	return 0;
+	goto out_pool;
 
 skip_request:
 	i915_request_skip(rq, err);
@@ -1213,7 +1219,9 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 err_unpin:
 	i915_vma_unpin(batch);
 err_unmap:
-	i915_gem_object_unpin_map(obj);
+	i915_gem_object_unpin_map(pool->obj);
+out_pool:
+	intel_engine_pool_put(pool);
 	return err;
 }
 
@@ -1957,18 +1965,17 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
 
 static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 {
-	struct drm_i915_gem_object *shadow_batch_obj;
+	struct intel_engine_pool_node *pool;
 	struct i915_vma *vma;
 	int err;
 
-	shadow_batch_obj = i915_gem_batch_pool_get(&eb->engine->batch_pool,
-						   PAGE_ALIGN(eb->batch_len));
-	if (IS_ERR(shadow_batch_obj))
-		return ERR_CAST(shadow_batch_obj);
+	pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len);
+	if (IS_ERR(pool))
+		return ERR_CAST(pool);
 
 	err = intel_engine_cmd_parser(eb->engine,
 				      eb->batch->obj,
-				      shadow_batch_obj,
+				      pool->obj,
 				      eb->batch_start_offset,
 				      eb->batch_len,
 				      is_master);
@@ -1977,12 +1984,12 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 			vma = NULL;
 		else
 			vma = ERR_PTR(err);
-		goto out;
+		goto err;
 	}
 
-	vma = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
+	vma = i915_gem_object_ggtt_pin(pool->obj, NULL, 0, 0, 0);
 	if (IS_ERR(vma))
-		goto out;
+		goto err;
 
 	eb->vma[eb->buffer_count] = i915_vma_get(vma);
 	eb->flags[eb->buffer_count] =
@@ -1990,8 +1997,11 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb, bool is_master)
 	vma->exec_flags = &eb->flags[eb->buffer_count];
 	eb->buffer_count++;
 
-out:
-	i915_gem_object_unpin_pages(shadow_batch_obj);
+	vma->private = pool;
+	return vma;
+
+err:
+	intel_engine_pool_put(pool);
 	return vma;
 }
 
@@ -2615,6 +2625,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * to explicitly hold another reference here.
 	 */
 	eb.request->batch = eb.batch;
+	if (eb.batch->private)
+		intel_engine_pool_mark_active(eb.batch->private, eb.request);
 
 	trace_i915_request_queue(eb.request, eb.batch_flags);
 	err = eb_submit(&eb);
@@ -2639,6 +2651,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_batch_unpin:
 	if (eb.batch_flags & I915_DISPATCH_SECURE)
 		i915_vma_unpin(eb.batch);
+	if (eb.batch->private)
+		intel_engine_pool_put(eb.batch->private);
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 43194fbcbc2e..3260377ac021 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -64,7 +64,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&obj->vma.list);
 
 	INIT_LIST_HEAD(&obj->lut_list);
-	INIT_LIST_HEAD(&obj->batch_pool_link);
 
 	init_rcu_head(&obj->rcu);
 
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 34b51fad02de..d474c6ac4100 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -114,7 +114,6 @@ struct drm_i915_gem_object {
 	unsigned int userfault_count;
 	struct list_head userfault_link;
 
-	struct list_head batch_pool_link;
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
 	/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 93d188526457..bf085b0cb7c6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -33,10 +33,8 @@ static void i915_gem_park(struct drm_i915_private *i915)
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	for_each_engine(engine, i915, id) {
+	for_each_engine(engine, i915, id)
 		call_idle_barriers(engine); /* cleanup after wedging */
-		i915_gem_batch_pool_fini(&engine->batch_pool);
-	}
 
 	i915_vma_parked(i915);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 557b08b13feb..6375d6111b15 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -9,7 +9,6 @@
 #include <linux/random.h>
 #include <linux/seqlock.h>
 
-#include "i915_gem_batch_pool.h"
 #include "i915_pmu.h"
 #include "i915_reg.h"
 #include "i915_request.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index b27fc555fe09..49439cf2fd1f 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -32,6 +32,7 @@
 
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
 #include "intel_context.h"
 #include "intel_lrc.h"
 #include "intel_reset.h"
@@ -498,11 +499,6 @@ int intel_engines_init(struct drm_i915_private *i915)
 	return err;
 }
 
-static void intel_engine_init_batch_pool(struct intel_engine_cs *engine)
-{
-	i915_gem_batch_pool_init(&engine->batch_pool, engine);
-}
-
 void intel_engine_init_execlists(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -628,10 +624,11 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_breadcrumbs(engine);
 	intel_engine_init_execlists(engine);
 	intel_engine_init_hangcheck(engine);
-	intel_engine_init_batch_pool(engine);
 	intel_engine_init_cmd_parser(engine);
 	intel_engine_init__pm(engine);
 
+	intel_engine_pool_init(&engine->pool);
+
 	/* Use the whole device by default */
 	engine->sseu =
 		intel_sseu_from_device_info(&RUNTIME_INFO(engine->i915)->sseu);
@@ -880,9 +877,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 
 	cleanup_status_page(engine);
 
+	intel_engine_pool_fini(&engine->pool);
 	intel_engine_fini_breadcrumbs(engine);
 	intel_engine_cleanup_cmd_parser(engine);
-	i915_gem_batch_pool_fini(&engine->batch_pool);
 
 	if (engine->default_state)
 		i915_gem_object_put(engine->default_state);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 9751a02d86bc..fe9f9eaffe88 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 
 #include "intel_engine.h"
+#include "intel_engine_pool.h"
 #include "intel_engine_pm.h"
 #include "intel_gt_pm.h"
 
@@ -116,6 +117,7 @@ static int __engine_park(struct intel_wakeref *wf)
 	GEM_TRACE("%s\n", engine->name);
 
 	intel_engine_disarm_breadcrumbs(engine);
+	intel_engine_pool_park(&engine->pool);
 
 	/* Must be reset upon idling, or we may miss the busy wakeup. */
 	GEM_BUG_ON(engine->execlists.queue_priority_hint != INT_MIN);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
new file mode 100644
index 000000000000..32688ca379ef
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c
@@ -0,0 +1,166 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#include "gem/i915_gem_object.h"
+
+#include "i915_drv.h"
+#include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
+
+static struct intel_engine_cs *to_engine(struct intel_engine_pool *pool)
+{
+	return container_of(pool, struct intel_engine_cs, pool);
+}
+
+static struct list_head *
+bucket_for_size(struct intel_engine_pool *pool, size_t sz)
+{
+	int n;
+
+	/*
+	 * Compute a power-of-two bucket, but throw everything greater than
+	 * 16KiB into the same bucket: i.e. the buckets hold objects of
+	 * (1 page, 2 pages, 4 pages, 8+ pages).
+	 */
+	n = fls(sz >> PAGE_SHIFT) - 1;
+	if (n >= ARRAY_SIZE(pool->cache_list))
+		n = ARRAY_SIZE(pool->cache_list) - 1;
+
+	return &pool->cache_list[n];
+}
+
+static void node_free(struct intel_engine_pool_node *node)
+{
+	i915_gem_object_put(node->obj);
+	i915_active_fini(&node->active);
+	kfree(node);
+}
+
+static int pool_active(struct i915_active *ref)
+{
+	struct intel_engine_pool_node *node =
+		container_of(ref, typeof(*node), active);
+	struct reservation_object *resv = node->obj->base.resv;
+
+	if (reservation_object_trylock(resv)) {
+		reservation_object_add_excl_fence(resv, NULL);
+		reservation_object_unlock(resv);
+	}
+
+	return i915_gem_object_pin_pages(node->obj);
+}
+
+static void pool_retire(struct i915_active *ref)
+{
+	struct intel_engine_pool_node *node =
+		container_of(ref, typeof(*node), active);
+	struct intel_engine_pool *pool = node->pool;
+	struct list_head *list = bucket_for_size(pool, node->obj->base.size);
+	unsigned long flags;
+
+	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+	i915_gem_object_unpin_pages(node->obj);
+
+	spin_lock_irqsave(&pool->lock, flags);
+	list_add(&node->link, list);
+	spin_unlock_irqrestore(&pool->lock, flags);
+}
+
+static struct intel_engine_pool_node *
+node_create(struct intel_engine_pool *pool, size_t sz)
+{
+	struct intel_engine_cs *engine = to_engine(pool);
+	struct intel_engine_pool_node *node;
+	struct drm_i915_gem_object *obj;
+
+	node = kmalloc(sizeof(*node),
+		       GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	if (!node)
+		return ERR_PTR(-ENOMEM);
+
+	node->pool = pool;
+	i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+
+	obj = i915_gem_object_create_internal(engine->i915, sz);
+	if (IS_ERR(obj)) {
+		i915_active_fini(&node->active);
+		kfree(node);
+		return ERR_CAST(obj);
+	}
+
+	node->obj = obj;
+	return node;
+}
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size)
+{
+	struct intel_engine_pool_node *node;
+	struct list_head *list;
+	unsigned long flags;
+	int ret;
+
+	GEM_BUG_ON(!intel_engine_pm_is_awake(to_engine(pool)));
+
+	size = PAGE_ALIGN(size);
+	list = bucket_for_size(pool, size);
+
+	spin_lock_irqsave(&pool->lock, flags);
+	list_for_each_entry(node, list, link) {
+		if (node->obj->base.size < size)
+			continue;
+		list_del(&node->link);
+		break;
+	}
+	spin_unlock_irqrestore(&pool->lock, flags);
+
+	if (&node->link == list) {
+		node = node_create(pool, size);
+		if (IS_ERR(node))
+			return node;
+	}
+
+	ret = i915_active_acquire(&node->active);
+	if (ret) {
+		node_free(node);
+		return ERR_PTR(ret);
+	}
+
+	return node;
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool)
+{
+	int n;
+
+	spin_lock_init(&pool->lock);
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+		INIT_LIST_HEAD(&pool->cache_list[n]);
+}
+
+void intel_engine_pool_park(struct intel_engine_pool *pool)
+{
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
+		struct list_head *list = &pool->cache_list[n];
+		struct intel_engine_pool_node *node, *nn;
+
+		list_for_each_entry_safe(node, nn, list, link)
+			node_free(node);
+
+		INIT_LIST_HEAD(list);
+	}
+}
+
+void intel_engine_pool_fini(struct intel_engine_pool *pool)
+{
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
+		GEM_BUG_ON(!list_empty(&pool->cache_list[n]));
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
new file mode 100644
index 000000000000..f7a0a660c1c9
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h
@@ -0,0 +1,34 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_H
+#define INTEL_ENGINE_POOL_H
+
+#include "intel_engine_pool_types.h"
+#include "i915_active.h"
+#include "i915_request.h"
+
+struct intel_engine_pool_node *
+intel_engine_pool_get(struct intel_engine_pool *pool, size_t size);
+
+static inline int
+intel_engine_pool_mark_active(struct intel_engine_pool_node *node,
+			      struct i915_request *rq)
+{
+	return i915_active_ref(&node->active, rq->fence.context, rq);
+}
+
+static inline void
+intel_engine_pool_put(struct intel_engine_pool_node *node)
+{
+	i915_active_release(&node->active);
+}
+
+void intel_engine_pool_init(struct intel_engine_pool *pool);
+void intel_engine_pool_park(struct intel_engine_pool *pool);
+void intel_engine_pool_fini(struct intel_engine_pool *pool);
+
+#endif /* INTEL_ENGINE_POOL_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
new file mode 100644
index 000000000000..e31ee361b76f
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pool_types.h
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2014-2018 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_POOL_TYPES_H
+#define INTEL_ENGINE_POOL_TYPES_H
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "i915_active_types.h"
+
+struct drm_i915_gem_object;
+
+struct intel_engine_pool {
+	spinlock_t lock;
+	struct list_head cache_list[4];
+};
+
+struct intel_engine_pool_node {
+	struct i915_active active;
+	struct drm_i915_gem_object *obj;
+	struct list_head link;
+	struct intel_engine_pool *pool;
+};
+
+#endif /* INTEL_ENGINE_POOL_TYPES_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 0dde7e04b102..6d2f3e11da1c 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -16,12 +16,12 @@
 #include <linux/types.h>
 
 #include "i915_gem.h"
-#include "i915_gem_batch_pool.h"
 #include "i915_pmu.h"
 #include "i915_priolist_types.h"
 #include "i915_selftest.h"
-#include "gt/intel_timeline_types.h"
+#include "intel_engine_pool_types.h"
 #include "intel_sseu.h"
+#include "intel_timeline_types.h"
 #include "intel_wakeref.h"
 #include "intel_workarounds_types.h"
 
@@ -353,7 +353,7 @@ struct intel_engine_cs {
 	 * when the command parser is enabled. Prevents the client from
 	 * modifying the batch contents after software parsing.
 	 */
-	struct i915_gem_batch_pool batch_pool;
+	struct intel_engine_pool pool;
 
 	struct intel_hw_status_page status_page;
 	struct i915_ctx_workarounds wa_ctx;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 5bcb461b8372..b94d57bf2c48 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -27,6 +27,7 @@
 #include "i915_drv.h"
 #include "intel_context.h"
 #include "intel_engine_pm.h"
+#include "intel_engine_pool.h"
 
 #include "mock_engine.h"
 #include "selftests/mock_request.h"
@@ -291,6 +292,8 @@ int mock_engine_init(struct intel_engine_cs *engine)
 	intel_engine_init_execlists(engine);
 	intel_engine_init__pm(engine);
 
+	intel_engine_pool_init(&engine->pool);
+
 	engine->kernel_context =
 		i915_gem_context_get_engine(i915->kernel_context, engine->id);
 	if (IS_ERR(engine->kernel_context))
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index eeecdad0e3ca..253e86868061 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -295,27 +295,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 			   stats.closed); \
 } while (0)
 
-static void print_batch_pool_stats(struct seq_file *m,
-				   struct drm_i915_private *dev_priv)
-{
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	struct file_stats stats = {};
-	enum intel_engine_id id;
-	int j;
-
-	for_each_engine(engine, dev_priv, id) {
-		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link)
-				per_file_stats(0, obj, &stats);
-		}
-	}
-
-	print_file_stats(m, "[k]batch pool", stats);
-}
-
 static void print_context_stats(struct seq_file *m,
 				struct drm_i915_private *i915)
 {
@@ -373,58 +352,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	print_batch_pool_stats(m, i915);
 	print_context_stats(m, i915);
 	mutex_unlock(&i915->drm.struct_mutex);
 
 	return 0;
 }
 
-static int i915_gem_batch_pool_info(struct seq_file *m, void *data)
-{
-	struct drm_i915_private *dev_priv = node_to_i915(m->private);
-	struct drm_device *dev = &dev_priv->drm;
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	int total = 0;
-	int ret, j;
-
-	ret = mutex_lock_interruptible(&dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	for_each_engine(engine, dev_priv, id) {
-		for (j = 0; j < ARRAY_SIZE(engine->batch_pool.cache_list); j++) {
-			int count;
-
-			count = 0;
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link)
-				count++;
-			seq_printf(m, "%s cache[%d]: %d objects\n",
-				   engine->name, j, count);
-
-			list_for_each_entry(obj,
-					    &engine->batch_pool.cache_list[j],
-					    batch_pool_link) {
-				seq_puts(m, "   ");
-				describe_obj(m, obj);
-				seq_putc(m, '\n');
-			}
-
-			total += count;
-		}
-	}
-
-	seq_printf(m, "total: %d\n", total);
-
-	mutex_unlock(&dev->struct_mutex);
-
-	return 0;
-}
-
 static void gen8_display_interrupt_info(struct seq_file *m)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -4364,7 +4297,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_objects", i915_gem_object_info, 0},
 	{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
 	{"i915_gem_interrupt", i915_interrupt_info, 0},
-	{"i915_gem_batch_pool", i915_gem_batch_pool_info, 0},
 	{"i915_guc_info", i915_guc_info, 0},
 	{"i915_guc_load_status", i915_guc_load_status_info, 0},
 	{"i915_guc_log_dump", i915_guc_log_dump, 0},
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
deleted file mode 100644
index b17f23991253..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ /dev/null
@@ -1,132 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#include "i915_gem_batch_pool.h"
-#include "i915_drv.h"
-
-/**
- * DOC: batch pool
- *
- * In order to submit batch buffers as 'secure', the software command parser
- * must ensure that a batch buffer cannot be modified after parsing. It does
- * this by copying the user provided batch buffer contents to a kernel owned
- * buffer from which the hardware will actually execute, and by carefully
- * managing the address space bindings for such buffers.
- *
- * The batch pool framework provides a mechanism for the driver to manage a
- * set of scratch buffers to use for this purpose. The framework can be
- * extended to support other uses cases should they arise.
- */
-
-/**
- * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @pool: the batch buffer pool
- * @engine: the associated request submission engine
- */
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
-			      struct intel_engine_cs *engine)
-{
-	int n;
-
-	pool->engine = engine;
-
-	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
-		INIT_LIST_HEAD(&pool->cache_list[n]);
-}
-
-/**
- * i915_gem_batch_pool_fini() - clean up a batch buffer pool
- * @pool: the pool to clean up
- *
- * Note: Callers must hold the struct_mutex.
- */
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
-{
-	int n;
-
-	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
-	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
-		struct drm_i915_gem_object *obj, *next;
-
-		list_for_each_entry_safe(obj, next,
-					 &pool->cache_list[n],
-					 batch_pool_link)
-			i915_gem_object_put(obj);
-
-		INIT_LIST_HEAD(&pool->cache_list[n]);
-	}
-}
-
-/**
- * i915_gem_batch_pool_get() - allocate a buffer from the pool
- * @pool: the batch buffer pool
- * @size: the minimum desired size of the returned buffer
- *
- * Returns an inactive buffer from @pool with at least @size bytes,
- * with the pages pinned. The caller must i915_gem_object_unpin_pages()
- * on the returned object.
- *
- * Note: Callers must hold the struct_mutex
- *
- * Return: the buffer object or an error pointer
- */
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
-			size_t size)
-{
-	struct drm_i915_gem_object *obj;
-	struct list_head *list;
-	int n, ret;
-
-	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
-
-	/* Compute a power-of-two bucket, but throw everything greater than
-	 * 16KiB into the same bucket: i.e. the the buckets hold objects of
-	 * (1 page, 2 pages, 4 pages, 8+ pages).
-	 */
-	n = fls(size >> PAGE_SHIFT) - 1;
-	if (n >= ARRAY_SIZE(pool->cache_list))
-		n = ARRAY_SIZE(pool->cache_list) - 1;
-	list = &pool->cache_list[n];
-
-	list_for_each_entry(obj, list, batch_pool_link) {
-		struct reservation_object *resv = obj->base.resv;
-
-		/* The batches are strictly LRU ordered */
-		if (!reservation_object_test_signaled_rcu(resv, true))
-			break;
-
-		/*
-		 * The object is now idle, clear the array of shared
-		 * fences before we add a new request. Although, we
-		 * remain on the same engine, we may be on a different
-		 * timeline and so may continually grow the array,
-		 * trapping a reference to all the old fences, rather
-		 * than replace the existing fence.
-		 */
-		if (rcu_access_pointer(resv->fence)) {
-			reservation_object_lock(resv, NULL);
-			reservation_object_add_excl_fence(resv, NULL);
-			reservation_object_unlock(resv);
-		}
-
-		if (obj->base.size >= size)
-			goto found;
-	}
-
-	obj = i915_gem_object_create_internal(pool->engine->i915, size);
-	if (IS_ERR(obj))
-		return obj;
-
-found:
-	ret = i915_gem_object_pin_pages(obj);
-	if (ret)
-		return ERR_PTR(ret);
-
-	list_move_tail(&obj->batch_pool_link, list);
-	return obj;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
deleted file mode 100644
index feeeeeaa54d8..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright © 2014-2018 Intel Corporation
- */
-
-#ifndef I915_GEM_BATCH_POOL_H
-#define I915_GEM_BATCH_POOL_H
-
-#include <linux/types.h>
-
-struct drm_i915_gem_object;
-struct intel_engine_cs;
-
-struct i915_gem_batch_pool {
-	struct intel_engine_cs *engine;
-	struct list_head cache_list[4];
-};
-
-void i915_gem_batch_pool_init(struct i915_gem_batch_pool *pool,
-			      struct intel_engine_cs *engine);
-void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
-struct drm_i915_gem_object *
-i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool, size_t size);
-
-#endif /* I915_GEM_BATCH_POOL_H */
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (18 preceding siblings ...)
  2019-06-25 13:01 ` [PATCH 20/20] drm/i915: Replace struct_mutex for batch pool serialisation Chris Wilson
@ 2019-06-25 14:09 ` Patchwork
  2019-06-25 14:38 ` ✗ Fi.CI.BAT: failure " Patchwork
                   ` (8 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Patchwork @ 2019-06-25 14:09 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative
URL   : https://patchwork.freedesktop.org/series/62706/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
a985c923cf12 drm/i915/execlists: Convert recursive defer_request() into iterative
c1cca4af3d7e drm/i915/gt: Pass intel_gt to pm routines
8c14a0a355dd drm/i915/selftests: Serialise nop reset with retirement
23b17962a3ed drm/i915/selftests: Drop manual request wakerefs around hangcheck
51401ad0e1f7 drm/i915/selftests: Fixup atomic reset checking
a1af1dba6619 drm/i915: Rename intel_wakeref_[is]_active
165e0397a15c drm/i915: Add a wakeref getter for iff the wakeref is already active
16f86bf810ec drm/i915: Only recover active engines
bbf1d5ececcb drm/i915: Lift intel_engines_resume() to callers
-:215: WARNING:AVOID_BUG: Avoid crashing the kernel - try using WARN_ON & recovery code rather than BUG() or BUG_ON()
#215: FILE: drivers/gpu/drm/i915/i915_gem.c:1200:
+	BUG_ON(!i915->kernel_context);

total: 0 errors, 1 warnings, 0 checks, 432 lines checked
90e1fc305efa drm/i915: Teach execbuffer to take the engine wakeref not GT
9cbbcf9478ab drm/i915/gt: Track timeline activeness in enter/exit
964a1acc5d18 drm/i915/gt: Convert timeline tracking to spinlock
25803be03449 drm/i915/gt: Guard timeline pinning with its own mutex
592645d9bbc0 drm/i915/selftests: Hold ref on request across waits
a2c24f42c719 drm/i915/gt: Always call kref_init for the timeline
2a1e3258bbf2 drm/i915/gt: Drop stale commentary for timeline density
94318c8a9993 drm/i915/gt: Add some debug tracing for context pinning
23a7536cb76e drm/i915: Include the breadcrumb when asserting request completion
6926fbf086a1 drm/i915: Protect request retirement with timeline->mutex
8a5698fc2d86 drm/i915: Replace struct_mutex for batch pool serialisation
-:305: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#305: 
new file mode 100644

-:310: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#310: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.c:1:
+/*

-:311: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#311: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.c:2:
+ * SPDX-License-Identifier: MIT

-:482: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#482: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.h:1:
+/*

-:483: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#483: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.h:2:
+ * SPDX-License-Identifier: MIT

-:522: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#522: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:1:
+/*

-:523: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#523: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:2:
+ * SPDX-License-Identifier: MIT

-:539: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#539: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:18:
+	spinlock_t lock;

total: 0 errors, 7 warnings, 1 checks, 595 lines checked

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* ✗ Fi.CI.BAT: failure for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (19 preceding siblings ...)
  2019-06-25 14:09 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Patchwork
@ 2019-06-25 14:38 ` Patchwork
  2019-06-25 15:10 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev2) Patchwork
                   ` (7 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Patchwork @ 2019-06-25 14:38 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative
URL   : https://patchwork.freedesktop.org/series/62706/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_6342 -> Patchwork_13415
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_13415 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_13415, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_13415:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_selftest@live_hangcheck:
    - fi-bwr-2160:        [PASS][1] -> [INCOMPLETE][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-bwr-2160/igt@i915_selftest@live_hangcheck.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-bwr-2160/igt@i915_selftest@live_hangcheck.html
    - fi-snb-2520m:       [PASS][3] -> [INCOMPLETE][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-snb-2520m/igt@i915_selftest@live_hangcheck.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-snb-2520m/igt@i915_selftest@live_hangcheck.html
    - fi-ilk-650:         [PASS][5] -> [INCOMPLETE][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-ilk-650/igt@i915_selftest@live_hangcheck.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-ilk-650/igt@i915_selftest@live_hangcheck.html
    - fi-blb-e6850:       [PASS][7] -> [INCOMPLETE][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-blb-e6850/igt@i915_selftest@live_hangcheck.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-blb-e6850/igt@i915_selftest@live_hangcheck.html
    - fi-hsw-4770:        [PASS][9] -> [INCOMPLETE][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-hsw-4770/igt@i915_selftest@live_hangcheck.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-hsw-4770/igt@i915_selftest@live_hangcheck.html
    - fi-ivb-3770:        [PASS][11] -> [INCOMPLETE][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-ivb-3770/igt@i915_selftest@live_hangcheck.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-ivb-3770/igt@i915_selftest@live_hangcheck.html
    - fi-hsw-4770r:       [PASS][13] -> [INCOMPLETE][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-hsw-4770r/igt@i915_selftest@live_hangcheck.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-hsw-4770r/igt@i915_selftest@live_hangcheck.html
    - fi-hsw-peppy:       [PASS][15] -> [INCOMPLETE][16]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-hsw-peppy/igt@i915_selftest@live_hangcheck.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-hsw-peppy/igt@i915_selftest@live_hangcheck.html

  * igt@i915_selftest@live_workarounds:
    - fi-bsw-n3050:       [PASS][17] -> [INCOMPLETE][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-bsw-n3050/igt@i915_selftest@live_workarounds.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-bsw-n3050/igt@i915_selftest@live_workarounds.html
    - fi-cfl-guc:         [PASS][19] -> [INCOMPLETE][20]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-cfl-guc/igt@i915_selftest@live_workarounds.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-cfl-guc/igt@i915_selftest@live_workarounds.html
    - fi-bsw-kefka:       [PASS][21] -> [INCOMPLETE][22]
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-bsw-kefka/igt@i915_selftest@live_workarounds.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-bsw-kefka/igt@i915_selftest@live_workarounds.html
    - fi-kbl-7500u:       [PASS][23] -> [INCOMPLETE][24]
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-kbl-7500u/igt@i915_selftest@live_workarounds.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-kbl-7500u/igt@i915_selftest@live_workarounds.html
    - fi-cfl-8109u:       [PASS][25] -> [INCOMPLETE][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-cfl-8109u/igt@i915_selftest@live_workarounds.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-cfl-8109u/igt@i915_selftest@live_workarounds.html
    - fi-kbl-guc:         [PASS][27] -> [INCOMPLETE][28]
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-kbl-guc/igt@i915_selftest@live_workarounds.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-kbl-guc/igt@i915_selftest@live_workarounds.html
    - fi-skl-6600u:       [PASS][29] -> [INCOMPLETE][30]
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-skl-6600u/igt@i915_selftest@live_workarounds.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-skl-6600u/igt@i915_selftest@live_workarounds.html
    - fi-bdw-5557u:       [PASS][31] -> [INCOMPLETE][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-bdw-5557u/igt@i915_selftest@live_workarounds.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-bdw-5557u/igt@i915_selftest@live_workarounds.html
    - fi-kbl-7567u:       [PASS][33] -> [INCOMPLETE][34]
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-kbl-7567u/igt@i915_selftest@live_workarounds.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-kbl-7567u/igt@i915_selftest@live_workarounds.html
    - fi-whl-u:           [PASS][35] -> [INCOMPLETE][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-whl-u/igt@i915_selftest@live_workarounds.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-whl-u/igt@i915_selftest@live_workarounds.html
    - fi-skl-iommu:       [PASS][37] -> [INCOMPLETE][38]
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-skl-iommu/igt@i915_selftest@live_workarounds.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-skl-iommu/igt@i915_selftest@live_workarounds.html
    - fi-skl-6770hq:      [PASS][39] -> [INCOMPLETE][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-skl-6770hq/igt@i915_selftest@live_workarounds.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-skl-6770hq/igt@i915_selftest@live_workarounds.html
    - fi-kbl-x1275:       [PASS][41] -> [INCOMPLETE][42]
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-kbl-x1275/igt@i915_selftest@live_workarounds.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-kbl-x1275/igt@i915_selftest@live_workarounds.html
    - fi-skl-6260u:       [PASS][43] -> [INCOMPLETE][44]
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-skl-6260u/igt@i915_selftest@live_workarounds.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-skl-6260u/igt@i915_selftest@live_workarounds.html
    - fi-kbl-r:           [PASS][45] -> [INCOMPLETE][46]
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-kbl-r/igt@i915_selftest@live_workarounds.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-kbl-r/igt@i915_selftest@live_workarounds.html
    - fi-kbl-8809g:       [PASS][47] -> [INCOMPLETE][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-kbl-8809g/igt@i915_selftest@live_workarounds.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-kbl-8809g/igt@i915_selftest@live_workarounds.html
    - fi-bdw-gvtdvm:      [PASS][49] -> [INCOMPLETE][50]
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-bdw-gvtdvm/igt@i915_selftest@live_workarounds.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-bdw-gvtdvm/igt@i915_selftest@live_workarounds.html
    - fi-skl-guc:         [PASS][51] -> [INCOMPLETE][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-skl-guc/igt@i915_selftest@live_workarounds.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-skl-guc/igt@i915_selftest@live_workarounds.html
    - fi-cfl-8700k:       [PASS][53] -> [INCOMPLETE][54]
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-cfl-8700k/igt@i915_selftest@live_workarounds.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-cfl-8700k/igt@i915_selftest@live_workarounds.html

  
#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@i915_selftest@live_workarounds:
    - {fi-skl-lmem}:      [PASS][55] -> [INCOMPLETE][56]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-skl-lmem/igt@i915_selftest@live_workarounds.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-skl-lmem/igt@i915_selftest@live_workarounds.html

  
Known issues
------------

  Here are the changes found in Patchwork_13415 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_busy@busy-all:
    - fi-icl-u3:          [PASS][57] -> [DMESG-WARN][58] ([fdo#107724])
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-u3/igt@gem_busy@busy-all.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-icl-u3/igt@gem_busy@busy-all.html

  * igt@i915_module_load@reload-with-fault-injection:
    - fi-icl-dsi:         [PASS][59] -> [INCOMPLETE][60] ([fdo#107713])
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-dsi/igt@i915_module_load@reload-with-fault-injection.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-icl-dsi/igt@i915_module_load@reload-with-fault-injection.html

  * igt@i915_selftest@live_hangcheck:
    - fi-elk-e7500:       [PASS][61] -> [INCOMPLETE][62] ([fdo#103989])
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-elk-e7500/igt@i915_selftest@live_hangcheck.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-elk-e7500/igt@i915_selftest@live_hangcheck.html
    - fi-byt-j1900:       [PASS][63] -> [INCOMPLETE][64] ([fdo#102657])
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-byt-j1900/igt@i915_selftest@live_hangcheck.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-byt-j1900/igt@i915_selftest@live_hangcheck.html
    - fi-byt-n2820:       [PASS][65] -> [INCOMPLETE][66] ([fdo#102657])
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-byt-n2820/igt@i915_selftest@live_hangcheck.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-byt-n2820/igt@i915_selftest@live_hangcheck.html
    - fi-snb-2600:        [PASS][67] -> [INCOMPLETE][68] ([fdo#105411])
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-snb-2600/igt@i915_selftest@live_hangcheck.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-snb-2600/igt@i915_selftest@live_hangcheck.html

  * igt@i915_selftest@live_workarounds:
    - fi-cml-u:           [PASS][69] -> [INCOMPLETE][70] ([fdo#110566])
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-cml-u/igt@i915_selftest@live_workarounds.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-cml-u/igt@i915_selftest@live_workarounds.html
    - fi-glk-dsi:         [PASS][71] -> [INCOMPLETE][72] ([fdo#103359] / [k.org#198133])
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-glk-dsi/igt@i915_selftest@live_workarounds.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-glk-dsi/igt@i915_selftest@live_workarounds.html
    - fi-apl-guc:         [PASS][73] -> [INCOMPLETE][74] ([fdo#103927])
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-apl-guc/igt@i915_selftest@live_workarounds.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-apl-guc/igt@i915_selftest@live_workarounds.html
    - fi-skl-6700k2:      [PASS][75] -> [INCOMPLETE][76] ([fdo#104108])
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-skl-6700k2/igt@i915_selftest@live_workarounds.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-skl-6700k2/igt@i915_selftest@live_workarounds.html
    - fi-icl-u3:          [PASS][77] -> [INCOMPLETE][78] ([fdo#107713])
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-u3/igt@i915_selftest@live_workarounds.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-icl-u3/igt@i915_selftest@live_workarounds.html
    - fi-bxt-j4205:       [PASS][79] -> [INCOMPLETE][80] ([fdo#103927])
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-bxt-j4205/igt@i915_selftest@live_workarounds.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-bxt-j4205/igt@i915_selftest@live_workarounds.html
    - fi-bxt-dsi:         [PASS][81] -> [INCOMPLETE][82] ([fdo#103927])
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-bxt-dsi/igt@i915_selftest@live_workarounds.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-bxt-dsi/igt@i915_selftest@live_workarounds.html
    - fi-icl-u2:          [PASS][83] -> [INCOMPLETE][84] ([fdo#107713])
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-u2/igt@i915_selftest@live_workarounds.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-icl-u2/igt@i915_selftest@live_workarounds.html
    - fi-cml-u2:          [PASS][85] -> [INCOMPLETE][86] ([fdo#110566])
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-cml-u2/igt@i915_selftest@live_workarounds.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-cml-u2/igt@i915_selftest@live_workarounds.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][87] -> [FAIL][88] ([fdo#109485])
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-icl-u2:          [PASS][89] -> [FAIL][90] ([fdo#103167])
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html

  * igt@vgem_basic@dmabuf-fence-before:
    - fi-icl-dsi:         [PASS][91] -> [DMESG-WARN][92] ([fdo#106107])
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-dsi/igt@vgem_basic@dmabuf-fence-before.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-icl-dsi/igt@vgem_basic@dmabuf-fence-before.html

  
#### Possible fixes ####

  * igt@i915_selftest@live_sanitycheck:
    - fi-icl-u3:          [DMESG-WARN][93] ([fdo#107724]) -> [PASS][94] +1 similar issue
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#102657]: https://bugs.freedesktop.org/show_bug.cgi?id=102657
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103359]: https://bugs.freedesktop.org/show_bug.cgi?id=103359
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#103989]: https://bugs.freedesktop.org/show_bug.cgi?id=103989
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
  [fdo#106107]: https://bugs.freedesktop.org/show_bug.cgi?id=106107
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#109485]: https://bugs.freedesktop.org/show_bug.cgi?id=109485
  [fdo#110566]: https://bugs.freedesktop.org/show_bug.cgi?id=110566
  [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133


Participating hosts (53 -> 44)
------------------------------

  Missing    (9): fi-ilk-m540 fi-skl-gvtdvm fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-pnv-d510 fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_6342 -> Patchwork_13415

  CI_DRM_6342: 6eef272b254b34200129af8f2ec1e4cfe1ca6bff @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5067: 5eafa33dbdb1d3c190ac5060161c45152e9a298e @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13415: 8a5698fc2d860278c3e6a68a96db8ff34a03e6fb @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

8a5698fc2d86 drm/i915: Replace struct_mutex for batch pool serialisation
6926fbf086a1 drm/i915: Protect request retirement with timeline->mutex
23a7536cb76e drm/i915: Include the breadcrumb when asserting request completion
94318c8a9993 drm/i915/gt: Add some debug tracing for context pinning
2a1e3258bbf2 drm/i915/gt: Drop stale commentary for timeline density
a2c24f42c719 drm/i915/gt: Always call kref_init for the timeline
592645d9bbc0 drm/i915/selftests: Hold ref on request across waits
25803be03449 drm/i915/gt: Guard timeline pinning with its own mutex
964a1acc5d18 drm/i915/gt: Convert timeline tracking to spinlock
9cbbcf9478ab drm/i915/gt: Track timeline activeness in enter/exit
90e1fc305efa drm/i915: Teach execbuffer to take the engine wakeref not GT
bbf1d5ececcb drm/i915: Lift intel_engines_resume() to callers
16f86bf810ec drm/i915: Only recover active engines
165e0397a15c drm/i915: Add a wakeref getter for iff the wakeref is already active
a1af1dba6619 drm/i915: Rename intel_wakeref_[is]_active
51401ad0e1f7 drm/i915/selftests: Fixup atomic reset checking
23b17962a3ed drm/i915/selftests: Drop manual request wakerefs around hangcheck
8c14a0a355dd drm/i915/selftests: Serialise nop reset with retirement
c1cca4af3d7e drm/i915/gt: Pass intel_gt to pm routines
a985c923cf12 drm/i915/execlists: Convert recursive defer_request() into iterative

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13415/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* [PATCH] drm/i915: Include the breadcrumb when asserting request completion
  2019-06-25 13:01 ` [PATCH 18/20] drm/i915: Include the breadcrumb when asserting request completion Chris Wilson
@ 2019-06-25 14:42   ` Chris Wilson
  0 siblings, 0 replies; 36+ messages in thread
From: Chris Wilson @ 2019-06-25 14:42 UTC (permalink / raw)
  To: intel-gfx

As we explode on a request completion assertion failure, include the
breadcrumb results in the trace so the log records why we chose to explode.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
Undo the logical inversion by adding the wrong assert to reset.
-Chris
---
 drivers/gpu/drm/i915/gt/intel_lrc.c |  2 +-
 drivers/gpu/drm/i915/i915_request.c |  2 +-
 drivers/gpu/drm/i915/i915_request.h | 12 ++++++++++++
 3 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index ce1aba3604b3..18b94fc952d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1381,7 +1381,7 @@ static void process_csb(struct intel_engine_cs *engine)
 			 * coherent (visible from the CPU) before the
 			 * user interrupt and CSB is processed.
 			 */
-			GEM_BUG_ON(!i915_request_completed(rq));
+			assert_request_complete(rq);
 			execlists_schedule_out(rq);
 
 			GEM_BUG_ON(execlists->active - execlists->inflight >
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 5ff87c4a0cd5..5b1c638d3754 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -329,7 +329,7 @@ void i915_request_retire_upto(struct i915_request *rq)
 		  hwsp_seqno(rq));
 
 	lockdep_assert_held(&rq->i915->drm.struct_mutex);
-	GEM_BUG_ON(!i915_request_completed(rq));
+	assert_request_complete(rq);
 
 	if (list_empty(&rq->ring_link))
 		return;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index b58ceef92e20..a41791637076 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -432,4 +432,16 @@ static inline void i915_request_mark_complete(struct i915_request *rq)
 
 bool i915_retire_requests(struct drm_i915_private *i915);
 
+static inline void assert_request_complete(const struct i915_request *rq)
+{
+	u32 hwsp = hwsp_seqno(rq);
+
+	if (i915_seqno_passed(hwsp, rq->fence.seqno))
+		return;
+
+	GEM_TRACE("fence %llx:%llu incomplete, current %u\n",
+		  rq->fence.context, rq->fence.seqno, hwsp);
+	GEM_BUG_ON(!i915_seqno_passed(hwsp, rq->fence.seqno));
+}
+
 #endif /* I915_REQUEST_H */
-- 
2.20.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 36+ messages in thread
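
A small userspace sketch of the wrap-safe seqno comparison that
assert_request_complete() leans on. It is only an illustration: the helper
name seqno_passed() and the signed-difference definition below are assumed
for the sketch, and only i915_seqno_passed() as used in the patch above is
the real interface.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "has seq1 reached seq2?" check, in the style assumed for i915_seqno_passed(). */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* The signed difference tolerates the u32 counter wrapping past zero. */
	return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
	assert(seqno_passed(5, 3));		/* already passed */
	assert(!seqno_passed(3, 5));		/* not yet */
	assert(seqno_passed(2, 0xfffffffeu));	/* passed across the wrap */
	return 0;
}

With such a comparison available, the assert can print the current
breadcrumb value (hwsp) next to the expected fence seqno before GEM_BUG_ON
fires, which is the point of the patch.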

* ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev2)
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (20 preceding siblings ...)
  2019-06-25 14:38 ` ✗ Fi.CI.BAT: failure " Patchwork
@ 2019-06-25 15:10 ` Patchwork
  2019-06-25 15:18 ` ✗ Fi.CI.SPARSE: " Patchwork
                   ` (6 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Patchwork @ 2019-06-25 15:10 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev2)
URL   : https://patchwork.freedesktop.org/series/62706/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
ff0b5f2fde8d drm/i915/execlists: Convert recursive defer_request() into iterative
a7e0d14976ee drm/i915/gt: Pass intel_gt to pm routines
e953533d98c6 drm/i915/selftests: Serialise nop reset with retirement
811871280044 drm/i915/selftests: Drop manual request wakerefs around hangcheck
1b48e65ee030 drm/i915/selftests: Fixup atomic reset checking
409fc607b479 drm/i915: Rename intel_wakeref_[is]_active
5a833d447cea drm/i915: Add a wakeref getter for iff the wakeref is already active
78da9d00f0d4 drm/i915: Only recover active engines
4d1d80bb2202 drm/i915: Lift intel_engines_resume() to callers
-:215: WARNING:AVOID_BUG: Avoid crashing the kernel - try using WARN_ON & recovery code rather than BUG() or BUG_ON()
#215: FILE: drivers/gpu/drm/i915/i915_gem.c:1200:
+	BUG_ON(!i915->kernel_context);

total: 0 errors, 1 warnings, 0 checks, 432 lines checked
792efdcdfaf7 drm/i915: Teach execbuffer to take the engine wakeref not GT
10540b25e989 drm/i915/gt: Track timeline activeness in enter/exit
372aaef8ea0f drm/i915/gt: Convert timeline tracking to spinlock
45b54311fd1f drm/i915/gt: Guard timeline pinning with its own mutex
19f67078c03b drm/i915/selftests: Hold ref on request across waits
dfb388a622db drm/i915/gt: Always call kref_init for the timeline
3a18bd5da448 drm/i915/gt: Drop stale commentary for timeline density
3a8097fc0f3d drm/i915/gt: Add some debug tracing for context pinning
8eb3a21e705e drm/i915: Include the breadcrumb when asserting request completion
0b10920892c1 drm/i915: Protect request retirement with timeline->mutex
13497475389a drm/i915: Replace struct_mutex for batch pool serialisation
-:305: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#305: 
new file mode 100644

-:310: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#310: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.c:1:
+/*

-:311: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#311: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.c:2:
+ * SPDX-License-Identifier: MIT

-:482: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#482: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.h:1:
+/*

-:483: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#483: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.h:2:
+ * SPDX-License-Identifier: MIT

-:522: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#522: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:1:
+/*

-:523: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#523: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:2:
+ * SPDX-License-Identifier: MIT

-:539: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#539: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:18:
+	spinlock_t lock;

total: 0 errors, 7 warnings, 1 checks, 595 lines checked

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* ✗ Fi.CI.SPARSE: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev2)
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (21 preceding siblings ...)
  2019-06-25 15:10 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev2) Patchwork
@ 2019-06-25 15:18 ` Patchwork
  2019-06-25 15:45 ` ✗ Fi.CI.BAT: failure " Patchwork
                   ` (5 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Patchwork @ 2019-06-25 15:18 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev2)
URL   : https://patchwork.freedesktop.org/series/62706/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.5.2
Commit: drm/i915/execlists: Convert recursive defer_request() into iterative
Okay!

Commit: drm/i915/gt: Pass intel_gt to pm routines
Okay!

Commit: drm/i915/selftests: Serialise nop reset with retirement
Okay!

Commit: drm/i915/selftests: Drop manual request wakerefs around hangcheck
Okay!

Commit: drm/i915/selftests: Fixup atomic reset checking
Okay!

Commit: drm/i915: Rename intel_wakeref_[is]_active
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)

Commit: drm/i915: Add a wakeref getter for iff the wakeref is already active
Okay!

Commit: drm/i915: Only recover active engines
Okay!

Commit: drm/i915: Lift intel_engines_resume() to callers
Okay!

Commit: drm/i915: Teach execbuffer to take the engine wakeref not GT
Okay!

Commit: drm/i915/gt: Track timeline activeness in enter/exit
Okay!

Commit: drm/i915/gt: Convert timeline tracking to spinlock
Okay!

Commit: drm/i915/gt: Guard timeline pinning with its own mutex
Okay!

Commit: drm/i915/selftests: Hold ref on request across waits
Okay!

Commit: drm/i915/gt: Always call kref_init for the timeline
Okay!

Commit: drm/i915/gt: Drop stale commentary for timeline density
Okay!

Commit: drm/i915/gt: Add some debug tracing for context pinning
Okay!

Commit: drm/i915: Include the breadcrumb when asserting request completion
Okay!

Commit: drm/i915: Protect request retirement with timeline->mutex
Okay!

Commit: drm/i915: Replace struct_mutex for batch pool serialisation
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* ✗ Fi.CI.BAT: failure for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev2)
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (22 preceding siblings ...)
  2019-06-25 15:18 ` ✗ Fi.CI.SPARSE: " Patchwork
@ 2019-06-25 15:45 ` Patchwork
  2019-06-25 18:01 ` [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Matthew Auld
                   ` (4 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Patchwork @ 2019-06-25 15:45 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev2)
URL   : https://patchwork.freedesktop.org/series/62706/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_6342 -> Patchwork_13418
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_13418 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_13418, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13418/

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_13418:

### IGT changes ###

#### Possible regressions ####

  * igt@kms_chamelium@hdmi-edid-read:
    - fi-kbl-7567u:       [PASS][1] -> [TIMEOUT][2] +3 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-kbl-7567u/igt@kms_chamelium@hdmi-edid-read.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13418/fi-kbl-7567u/igt@kms_chamelium@hdmi-edid-read.html

  
#### Warnings ####

  * igt@kms_chamelium@vga-hpd-fast:
    - fi-kbl-7567u:       [SKIP][3] ([fdo#109271]) -> [TIMEOUT][4] +4 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-kbl-7567u/igt@kms_chamelium@vga-hpd-fast.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13418/fi-kbl-7567u/igt@kms_chamelium@vga-hpd-fast.html

  
Known issues
------------

  Here are the changes found in Patchwork_13418 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@kms_flip@basic-plain-flip:
    - fi-ilk-650:         [PASS][5] -> [DMESG-WARN][6] ([fdo#106387])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-ilk-650/igt@kms_flip@basic-plain-flip.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13418/fi-ilk-650/igt@kms_flip@basic-plain-flip.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-icl-u2:          [PASS][7] -> [FAIL][8] ([fdo#103167])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13418/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html

  * igt@vgem_basic@unload:
    - fi-icl-u3:          [PASS][9] -> [DMESG-WARN][10] ([fdo#107724]) +1 similar issue
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-u3/igt@vgem_basic@unload.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13418/fi-icl-u3/igt@vgem_basic@unload.html

  
#### Possible fixes ####

  * igt@i915_selftest@live_blt:
    - fi-skl-iommu:       [INCOMPLETE][11] ([fdo#108602]) -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-skl-iommu/igt@i915_selftest@live_blt.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13418/fi-skl-iommu/igt@i915_selftest@live_blt.html

  * igt@i915_selftest@live_sanitycheck:
    - fi-icl-u3:          [DMESG-WARN][13] ([fdo#107724]) -> [PASS][14] +1 similar issue
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6342/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13418/fi-icl-u3/igt@i915_selftest@live_sanitycheck.html

  
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#106387]: https://bugs.freedesktop.org/show_bug.cgi?id=106387
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108602]: https://bugs.freedesktop.org/show_bug.cgi?id=108602
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271


Participating hosts (53 -> 46)
------------------------------

  Missing    (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * Linux: CI_DRM_6342 -> Patchwork_13418

  CI_DRM_6342: 6eef272b254b34200129af8f2ec1e4cfe1ca6bff @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5067: 5eafa33dbdb1d3c190ac5060161c45152e9a298e @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13418: 13497475389acafbe8d9863995d4c80e7f94f366 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

13497475389a drm/i915: Replace struct_mutex for batch pool serialisation
0b10920892c1 drm/i915: Protect request retirement with timeline->mutex
8eb3a21e705e drm/i915: Include the breadcrumb when asserting request completion
3a8097fc0f3d drm/i915/gt: Add some debug tracing for context pinning
3a18bd5da448 drm/i915/gt: Drop stale commentary for timeline density
dfb388a622db drm/i915/gt: Always call kref_init for the timeline
19f67078c03b drm/i915/selftests: Hold ref on request across waits
45b54311fd1f drm/i915/gt: Guard timeline pinning with its own mutex
372aaef8ea0f drm/i915/gt: Convert timeline tracking to spinlock
10540b25e989 drm/i915/gt: Track timeline activeness in enter/exit
792efdcdfaf7 drm/i915: Teach execbuffer to take the engine wakeref not GT
4d1d80bb2202 drm/i915: Lift intel_engines_resume() to callers
78da9d00f0d4 drm/i915: Only recover active engines
5a833d447cea drm/i915: Add a wakeref getter for iff the wakeref is already active
409fc607b479 drm/i915: Rename intel_wakeref_[is]_active
1b48e65ee030 drm/i915/selftests: Fixup atomic reset checking
811871280044 drm/i915/selftests: Drop manual request wakerefs around hangcheck
e953533d98c6 drm/i915/selftests: Serialise nop reset with retirement
a7e0d14976ee drm/i915/gt: Pass intel_gt to pm routines
ff0b5f2fde8d drm/i915/execlists: Convert recursive defer_request() into iterative

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13418/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (23 preceding siblings ...)
  2019-06-25 15:45 ` ✗ Fi.CI.BAT: failure " Patchwork
@ 2019-06-25 18:01 ` Matthew Auld
  2019-06-25 18:19 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3) Patchwork
                   ` (3 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Matthew Auld @ 2019-06-25 18:01 UTC (permalink / raw)
  To: Chris Wilson; +Cc: Intel Graphics Development

On Tue, 25 Jun 2019 at 14:03, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> As this engine owns the lock around rq->sched.link (for those waiters
> submitted to this engine), we can use that link as an element in a local
> list. We can thus replace the recursive algorithm with an iterative walk
> over the ordered list of waiters.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread
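
For readers following the conversion reviewed above, a self-contained sketch
of the same idea: replace a recursive walk over waiters with a single loop
that reuses a link field to queue work on a local list. struct node,
defer_iterative() and the fixed waiters[] array are simplified stand-ins
invented for this sketch, not the i915_request or scheduler types.

#include <stdio.h>

struct node {
	const char *name;
	struct node *waiters[4];	/* simplified, fixed fan-out */
	int nr_waiters;
	struct node *next;		/* link reused as the local work list */
	int queued;
};

/* Defer rq and, transitively, every waiter that depends on it, in order. */
static void defer_iterative(struct node *rq)
{
	struct node *tail = rq;

	rq->queued = 1;
	while (rq) {
		printf("defer %s\n", rq->name);	/* stands in for list_move_tail() */

		for (int i = 0; i < rq->nr_waiters; i++) {
			struct node *w = rq->waiters[i];

			if (w->queued)
				continue;	/* already queued for deferral */
			w->queued = 1;
			tail->next = w;		/* append, preserving submission order */
			tail = w;
		}
		rq = rq->next;			/* consume the local list front to back */
	}
}

int main(void)
{
	struct node c = { .name = "C" };
	struct node b = { .name = "B", .waiters = { &c }, .nr_waiters = 1 };
	struct node a = { .name = "A", .waiters = { &b, &c }, .nr_waiters = 2 };

	defer_iterative(&a);	/* prints A, B, C: waiters stay behind their signaler */
	return 0;
}

Because each waiter is appended behind its signaler, submission order is
preserved and there is no recursion depth left to worry about.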

* Re: [PATCH 02/20] drm/i915/gt: Pass intel_gt to pm routines
  2019-06-25 13:01 ` [PATCH 02/20] drm/i915/gt: Pass intel_gt to pm routines Chris Wilson
@ 2019-06-25 18:07   ` Matthew Auld
  0 siblings, 0 replies; 36+ messages in thread
From: Matthew Auld @ 2019-06-25 18:07 UTC (permalink / raw)
  To: Chris Wilson; +Cc: Intel Graphics Development

On Tue, 25 Jun 2019 at 14:02, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> Switch from passing the i915 container to newly named struct intel_gt.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3)
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (24 preceding siblings ...)
  2019-06-25 18:01 ` [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Matthew Auld
@ 2019-06-25 18:19 ` Patchwork
  2019-06-25 18:28 ` ✗ Fi.CI.SPARSE: " Patchwork
                   ` (2 subsequent siblings)
  28 siblings, 0 replies; 36+ messages in thread
From: Patchwork @ 2019-06-25 18:19 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3)
URL   : https://patchwork.freedesktop.org/series/62706/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
d5806bbd1145 drm/i915/execlists: Convert recursive defer_request() into iterative
55805e8f02a8 drm/i915/gt: Pass intel_gt to pm routines
4b78a9379430 drm/i915/selftests: Serialise nop reset with retirement
2d4f05d735ef drm/i915/selftests: Drop manual request wakerefs around hangcheck
21f1b65d8dfd drm/i915/selftests: Fixup atomic reset checking
de3548b613d8 drm/i915: Rename intel_wakeref_[is]_active
e9aaa83e46bf drm/i915: Add a wakeref getter for iff the wakeref is already active
f28349424a41 drm/i915: Only recover active engines
7b0712a06ed0 drm/i915: Lift intel_engines_resume() to callers
-:215: WARNING:AVOID_BUG: Avoid crashing the kernel - try using WARN_ON & recovery code rather than BUG() or BUG_ON()
#215: FILE: drivers/gpu/drm/i915/i915_gem.c:1200:
+	BUG_ON(!i915->kernel_context);

total: 0 errors, 1 warnings, 0 checks, 432 lines checked
6231b6d31a29 drm/i915: Teach execbuffer to take the engine wakeref not GT
087ecd33d81b drm/i915/gt: Track timeline activeness in enter/exit
bb82817e5d1a drm/i915/gt: Convert timeline tracking to spinlock
2714ae845e6c drm/i915/gt: Guard timeline pinning with its own mutex
f678c21b80c0 drm/i915/selftests: Hold ref on request across waits
1ce083435c01 drm/i915/gt: Always call kref_init for the timeline
9ef3064fc46d drm/i915/gt: Drop stale commentary for timeline density
956d36060177 drm/i915/gt: Add some debug tracing for context pinning
9d6ae4ad1f94 drm/i915: Include the breadcrumb when asserting request completion
3d4fdb554df9 drm/i915: Protect request retirement with timeline->mutex
28cf28b85d48 drm/i915: Replace struct_mutex for batch pool serialisation
-:305: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#305: 
new file mode 100644

-:310: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#310: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.c:1:
+/*

-:311: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#311: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.c:2:
+ * SPDX-License-Identifier: MIT

-:482: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#482: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.h:1:
+/*

-:483: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#483: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool.h:2:
+ * SPDX-License-Identifier: MIT

-:522: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#522: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:1:
+/*

-:523: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#523: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:2:
+ * SPDX-License-Identifier: MIT

-:539: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#539: FILE: drivers/gpu/drm/i915/gt/intel_engine_pool_types.h:18:
+	spinlock_t lock;

total: 0 errors, 7 warnings, 1 checks, 595 lines checked

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* ✗ Fi.CI.SPARSE: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3)
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (25 preceding siblings ...)
  2019-06-25 18:19 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3) Patchwork
@ 2019-06-25 18:28 ` Patchwork
  2019-06-25 19:35 ` ✓ Fi.CI.BAT: success " Patchwork
  2019-06-25 22:10 ` ✓ Fi.CI.IGT: " Patchwork
  28 siblings, 0 replies; 36+ messages in thread
From: Patchwork @ 2019-06-25 18:28 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3)
URL   : https://patchwork.freedesktop.org/series/62706/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Sparse version: v0.5.2
Commit: drm/i915/execlists: Convert recursive defer_request() into iterative
Okay!

Commit: drm/i915/gt: Pass intel_gt to pm routines
Okay!

Commit: drm/i915/selftests: Serialise nop reset with retirement
Okay!

Commit: drm/i915/selftests: Drop manual request wakerefs around hangcheck
Okay!

Commit: drm/i915/selftests: Fixup atomic reset checking
Okay!

Commit: drm/i915: Rename intel_wakeref_[is]_active
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)

Commit: drm/i915: Add a wakeref getter for iff the wakeref is already active
Okay!

Commit: drm/i915: Only recover active engines
Okay!

Commit: drm/i915: Lift intel_engines_resume() to callers
Okay!

Commit: drm/i915: Teach execbuffer to take the engine wakeref not GT
Okay!

Commit: drm/i915/gt: Track timeline activeness in enter/exit
Okay!

Commit: drm/i915/gt: Convert timeline tracking to spinlock
Okay!

Commit: drm/i915/gt: Guard timeline pinning with its own mutex
Okay!

Commit: drm/i915/selftests: Hold ref on request across waits
Okay!

Commit: drm/i915/gt: Always call kref_init for the timeline
Okay!

Commit: drm/i915/gt: Drop stale commentary for timeline density
Okay!

Commit: drm/i915/gt: Add some debug tracing for context pinning
Okay!

Commit: drm/i915: Include the breadcrumb when asserting request completion
Okay!

Commit: drm/i915: Protect request retirement with timeline->mutex
Okay!

Commit: drm/i915: Replace struct_mutex for batch pool serialisation
+./include/uapi/linux/perf_event.h:147:56: warning: cast truncates bits from constant value (8000000000000000 becomes 0)

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 14/20] drm/i915/selftests: Hold ref on request across waits
  2019-06-25 13:01 ` [PATCH 14/20] drm/i915/selftests: Hold ref on request across waits Chris Wilson
@ 2019-06-25 18:39   ` Matthew Auld
  0 siblings, 0 replies; 36+ messages in thread
From: Matthew Auld @ 2019-06-25 18:39 UTC (permalink / raw)
  To: Chris Wilson; +Cc: Intel Graphics Development

On Tue, 25 Jun 2019 at 14:02, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> As we wait upon the request, we should be sure to hold our own reference
> for our checks.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread
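
The race the patch closes can be shown with a toy refcounted object. All of
the names below (struct obj, obj_get(), obj_put(), retire(),
wait_and_check()) are invented for this sketch and are not the i915 request
API.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	int refcount;
	int completed;
};

static struct obj *obj_get(struct obj *o) { o->refcount++; return o; }

static void obj_put(struct obj *o)
{
	if (--o->refcount == 0)
		free(o);
}

/* Retirement may drop the submission's own reference once the work completes. */
static void retire(struct obj *o)
{
	o->completed = 1;
	obj_put(o);
}

static void wait_and_check(struct obj *o)
{
	obj_get(o);		/* hold our own reference across the wait */

	retire(o);		/* models the request completing and being retired meanwhile */

	assert(o->completed);	/* still safe to inspect: our reference keeps o alive */
	obj_put(o);
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	o->refcount = 1;	/* the submission's reference */
	wait_and_check(o);
	puts("checks ran on a request we kept alive ourselves");
	return 0;
}

Without the extra obj_get(), retirement dropping the last reference would
leave the later checks reading freed memory, which is the hazard the
selftests were hitting.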

* Re: [PATCH 15/20] drm/i915/gt: Always call kref_init for the timeline
  2019-06-25 13:01 ` [PATCH 15/20] drm/i915/gt: Always call kref_init for the timeline Chris Wilson
@ 2019-06-25 18:42   ` Matthew Auld
  0 siblings, 0 replies; 36+ messages in thread
From: Matthew Auld @ 2019-06-25 18:42 UTC (permalink / raw)
  To: Chris Wilson; +Cc: Intel Graphics Development

On Tue, 25 Jun 2019 at 14:03, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> Always initialise the refcount, even for the embedded timelines inside
> mock devices.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 17/20] drm/i915/gt: Add some debug tracing for context pinning
  2019-06-25 13:01 ` [PATCH 17/20] drm/i915/gt: Add some debug tracing for context pinning Chris Wilson
@ 2019-06-25 18:47   ` Matthew Auld
  0 siblings, 0 replies; 36+ messages in thread
From: Matthew Auld @ 2019-06-25 18:47 UTC (permalink / raw)
  To: Chris Wilson; +Cc: Intel Graphics Development

On Tue, 25 Jun 2019 at 14:02, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> Add the context pin/unpin events to the trace for post-mortem debugging.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* ✓ Fi.CI.BAT: success for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3)
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (26 preceding siblings ...)
  2019-06-25 18:28 ` ✗ Fi.CI.SPARSE: " Patchwork
@ 2019-06-25 19:35 ` Patchwork
  2019-06-25 22:10 ` ✓ Fi.CI.IGT: " Patchwork
  28 siblings, 0 replies; 36+ messages in thread
From: Patchwork @ 2019-06-25 19:35 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3)
URL   : https://patchwork.freedesktop.org/series/62706/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6346 -> Patchwork_13420
====================================================

Summary
-------

  **WARNING**

  Minor unknown changes coming with Patchwork_13420 need to be verified
  manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_13420, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_13420:

### IGT changes ###

#### Warnings ####

  * igt@kms_chamelium@vga-edid-read:
    - fi-kbl-7567u:       [TIMEOUT][1] -> [FAIL][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/fi-kbl-7567u/igt@kms_chamelium@vga-edid-read.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/fi-kbl-7567u/igt@kms_chamelium@vga-edid-read.html

  
Known issues
------------

  Here are the changes found in Patchwork_13420 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@live_blt:
    - fi-cfl-guc:         [PASS][3] -> [DMESG-WARN][4] ([fdo#110943])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/fi-cfl-guc/igt@i915_selftest@live_blt.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/fi-cfl-guc/igt@i915_selftest@live_blt.html

  * igt@kms_flip@basic-plain-flip:
    - fi-icl-u3:          [PASS][5] -> [DMESG-WARN][6] ([fdo#107724]) +2 similar issues
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/fi-icl-u3/igt@kms_flip@basic-plain-flip.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/fi-icl-u3/igt@kms_flip@basic-plain-flip.html

  
#### Possible fixes ####

  * igt@gem_mmap_gtt@basic-small-bo-tiledx:
    - fi-icl-u3:          [DMESG-WARN][7] ([fdo#107724]) -> [PASS][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/fi-icl-u3/igt@gem_mmap_gtt@basic-small-bo-tiledx.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/fi-icl-u3/igt@gem_mmap_gtt@basic-small-bo-tiledx.html

  * igt@i915_getparams_basic@basic-subslice-total:
    - fi-icl-dsi:         [INCOMPLETE][9] ([fdo#107713]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/fi-icl-dsi/igt@i915_getparams_basic@basic-subslice-total.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/fi-icl-dsi/igt@i915_getparams_basic@basic-subslice-total.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-hsw-peppy:       [DMESG-WARN][11] ([fdo#102614]) -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/fi-hsw-peppy/igt@kms_frontbuffer_tracking@basic.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/fi-hsw-peppy/igt@kms_frontbuffer_tracking@basic.html
    - fi-icl-u2:          [FAIL][13] ([fdo#103167]) -> [PASS][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/fi-icl-u2/igt@kms_frontbuffer_tracking@basic.html

  
  [fdo#102614]: https://bugs.freedesktop.org/show_bug.cgi?id=102614
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#110943]: https://bugs.freedesktop.org/show_bug.cgi?id=110943


Participating hosts (53 -> 45)
------------------------------

  Missing    (8): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus fi-cml-u 


Build changes
-------------

  * Linux: CI_DRM_6346 -> Patchwork_13420

  CI_DRM_6346: 6c9c9bdb0ff7eb41b7eb1a92ce218c813740606a @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5068: 15ad664534413628f06c0f172aac11598bfdb895 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13420: 28cf28b85d48b1b16f266834aaba09da832a5f95 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

28cf28b85d48 drm/i915: Replace struct_mutex for batch pool serialisation
3d4fdb554df9 drm/i915: Protect request retirement with timeline->mutex
9d6ae4ad1f94 drm/i915: Include the breadcrumb when asserting request completion
956d36060177 drm/i915/gt: Add some debug tracing for context pinning
9ef3064fc46d drm/i915/gt: Drop stale commentary for timeline density
1ce083435c01 drm/i915/gt: Always call kref_init for the timeline
f678c21b80c0 drm/i915/selftests: Hold ref on request across waits
2714ae845e6c drm/i915/gt: Guard timeline pinning with its own mutex
bb82817e5d1a drm/i915/gt: Convert timeline tracking to spinlock
087ecd33d81b drm/i915/gt: Track timeline activeness in enter/exit
6231b6d31a29 drm/i915: Teach execbuffer to take the engine wakeref not GT
7b0712a06ed0 drm/i915: Lift intel_engines_resume() to callers
f28349424a41 drm/i915: Only recover active engines
e9aaa83e46bf drm/i915: Add a wakeref getter for iff the wakeref is already active
de3548b613d8 drm/i915: Rename intel_wakeref_[is]_active
21f1b65d8dfd drm/i915/selftests: Fixup atomic reset checking
2d4f05d735ef drm/i915/selftests: Drop manual request wakerefs around hangcheck
4b78a9379430 drm/i915/selftests: Serialise nop reset with retirement
55805e8f02a8 drm/i915/gt: Pass intel_gt to pm routines
d5806bbd1145 drm/i915/execlists: Convert recursive defer_request() into iterative

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* ✓ Fi.CI.IGT: success for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3)
  2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
                   ` (27 preceding siblings ...)
  2019-06-25 19:35 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-06-25 22:10 ` Patchwork
  28 siblings, 0 replies; 36+ messages in thread
From: Patchwork @ 2019-06-25 22:10 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3)
URL   : https://patchwork.freedesktop.org/series/62706/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_6346_full -> Patchwork_13420_full
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  

Known issues
------------

  Here are the changes found in Patchwork_13420_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_selftest@mock_requests:
    - shard-skl:          [PASS][1] -> [INCOMPLETE][2] ([fdo#110550])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-skl5/igt@i915_selftest@mock_requests.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-skl3/igt@i915_selftest@mock_requests.html

  * igt@kms_draw_crc@draw-method-rgb565-mmap-cpu-ytiled:
    - shard-iclb:         [PASS][3] -> [FAIL][4] ([fdo#103184] / [fdo#103232])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-iclb5/igt@kms_draw_crc@draw-method-rgb565-mmap-cpu-ytiled.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-iclb7/igt@kms_draw_crc@draw-method-rgb565-mmap-cpu-ytiled.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible:
    - shard-skl:          [PASS][5] -> [FAIL][6] ([fdo#105363])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-skl4/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-skl10/igt@kms_flip@flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-blt:
    - shard-iclb:         [PASS][7] -> [FAIL][8] ([fdo#103167]) +1 similar issue
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-blt.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-iclb7/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-draw-blt.html

  * igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-pwrite:
    - shard-hsw:          [PASS][9] -> [SKIP][10] ([fdo#109271]) +23 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-hsw5/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-pwrite.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-hsw1/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-cur-indfb-draw-pwrite.html

  * igt@kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-cpu:
    - shard-skl:          [PASS][11] -> [FAIL][12] ([fdo#103167] / [fdo#110379])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-skl3/igt@kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-cpu.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-skl8/igt@kms_frontbuffer_tracking@fbcpsr-rgb101010-draw-mmap-cpu.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-b-planes:
    - shard-apl:          [PASS][13] -> [DMESG-WARN][14] ([fdo#108566]) +2 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-apl4/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-b-planes.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-apl1/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-b-planes.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes:
    - shard-glk:          [PASS][15] -> [INCOMPLETE][16] ([fdo#103359] / [k.org#198133])
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-glk7/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-glk3/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-c-planes.html

  * igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min:
    - shard-skl:          [PASS][17] -> [FAIL][18] ([fdo#108145])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-skl5/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-skl7/igt@kms_plane_alpha_blend@pipe-a-constant-alpha-min.html

  * igt@kms_psr@psr2_cursor_mmap_cpu:
    - shard-iclb:         [PASS][19] -> [SKIP][20] ([fdo#109441]) +1 similar issue
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-iclb8/igt@kms_psr@psr2_cursor_mmap_cpu.html

  
#### Possible fixes ####

  * igt@gem_eio@context-create:
    - shard-hsw:          [DMESG-WARN][21] ([fdo#110789] / [fdo#110913 ]) -> [PASS][22] +5 similar issues
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-hsw4/igt@gem_eio@context-create.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-hsw1/igt@gem_eio@context-create.html

  * igt@gem_eio@wait-10ms:
    - shard-apl:          [DMESG-WARN][23] ([fdo#110913 ]) -> [PASS][24] +4 similar issues
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-apl7/igt@gem_eio@wait-10ms.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-apl1/igt@gem_eio@wait-10ms.html

  * igt@gem_exec_balancer@smoke:
    - shard-iclb:         [SKIP][25] ([fdo#110854]) -> [PASS][26]
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-iclb6/igt@gem_exec_balancer@smoke.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-iclb4/igt@gem_exec_balancer@smoke.html

  * igt@gem_partial_pwrite_pread@writes-after-reads-snoop:
    - shard-iclb:         [DMESG-WARN][27] ([fdo#110913 ]) -> [PASS][28] +9 similar issues
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-iclb2/igt@gem_partial_pwrite_pread@writes-after-reads-snoop.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-iclb8/igt@gem_partial_pwrite_pread@writes-after-reads-snoop.html
    - shard-glk:          [DMESG-WARN][29] ([fdo#110913 ]) -> [PASS][30] +8 similar issues
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-glk1/igt@gem_partial_pwrite_pread@writes-after-reads-snoop.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-glk6/igt@gem_partial_pwrite_pread@writes-after-reads-snoop.html

  * igt@gem_softpin@noreloc-s3:
    - shard-skl:          [INCOMPLETE][31] ([fdo#104108] / [fdo#107773]) -> [PASS][32]
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-skl8/igt@gem_softpin@noreloc-s3.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-skl3/igt@gem_softpin@noreloc-s3.html

  * igt@gem_userptr_blits@map-fixed-invalidate-overlap-busy-gup:
    - shard-skl:          [DMESG-WARN][33] ([fdo#110913 ]) -> [PASS][34] +2 similar issues
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-skl2/igt@gem_userptr_blits@map-fixed-invalidate-overlap-busy-gup.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-skl5/igt@gem_userptr_blits@map-fixed-invalidate-overlap-busy-gup.html

  * igt@gem_userptr_blits@sync-unmap-cycles:
    - shard-snb:          [DMESG-WARN][35] ([fdo#110913 ]) -> [PASS][36]
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-snb1/igt@gem_userptr_blits@sync-unmap-cycles.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-snb5/igt@gem_userptr_blits@sync-unmap-cycles.html
    - shard-hsw:          [DMESG-WARN][37] ([fdo#110913 ]) -> [PASS][38]
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-hsw6/igt@gem_userptr_blits@sync-unmap-cycles.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-hsw5/igt@gem_userptr_blits@sync-unmap-cycles.html

  * igt@i915_pm_backlight@fade_with_suspend:
    - shard-skl:          [INCOMPLETE][39] ([fdo#104108]) -> [PASS][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-skl5/igt@i915_pm_backlight@fade_with_suspend.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-skl7/igt@i915_pm_backlight@fade_with_suspend.html

  * igt@i915_selftest@mock_sanitycheck:
    - shard-kbl:          [DMESG-WARN][41] ([fdo#110913 ]) -> [PASS][42] +5 similar issues
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-kbl1/igt@i915_selftest@mock_sanitycheck.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-kbl6/igt@i915_selftest@mock_sanitycheck.html

  * igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
    - shard-glk:          [FAIL][43] ([fdo#104873]) -> [PASS][44]
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-glk2/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-glk7/igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-skl:          [FAIL][45] ([fdo#105363]) -> [PASS][46]
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-skl10/igt@kms_flip@flip-vs-expired-vblank.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-skl1/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-msflip-blt:
    - shard-iclb:         [FAIL][47] ([fdo#103167]) -> [PASS][48] +6 similar issues
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-iclb4/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-msflip-blt.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-shrfb-msflip-blt.html

  * igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-move:
    - shard-hsw:          [SKIP][49] ([fdo#109271]) -> [PASS][50] +23 similar issues
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-hsw1/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-move.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-hsw4/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-move.html

  * igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes:
    - shard-apl:          [DMESG-WARN][51] ([fdo#108566]) -> [PASS][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-apl2/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-apl1/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html

  * igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min:
    - shard-skl:          [FAIL][53] ([fdo#108145]) -> [PASS][54]
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-skl1/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-skl6/igt@kms_plane_alpha_blend@pipe-b-constant-alpha-min.html

  * igt@kms_psr@psr2_primary_page_flip:
    - shard-iclb:         [SKIP][55] ([fdo#109441]) -> [PASS][56] +4 similar issues
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-iclb1/igt@kms_psr@psr2_primary_page_flip.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-iclb2/igt@kms_psr@psr2_primary_page_flip.html

  * igt@kms_vblank@pipe-a-query-idle-hang:
    - shard-snb:          [DMESG-WARN][57] ([fdo#110789] / [fdo#110913 ]) -> [PASS][58] +4 similar issues
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-snb4/igt@kms_vblank@pipe-a-query-idle-hang.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-snb1/igt@kms_vblank@pipe-a-query-idle-hang.html

  * igt@tools_test@tools_test:
    - shard-kbl:          [SKIP][59] ([fdo#109271]) -> [PASS][60] +1 similar issue
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-kbl3/igt@tools_test@tools_test.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-kbl1/igt@tools_test@tools_test.html

  
#### Warnings ####

  * igt@i915_pm_rpm@modeset-lpsp-stress:
    - shard-hsw:          [SKIP][61] ([fdo#109271]) -> [INCOMPLETE][62] ([fdo#103540] / [fdo#107807])
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_6346/shard-hsw7/igt@i915_pm_rpm@modeset-lpsp-stress.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/shard-hsw5/igt@i915_pm_rpm@modeset-lpsp-stress.html

  
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103184]: https://bugs.freedesktop.org/show_bug.cgi?id=103184
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#103359]: https://bugs.freedesktop.org/show_bug.cgi?id=103359
  [fdo#103540]: https://bugs.freedesktop.org/show_bug.cgi?id=103540
  [fdo#104108]: https://bugs.freedesktop.org/show_bug.cgi?id=104108
  [fdo#104873]: https://bugs.freedesktop.org/show_bug.cgi?id=104873
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#107773]: https://bugs.freedesktop.org/show_bug.cgi?id=107773
  [fdo#107807]: https://bugs.freedesktop.org/show_bug.cgi?id=107807
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#110379]: https://bugs.freedesktop.org/show_bug.cgi?id=110379
  [fdo#110550]: https://bugs.freedesktop.org/show_bug.cgi?id=110550
  [fdo#110789]: https://bugs.freedesktop.org/show_bug.cgi?id=110789
  [fdo#110854]: https://bugs.freedesktop.org/show_bug.cgi?id=110854
  [fdo#110913 ]: https://bugs.freedesktop.org/show_bug.cgi?id=110913 
  [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133


Participating hosts (10 -> 10)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * Linux: CI_DRM_6346 -> Patchwork_13420

  CI_DRM_6346: 6c9c9bdb0ff7eb41b7eb1a92ce218c813740606a @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5068: 15ad664534413628f06c0f172aac11598bfdb895 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_13420: 28cf28b85d48b1b16f266834aaba09da832a5f95 @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_13420/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

* Re: [PATCH 16/20] drm/i915/gt: Drop stale commentary for timeline density
  2019-06-25 13:01 ` [PATCH 16/20] drm/i915/gt: Drop stale commentary for timeline density Chris Wilson
@ 2019-06-25 23:28   ` Daniele Ceraolo Spurio
  0 siblings, 0 replies; 36+ messages in thread
From: Daniele Ceraolo Spurio @ 2019-06-25 23:28 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx



On 6/25/19 6:01 AM, Chris Wilson wrote:
> We no longer allocate a contiguous set of timeline ids for all engines
> upon creation, so we should no longer assume that the timelines are
> densely allocated within a context. Hopefully, they are still dense enough for us
> to take advantage of the compressed radix tree.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>

Daniele

> ---
>   drivers/gpu/drm/i915/gt/intel_timeline.c | 14 ++------------
>   1 file changed, 2 insertions(+), 12 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
> index 7fb5defd9e71..7b476cd55dac 100644
> --- a/drivers/gpu/drm/i915/gt/intel_timeline.c
> +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
> @@ -210,21 +210,11 @@ int intel_timeline_init(struct intel_timeline *timeline,
>   {
>   	void *vaddr;
>   
> -	/*
> -	 * Ideally we want a set of engines on a single leaf as we expect
> -	 * to mostly be tracking synchronisation between engines. It is not
> -	 * a huge issue if this is not the case, but we may want to mitigate
> -	 * any page crossing penalties if they become an issue.
> -	 *
> -	 * Called during early_init before we know how many engines there are.
> -	 */
> -	BUILD_BUG_ON(KSYNCMAP < I915_NUM_ENGINES);
> -
> -	timeline->gt = gt;
> -
>   	kref_init(&timeline->kref);
>   	atomic_set(&timeline->pin_count, 0);
>   
> +	timeline->gt = gt;
> +
>   	timeline->has_initial_breadcrumb = !hwsp;
>   	timeline->hwsp_cacheline = NULL;
>   
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 36+ messages in thread

end of thread

Thread overview: 36+ messages
2019-06-25 13:01 [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Chris Wilson
2019-06-25 13:01 ` [PATCH 02/20] drm/i915/gt: Pass intel_gt to pm routines Chris Wilson
2019-06-25 18:07   ` Matthew Auld
2019-06-25 13:01 ` [PATCH 03/20] drm/i915/selftests: Serialise nop reset with retirement Chris Wilson
2019-06-25 13:01 ` [PATCH 04/20] drm/i915/selftests: Drop manual request wakerefs around hangcheck Chris Wilson
2019-06-25 13:01 ` [PATCH 05/20] drm/i915/selftests: Fixup atomic reset checking Chris Wilson
2019-06-25 13:01 ` [PATCH 06/20] drm/i915: Rename intel_wakeref_[is]_active Chris Wilson
2019-06-25 13:01 ` [PATCH 07/20] drm/i915: Add a wakeref getter for iff the wakeref is already active Chris Wilson
2019-06-25 13:01 ` [PATCH 08/20] drm/i915: Only recover active engines Chris Wilson
2019-06-25 13:01 ` [PATCH 09/20] drm/i915: Lift intel_engines_resume() to callers Chris Wilson
2019-06-25 13:01 ` [PATCH 10/20] drm/i915: Teach execbuffer to take the engine wakeref not GT Chris Wilson
2019-06-25 13:01 ` [PATCH 11/20] drm/i915/gt: Track timeline activeness in enter/exit Chris Wilson
2019-06-25 13:01 ` [PATCH 12/20] drm/i915/gt: Convert timeline tracking to spinlock Chris Wilson
2019-06-25 13:01 ` [PATCH 13/20] drm/i915/gt: Guard timeline pinning with its own mutex Chris Wilson
2019-06-25 13:01 ` [PATCH 14/20] drm/i915/selftests: Hold ref on request across waits Chris Wilson
2019-06-25 18:39   ` Matthew Auld
2019-06-25 13:01 ` [PATCH 15/20] drm/i915/gt: Always call kref_init for the timeline Chris Wilson
2019-06-25 18:42   ` Matthew Auld
2019-06-25 13:01 ` [PATCH 16/20] drm/i915/gt: Drop stale commentary for timeline density Chris Wilson
2019-06-25 23:28   ` Daniele Ceraolo Spurio
2019-06-25 13:01 ` [PATCH 17/20] drm/i915/gt: Add some debug tracing for context pinning Chris Wilson
2019-06-25 18:47   ` Matthew Auld
2019-06-25 13:01 ` [PATCH 18/20] drm/i915: Include the breadcrumb when asserting request completion Chris Wilson
2019-06-25 14:42   ` [PATCH] " Chris Wilson
2019-06-25 13:01 ` [PATCH 19/20] drm/i915: Protect request retirement with timeline->mutex Chris Wilson
2019-06-25 13:01 ` [PATCH 20/20] drm/i915: Replace struct_mutex for batch pool serialisation Chris Wilson
2019-06-25 14:09 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Patchwork
2019-06-25 14:38 ` ✗ Fi.CI.BAT: failure " Patchwork
2019-06-25 15:10 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev2) Patchwork
2019-06-25 15:18 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-06-25 15:45 ` ✗ Fi.CI.BAT: failure " Patchwork
2019-06-25 18:01 ` [PATCH 01/20] drm/i915/execlists: Convert recursive defer_request() into iterative Matthew Auld
2019-06-25 18:19 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/20] drm/i915/execlists: Convert recursive defer_request() into iterative (rev3) Patchwork
2019-06-25 18:28 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-06-25 19:35 ` ✓ Fi.CI.BAT: success " Patchwork
2019-06-25 22:10 ` ✓ Fi.CI.IGT: " Patchwork
