All of lore.kernel.org
 help / color / mirror / Atom feed
* [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it
@ 2020-01-08 10:45 Chris Wilson
  2020-01-08 10:45 ` [Intel-gfx] [PATCH 2/4] drm/i915: Replace vma parking with a clock aging algorithm Chris Wilson
                   ` (6 more replies)
  0 siblings, 7 replies; 8+ messages in thread
From: Chris Wilson @ 2020-01-08 10:45 UTC (permalink / raw)
  To: intel-gfx

Since we now allow the intel_context_unpin() to run unserialised, we
risk our operations under the intel_context_lock_pinned() being run as
the context is unpinned (and thus invalidating our state). We can
atomically acquire the pin, testing to see if it is pinned in the
process, thus ensuring that the state remains consistent during the
course of the whole operation.

Fixes: 841350223816 ("drm/i915/gt: Drop mutex serialisation between context pin/unpin")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c | 10 +++++++---
 drivers/gpu/drm/i915/gt/intel_context.h     |  7 ++++++-
 drivers/gpu/drm/i915/i915_debugfs.c         | 10 ++++------
 drivers/gpu/drm/i915/i915_perf.c            | 13 +++++--------
 4 files changed, 22 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 88f6253f5405..a2e57e62af30 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1236,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 	 * image, or into the registers directory, does not stick). Pristine
 	 * and idle contexts will be configured on pinning.
 	 */
-	if (!intel_context_is_pinned(ce))
+	if (!intel_context_pin_if_active(ce))
 		return 0;
 
 	rq = intel_engine_create_kernel_request(ce->engine);
-	if (IS_ERR(rq))
-		return PTR_ERR(rq);
+	if (IS_ERR(rq)) {
+		ret = PTR_ERR(rq);
+		goto out_unpin;
+	}
 
 	/* Serialise with the remote context */
 	ret = intel_context_prepare_remote_request(ce, rq);
@@ -1249,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
 		ret = gen8_emit_rpcs_config(rq, ce, sseu);
 
 	i915_request_add(rq);
+out_unpin:
+	intel_context_unpin(ce);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 0f5ae4ff3b10..63073ebc6cf1 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -76,9 +76,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce)
 
 int __intel_context_do_pin(struct intel_context *ce);
 
+static inline bool intel_context_pin_if_active(struct intel_context *ce)
+{
+	return atomic_inc_not_zero(&ce->pin_count);
+}
+
 static inline int intel_context_pin(struct intel_context *ce)
 {
-	if (likely(atomic_inc_not_zero(&ce->pin_count)))
+	if (likely(intel_context_pin_if_active(ce)))
 		return 0;
 
 	return __intel_context_do_pin(ce);
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0ac98e39eb75..db184536acef 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -321,16 +321,15 @@ static void print_context_stats(struct seq_file *m,
 
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx), it) {
-			intel_context_lock_pinned(ce);
-			if (intel_context_is_pinned(ce)) {
+			if (intel_context_pin_if_active(ce)) {
 				rcu_read_lock();
 				if (ce->state)
 					per_file_stats(0,
 						       ce->state->obj, &kstats);
 				per_file_stats(0, ce->ring->vma->obj, &kstats);
 				rcu_read_unlock();
+				intel_context_unpin(ce);
 			}
-			intel_context_unlock_pinned(ce);
 		}
 		i915_gem_context_unlock_engines(ctx);
 
@@ -1513,15 +1512,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
 
 		for_each_gem_engine(ce,
 				    i915_gem_context_lock_engines(ctx), it) {
-			intel_context_lock_pinned(ce);
-			if (intel_context_is_pinned(ce)) {
+			if (intel_context_pin_if_active(ce)) {
 				seq_printf(m, "%s: ", ce->engine->name);
 				if (ce->state)
 					describe_obj(m, ce->state->obj);
 				describe_ctx_ring(m, ce->ring);
 				seq_putc(m, '\n');
+				intel_context_unpin(ce);
 			}
-			intel_context_unlock_pinned(ce);
 		}
 		i915_gem_context_unlock_engines(ctx);
 
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 84350c7bc711..c7a7b676f079 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2203,17 +2203,14 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
 		if (ce->engine->class != RENDER_CLASS)
 			continue;
 
-		err = intel_context_lock_pinned(ce);
-		if (err)
-			break;
+		/* Otherwise OA settings will be set upon first use */
+		if (!intel_context_pin_if_active(ce))
+			continue;
 
 		flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
+		err = gen8_modify_context(ce, flex, count);
 
-		/* Otherwise OA settings will be set upon first use */
-		if (intel_context_is_pinned(ce))
-			err = gen8_modify_context(ce, flex, count);
-
-		intel_context_unlock_pinned(ce);
+		intel_context_unpin(ce);
 		if (err)
 			break;
 	}
-- 
2.25.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [Intel-gfx] [PATCH 2/4] drm/i915: Replace vma parking with a clock aging algorithm
  2020-01-08 10:45 [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it Chris Wilson
@ 2020-01-08 10:45 ` Chris Wilson
  2020-01-08 10:45 ` [Intel-gfx] [PATCH 3/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-01-08 10:45 UTC (permalink / raw)
  To: intel-gfx

We cache the user's vma for a brief period of time after they close them
so that if they are immediately reopened we avoid having to unbind and
rebind them. This happens quite frequently for display servers which
only keep a client's frame open for as long as they are copying from it,
and so they open/close every vma about 30 Hz (every other frame for
double buffering).

Our current strategy is to keep the vma alive until the next global idle
point. However this cache should be purely temporal, so switch over from
using the parked notifier to using its own clock based aging algorithm:
if the closed vma is not reused within 2 clock ticks, it is destroyed.

Closes: https://gitlab.freedesktop.org/drm/intel/issues/644
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_gt.c            |  3 -
 drivers/gpu/drm/i915/gt/intel_gt_pm.c         |  1 -
 drivers/gpu/drm/i915/gt/intel_gt_types.h      |  3 -
 drivers/gpu/drm/i915/i915_debugfs.c           |  3 +
 drivers/gpu/drm/i915/i915_drv.c               |  4 +-
 drivers/gpu/drm/i915/i915_drv.h               |  1 +
 drivers/gpu/drm/i915/i915_vma.c               | 83 +++++++++++++++----
 drivers/gpu/drm/i915/i915_vma.h               | 11 ++-
 .../gpu/drm/i915/selftests/mock_gem_device.c  |  2 +
 9 files changed, 86 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index da2b6e2ae692..63d70cf62ddb 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -23,9 +23,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 
 	spin_lock_init(&gt->irq_lock);
 
-	INIT_LIST_HEAD(&gt->closed_vma);
-	spin_lock_init(&gt->closed_lock);
-
 	intel_gt_init_reset(gt);
 	intel_gt_init_requests(gt);
 	intel_gt_init_timelines(gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index d1c2f034296a..3302f676d12b 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -80,7 +80,6 @@ static int __gt_park(struct intel_wakeref *wf)
 
 	intel_gt_park_requests(gt);
 
-	i915_vma_parked(gt);
 	i915_pmu_gt_parked(i915);
 	intel_rps_park(&gt->rps);
 	intel_rc6_park(&gt->rc6);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 96890dd12b5f..4589dea67b8f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -58,9 +58,6 @@ struct intel_gt {
 	struct intel_wakeref wakeref;
 	atomic_t user_wakeref;
 
-	struct list_head closed_vma;
-	spinlock_t closed_lock; /* guards the list of closed_vma */
-
 	struct intel_reset reset;
 
 	/**
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index db184536acef..9e18e10c125f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3587,6 +3587,9 @@ i915_drop_caches_set(void *data, u64 val)
 	if (ret)
 		return ret;
 
+	if (val & DROP_IDLE)
+		i915_vma_clock_flush(&i915->vma_clock);
+
 	fs_reclaim_acquire(GFP_KERNEL);
 	if (val & DROP_BOUND)
 		i915_gem_shrink(i915, LONG_MAX, NULL, I915_SHRINK_BOUND);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f7385abdd74b..9fde3918094f 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -523,8 +523,8 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
 
 	intel_wopcm_init_early(&dev_priv->wopcm);
 
+	i915_vma_clock_init_early(&dev_priv->vma_clock);
 	intel_gt_init_early(&dev_priv->gt, dev_priv);
-
 	i915_gem_init_early(dev_priv);
 
 	/* This must be called before any calls to HAS_PCH_* */
@@ -561,6 +561,8 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
  */
 static void i915_driver_late_release(struct drm_i915_private *dev_priv)
 {
+	i915_vma_clock_flush(&dev_priv->vma_clock);
+
 	intel_irq_fini(dev_priv);
 	intel_power_domains_cleanup(dev_priv);
 	i915_gem_cleanup_early(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 50181113dd2b..d61d73c680b1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1240,6 +1240,7 @@ struct drm_i915_private {
 	struct intel_runtime_pm runtime_pm;
 
 	struct i915_perf perf;
+	struct i915_vma_clock vma_clock;
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct intel_gt gt;
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index cbd783c31adb..89ed287ab892 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -985,8 +985,7 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
 
 void i915_vma_close(struct i915_vma *vma)
 {
-	struct intel_gt *gt = vma->vm->gt;
-	unsigned long flags;
+	struct i915_vma_clock *clock = &vma->vm->i915->vma_clock;
 
 	GEM_BUG_ON(i915_vma_is_closed(vma));
 
@@ -1002,18 +1001,20 @@ void i915_vma_close(struct i915_vma *vma)
 	 * causing us to rebind the VMA once more. This ends up being a lot
 	 * of wasted work for the steady state.
 	 */
-	spin_lock_irqsave(&gt->closed_lock, flags);
-	list_add(&vma->closed_link, &gt->closed_vma);
-	spin_unlock_irqrestore(&gt->closed_lock, flags);
+	spin_lock(&clock->lock);
+	list_add(&vma->closed_link, &clock->age[0]);
+	spin_unlock(&clock->lock);
+
+	schedule_delayed_work(&clock->work, round_jiffies_up_relative(HZ));
 }
 
 static void __i915_vma_remove_closed(struct i915_vma *vma)
 {
-	struct intel_gt *gt = vma->vm->gt;
+	struct i915_vma_clock *clock = &vma->vm->i915->vma_clock;
 
-	spin_lock_irq(&gt->closed_lock);
+	spin_lock(&clock->lock);
 	list_del_init(&vma->closed_link);
-	spin_unlock_irq(&gt->closed_lock);
+	spin_unlock(&clock->lock);
 }
 
 void i915_vma_reopen(struct i915_vma *vma)
@@ -1051,15 +1052,35 @@ void i915_vma_release(struct kref *ref)
 	i915_vma_free(vma);
 }
 
-void i915_vma_parked(struct intel_gt *gt)
+static void i915_vma_clock(struct work_struct *w)
 {
+	struct i915_vma_clock *clock =
+		container_of(w, typeof(*clock), work.work);
 	struct i915_vma *vma, *next;
 
-	spin_lock_irq(&gt->closed_lock);
-	list_for_each_entry_safe(vma, next, &gt->closed_vma, closed_link) {
+	/*
+	 * A very simple clock aging algorithm: we keep the user's closed
+	 * vma alive for a couple of timer ticks before destroying them.
+	 * This serves as a short-lived cache so that frequently reused VMA
+	 * are kept alive between frames and we skip having to rebind them.
+	 *
+	 * When closed, we insert the vma into age[0]. Upon completion of
+	 * a timer tick, it is moved to age[1]. At the start of each timer
+	 * tick, we destroy all the old vma that were accumulated into age[1]
+	 * and have not been reused. All destroyed vma have therefore been
+	 * unused for more than 1 tick (at least a second), and at most 2
+	 * ticks (we expect the average to be 1.5 ticks).
+	 */
+
+	spin_lock(&clock->lock);
+
+	list_for_each_entry_safe(vma, next, &clock->age[1], closed_link) {
 		struct drm_i915_gem_object *obj = vma->obj;
 		struct i915_address_space *vm = vma->vm;
 
+		if (i915_vma_is_active(vma))
+			continue;
+
 		/* XXX All to avoid keeping a reference on i915_vma itself */
 
 		if (!kref_get_unless_zero(&obj->base.refcount))
@@ -1072,7 +1093,7 @@ void i915_vma_parked(struct intel_gt *gt)
 			obj = NULL;
 		}
 
-		spin_unlock_irq(&gt->closed_lock);
+		spin_unlock(&clock->lock);
 
 		if (obj) {
 			__i915_vma_put(vma);
@@ -1082,11 +1103,26 @@ void i915_vma_parked(struct intel_gt *gt)
 		i915_vm_close(vm);
 
 		/* Restart after dropping lock */
-		spin_lock_irq(&gt->closed_lock);
-		next = list_first_entry(&gt->closed_vma,
+		spin_lock(&clock->lock);
+		next = list_first_entry(&clock->age[1],
 					typeof(*next), closed_link);
 	}
-	spin_unlock_irq(&gt->closed_lock);
+	list_splice_tail_init(&clock->age[0], &clock->age[1]);
+
+	if (!list_empty(&clock->age[1])) {
+		/* Keep active VMA around until second tick after idling */
+		list_for_each_entry_safe(vma, next,
+					 &clock->age[1], closed_link) {
+			if (i915_vma_is_active(vma))
+				list_move_tail(&vma->closed_link,
+					       &clock->age[0]);
+		}
+
+		schedule_delayed_work(&clock->work,
+				      round_jiffies_up_relative(HZ));
+	}
+
+	spin_unlock(&clock->lock);
 }
 
 static void __i915_vma_iounmap(struct i915_vma *vma)
@@ -1277,6 +1313,23 @@ void i915_vma_make_purgeable(struct i915_vma *vma)
 	i915_gem_object_make_purgeable(vma->obj);
 }
 
+void i915_vma_clock_init_early(struct i915_vma_clock *clock)
+{
+	spin_lock_init(&clock->lock);
+	INIT_LIST_HEAD(&clock->age[0]);
+	INIT_LIST_HEAD(&clock->age[1]);
+
+	INIT_DELAYED_WORK(&clock->work, i915_vma_clock);
+}
+
+void i915_vma_clock_flush(struct i915_vma_clock *clock)
+{
+	do {
+		if (cancel_delayed_work_sync(&clock->work))
+			i915_vma_clock(&clock->work.work);
+	} while (delayed_work_pending(&clock->work));
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_vma.c"
 #endif
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 02b31a62951e..7b8a18b72d81 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -351,8 +351,6 @@ i915_vma_unpin_fence(struct i915_vma *vma)
 		__i915_vma_unpin_fence(vma);
 }
 
-void i915_vma_parked(struct intel_gt *gt);
-
 #define for_each_until(cond) if (cond) break; else
 
 /**
@@ -381,4 +379,13 @@ static inline int i915_vma_sync(struct i915_vma *vma)
 	return i915_active_wait(&vma->active);
 }
 
+struct i915_vma_clock {
+	spinlock_t lock;
+	struct list_head age[2];
+	struct delayed_work work;
+};
+
+void i915_vma_clock_init_early(struct i915_vma_clock *clock);
+void i915_vma_clock_flush(struct i915_vma_clock *clock);
+
 #endif
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 3b8986983afc..6c27f43155ea 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -57,6 +57,7 @@ static void mock_device_release(struct drm_device *dev)
 
 	mock_device_flush(i915);
 	intel_gt_driver_remove(&i915->gt);
+	i915_vma_clock_flush(&i915->vma_clock);
 
 	i915_gem_driver_release__contexts(i915);
 
@@ -164,6 +165,7 @@ struct drm_i915_private *mock_gem_device(void)
 	mock_uncore_init(&i915->uncore, i915);
 
 	i915_gem_init__mm(i915);
+	i915_vma_clock_init_early(&i915->vma_clock);
 	intel_gt_init_early(&i915->gt, i915);
 	atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */
 	i915->gt.awake = -ENODEV;
-- 
2.25.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [Intel-gfx] [PATCH 3/4] drm/i915/gt: Always reset the timeslice after a context switch
  2020-01-08 10:45 [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it Chris Wilson
  2020-01-08 10:45 ` [Intel-gfx] [PATCH 2/4] drm/i915: Replace vma parking with a clock aging algorithm Chris Wilson
@ 2020-01-08 10:45 ` Chris Wilson
  2020-01-08 10:45 ` [Intel-gfx] [PATCH 4/4] drm/i915/gt: Yield the timeslice if waiting on a semaphore Chris Wilson
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-01-08 10:45 UTC (permalink / raw)
  To: intel-gfx

Currently, we reset the timer after a preemption event. This has the
side-effect that the timeslice runs into the second context after the
first is completed. To be more fair, we want to reset the clock after
promotion as well.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index bd74b76c6403..f825df1ba638 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -2289,7 +2289,6 @@ static void process_csb(struct intel_engine_cs *engine)
 
 			/* Point active to the new ELSP; prevent overwriting */
 			WRITE_ONCE(execlists->active, execlists->pending);
-			set_timeslice(engine);
 
 			if (!inject_preempt_hang(execlists))
 				ring_set_paused(engine, 0);
@@ -2329,6 +2328,9 @@ static void process_csb(struct intel_engine_cs *engine)
 		}
 	} while (head != tail);
 
+	if (execlists_active(execlists))
+		set_timeslice(engine);
+
 	execlists->csb_head = head;
 
 	/*
-- 
2.25.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [Intel-gfx] [PATCH 4/4] drm/i915/gt: Yield the timeslice if waiting on a semaphore
  2020-01-08 10:45 [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it Chris Wilson
  2020-01-08 10:45 ` [Intel-gfx] [PATCH 2/4] drm/i915: Replace vma parking with a clock aging algorithm Chris Wilson
  2020-01-08 10:45 ` [Intel-gfx] [PATCH 3/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
@ 2020-01-08 10:45 ` Chris Wilson
  2020-01-08 10:59 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915: Pin the context as we work on it Patchwork
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-01-08 10:45 UTC (permalink / raw)
  To: intel-gfx

If we find ourselves waiting on a MI_SEMAPHORE_WAIT, either within the
user batch or in our own preamble, the engine raises a
GT_WAIT_ON_SEMAPHORE interrupt. We can unmask that interrupt and so
respond to a semaphore wait by yielding the timeslice, if we have
another context to yield to!

The only real complication is that the interrupt is only generated for
the start of the semaphore wait, and is asynchronous to our
process_csb() -- that is, we may not have registered the timeslice before
we see the interrupt. To ensure we don't miss a potential semaphore
blocking forward progress (e.g. selftests/live_timeslice_preempt) we mark
the interrupt and apply it to the next timeslice regardless of whether it
was active at the time.

v2: We use semaphores in preempt-to-busy, within the timeslicing
implementation itself! Ergo, when we do insert a preemption due to an
expired timeslice, the new context may start with the missed semaphore
flagged by the retired context and be yielded, ad infinitum. To avoid
this, read the context id at the time of the semaphore interrupt and
only yield if that context is still active.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_engine_types.h |  9 ++++++
 drivers/gpu/drm/i915/gt/intel_gt_irq.c       | 32 +++++++++++---------
 drivers/gpu/drm/i915/gt/intel_lrc.c          | 31 ++++++++++++++++---
 drivers/gpu/drm/i915/i915_reg.h              |  5 +++
 4 files changed, 59 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 00287515e7af..d146d2fbd42a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -156,6 +156,15 @@ struct intel_engine_execlists {
 	 */
 	struct i915_priolist default_priolist;
 
+	/**
+	 * @yield: CCID at the time of the last semaphore-wait interrupt.
+	 *
+	 * Instead of leaving a semaphore busy-spinning on an engine, we would
+	 * like to switch to another ready context, i.e. yielding the semaphore
+	 * timeslice.
+	 */
+	u32 yield;
+
 	/**
 	 * @no_priolist: priority lists disabled
 	 */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f796bdf1ed30..6ae64a224b02 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -24,6 +24,13 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
 {
 	bool tasklet = false;
 
+	if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) {
+		WRITE_ONCE(engine->execlists.yield,
+			   ENGINE_READ_FW(engine, EXECLIST_CCID));
+		if (del_timer(&engine->execlists.timer))
+			tasklet = true;
+	}
+
 	if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
 		tasklet = true;
 
@@ -210,7 +217,10 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
 
 void gen11_gt_irq_postinstall(struct intel_gt *gt)
 {
-	const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+	const u32 irqs =
+		GT_RENDER_USER_INTERRUPT |
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	struct intel_uncore *uncore = gt->uncore;
 	const u32 dmask = irqs << 16 | irqs;
 	const u32 smask = irqs << 16;
@@ -357,21 +367,15 @@ void gen8_gt_irq_postinstall(struct intel_gt *gt)
 	struct intel_uncore *uncore = gt->uncore;
 
 	/* These are interrupts we'll toggle with the ring mask register */
+	const u32 irqs =
+		GT_RENDER_USER_INTERRUPT |
+		GT_CONTEXT_SWITCH_INTERRUPT |
+		GT_WAIT_SEMAPHORE_INTERRUPT;
 	u32 gt_interrupts[] = {
-		(GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
-		 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-
-		(GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
-		 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-
+		irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
+		irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
 		0,
-
-		(GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
-		 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+		irqs << GEN8_VECS_IRQ_SHIFT,
 	};
 
 	gt->pm_ier = 0x0;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index f825df1ba638..7cf2aade6b3b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1662,7 +1662,8 @@ static void defer_active(struct intel_engine_cs *engine)
 }
 
 static bool
-need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
+need_timeslice(const struct intel_engine_cs *engine,
+	       const struct i915_request *rq)
 {
 	int hint;
 
@@ -1678,6 +1679,27 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 	return hint >= effective_prio(rq);
 }
 
+static bool
+timeslice_expired(const struct intel_engine_cs *engine,
+		  const struct i915_request *rq)
+{
+	const struct intel_engine_execlists *el = &engine->execlists;
+
+	return (timer_expired(&el->timer) ||
+		/*
+		 * Once bitten, forever smitten!
+		 *
+		 * If the active context ever busy-waited on a semaphore,
+		 * it will be treated as a hog until the end of its timeslice.
+		 * The HW only sends an interrupt on the first miss, and we
+		 * do know if that semaphore has been signaled, or even if it
+		 * is now stuck on another semaphore. Play safe, yield if it
+		 * might be stuck -- it will be given a fresh timeslice in
+		 * the near future.
+		 */
+		upper_32_bits(rq->context->lrc_desc) == READ_ONCE(el->yield));
+}
+
 static int
 switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq)
 {
@@ -1693,8 +1715,7 @@ timeslice(const struct intel_engine_cs *engine)
 	return READ_ONCE(engine->props.timeslice_duration_ms);
 }
 
-static unsigned long
-active_timeslice(const struct intel_engine_cs *engine)
+static unsigned long active_timeslice(const struct intel_engine_cs *engine)
 {
 	const struct i915_request *rq = *engine->execlists.active;
 
@@ -1845,7 +1866,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
 			last = NULL;
 		} else if (need_timeslice(engine, last) &&
-			   timer_expired(&engine->execlists.timer)) {
+			   timeslice_expired(engine, last)) {
 			ENGINE_TRACE(engine,
 				     "expired last=%llx:%lld, prio=%d, hint=%d\n",
 				     last->fence.context,
@@ -2111,6 +2132,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 		}
 		clear_ports(port + 1, last_port - port);
 
+		WRITE_ONCE(execlists->yield, -1);
 		execlists_submit_ports(engine);
 		set_preempt_timeout(engine);
 	} else {
@@ -3972,6 +3994,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 
 	engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
 	engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+	engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift;
 }
 
 static void rcs_submission_override(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d90d7bf8d328..c0308682756b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3105,6 +3105,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define GT_BSD_CS_ERROR_INTERRUPT		(1 << 15)
 #define GT_BSD_USER_INTERRUPT			(1 << 12)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1	(1 << 11) /* hsw+; rsvd on snb, ivb, vlv */
+#define GT_WAIT_SEMAPHORE_INTERRUPT		(1 <<  11) /* bdw+ */
 #define GT_CONTEXT_SWITCH_INTERRUPT		(1 <<  8)
 #define GT_RENDER_L3_PARITY_ERROR_INTERRUPT	(1 <<  5) /* !snb */
 #define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT	(1 <<  4)
@@ -4056,6 +4057,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   CCID_EN			BIT(0)
 #define   CCID_EXTENDED_STATE_RESTORE	BIT(2)
 #define   CCID_EXTENDED_STATE_SAVE	BIT(3)
+
+#define EXECLIST_STATUS(base)	_MMIO((base) + 0x234)
+#define EXECLIST_CCID(base)	_MMIO((base) + 0x238)
+
 /*
  * Notes on SNB/IVB/VLV context size:
  * - Power context is saved elsewhere (LLC or stolen)
-- 
2.25.0.rc1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915: Pin the context as we work on it
  2020-01-08 10:45 [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it Chris Wilson
                   ` (2 preceding siblings ...)
  2020-01-08 10:45 ` [Intel-gfx] [PATCH 4/4] drm/i915/gt: Yield the timeslice if waiting on a semaphore Chris Wilson
@ 2020-01-08 10:59 ` Patchwork
  2020-01-08 11:29 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-01-08 10:59 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915: Pin the context as we work on it
URL   : https://patchwork.freedesktop.org/series/71755/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
5472c92edbce drm/i915: Pin the context as we work on it
0ceabcc14ba7 drm/i915: Replace vma parking with a clock aging algorithm
-:272: CHECK:UNCOMMENTED_DEFINITION: spinlock_t definition without comment
#272: FILE: drivers/gpu/drm/i915/i915_vma.h:383:
+	spinlock_t lock;

total: 0 errors, 0 warnings, 1 checks, 226 lines checked
1a5b2bec4372 drm/i915/gt: Always reset the timeslice after a context switch
c85c8ced9310 drm/i915/gt: Yield the timeslice if waiting on a semaphore

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/4] drm/i915: Pin the context as we work on it
  2020-01-08 10:45 [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it Chris Wilson
                   ` (3 preceding siblings ...)
  2020-01-08 10:59 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915: Pin the context as we work on it Patchwork
@ 2020-01-08 11:29 ` Patchwork
  2020-01-09  6:11 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
  2020-01-09  8:21 ` [Intel-gfx] [PATCH 1/4] " Mika Kuoppala
  6 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-01-08 11:29 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915: Pin the context as we work on it
URL   : https://patchwork.freedesktop.org/series/71755/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7702 -> Patchwork_16022
========================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/index.html

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_16022:

### IGT changes ###

#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@i915_selftest@live_execlists:
    - {fi-tgl-u}:         [PASS][1] -> [DMESG-FAIL][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-tgl-u/igt@i915_selftest@live_execlists.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-tgl-u/igt@i915_selftest@live_execlists.html

  
Known issues
------------

  Here are the changes found in Patchwork_16022 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@i915_module_load@reload-with-fault-injection:
    - fi-cfl-guc:         [PASS][3] -> [INCOMPLETE][4] ([i915#505] / [i915#671])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-cfl-guc/igt@i915_module_load@reload-with-fault-injection.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-cfl-guc/igt@i915_module_load@reload-with-fault-injection.html
    - fi-glk-dsi:         [PASS][5] -> [INCOMPLETE][6] ([i915#58] / [k.org#198133])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-glk-dsi/igt@i915_module_load@reload-with-fault-injection.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-glk-dsi/igt@i915_module_load@reload-with-fault-injection.html

  * igt@i915_selftest@live_blt:
    - fi-bsw-nick:        [PASS][7] -> [DMESG-FAIL][8] ([i915#723])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-bsw-nick/igt@i915_selftest@live_blt.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-bsw-nick/igt@i915_selftest@live_blt.html

  * igt@i915_selftest@live_gem_contexts:
    - fi-cfl-8700k:       [PASS][9] -> [INCOMPLETE][10] ([i915#424])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html

  * igt@i915_selftest@live_gt_lrc:
    - fi-skl-6600u:       [PASS][11] -> [DMESG-FAIL][12] ([i915#889]) +7 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-skl-6600u/igt@i915_selftest@live_gt_lrc.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-skl-6600u/igt@i915_selftest@live_gt_lrc.html

  * igt@i915_selftest@live_late_gt_pm:
    - fi-skl-6600u:       [PASS][13] -> [DMESG-WARN][14] ([i915#889]) +23 similar issues
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-skl-6600u/igt@i915_selftest@live_late_gt_pm.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-skl-6600u/igt@i915_selftest@live_late_gt_pm.html

  
#### Possible fixes ####

  * igt@i915_module_load@reload-with-fault-injection:
    - fi-skl-6770hq:      [INCOMPLETE][15] ([i915#671]) -> [PASS][16]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-skl-6770hq/igt@i915_module_load@reload-with-fault-injection.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-skl-6770hq/igt@i915_module_load@reload-with-fault-injection.html
    - fi-kbl-x1275:       [INCOMPLETE][17] ([i915#879]) -> [PASS][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-kbl-x1275/igt@i915_module_load@reload-with-fault-injection.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-kbl-x1275/igt@i915_module_load@reload-with-fault-injection.html

  * igt@i915_selftest@live_blt:
    - fi-ivb-3770:        [DMESG-FAIL][19] ([i915#725]) -> [PASS][20]
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-ivb-3770/igt@i915_selftest@live_blt.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-ivb-3770/igt@i915_selftest@live_blt.html

  
#### Warnings ####

  * igt@i915_selftest@live_blt:
    - fi-hsw-4770:        [DMESG-FAIL][21] ([i915#725]) -> [DMESG-FAIL][22] ([i915#553] / [i915#725])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/fi-hsw-4770/igt@i915_selftest@live_blt.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/fi-hsw-4770/igt@i915_selftest@live_blt.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [i915#424]: https://gitlab.freedesktop.org/drm/intel/issues/424
  [i915#505]: https://gitlab.freedesktop.org/drm/intel/issues/505
  [i915#553]: https://gitlab.freedesktop.org/drm/intel/issues/553
  [i915#58]: https://gitlab.freedesktop.org/drm/intel/issues/58
  [i915#671]: https://gitlab.freedesktop.org/drm/intel/issues/671
  [i915#723]: https://gitlab.freedesktop.org/drm/intel/issues/723
  [i915#725]: https://gitlab.freedesktop.org/drm/intel/issues/725
  [i915#879]: https://gitlab.freedesktop.org/drm/intel/issues/879
  [i915#889]: https://gitlab.freedesktop.org/drm/intel/issues/889
  [k.org#198133]: https://bugzilla.kernel.org/show_bug.cgi?id=198133


Participating hosts (51 -> 44)
------------------------------

  Additional (1): fi-bsw-n3050 
  Missing    (8): fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-kbl-7560u fi-byt-n2820 fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7702 -> Patchwork_16022

  CI-20190529: 20190529
  CI_DRM_7702: 696cc0448fced2ed45ab5e9e0a5c913bfe263592 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5358: c6fc013f414b806175dc4143c58ab445e5235ea5 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_16022: c85c8ced93107178100de54a66ffeef6bb431736 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

c85c8ced9310 drm/i915/gt: Yield the timeslice if waiting on a semaphore
1a5b2bec4372 drm/i915/gt: Always reset the timeslice after a context switch
0ceabcc14ba7 drm/i915: Replace vma parking with a clock aging algorithm
5472c92edbce drm/i915: Pin the context as we work on it

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* [Intel-gfx] ✗ Fi.CI.IGT: failure for series starting with [1/4] drm/i915: Pin the context as we work on it
  2020-01-08 10:45 [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it Chris Wilson
                   ` (4 preceding siblings ...)
  2020-01-08 11:29 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
@ 2020-01-09  6:11 ` Patchwork
  2020-01-09  8:21 ` [Intel-gfx] [PATCH 1/4] " Mika Kuoppala
  6 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-01-09  6:11 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/4] drm/i915: Pin the context as we work on it
URL   : https://patchwork.freedesktop.org/series/71755/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7702_full -> Patchwork_16022_full
==================================================================

Summary
-------

  **FAILURE**

  Serious unknown changes introduced by Patchwork_16022_full need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_16022_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_16022_full:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_selftest@live_execlists:
    - shard-tglb:         [PASS][1] -> [DMESG-FAIL][2] +1 similar issue
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb1/igt@i915_selftest@live_execlists.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb2/igt@i915_selftest@live_execlists.html

  * igt@perf@enable-disable:
    - shard-iclb:         [PASS][3] -> [DMESG-WARN][4] +2 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb6/igt@perf@enable-disable.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb5/igt@perf@enable-disable.html
    - shard-kbl:          [PASS][5] -> [DMESG-WARN][6] +2 similar issues
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-kbl2/igt@perf@enable-disable.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-kbl2/igt@perf@enable-disable.html
    - shard-tglb:         [PASS][7] -> [DMESG-WARN][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb5/igt@perf@enable-disable.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb7/igt@perf@enable-disable.html

  * igt@perf@gen8-unprivileged-single-ctx-counters:
    - shard-skl:          [PASS][9] -> [DMESG-WARN][10] +1 similar issue
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl1/igt@perf@gen8-unprivileged-single-ctx-counters.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl8/igt@perf@gen8-unprivileged-single-ctx-counters.html
    - shard-apl:          [PASS][11] -> [DMESG-WARN][12] +2 similar issues
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-apl8/igt@perf@gen8-unprivileged-single-ctx-counters.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-apl3/igt@perf@gen8-unprivileged-single-ctx-counters.html
    - shard-glk:          [PASS][13] -> [DMESG-WARN][14] +1 similar issue
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-glk1/igt@perf@gen8-unprivileged-single-ctx-counters.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-glk3/igt@perf@gen8-unprivileged-single-ctx-counters.html

  * igt@perf@oa-exponents:
    - shard-glk:          ([PASS][15], [PASS][16]) -> [DMESG-WARN][17]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-glk2/igt@perf@oa-exponents.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-glk7/igt@perf@oa-exponents.html
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-glk2/igt@perf@oa-exponents.html
    - shard-skl:          ([PASS][18], [PASS][19]) -> [DMESG-WARN][20]
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl3/igt@perf@oa-exponents.html
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl7/igt@perf@oa-exponents.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl6/igt@perf@oa-exponents.html

  * igt@runner@aborted:
    - shard-kbl:          NOTRUN -> ([FAIL][21], [FAIL][22], [FAIL][23]) ([fdo#111732])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-kbl2/igt@runner@aborted.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-kbl6/igt@runner@aborted.html
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-kbl7/igt@runner@aborted.html
    - shard-apl:          NOTRUN -> ([FAIL][24], [FAIL][25], [FAIL][26]) ([fdo#111732])
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-apl3/igt@runner@aborted.html
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-apl7/igt@runner@aborted.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-apl3/igt@runner@aborted.html

  
Known issues
------------

  Here are the changes found in Patchwork_16022_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_busy@busy-vcs1:
    - shard-iclb:         [PASS][27] -> [SKIP][28] ([fdo#112080]) +13 similar issues
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb4/igt@gem_busy@busy-vcs1.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb8/igt@gem_busy@busy-vcs1.html

  * igt@gem_ctx_isolation@vcs1-dirty-create:
    - shard-iclb:         [PASS][29] -> [SKIP][30] ([fdo#109276] / [fdo#112080]) +1 similar issue
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb1/igt@gem_ctx_isolation@vcs1-dirty-create.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb6/igt@gem_ctx_isolation@vcs1-dirty-create.html

  * igt@gem_eio@reset-stress:
    - shard-snb:          [PASS][31] -> [FAIL][32] ([i915#232])
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-snb6/igt@gem_eio@reset-stress.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-snb5/igt@gem_eio@reset-stress.html

  * igt@gem_exec_balancer@smoke:
    - shard-iclb:         [PASS][33] -> [SKIP][34] ([fdo#110854])
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb2/igt@gem_exec_balancer@smoke.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb6/igt@gem_exec_balancer@smoke.html

  * igt@gem_exec_schedule@preempt-other-chain-bsd:
    - shard-iclb:         [PASS][35] -> [SKIP][36] ([fdo#112146]) +4 similar issues
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb7/igt@gem_exec_schedule@preempt-other-chain-bsd.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb2/igt@gem_exec_schedule@preempt-other-chain-bsd.html

  * igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd1:
    - shard-tglb:         [PASS][37] -> [INCOMPLETE][38] ([fdo#111677] / [i915#472])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb9/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd1.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb6/igt@gem_exec_schedule@preempt-queue-contexts-chain-bsd1.html

  * igt@gem_exec_suspend@basic-s3:
    - shard-iclb:         [PASS][39] -> [INCOMPLETE][40] ([i915#140]) +1 similar issue
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb1/igt@gem_exec_suspend@basic-s3.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb5/igt@gem_exec_suspend@basic-s3.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-kbl:          [PASS][41] -> [FAIL][42] ([i915#520])
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-kbl2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-kbl2/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gen9_exec_parse@allowed-all:
    - shard-glk:          [PASS][43] -> [DMESG-WARN][44] ([i915#716])
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-glk4/igt@gen9_exec_parse@allowed-all.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-glk6/igt@gen9_exec_parse@allowed-all.html

  * igt@i915_pm_backlight@fade_with_suspend:
    - shard-skl:          ([PASS][45], [PASS][46], [PASS][47]) -> [INCOMPLETE][48] ([i915#69])
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl4/igt@i915_pm_backlight@fade_with_suspend.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl10/igt@i915_pm_backlight@fade_with_suspend.html
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl9/igt@i915_pm_backlight@fade_with_suspend.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl2/igt@i915_pm_backlight@fade_with_suspend.html

  * igt@i915_pm_rps@reset:
    - shard-iclb:         [PASS][49] -> [FAIL][50] ([i915#413])
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb4/igt@i915_pm_rps@reset.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb4/igt@i915_pm_rps@reset.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-skl:          ([PASS][51], [PASS][52]) -> [FAIL][53] ([i915#79])
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl10/igt@kms_flip@flip-vs-expired-vblank.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl9/igt@kms_flip@flip-vs-expired-vblank.html
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl2/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_flip@flip-vs-suspend-interruptible:
    - shard-apl:          [PASS][54] -> [DMESG-WARN][55] ([i915#180]) +1 similar issue
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-apl2/igt@kms_flip@flip-vs-suspend-interruptible.html
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-apl4/igt@kms_flip@flip-vs-suspend-interruptible.html
    - shard-snb:          [PASS][56] -> [INCOMPLETE][57] ([i915#82])
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-snb6/igt@kms_flip@flip-vs-suspend-interruptible.html
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-snb1/igt@kms_flip@flip-vs-suspend-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-pwrite:
    - shard-tglb:         [PASS][58] -> [FAIL][59] ([i915#49])
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb6/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-pwrite.html
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb7/igt@kms_frontbuffer_tracking@fbc-1p-offscren-pri-indfb-draw-pwrite.html

  * igt@kms_frontbuffer_tracking@fbc-suspend:
    - shard-kbl:          [PASS][60] -> [DMESG-WARN][61] ([i915#180]) +4 similar issues
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-kbl7/igt@kms_frontbuffer_tracking@fbc-suspend.html
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-kbl4/igt@kms_frontbuffer_tracking@fbc-suspend.html

  * igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min:
    - shard-skl:          [PASS][62] -> [FAIL][63] ([fdo#108145])
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl10/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl10/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html

  * igt@kms_psr2_su@page_flip:
    - shard-iclb:         [PASS][64] -> [SKIP][65] ([fdo#109642] / [fdo#111068])
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb2/igt@kms_psr2_su@page_flip.html
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb6/igt@kms_psr2_su@page_flip.html

  * igt@kms_psr@psr2_sprite_mmap_gtt:
    - shard-iclb:         [PASS][66] -> [SKIP][67] ([fdo#109441]) +3 similar issues
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb2/igt@kms_psr@psr2_sprite_mmap_gtt.html
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb3/igt@kms_psr@psr2_sprite_mmap_gtt.html

  * igt@kms_setmode@basic:
    - shard-skl:          [PASS][68] -> [FAIL][69] ([i915#31])
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl3/igt@kms_setmode@basic.html
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl6/igt@kms_setmode@basic.html

  * igt@prime_vgem@fence-wait-bsd2:
    - shard-iclb:         [PASS][70] -> [SKIP][71] ([fdo#109276]) +24 similar issues
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb1/igt@prime_vgem@fence-wait-bsd2.html
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb6/igt@prime_vgem@fence-wait-bsd2.html

  
#### Possible fixes ####

  * igt@gem_ctx_persistence@bcs0-mixed-process:
    - shard-iclb:         [FAIL][72] ([i915#679]) -> [PASS][73]
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb8/igt@gem_ctx_persistence@bcs0-mixed-process.html
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb3/igt@gem_ctx_persistence@bcs0-mixed-process.html

  * igt@gem_ctx_persistence@vcs1-cleanup:
    - shard-iclb:         [SKIP][74] ([fdo#109276] / [fdo#112080]) -> ([PASS][75], [PASS][76])
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb3/igt@gem_ctx_persistence@vcs1-cleanup.html
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb1/igt@gem_ctx_persistence@vcs1-cleanup.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb4/igt@gem_ctx_persistence@vcs1-cleanup.html

  * igt@gem_ctx_persistence@vcs1-queued:
    - shard-iclb:         [SKIP][77] ([fdo#109276] / [fdo#112080]) -> [PASS][78]
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb8/igt@gem_ctx_persistence@vcs1-queued.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb1/igt@gem_ctx_persistence@vcs1-queued.html

  * igt@gem_ctx_shared@q-out-order-bsd2:
    - shard-iclb:         ([PASS][79], [SKIP][80], [SKIP][81]) ([fdo#109276]) -> [PASS][82] +1 similar issue
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb4/igt@gem_ctx_shared@q-out-order-bsd2.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb3/igt@gem_ctx_shared@q-out-order-bsd2.html
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb6/igt@gem_ctx_shared@q-out-order-bsd2.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb2/igt@gem_ctx_shared@q-out-order-bsd2.html

  * igt@gem_ctx_shared@q-smoketest-bsd:
    - shard-tglb:         [INCOMPLETE][83] ([i915#461]) -> [PASS][84]
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb9/igt@gem_ctx_shared@q-smoketest-bsd.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb1/igt@gem_ctx_shared@q-smoketest-bsd.html

  * igt@gem_eio@reset-stress:
    - shard-tglb:         [INCOMPLETE][85] ([i915#470]) -> [PASS][86]
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb8/igt@gem_eio@reset-stress.html
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb8/igt@gem_eio@reset-stress.html

  * igt@gem_eio@unwedge-stress:
    - shard-apl:          [INCOMPLETE][87] ([fdo#103927]) -> [PASS][88] +5 similar issues
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-apl8/igt@gem_eio@unwedge-stress.html
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-apl6/igt@gem_eio@unwedge-stress.html

  * igt@gem_exec_await@wide-all:
    - shard-tglb:         [INCOMPLETE][89] ([fdo#111736] / [i915#472]) -> [PASS][90]
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb6/igt@gem_exec_await@wide-all.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb2/igt@gem_exec_await@wide-all.html

  * igt@gem_exec_create@madvise:
    - shard-tglb:         [INCOMPLETE][91] ([i915#435] / [i915#472]) -> [PASS][92] +2 similar issues
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb1/igt@gem_exec_create@madvise.html
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb5/igt@gem_exec_create@madvise.html

  * igt@gem_exec_parallel@basic:
    - shard-tglb:         [INCOMPLETE][93] ([i915#472] / [i915#476]) -> [PASS][94]
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb3/igt@gem_exec_parallel@basic.html
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb2/igt@gem_exec_parallel@basic.html

  * igt@gem_exec_parallel@contexts:
    - shard-tglb:         [INCOMPLETE][95] ([i915#470] / [i915#472]) -> [PASS][96]
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb8/igt@gem_exec_parallel@contexts.html
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb5/igt@gem_exec_parallel@contexts.html

  * igt@gem_exec_parallel@vcs1-contexts:
    - shard-iclb:         ([SKIP][97], [SKIP][98]) ([fdo#112080]) -> [PASS][99] +2 similar issues
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb7/igt@gem_exec_parallel@vcs1-contexts.html
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb3/igt@gem_exec_parallel@vcs1-contexts.html
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb2/igt@gem_exec_parallel@vcs1-contexts.html

  * igt@gem_exec_reuse@contexts:
    - shard-skl:          [INCOMPLETE][100] ([i915#659]) -> [PASS][101]
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl5/igt@gem_exec_reuse@contexts.html
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl7/igt@gem_exec_reuse@contexts.html

  * igt@gem_exec_schedule@pi-distinct-iova-bsd:
    - shard-iclb:         [SKIP][102] ([i915#677]) -> [PASS][103] +1 similar issue
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb1/igt@gem_exec_schedule@pi-distinct-iova-bsd.html
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb8/igt@gem_exec_schedule@pi-distinct-iova-bsd.html

  * igt@gem_exec_schedule@pi-distinct-iova-bsd1:
    - shard-iclb:         ([SKIP][104], [SKIP][105]) ([fdo#109276]) -> [PASS][106] +1 similar issue
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb3/igt@gem_exec_schedule@pi-distinct-iova-bsd1.html
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb7/igt@gem_exec_schedule@pi-distinct-iova-bsd1.html
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb2/igt@gem_exec_schedule@pi-distinct-iova-bsd1.html

  * igt@gem_exec_schedule@preempt-queue-contexts-chain-render:
    - shard-tglb:         [INCOMPLETE][107] ([fdo#111677] / [i915#472]) -> [PASS][108]
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb8/igt@gem_exec_schedule@preempt-queue-contexts-chain-render.html
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb3/igt@gem_exec_schedule@preempt-queue-contexts-chain-render.html

  * igt@gem_exec_schedule@preemptive-hang-bsd:
    - shard-iclb:         [SKIP][109] ([fdo#112146]) -> [PASS][110] +5 similar issues
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb2/igt@gem_exec_schedule@preemptive-hang-bsd.html
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb6/igt@gem_exec_schedule@preemptive-hang-bsd.html

  * igt@gem_exec_schedule@preemptive-hang-bsd1:
    - shard-iclb:         [SKIP][111] ([fdo#109276]) -> ([PASS][112], [PASS][113]) +1 similar issue
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb3/igt@gem_exec_schedule@preemptive-hang-bsd1.html
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd1.html
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb4/igt@gem_exec_schedule@preemptive-hang-bsd1.html

  * igt@gem_exec_suspend@basic-s3-devices:
    - shard-tglb:         [INCOMPLETE][114] ([i915#460] / [i915#472]) -> [PASS][115]
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb3/igt@gem_exec_suspend@basic-s3-devices.html
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb8/igt@gem_exec_suspend@basic-s3-devices.html

  * igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing:
    - shard-tglb:         [TIMEOUT][116] ([fdo#112126] / [i915#530]) -> [PASS][117]
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb7/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb9/igt@gem_persistent_relocs@forked-interruptible-faulting-reloc-thrashing.html

  * igt@gem_persistent_relocs@forked-thrashing:
    - shard-snb:          [FAIL][118] ([i915#520]) -> [PASS][119]
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-snb5/igt@gem_persistent_relocs@forked-thrashing.html
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-snb2/igt@gem_persistent_relocs@forked-thrashing.html

  * igt@gem_ppgtt@flink-and-close-vma-leak:
    - shard-glk:          [FAIL][120] ([i915#644]) -> [PASS][121]
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-glk2/igt@gem_ppgtt@flink-and-close-vma-leak.html
   [121]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-glk7/igt@gem_ppgtt@flink-and-close-vma-leak.html

  * igt@gem_wait@busy-vcs1:
    - shard-iclb:         ([PASS][122], [SKIP][123], [SKIP][124]) ([fdo#112080]) -> [PASS][125] +1 similar issue
   [122]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb4/igt@gem_wait@busy-vcs1.html
   [123]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb6/igt@gem_wait@busy-vcs1.html
   [124]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb3/igt@gem_wait@busy-vcs1.html
   [125]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb2/igt@gem_wait@busy-vcs1.html

  * igt@i915_pm_rpm@debugfs-forcewake-user:
    - shard-iclb:         [INCOMPLETE][126] ([i915#140] / [i915#189]) -> [PASS][127]
   [126]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb8/igt@i915_pm_rpm@debugfs-forcewake-user.html
   [127]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb1/igt@i915_pm_rpm@debugfs-forcewake-user.html

  * igt@i915_pm_rps@waitboost:
    - shard-iclb:         [FAIL][128] ([i915#413]) -> [PASS][129]
   [128]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb3/igt@i915_pm_rps@waitboost.html
   [129]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb1/igt@i915_pm_rps@waitboost.html

  * igt@kms_busy@basic-modeset-pipe-a:
    - shard-apl:          ([PASS][130], [INCOMPLETE][131], [PASS][132]) ([fdo#103927]) -> [PASS][133]
   [130]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-apl4/igt@kms_busy@basic-modeset-pipe-a.html
   [131]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-apl2/igt@kms_busy@basic-modeset-pipe-a.html
   [132]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-apl6/igt@kms_busy@basic-modeset-pipe-a.html
   [133]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-apl6/igt@kms_busy@basic-modeset-pipe-a.html

  * igt@kms_cursor_crc@pipe-b-cursor-suspend:
    - shard-kbl:          [DMESG-WARN][134] ([i915#180]) -> [PASS][135] +1 similar issue
   [134]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-kbl4/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
   [135]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-kbl7/igt@kms_cursor_crc@pipe-b-cursor-suspend.html

  * igt@kms_cursor_crc@pipe-c-cursor-128x128-onscreen:
    - shard-skl:          [FAIL][136] ([i915#54]) -> [PASS][137]
   [136]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl3/igt@kms_cursor_crc@pipe-c-cursor-128x128-onscreen.html
   [137]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl6/igt@kms_cursor_crc@pipe-c-cursor-128x128-onscreen.html

  * igt@kms_cursor_legacy@all-pipes-single-bo:
    - shard-skl:          [INCOMPLETE][138] -> [PASS][139] +3 similar issues
   [138]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl5/igt@kms_cursor_legacy@all-pipes-single-bo.html
   [139]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl7/igt@kms_cursor_legacy@all-pipes-single-bo.html

  * igt@kms_cursor_legacy@pipe-a-forked-bo:
    - shard-hsw:          [INCOMPLETE][140] ([i915#61]) -> [PASS][141]
   [140]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-hsw1/igt@kms_cursor_legacy@pipe-a-forked-bo.html
   [141]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-hsw5/igt@kms_cursor_legacy@pipe-a-forked-bo.html

  * igt@kms_flip@flip-vs-expired-vblank:
    - shard-glk:          [FAIL][142] ([i915#79]) -> [PASS][143]
   [142]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-glk9/igt@kms_flip@flip-vs-expired-vblank.html
   [143]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-glk1/igt@kms_flip@flip-vs-expired-vblank.html

  * igt@kms_flip@plain-flip-interruptible:
    - shard-iclb:         [INCOMPLETE][144] ([i915#140]) -> [PASS][145]
   [144]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb8/igt@kms_flip@plain-flip-interruptible.html
   [145]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb1/igt@kms_flip@plain-flip-interruptible.html

  * igt@kms_frontbuffer_tracking@psr-1p-pri-indfb-multidraw:
    - shard-skl:          ([PASS][146], [PASS][147], [INCOMPLETE][148]) ([i915#123]) -> [PASS][149]
   [146]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl10/igt@kms_frontbuffer_tracking@psr-1p-pri-indfb-multidraw.html
   [147]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl9/igt@kms_frontbuffer_tracking@psr-1p-pri-indfb-multidraw.html
   [148]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl5/igt@kms_frontbuffer_tracking@psr-1p-pri-indfb-multidraw.html
   [149]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl2/igt@kms_frontbuffer_tracking@psr-1p-pri-indfb-multidraw.html

  * igt@kms_plane_alpha_blend@pipe-c-coverage-7efc:
    - shard-skl:          [FAIL][150] ([fdo#108145] / [i915#265]) -> [PASS][151]
   [150]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-skl3/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html
   [151]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-skl6/igt@kms_plane_alpha_blend@pipe-c-coverage-7efc.html

  * igt@kms_psr@psr2_cursor_plane_move:
    - shard-iclb:         ([SKIP][152], [SKIP][153], [SKIP][154]) ([fdo#109441]) -> [PASS][155] +1 similar issue
   [152]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb3/igt@kms_psr@psr2_cursor_plane_move.html
   [153]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb4/igt@kms_psr@psr2_cursor_plane_move.html
   [154]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb6/igt@kms_psr@psr2_cursor_plane_move.html
   [155]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb2/igt@kms_psr@psr2_cursor_plane_move.html

  * igt@kms_psr@psr2_primary_mmap_cpu:
    - shard-iclb:         [SKIP][156] ([fdo#109441]) -> [PASS][157]
   [156]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb7/igt@kms_psr@psr2_primary_mmap_cpu.html
   [157]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb2/igt@kms_psr@psr2_primary_mmap_cpu.html

  * igt@kms_rotation_crc@sprite-rotation-90-pos-100-0:
    - shard-glk:          [INCOMPLETE][158] ([i915#58] / [k.org#198133]) -> [PASS][159] +5 similar issues
   [158]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-glk8/igt@kms_rotation_crc@sprite-rotation-90-pos-100-0.html
   [159]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-glk4/igt@kms_rotation_crc@sprite-rotation-90-pos-100-0.html

  * igt@perf_pmu@init-busy-vcs1:
    - shard-iclb:         [SKIP][160] ([fdo#112080]) -> [PASS][161] +5 similar issues
   [160]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb6/igt@perf_pmu@init-busy-vcs1.html
   [161]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb4/igt@perf_pmu@init-busy-vcs1.html

  * igt@prime_busy@hang-bsd2:
    - shard-iclb:         [SKIP][162] ([fdo#109276]) -> [PASS][163] +7 similar issues
   [162]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb8/igt@prime_busy@hang-bsd2.html
   [163]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb4/igt@prime_busy@hang-bsd2.html

  
#### Warnings ####

  * igt@gem_ctx_isolation@vcs1-nonpriv-switch:
    - shard-iclb:         [FAIL][164] ([IGT#28]) -> [SKIP][165] ([fdo#109276] / [fdo#112080])
   [164]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb2/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html
   [165]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-iclb5/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html

  * igt@gem_ctx_isolation@vcs2-clean:
    - shard-tglb:         [SKIP][166] ([fdo#112080]) -> [SKIP][167] ([fdo#111912] / [fdo#112080])
   [166]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb9/igt@gem_ctx_isolation@vcs2-clean.html
   [167]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb6/igt@gem_ctx_isolation@vcs2-clean.html

  * igt@gem_ctx_isolation@vcs2-nonpriv:
    - shard-tglb:         [SKIP][168] ([fdo#111912] / [fdo#112080]) -> [SKIP][169] ([fdo#112080])
   [168]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-tglb7/igt@gem_ctx_isolation@vcs2-nonpriv.html
   [169]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/shard-tglb9/igt@gem_ctx_isolation@vcs2-nonpriv.html

  * igt@gem_exec_schedule@pi-shared-iova-bsd:
    - shard-iclb:         ([PASS][170], [SKIP][171], [PASS][172]) ([i915#677]) -> [SKIP][173] ([i915#677])
   [170]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7702/shard-iclb3/igt@gem_exec_schedule@pi-shared-iova-bsd.html
   [171]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_77

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16022/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it
  2020-01-08 10:45 [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it Chris Wilson
                   ` (5 preceding siblings ...)
  2020-01-09  6:11 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
@ 2020-01-09  8:21 ` Mika Kuoppala
  6 siblings, 0 replies; 8+ messages in thread
From: Mika Kuoppala @ 2020-01-09  8:21 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> Since we now allow the intel_context_unpin() to run unserialised, we
> risk our operations under the intel_context_lock_pinned() being run as
> the context is unpinned (and thus invalidating our state). We can
> atomically acquire the pin, testing to see if it is pinned in the
> process, thus ensuring that the state remains consistent during the
> course of the whole operation.
>
> Fixes: 841350223816 ("drm/i915/gt: Drop mutex serialisation between context pin/unpin")
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/gem/i915_gem_context.c | 10 +++++++---
>  drivers/gpu/drm/i915/gt/intel_context.h     |  7 ++++++-
>  drivers/gpu/drm/i915/i915_debugfs.c         | 10 ++++------
>  drivers/gpu/drm/i915/i915_perf.c            | 13 +++++--------
>  4 files changed, 22 insertions(+), 18 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index 88f6253f5405..a2e57e62af30 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -1236,12 +1236,14 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
>  	 * image, or into the registers directory, does not stick). Pristine
>  	 * and idle contexts will be configured on pinning.
>  	 */
> -	if (!intel_context_is_pinned(ce))
> +	if (!intel_context_pin_if_active(ce))
>  		return 0;
>  
>  	rq = intel_engine_create_kernel_request(ce->engine);
> -	if (IS_ERR(rq))
> -		return PTR_ERR(rq);
> +	if (IS_ERR(rq)) {
> +		ret = PTR_ERR(rq);
> +		goto out_unpin;
> +	}
>  
>  	/* Serialise with the remote context */
>  	ret = intel_context_prepare_remote_request(ce, rq);
> @@ -1249,6 +1251,8 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
>  		ret = gen8_emit_rpcs_config(rq, ce, sseu);
>  
>  	i915_request_add(rq);
> +out_unpin:
> +	intel_context_unpin(ce);
>  	return ret;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
> index 0f5ae4ff3b10..63073ebc6cf1 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context.h
> @@ -76,9 +76,14 @@ static inline void intel_context_unlock_pinned(struct intel_context *ce)
>  
>  int __intel_context_do_pin(struct intel_context *ce);
>  
> +static inline bool intel_context_pin_if_active(struct intel_context *ce)
> +{
> +	return atomic_inc_not_zero(&ce->pin_count);
> +}
> +
>  static inline int intel_context_pin(struct intel_context *ce)
>  {
> -	if (likely(atomic_inc_not_zero(&ce->pin_count)))
> +	if (likely(intel_context_pin_if_active(ce)))
>  		return 0;
>  
>  	return __intel_context_do_pin(ce);
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 0ac98e39eb75..db184536acef 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -321,16 +321,15 @@ static void print_context_stats(struct seq_file *m,
>  
>  		for_each_gem_engine(ce,
>  				    i915_gem_context_lock_engines(ctx), it) {
> -			intel_context_lock_pinned(ce);
> -			if (intel_context_is_pinned(ce)) {
> +			if (intel_context_pin_if_active(ce)) {
>  				rcu_read_lock();
>  				if (ce->state)
>  					per_file_stats(0,
>  						       ce->state->obj, &kstats);
>  				per_file_stats(0, ce->ring->vma->obj, &kstats);
>  				rcu_read_unlock();
> +				intel_context_unpin(ce);
>  			}
> -			intel_context_unlock_pinned(ce);
>  		}
>  		i915_gem_context_unlock_engines(ctx);
>  
> @@ -1513,15 +1512,14 @@ static int i915_context_status(struct seq_file *m, void *unused)
>  
>  		for_each_gem_engine(ce,
>  				    i915_gem_context_lock_engines(ctx), it) {
> -			intel_context_lock_pinned(ce);
> -			if (intel_context_is_pinned(ce)) {
> +			if (intel_context_pin_if_active(ce)) {
>  				seq_printf(m, "%s: ", ce->engine->name);
>  				if (ce->state)
>  					describe_obj(m, ce->state->obj);
>  				describe_ctx_ring(m, ce->ring);
>  				seq_putc(m, '\n');
> +				intel_context_unpin(ce);
>  			}
> -			intel_context_unlock_pinned(ce);
>  		}
>  		i915_gem_context_unlock_engines(ctx);
>  
> diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
> index 84350c7bc711..c7a7b676f079 100644
> --- a/drivers/gpu/drm/i915/i915_perf.c
> +++ b/drivers/gpu/drm/i915/i915_perf.c
> @@ -2203,17 +2203,14 @@ static int gen8_configure_context(struct i915_gem_context *ctx,
>  		if (ce->engine->class != RENDER_CLASS)
>  			continue;
>  
> -		err = intel_context_lock_pinned(ce);
> -		if (err)
> -			break;
> +		/* Otherwise OA settings will be set upon first use */
> +		if (!intel_context_pin_if_active(ce))
> +			continue;
>  
>  		flex->value = intel_sseu_make_rpcs(ctx->i915, &ce->sseu);
> +		err = gen8_modify_context(ce, flex, count);
>  
> -		/* Otherwise OA settings will be set upon first use */
> -		if (intel_context_is_pinned(ce))
> -			err = gen8_modify_context(ce, flex, count);
> -
> -		intel_context_unlock_pinned(ce);
> +		intel_context_unpin(ce);
>  		if (err)
>  			break;
>  	}
> -- 
> 2.25.0.rc1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2020-01-09  8:21 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-01-08 10:45 [Intel-gfx] [PATCH 1/4] drm/i915: Pin the context as we work on it Chris Wilson
2020-01-08 10:45 ` [Intel-gfx] [PATCH 2/4] drm/i915: Replace vma parking with a clock aging algorithm Chris Wilson
2020-01-08 10:45 ` [Intel-gfx] [PATCH 3/4] drm/i915/gt: Always reset the timeslice after a context switch Chris Wilson
2020-01-08 10:45 ` [Intel-gfx] [PATCH 4/4] drm/i915/gt: Yield the timeslice if waiting on a semaphore Chris Wilson
2020-01-08 10:59 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/4] drm/i915: Pin the context as we work on it Patchwork
2020-01-08 11:29 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2020-01-09  6:11 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
2020-01-09  8:21 ` [Intel-gfx] [PATCH 1/4] " Mika Kuoppala

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.