* [PATCH v2 1/3] drm/i915: Move request->ctx aside
From: Chris Wilson @ 2018-04-19 11:58 UTC
  To: intel-gfx

In the next patch, we want to store the intel_context pointer inside
i915_request, as it is frequently accessed via a convoluted dance when
submitting the request to hw. Having two context pointers inside
i915_request leads to confusion so first rename the existing
i915_gem_context pointer to i915_request.gem_context.
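
As an illustrative aside (not part of the patch; the types below are
stand-ins, not the real i915 headers), the rename keeps call sites
mechanical while freeing the name for the per-engine pointer that patch
3/3 introduces:

/* sketch.c: stand-in types, for illustration only */
struct i915_gem_context { unsigned int hw_id; };
struct intel_context { int pin_count; };

struct i915_request {
	/*
	 * Before: "struct i915_gem_context *ctx;", which turns ambiguous
	 * once a per-engine intel_context pointer is stored alongside it.
	 */
	struct i915_gem_context *gem_context;	/* renamed from ->ctx */
	struct intel_context *hw_context;	/* added by patch 3/3 */
};

static unsigned int request_hw_id(const struct i915_request *rq)
{
	/* Call sites change mechanically: rq->ctx becomes rq->gem_context. */
	return rq->gem_context->hw_id;
}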

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gvt/scheduler.c          |  4 +--
 drivers/gpu/drm/i915/i915_debugfs.c           |  4 +--
 drivers/gpu/drm/i915/i915_gem.c               | 10 +++----
 drivers/gpu/drm/i915/i915_gpu_error.c         | 18 +++++++-----
 drivers/gpu/drm/i915/i915_request.c           |  8 ++---
 drivers/gpu/drm/i915/i915_request.h           |  2 +-
 drivers/gpu/drm/i915/i915_trace.h             |  6 ++--
 drivers/gpu/drm/i915/intel_engine_cs.c        |  2 +-
 drivers/gpu/drm/i915/intel_guc_submission.c   |  7 +++--
 drivers/gpu/drm/i915/intel_lrc.c              | 29 ++++++++++---------
 drivers/gpu/drm/i915/intel_ringbuffer.c       | 11 +++----
 .../gpu/drm/i915/selftests/intel_hangcheck.c  |  5 +++-
 drivers/gpu/drm/i915/selftests/intel_lrc.c    |  2 +-
 13 files changed, 58 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f3d21849b0cb..f64cccb2e793 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -205,7 +205,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 static inline bool is_gvt_request(struct i915_request *req)
 {
-	return i915_gem_context_force_single_submission(req->ctx);
+	return i915_gem_context_force_single_submission(req->gem_context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -305,7 +305,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	struct i915_request *req = workload->req;
 
 	if (IS_KABYLAKE(req->i915) &&
-	    is_inhibit_context(req->ctx, req->engine->id))
+	    is_inhibit_context(req->gem_context, req->engine->id))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/* allocate shadow ring buffer */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e0274f41bc76..792f69e44ba5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -539,8 +539,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 						   struct i915_request,
 						   client_link);
 		rcu_read_lock();
-		task = pid_task(request && request->ctx->pid ?
-				request->ctx->pid : file->pid,
+		task = pid_task(request && request->gem_context->pid ?
+				request->gem_context->pid : file->pid,
 				PIDTYPE_PID);
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 795ca83aed7a..4dba735505d4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3108,7 +3108,7 @@ static void skip_request(struct i915_request *request)
 static void engine_skip_context(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct i915_gem_context *hung_ctx = request->ctx;
+	struct i915_gem_context *hung_ctx = request->gem_context;
 	struct intel_timeline *timeline;
 	unsigned long flags;
 
@@ -3118,7 +3118,7 @@ static void engine_skip_context(struct i915_request *request)
 	spin_lock(&timeline->lock);
 
 	list_for_each_entry_continue(request, &engine->timeline->requests, link)
-		if (request->ctx == hung_ctx)
+		if (request->gem_context == hung_ctx)
 			skip_request(request);
 
 	list_for_each_entry(request, &timeline->requests, link)
@@ -3164,11 +3164,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 	}
 
 	if (stalled) {
-		i915_gem_context_mark_guilty(request->ctx);
+		i915_gem_context_mark_guilty(request->gem_context);
 		skip_request(request);
 
 		/* If this context is now banned, skip all pending requests. */
-		if (i915_gem_context_is_banned(request->ctx))
+		if (i915_gem_context_is_banned(request->gem_context))
 			engine_skip_context(request);
 	} else {
 		/*
@@ -3178,7 +3178,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 		 */
 		request = i915_gem_find_active_request(engine);
 		if (request) {
-			i915_gem_context_mark_innocent(request->ctx);
+			i915_gem_context_mark_innocent(request->gem_context);
 			dma_fence_set_error(&request->fence, -EAGAIN);
 
 			/* Rewind the engine to replay the incomplete rq */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 671ffa37614e..269574b7254c 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1277,16 +1277,18 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
-	erq->context = request->ctx->hw_id;
+	struct i915_gem_context *ctx = request->gem_context;
+
+	erq->context = ctx->hw_id;
 	erq->sched_attr = request->sched.attr;
-	erq->ban_score = atomic_read(&request->ctx->ban_score);
+	erq->ban_score = atomic_read(&ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
 	erq->head = request->head;
 	erq->tail = request->tail;
 
 	rcu_read_lock();
-	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+	erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
 	rcu_read_unlock();
 }
 
@@ -1450,12 +1452,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 		request = i915_gem_find_active_request(engine);
 		if (request) {
+			struct i915_gem_context *ctx = request->gem_context;
 			struct intel_ring *ring;
 
-			ee->vm = request->ctx->ppgtt ?
-				&request->ctx->ppgtt->base : &ggtt->base;
+			ee->vm = ctx->ppgtt ? &ctx->ppgtt->base : &ggtt->base;
 
-			record_context(&ee->context, request->ctx);
+			record_context(&ee->context, ctx);
 
 			/* We need to copy these to an anonymous buffer
 			 * as the simplest method to avoid being overwritten
@@ -1472,10 +1474,10 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 			ee->ctx =
 				i915_error_object_create(i915,
-							 request->ctx->engine[i].state);
+							 ctx->engine[i].state);
 
 			error->simulated |=
-				i915_gem_context_no_error_capture(request->ctx);
+				i915_gem_context_no_error_capture(ctx);
 
 			ee->rq_head = request->head;
 			ee->rq_post = request->postfix;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b692a9f7c357..f913e56604ea 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -399,7 +399,7 @@ static void i915_request_retire(struct i915_request *request)
 	i915_request_remove_from_client(request);
 
 	/* Retirement decays the ban score as it is a sign of ctx progress */
-	atomic_dec_if_positive(&request->ctx->ban_score);
+	atomic_dec_if_positive(&request->gem_context->ban_score);
 
 	/*
 	 * The backing object for the context is done after switching to the
@@ -411,7 +411,7 @@ static void i915_request_retire(struct i915_request *request)
 	 */
 	if (engine->last_retired_context)
 		engine->context_unpin(engine, engine->last_retired_context);
-	engine->last_retired_context = request->ctx;
+	engine->last_retired_context = request->gem_context;
 
 	spin_lock_irq(&request->lock);
 	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
@@ -732,7 +732,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	INIT_LIST_HEAD(&rq->active_list);
 	rq->i915 = i915;
 	rq->engine = engine;
-	rq->ctx = ctx;
+	rq->gem_context = ctx;
 	rq->ring = ring;
 
 	/* No zalloc, must clear what we need by hand */
@@ -1064,7 +1064,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	 */
 	rcu_read_lock();
 	if (engine->schedule)
-		engine->schedule(request, &request->ctx->sched);
+		engine->schedule(request, &request->gem_context->sched);
 	rcu_read_unlock();
 
 	local_bh_disable();
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 8f31ca8272f8..6a029b3f0a88 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -92,7 +92,7 @@ struct i915_request {
 	 * i915_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct i915_gem_context *ctx;
+	struct i915_gem_context *gem_context;
 	struct intel_engine_cs *engine;
 	struct intel_ring *ring;
 	struct intel_timeline *timeline;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 408827bf5d96..da38b295d69f 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -624,7 +624,7 @@ TRACE_EVENT(i915_request_queue,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->ring = rq->engine->id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
@@ -651,7 +651,7 @@ DECLARE_EVENT_CLASS(i915_request,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->ring = rq->engine->id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
@@ -792,7 +792,7 @@ TRACE_EVENT(i915_request_wait_begin,
 	     */
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->ring = rq->engine->id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index be608f7111f5..0248d64c2a72 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1010,7 +1010,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 	 */
 	rq = __i915_gem_active_peek(&engine->timeline->last_request);
 	if (rq)
-		return rq->ctx == kernel_context;
+		return rq->gem_context == kernel_context;
 	else
 		return engine->last_retired_context == kernel_context;
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 02da05875aa7..8527fa1f5c3e 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -511,8 +511,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 	struct intel_guc_client *client = guc->execbuf_client;
 	struct intel_engine_cs *engine = rq->engine;
-	u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
-								 engine));
+	u32 ctx_desc =
+		lower_32_bits(intel_lr_context_descriptor(rq->gem_context,
+							  engine));
 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
 	spin_lock(&client->wq_lock);
@@ -707,7 +708,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 		struct i915_request *rq, *rn;
 
 		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-			if (last && rq->ctx != last->ctx) {
+			if (last && rq->gem_context != last->gem_context) {
 				if (port == last_port) {
 					__list_del_many(&p->requests,
 							&rq->sched.link);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 029901a8fa38..0777226e65a6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -414,9 +414,9 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
-	struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
+	struct intel_context *ce = &rq->gem_context->engine[rq->engine->id];
 	struct i915_hw_ppgtt *ppgtt =
-		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+		rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	u32 *reg_state = ce->lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -666,7 +666,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * second request, and so we never need to tell the
 			 * hardware about the first.
 			 */
-			if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+			if (last && !can_merge_ctx(rq->gem_context,
+						   last->gem_context)) {
 				/*
 				 * If we are on the second port and cannot
 				 * combine this request with the last, then we
@@ -685,14 +686,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * the same context (even though a different
 				 * request) to the second port.
 				 */
-				if (ctx_single_port_submission(last->ctx) ||
-				    ctx_single_port_submission(rq->ctx)) {
+				if (ctx_single_port_submission(last->gem_context) ||
+				    ctx_single_port_submission(rq->gem_context)) {
 					__list_del_many(&p->requests,
 							&rq->sched.link);
 					goto done;
 				}
 
-				GEM_BUG_ON(last->ctx == rq->ctx);
+				GEM_BUG_ON(last->gem_context == rq->gem_context);
 
 				if (submit)
 					port_assign(port, last);
@@ -1376,7 +1377,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
 static int execlists_request_alloc(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct intel_context *ce = &request->ctx->engine[engine->id];
+	struct intel_context *ce = &request->gem_context->engine[engine->id];
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -1830,9 +1831,9 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * future request will be after userspace has had the opportunity
 	 * to recreate its own state.
 	 */
-	ce = &request->ctx->engine[engine->id];
+	ce = &request->gem_context->engine[engine->id];
 	execlists_init_reg_state(ce->lrc_reg_state,
-				 request->ctx, engine, ce->ring);
+				 request->gem_context, engine, ce->ring);
 
 	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
 	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
@@ -1848,7 +1849,7 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 
 static int intel_logical_ring_emit_pdps(struct i915_request *rq)
 {
-	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
 	struct intel_engine_cs *engine = rq->engine;
 	const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
 	u32 *cs;
@@ -1887,15 +1888,15 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 	 * it is unsafe in case of lite-restore (because the ctx is
 	 * not idle). PML4 is allocated during ppgtt init so this is
 	 * not needed in 48-bit.*/
-	if (rq->ctx->ppgtt &&
-	    (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
-	    !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+	if (rq->gem_context->ppgtt &&
+	    (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+	    !i915_vm_is_48bit(&rq->gem_context->ppgtt->base) &&
 	    !intel_vgpu_active(rq->i915)) {
 		ret = intel_logical_ring_emit_pdps(rq);
 		if (ret)
 			return ret;
 
-		rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
+		rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
 	}
 
 	cs = intel_ring_begin(rq, 4);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index c68ac605b8a9..3ea8eb5d49f5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -558,7 +558,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 	 */
 	if (request) {
 		struct drm_i915_private *dev_priv = request->i915;
-		struct intel_context *ce = &request->ctx->engine[engine->id];
+		struct intel_context *ce =
+			&request->gem_context->engine[engine->id];
 		struct i915_hw_ppgtt *ppgtt;
 
 		if (ce->state) {
@@ -570,7 +571,7 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 				   CCID_EN);
 		}
 
-		ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
+		ppgtt = request->gem_context->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
 		if (ppgtt) {
 			u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
 
@@ -1427,7 +1428,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 
 	*cs++ = MI_NOOP;
 	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(rq->ctx->engine[RCS].state) | flags;
+	*cs++ = i915_ggtt_offset(rq->gem_context->engine[RCS].state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1495,7 +1496,7 @@ static int remap_l3(struct i915_request *rq, int slice)
 static int switch_context(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
-	struct i915_gem_context *to_ctx = rq->ctx;
+	struct i915_gem_context *to_ctx = rq->gem_context;
 	struct i915_hw_ppgtt *to_mm =
 		to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	struct i915_gem_context *from_ctx = engine->legacy_active_context;
@@ -1566,7 +1567,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
+	GEM_BUG_ON(!request->gem_context->engine[request->engine->id].pin_count);
 
 	/* Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index f7ee54e109ae..99679b5f97eb 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -102,7 +102,10 @@ static int emit_recurse_batch(struct hang *h,
 			      struct i915_request *rq)
 {
 	struct drm_i915_private *i915 = h->i915;
-	struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+	struct i915_address_space *vm =
+		rq->gem_context->ppgtt ?
+		&rq->gem_context->ppgtt->base :
+		&i915->ggtt.base;
 	struct i915_vma *hws, *vma;
 	unsigned int flags;
 	u32 *batch;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index ee7e22d18ff8..20279547cb05 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -82,7 +82,7 @@ static int emit_recurse_batch(struct spinner *spin,
 			      struct i915_request *rq,
 			      u32 arbitration_command)
 {
-	struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+	struct i915_address_space *vm = &rq->gem_context->ppgtt->base;
 	struct i915_vma *hws, *vma;
 	u32 *batch;
 	int err;
-- 
2.17.0


* [PATCH v2 2/3] drm/i915: Move fiddling with engine->last_retired_context
From: Chris Wilson @ 2018-04-19 11:58 UTC
  To: intel-gfx

Move the knowledge about resetting the current context tracking on the
engine from inside i915_gem_context.c into intel_engine_cs.c.
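
As a minimal sketch of the resulting call-site shape (stand-in
declarations only; the real change is in the diff below):

struct intel_engine_cs;
void intel_engine_lost_context(struct intel_engine_cs *engine);

/*
 * i915_gem_contexts_lost() no longer open-codes the per-engine reset:
 * it just walks the engines and delegates, so any future "state lost"
 * path (full GPU reset, suspend) can reuse the same engine-owned helper.
 */
static void contexts_lost_sketch(struct intel_engine_cs **engines, int count)
{
	int i;

	for (i = 0; i < count; i++)
		intel_engine_lost_context(engines[i]);
}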

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_context.c | 12 ++----------
 drivers/gpu/drm/i915/intel_engine_cs.c  | 23 +++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_ringbuffer.h |  1 +
 3 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 74435affe23f..2fe779cab298 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -514,16 +514,8 @@ void i915_gem_contexts_lost(struct drm_i915_private *dev_priv)
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	for_each_engine(engine, dev_priv, id) {
-		engine->legacy_active_context = NULL;
-		engine->legacy_active_ppgtt = NULL;
-
-		if (!engine->last_retired_context)
-			continue;
-
-		engine->context_unpin(engine, engine->last_retired_context);
-		engine->last_retired_context = NULL;
-	}
+	for_each_engine(engine, dev_priv, id)
+		intel_engine_lost_context(engine);
 }
 
 void i915_gem_contexts_fini(struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 0248d64c2a72..b26867eed4ca 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1084,6 +1084,29 @@ void intel_engines_unpark(struct drm_i915_private *i915)
 	}
 }
 
+/**
+ * intel_engine_lost_context: called when the GPU is reset into an unknown state
+ * @engine: the engine
+ *
+ * We have either reset the GPU or are otherwise about to lose state tracking of
+ * the current GPU logical state (e.g. suspend). On next use, it is therefore
+ * imperative that we make no presumptions about the current state and load
+ * from scratch.
+ */
+void intel_engine_lost_context(struct intel_engine_cs *engine)
+{
+	struct i915_gem_context *ctx;
+
+	lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+	engine->legacy_active_context = NULL;
+	engine->legacy_active_ppgtt = NULL;
+
+	ctx = fetch_and_zero(&engine->last_retired_context);
+	if (ctx)
+		engine->context_unpin(engine, ctx);
+}
+
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
 	switch (INTEL_GEN(engine->i915)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index c5e27905b0e1..584b7f246374 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1040,6 +1040,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine);
 bool intel_engines_are_idle(struct drm_i915_private *dev_priv);
 
 bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine);
+void intel_engine_lost_context(struct intel_engine_cs *engine);
 
 void intel_engines_park(struct drm_i915_private *i915);
 void intel_engines_unpark(struct drm_i915_private *i915);
-- 
2.17.0


* [PATCH v2 3/3] drm/i915: Store a pointer to intel_context in i915_request
From: Chris Wilson @ 2018-04-19 11:58 UTC
  To: intel-gfx

To ease the frequent and ugly pointer dance of
&request->gem_context->engine[request->engine->id] during request
submission, store that pointer as request->hw_context. One major
advantage that we will exploit later is that this decouples the logical
context state from the engine itself.

v2: Set mock_context->ops so we don't crash and burn in selftests.
    Cleanups from Tvrtko.
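
A rough before/after sketch of the pointer dance (stand-in types; the
real accessors, to_intel_context() and friends, are in the diff below):

#include <stdint.h>

#define I915_NUM_ENGINES 8

struct intel_context { uint64_t lrc_desc; };
struct intel_engine_cs { int id; };
struct i915_gem_context {
	/* double-underscore name discourages direct indexing */
	struct intel_context __engine[I915_NUM_ENGINES];
};
struct i915_request {
	struct i915_gem_context *gem_context;
	struct intel_engine_cs *engine;
	struct intel_context *hw_context; /* resolved once at request alloc */
};

/* Before: every submission path re-derived the per-engine state. */
static uint64_t lrc_desc_before(const struct i915_request *rq)
{
	return rq->gem_context->__engine[rq->engine->id].lrc_desc;
}

/* After: one cached pointer, no dependency on engine->id at all. */
static uint64_t lrc_desc_after(const struct i915_request *rq)
{
	return rq->hw_context->lrc_desc;
}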

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gvt/mmio_context.c       |   6 +-
 drivers/gpu/drm/i915/gvt/mmio_context.h       |   2 +-
 drivers/gpu/drm/i915/gvt/scheduler.c          | 141 ++++++----------
 drivers/gpu/drm/i915/gvt/scheduler.h          |   1 -
 drivers/gpu/drm/i915/i915_debugfs.c           |  20 ++-
 drivers/gpu/drm/i915/i915_drv.h               |   1 +
 drivers/gpu/drm/i915/i915_gem.c               |  14 +-
 drivers/gpu/drm/i915/i915_gem_context.c       |  23 +--
 drivers/gpu/drm/i915/i915_gem_context.h       |  29 +++-
 drivers/gpu/drm/i915/i915_gpu_error.c         |   2 +-
 drivers/gpu/drm/i915/i915_perf.c              |  28 ++--
 drivers/gpu/drm/i915/i915_request.c           |  23 ++-
 drivers/gpu/drm/i915/i915_request.h           |   3 +-
 drivers/gpu/drm/i915/intel_engine_cs.c        |  55 ++++---
 drivers/gpu/drm/i915/intel_guc_ads.c          |   2 +-
 drivers/gpu/drm/i915/intel_guc_submission.c   |  15 +-
 drivers/gpu/drm/i915/intel_lrc.c              | 151 +++++++++++-------
 drivers/gpu/drm/i915/intel_lrc.h              |   7 -
 drivers/gpu/drm/i915/intel_ringbuffer.c       | 104 +++++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.h       |   9 +-
 drivers/gpu/drm/i915/selftests/mock_context.c |   7 +
 drivers/gpu/drm/i915/selftests/mock_engine.c  |  42 +++--
 22 files changed, 377 insertions(+), 308 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index a5bac83d53a9..708170e61625 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -446,9 +446,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 
 #define CTX_CONTEXT_CONTROL_VAL	0x03
 
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+bool is_inhibit_context(struct intel_context *ce)
 {
-	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+	const u32 *reg_state = ce->lrc_reg_state;
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
@@ -501,7 +501,7 @@ static void switch_mmio(struct intel_vgpu *pre,
 			 * itself.
 			 */
 			if (mmio->in_context &&
-			    !is_inhibit_context(s->shadow_ctx, ring_id))
+			    !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
 				continue;
 
 			if (mmio->mask)
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index 0439eb8057a8..5c3b9ff9f96a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 
 void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
 
-bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+bool is_inhibit_context(struct intel_context *ce);
 
 int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
 				       struct i915_request *req);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f64cccb2e793..c2e440200b0c 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -54,11 +54,8 @@ static void set_context_pdp_root_pointer(
 
 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
 {
-	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		workload->req->hw_context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 
@@ -128,9 +125,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
 	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
 	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+		workload->req->hw_context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *dst;
@@ -280,10 +276,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
 
-static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
-		struct intel_engine_cs *engine)
+static void shadow_context_descriptor_update(struct intel_context *ce)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
 	u64 desc = 0;
 
 	desc = ce->lrc_desc;
@@ -292,7 +286,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 	 * like GEN8_CTX_* cached in desc_template
 	 */
 	desc &= U64_MAX << 12;
-	desc |= ctx->desc_template & ((1ULL << 12) - 1);
+	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
 
 	ce->lrc_desc = desc;
 }
@@ -300,12 +294,11 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
+	struct i915_request *req = workload->req;
 	void *shadow_ring_buffer_va;
 	u32 *cs;
-	struct i915_request *req = workload->req;
 
-	if (IS_KABYLAKE(req->i915) &&
-	    is_inhibit_context(req->gem_context, req->engine->id))
+	if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/* allocate shadow ring buffer */
@@ -353,60 +346,56 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
 	struct intel_vgpu_submission *s = &vgpu->submission;
 	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	int ring_id = workload->ring_id;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct intel_ring *ring;
+	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
+	struct intel_context *ce;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-	if (workload->shadowed)
+	if (workload->req)
 		return 0;
 
+	/* pin shadow context by gvt even the shadow context will be pinned
+	 * when i915 alloc request. That is because gvt will update the guest
+	 * context from shadow context when workload is completed, and at that
+	 * moment, i915 may already unpined the shadow context to make the
+	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
+	 * the guest context, gvt can unpin the shadow_ctx safely.
+	 */
+	ce = intel_context_pin(shadow_ctx, engine);
+	if (IS_ERR(ce)) {
+		gvt_vgpu_err("fail to pin shadow context\n");
+		return PTR_ERR(ce);
+	}
+
 	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
 	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
 				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
 
-	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
-		shadow_context_descriptor_update(shadow_ctx,
-					dev_priv->engine[ring_id]);
+	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
+		shadow_context_descriptor_update(ce);
 
 	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
 	if (ret)
-		goto err_scan;
+		goto err_unpin;
 
 	if ((workload->ring_id == RCS) &&
 	    (workload->wa_ctx.indirect_ctx.size != 0)) {
 		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
 		if (ret)
-			goto err_scan;
-	}
-
-	/* pin shadow context by gvt even the shadow context will be pinned
-	 * when i915 alloc request. That is because gvt will update the guest
-	 * context from shadow context when workload is completed, and at that
-	 * moment, i915 may already unpined the shadow context to make the
-	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
-	 * the guest context, gvt can unpin the shadow_ctx safely.
-	 */
-	ring = engine->context_pin(engine, shadow_ctx);
-	if (IS_ERR(ring)) {
-		ret = PTR_ERR(ring);
-		gvt_vgpu_err("fail to pin shadow context\n");
-		goto err_shadow;
+			goto err_shadow;
 	}
 
 	ret = populate_shadow_context(workload);
 	if (ret)
-		goto err_unpin;
-	workload->shadowed = true;
+		goto err_shadow;
+
 	return 0;
 
-err_unpin:
-	engine->context_unpin(engine, shadow_ctx);
 err_shadow:
 	release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
+err_unpin:
+	intel_context_unpin(ce);
 	return ret;
 }
 
@@ -414,7 +403,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
 {
 	int ring_id = workload->ring_id;
 	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
 	struct i915_request *rq;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
@@ -437,7 +425,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
 	return 0;
 
 err_unpin:
-	engine->context_unpin(engine, shadow_ctx);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 	return ret;
 }
@@ -495,21 +482,13 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 	return ret;
 }
 
-static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
+static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	struct intel_vgpu_workload *workload = container_of(wa_ctx,
-					struct intel_vgpu_workload,
-					wa_ctx);
-	int ring_id = workload->ring_id;
-	struct intel_vgpu_submission *s = &workload->vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
-	struct execlist_ring_context *shadow_ring_context;
-	struct page *page;
-
-	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-	shadow_ring_context = kmap_atomic(page);
+	struct intel_vgpu_workload *workload =
+		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
+	struct i915_request *rq = workload->req;
+	struct execlist_ring_context *shadow_ring_context =
+		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
 
 	shadow_ring_context->bb_per_ctx_ptr.val =
 		(shadow_ring_context->bb_per_ctx_ptr.val &
@@ -517,9 +496,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	shadow_ring_context->rcs_indirect_ctx.val =
 		(shadow_ring_context->rcs_indirect_ctx.val &
 		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
-
-	kunmap_atomic(shadow_ring_context);
-	return 0;
 }
 
 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
@@ -648,12 +624,9 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 static int dispatch_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	int ring_id = workload->ring_id;
-	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	int ret = 0;
+	int ret;
 
 	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
 		ring_id, workload);
@@ -665,10 +638,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
 		goto out;
 
 	ret = prepare_workload(workload);
-	if (ret) {
-		engine->context_unpin(engine, shadow_ctx);
-		goto out;
-	}
 
 out:
 	if (ret)
@@ -743,27 +712,23 @@ static struct intel_vgpu_workload *pick_next_workload(
 
 static void update_guest_context(struct intel_vgpu_workload *workload)
 {
+	struct i915_request *rq = workload->req;
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_gvt *gvt = vgpu->gvt;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
-	int ring_id = workload->ring_id;
-	struct drm_i915_gem_object *ctx_obj =
-		shadow_ctx->engine[ring_id].state->obj;
+	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
 	struct execlist_ring_context *shadow_ring_context;
 	struct page *page;
 	void *src;
 	unsigned long context_gpa, context_page_num;
 	int i;
 
-	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
-			workload->ctx_desc.lrca);
-
-	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
+		      workload->ctx_desc.lrca);
 
+	context_page_num = rq->engine->context_size;
 	context_page_num = context_page_num >> PAGE_SHIFT;
 
-	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
+	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
 		context_page_num = 19;
 
 	i = 2;
@@ -836,6 +801,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 		scheduler->current_workload[ring_id];
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
+	struct i915_request *rq;
 	int event;
 
 	mutex_lock(&gvt->lock);
@@ -844,11 +810,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 	 * switch to make sure request is completed.
 	 * For the workload w/o request, directly complete the workload.
 	 */
-	if (workload->req) {
-		struct drm_i915_private *dev_priv =
-			workload->vgpu->gvt->dev_priv;
-		struct intel_engine_cs *engine =
-			dev_priv->engine[workload->ring_id];
+	rq = fetch_and_zero(&workload->req);
+	if (rq) {
 		wait_event(workload->shadow_ctx_status_wq,
 			   !atomic_read(&workload->shadow_ctx_active));
 
@@ -864,8 +827,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 				workload->status = 0;
 		}
 
-		i915_request_put(fetch_and_zero(&workload->req));
-
 		if (!workload->status && !(vgpu->resetting_eng &
 					   ENGINE_MASK(ring_id))) {
 			update_guest_context(workload);
@@ -874,10 +835,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 					 INTEL_GVT_EVENT_MAX)
 				intel_vgpu_trigger_virtual_event(vgpu, event);
 		}
-		mutex_lock(&dev_priv->drm.struct_mutex);
+
 		/* unpin shadow ctx as the shadow_ctx update is done */
-		engine->context_unpin(engine, s->shadow_ctx);
-		mutex_unlock(&dev_priv->drm.struct_mutex);
+		mutex_lock(&rq->i915->drm.struct_mutex);
+		intel_context_unpin(rq->hw_context);
+		mutex_unlock(&rq->i915->drm.struct_mutex);
+
+		i915_request_put(rq);
 	}
 
 	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -1251,7 +1215,6 @@ alloc_workload(struct intel_vgpu *vgpu)
 	atomic_set(&workload->shadow_ctx_active, 0);
 
 	workload->status = -EINPROGRESS;
-	workload->shadowed = false;
 	workload->vgpu = vgpu;
 
 	return workload;
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 486ed57a4ad1..b0e04017d7a2 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -83,7 +83,6 @@ struct intel_vgpu_workload {
 	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
-	bool shadowed;
 	int status;
 
 	struct intel_vgpu_mm *shadow_mm;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 792f69e44ba5..fd202185d6e5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -377,16 +377,19 @@ static void print_batch_pool_stats(struct seq_file *m,
 	print_file_stats(m, "[k]batch pool", stats);
 }
 
-static int per_file_ctx_stats(int id, void *ptr, void *data)
+static int per_file_ctx_stats(int idx, void *ptr, void *data)
 {
 	struct i915_gem_context *ctx = ptr;
-	int n;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+
+	for_each_engine(engine, ctx->i915, id) {
+		struct intel_context *ce = to_intel_context(ctx, engine);
 
-	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
-		if (ctx->engine[n].state)
-			per_file_stats(0, ctx->engine[n].state->obj, data);
-		if (ctx->engine[n].ring)
-			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
+		if (ce->state)
+			per_file_stats(0, ce->state->obj, data);
+		if (ce->ring)
+			per_file_stats(0, ce->ring->vma->obj, data);
 	}
 
 	return 0;
@@ -1960,7 +1963,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_putc(m, '\n');
 
 		for_each_engine(engine, dev_priv, id) {
-			struct intel_context *ce = &ctx->engine[engine->id];
+			struct intel_context *ce =
+				to_intel_context(ctx, engine);
 
 			seq_printf(m, "%s: ", engine->name);
 			if (ce->state)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 028691108125..8907f71b900a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1945,6 +1945,7 @@ struct drm_i915_private {
 			 */
 			struct i915_perf_stream *exclusive_stream;
 
+			struct intel_context *pinned_ctx;
 			u32 specific_ctx_id;
 
 			struct hrtimer poll_check_timer;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4dba735505d4..e186e9194fad 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3227,14 +3227,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
 	i915_retire_requests(dev_priv);
 
 	for_each_engine(engine, dev_priv, id) {
-		struct i915_gem_context *ctx;
+		struct intel_context *ce;
 
 		i915_gem_reset_engine(engine,
 				      engine->hangcheck.active_request,
 				      stalled_mask & ENGINE_MASK(id));
-		ctx = fetch_and_zero(&engine->last_retired_context);
-		if (ctx)
-			engine->context_unpin(engine, ctx);
+		ce = fetch_and_zero(&engine->last_retired_context);
+		if (ce)
+			intel_context_unpin(ce);
 
 		/*
 		 * Ostensibily, we always want a context loaded for powersaving,
@@ -4948,13 +4948,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
 
 static void assert_kernel_context_is_current(struct drm_i915_private *i915)
 {
-	struct i915_gem_context *kernel_context = i915->kernel_context;
+	struct i915_gem_context *kctx = i915->kernel_context;
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
 
 	for_each_engine(engine, i915, id) {
 		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
-		GEM_BUG_ON(engine->last_retired_context != kernel_context);
+		GEM_BUG_ON(engine->last_retired_context->gem_context != kctx);
 	}
 }
 
@@ -5291,7 +5291,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 	for_each_engine(engine, i915, id) {
 		struct i915_vma *state;
 
-		state = ctx->engine[id].state;
+		state = to_intel_context(ctx, engine)->state;
 		if (!state)
 			continue;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 2fe779cab298..bb68f7381ff2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -117,24 +117,18 @@ static void lut_close(struct i915_gem_context *ctx)
 
 static void i915_gem_context_free(struct i915_gem_context *ctx)
 {
-	int i;
+	unsigned int n;
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
 
 	i915_ppgtt_put(ctx->ppgtt);
 
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct intel_context *ce = &ctx->engine[i];
+	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+		struct intel_context *ce = &ctx->__engine[n];
 
-		if (!ce->state)
-			continue;
-
-		WARN_ON(ce->pin_count);
-		if (ce->ring)
-			intel_ring_free(ce->ring);
-
-		__i915_gem_object_release_unless_active(ce->state->obj);
+		if (ce->ops)
+			ce->ops->destroy(ce);
 	}
 
 	kfree(ctx->name);
@@ -266,6 +260,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 		    struct drm_i915_file_private *file_priv)
 {
 	struct i915_gem_context *ctx;
+	unsigned int n;
 	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -283,6 +278,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	ctx->i915 = dev_priv;
 	ctx->sched.priority = I915_PRIORITY_NORMAL;
 
+	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+		struct intel_context *ce = &ctx->__engine[n];
+
+		ce->gem_context = ctx;
+	}
+
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index b12a8a8c5af9..48a11a98e617 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -45,6 +45,11 @@ struct intel_ring;
 
 #define DEFAULT_CONTEXT_HANDLE 0
 
+struct intel_context_ops {
+	void (*unpin)(struct intel_context *ce);
+	void (*destroy)(struct intel_context *ce);
+};
+
 /**
  * struct i915_gem_context - client state
  *
@@ -144,12 +149,15 @@ struct i915_gem_context {
 
 	/** engine: per-engine logical HW state */
 	struct intel_context {
+		struct i915_gem_context *gem_context;
 		struct i915_vma *state;
 		struct intel_ring *ring;
 		u32 *lrc_reg_state;
 		u64 lrc_desc;
 		int pin_count;
-	} engine[I915_NUM_ENGINES];
+
+		const struct intel_context_ops *ops;
+	} __engine[I915_NUM_ENGINES];
 
 	/** ring_size: size for allocating the per-engine ring buffer */
 	u32 ring_size;
@@ -256,6 +264,25 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
 	return !ctx->file_priv;
 }
 
+static inline struct intel_context *
+to_intel_context(struct i915_gem_context *ctx,
+		 const struct intel_engine_cs *engine)
+{
+	return &ctx->__engine[engine->id];
+}
+
+static inline struct intel_context *
+intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
+{
+	return engine->context_pin(engine, ctx);
+}
+
+static inline void intel_context_unpin(struct intel_context *ce)
+{
+	GEM_BUG_ON(!ce->ops);
+	ce->ops->unpin(ce);
+}
+
 /* i915_gem_context.c */
 int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 269574b7254c..4801047d4d6a 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1474,7 +1474,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 			ee->ctx =
 				i915_error_object_create(i915,
-							 ctx->engine[i].state);
+							 request->hw_context->state);
 
 			error->simulated |=
 				i915_gem_context_no_error_capture(ctx);
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index bfc906cd4e5e..11afab651050 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -1221,7 +1221,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
 	} else {
 		struct intel_engine_cs *engine = dev_priv->engine[RCS];
-		struct intel_ring *ring;
+		struct intel_context *ce;
 		int ret;
 
 		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
@@ -1234,19 +1234,19 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 		 *
 		 * NB: implied RCS engine...
 		 */
-		ring = engine->context_pin(engine, stream->ctx);
+		ce = intel_context_pin(stream->ctx, engine);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
-		if (IS_ERR(ring))
-			return PTR_ERR(ring);
+		if (IS_ERR(ce))
+			return PTR_ERR(ce);
 
+		dev_priv->perf.oa.pinned_ctx = ce;
 
 		/*
 		 * Explicitly track the ID (instead of calling
 		 * i915_ggtt_offset() on the fly) considering the difference
 		 * with gen8+ and execlists
 		 */
-		dev_priv->perf.oa.specific_ctx_id =
-			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
+		dev_priv->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
 	}
 
 	return 0;
@@ -1262,17 +1262,14 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
 static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
 {
 	struct drm_i915_private *dev_priv = stream->dev_priv;
+	struct intel_context *ce;
 
-	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-	} else {
-		struct intel_engine_cs *engine = dev_priv->engine[RCS];
+	dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
 
+	ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
+	if (ce) {
 		mutex_lock(&dev_priv->drm.struct_mutex);
-
-		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
-		engine->context_unpin(engine, stream->ctx);
-
+		intel_context_unpin(ce);
 		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
 }
@@ -1759,6 +1756,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
 static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 				       const struct i915_oa_config *oa_config)
 {
+	struct intel_engine_cs *engine = dev_priv->engine[RCS];
 	struct i915_gem_context *ctx;
 	int ret;
 	unsigned int wait_flags = I915_WAIT_LOCKED;
@@ -1789,7 +1787,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
 
 	/* Update all contexts now that we've stalled the submission. */
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
-		struct intel_context *ce = &ctx->engine[RCS];
+		struct intel_context *ce = to_intel_context(ctx, engine);
 		u32 *regs;
 
 		/* OA settings will be set upon first use */
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index f913e56604ea..da97ce4e18a9 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -410,8 +410,8 @@ static void i915_request_retire(struct i915_request *request)
 	 * the subsequent request.
 	 */
 	if (engine->last_retired_context)
-		engine->context_unpin(engine, engine->last_retired_context);
-	engine->last_retired_context = request->gem_context;
+		intel_context_unpin(engine->last_retired_context);
+	engine->last_retired_context = request->hw_context;
 
 	spin_lock_irq(&request->lock);
 	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
@@ -613,7 +613,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 {
 	struct drm_i915_private *i915 = engine->i915;
 	struct i915_request *rq;
-	struct intel_ring *ring;
+	struct intel_context *ce;
 	int ret;
 
 	lockdep_assert_held(&i915->drm.struct_mutex);
@@ -637,16 +637,15 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * GGTT space, so do this first before we reserve a seqno for
 	 * ourselves.
 	 */
-	ring = engine->context_pin(engine, ctx);
-	if (IS_ERR(ring))
-		return ERR_CAST(ring);
-	GEM_BUG_ON(!ring);
+	ce = intel_context_pin(ctx, engine);
+	if (IS_ERR(ce))
+		return ERR_CAST(ce);
 
 	ret = reserve_engine(engine);
 	if (ret)
 		goto err_unpin;
 
-	ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
 	if (ret)
 		goto err_unreserve;
 
@@ -733,7 +732,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	rq->i915 = i915;
 	rq->engine = engine;
 	rq->gem_context = ctx;
-	rq->ring = ring;
+	rq->hw_context = ce;
+	rq->ring = ce->ring;
 
 	/* No zalloc, must clear what we need by hand */
 	rq->global_seqno = 0;
@@ -786,7 +786,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 err_unreserve:
 	unreserve_engine(engine);
 err_unpin:
-	engine->context_unpin(engine, ctx);
+	intel_context_unpin(ce);
 	return ERR_PTR(ret);
 }
 
@@ -972,7 +972,6 @@ i915_request_await_object(struct i915_request *to,
 void __i915_request_add(struct i915_request *request, bool flush_caches)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct intel_ring *ring = request->ring;
 	struct intel_timeline *timeline = request->timeline;
 	struct i915_request *prev;
 	u32 *cs;
@@ -1048,7 +1047,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
 	i915_gem_active_set(&timeline->last_request, request);
 
-	list_add_tail(&request->ring_link, &ring->request_list);
+	list_add_tail(&request->ring_link, &request->ring->request_list);
 	request->emitted_jiffies = jiffies;
 
 	/*
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 6a029b3f0a88..7ea939f974ea 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -92,8 +92,9 @@ struct i915_request {
 	 * i915_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct i915_gem_context *gem_context;
 	struct intel_engine_cs *engine;
+	struct i915_gem_context *gem_context;
+	struct intel_context *hw_context;
 	struct intel_ring *ring;
 	struct intel_timeline *timeline;
 	struct intel_signal_node signaling;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index b26867eed4ca..5070cfae5058 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -665,6 +665,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
+static void __intel_context_unpin(struct i915_gem_context *ctx,
+				  struct intel_engine_cs *engine)
+{
+	intel_context_unpin(to_intel_context(ctx, engine));
+}
+
 /**
  * intel_engines_init_common - initialize cengine state which might require hw access
  * @engine: Engine to initialize.
@@ -678,7 +684,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
  */
 int intel_engine_init_common(struct intel_engine_cs *engine)
 {
-	struct intel_ring *ring;
+	struct drm_i915_private *i915 = engine->i915;
+	struct intel_context *ce;
 	int ret;
 
 	engine->set_default_submission(engine);
@@ -690,19 +697,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	 * be available. To avoid this we always pin the default
 	 * context.
 	 */
-	ring = engine->context_pin(engine, engine->i915->kernel_context);
-	if (IS_ERR(ring))
-		return PTR_ERR(ring);
+	ce = intel_context_pin(i915->kernel_context, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
 
 	/*
 	 * Similarly the preempt context must always be available so that
 	 * we can interrupt the engine at any time.
 	 */
-	if (engine->i915->preempt_context) {
-		ring = engine->context_pin(engine,
-					   engine->i915->preempt_context);
-		if (IS_ERR(ring)) {
-			ret = PTR_ERR(ring);
+	if (i915->preempt_context) {
+		ce = intel_context_pin(i915->preempt_context, engine);
+		if (IS_ERR(ce)) {
+			ret = PTR_ERR(ce);
 			goto err_unpin_kernel;
 		}
 	}
@@ -711,7 +717,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	if (ret)
 		goto err_unpin_preempt;
 
-	if (HWS_NEEDS_PHYSICAL(engine->i915))
+	if (HWS_NEEDS_PHYSICAL(i915))
 		ret = init_phys_status_page(engine);
 	else
 		ret = init_status_page(engine);
@@ -723,10 +729,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 err_breadcrumbs:
 	intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
-	if (engine->i915->preempt_context)
-		engine->context_unpin(engine, engine->i915->preempt_context);
+	if (i915->preempt_context)
+		__intel_context_unpin(i915->preempt_context, engine);
+
 err_unpin_kernel:
-	engine->context_unpin(engine, engine->i915->kernel_context);
+	__intel_context_unpin(i915->kernel_context, engine);
 	return ret;
 }
 
@@ -739,6 +746,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
  */
 void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
+	struct drm_i915_private *i915 = engine->i915;
+
 	intel_engine_cleanup_scratch(engine);
 
 	if (HWS_NEEDS_PHYSICAL(engine->i915))
@@ -753,9 +762,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	if (engine->default_state)
 		i915_gem_object_put(engine->default_state);
 
-	if (engine->i915->preempt_context)
-		engine->context_unpin(engine, engine->i915->preempt_context);
-	engine->context_unpin(engine, engine->i915->kernel_context);
+	if (i915->preempt_context)
+		__intel_context_unpin(i915->preempt_context, engine);
+	__intel_context_unpin(i915->kernel_context, engine);
 }
 
 u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
@@ -997,8 +1006,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
  */
 bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 {
-	const struct i915_gem_context * const kernel_context =
-		engine->i915->kernel_context;
+	const struct intel_context *kernel_context =
+		to_intel_context(engine->i915->kernel_context, engine);
 	struct i915_request *rq;
 
 	lockdep_assert_held(&engine->i915->drm.struct_mutex);
@@ -1010,7 +1019,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 	 */
 	rq = __i915_gem_active_peek(&engine->timeline->last_request);
 	if (rq)
-		return rq->gem_context == kernel_context;
+		return rq->hw_context == kernel_context;
 	else
 		return engine->last_retired_context == kernel_context;
 }
@@ -1095,16 +1104,16 @@ void intel_engines_unpark(struct drm_i915_private *i915)
  */
 void intel_engine_lost_context(struct intel_engine_cs *engine)
 {
-	struct i915_gem_context *ctx;
+	struct intel_context *ce;
 
 	lockdep_assert_held(&engine->i915->drm.struct_mutex);
 
 	engine->legacy_active_context = NULL;
 	engine->legacy_active_ppgtt = NULL;
 
-	ctx = fetch_and_zero(&engine->last_retired_context);
-	if (ctx)
-		engine->context_unpin(engine, ctx);
+	ce = fetch_and_zero(&engine->last_retired_context);
+	if (ce)
+		intel_context_unpin(ce);
 }
 
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
index 334cb5202e1c..b8b4d346dd4d 100644
--- a/drivers/gpu/drm/i915/intel_guc_ads.c
+++ b/drivers/gpu/drm/i915/intel_guc_ads.c
@@ -121,7 +121,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
 	 * to find it. Note that we have to skip our header (1 page),
 	 * because our GuC shared data is there.
 	 */
-	kernel_ctx_vma = dev_priv->kernel_context->engine[RCS].state;
+	kernel_ctx_vma = dev_priv->kernel_context->__engine[RCS].state;
 	blob->ads.golden_context_lrca =
 		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
 
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 8527fa1f5c3e..2fe3f9daa56d 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -362,7 +362,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
 	desc->db_id = client->doorbell_id;
 
 	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
-		struct intel_context *ce = &ctx->engine[engine->id];
+		struct intel_context *ce = to_intel_context(ctx, engine);
 		u32 guc_engine_id = engine->guc_id;
 		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
 
@@ -511,9 +511,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 	struct intel_guc_client *client = guc->execbuf_client;
 	struct intel_engine_cs *engine = rq->engine;
-	u32 ctx_desc =
-		lower_32_bits(intel_lr_context_descriptor(rq->gem_context,
-							  engine));
+	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
 	spin_lock(&client->wq_lock);
@@ -551,8 +549,8 @@ static void inject_preempt_context(struct work_struct *work)
 					     preempt_work[engine->id]);
 	struct intel_guc_client *client = guc->preempt_client;
 	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-	u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
-								 engine));
+	u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
+						      engine)->lrc_desc);
 	u32 data[7];
 
 	/*
@@ -708,7 +706,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
 		struct i915_request *rq, *rn;
 
 		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-			if (last && rq->gem_context != last->gem_context) {
+			if (last && rq->hw_context != last->hw_context) {
 				if (port == last_port) {
 					__list_del_many(&p->requests,
 							&rq->sched.link);
@@ -991,7 +989,8 @@ static void guc_fill_preempt_context(struct intel_guc *guc)
 	enum intel_engine_id id;
 
 	for_each_engine(engine, dev_priv, id) {
-		struct intel_context *ce = &client->owner->engine[id];
+		struct intel_context *ce =
+			to_intel_context(client->owner, engine);
 		u32 addr = intel_hws_preempt_done_address(engine);
 		u32 *cs;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0777226e65a6..a3de8522fbc2 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -164,7 +164,8 @@
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-					    struct intel_engine_cs *engine);
+					    struct intel_engine_cs *engine,
+					    struct intel_context *ce);
 static void execlists_init_reg_state(u32 *reg_state,
 				     struct i915_gem_context *ctx,
 				     struct intel_engine_cs *engine,
@@ -221,9 +222,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
  */
 static void
 intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
-				   struct intel_engine_cs *engine)
+				   struct intel_engine_cs *engine,
+				   struct intel_context *ce)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
 	u64 desc;
 
 	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
@@ -414,7 +415,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
-	struct intel_context *ce = &rq->gem_context->engine[rq->engine->id];
+	struct intel_context *ce = rq->hw_context;
 	struct i915_hw_ppgtt *ppgtt =
 		rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	u32 *reg_state = ce->lrc_reg_state;
@@ -491,14 +492,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
 	execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
 }
 
-static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
+static bool ctx_single_port_submission(const struct intel_context *ce)
 {
 	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
-		i915_gem_context_force_single_submission(ctx));
+		i915_gem_context_force_single_submission(ce->gem_context));
 }
 
-static bool can_merge_ctx(const struct i915_gem_context *prev,
-			  const struct i915_gem_context *next)
+static bool can_merge_ctx(const struct intel_context *prev,
+			  const struct intel_context *next)
 {
 	if (prev != next)
 		return false;
@@ -523,7 +524,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists *execlists = &engine->execlists;
 	struct intel_context *ce =
-		&engine->i915->preempt_context->engine[engine->id];
+		to_intel_context(engine->i915->preempt_context, engine);
 	unsigned int n;
 
 	GEM_BUG_ON(execlists->preempt_complete_status !=
@@ -666,8 +667,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			 * second request, and so we never need to tell the
 			 * hardware about the first.
 			 */
-			if (last && !can_merge_ctx(rq->gem_context,
-						   last->gem_context)) {
+			if (last &&
+			    !can_merge_ctx(rq->hw_context, last->hw_context)) {
 				/*
 				 * If we are on the second port and cannot
 				 * combine this request with the last, then we
@@ -686,14 +687,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * the same context (even though a different
 				 * request) to the second port.
 				 */
-				if (ctx_single_port_submission(last->gem_context) ||
-				    ctx_single_port_submission(rq->gem_context)) {
+				if (ctx_single_port_submission(last->hw_context) ||
+				    ctx_single_port_submission(rq->hw_context)) {
 					__list_del_many(&p->requests,
 							&rq->sched.link);
 					goto done;
 				}
 
-				GEM_BUG_ON(last->gem_context == rq->gem_context);
+				GEM_BUG_ON(last->hw_context == rq->hw_context);
 
 				if (submit)
 					port_assign(port, last);
@@ -1277,6 +1278,37 @@ static void execlists_schedule(struct i915_request *request,
 	spin_unlock_irq(&engine->timeline->lock);
 }
 
+static void execlists_context_destroy(struct intel_context *ce)
+{
+	GEM_BUG_ON(!ce->state);
+	GEM_BUG_ON(ce->pin_count);
+
+	intel_ring_free(ce->ring);
+	__i915_gem_object_release_unless_active(ce->state->obj);
+}
+
+static void __execlists_context_unpin(struct intel_context *ce)
+{
+	intel_ring_unpin(ce->ring);
+
+	ce->state->obj->pin_global--;
+	i915_gem_object_unpin_map(ce->state->obj);
+	i915_vma_unpin(ce->state);
+
+	i915_gem_context_put(ce->gem_context);
+}
+
+static void execlists_context_unpin(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->gem_context->i915->drm.struct_mutex);
+	GEM_BUG_ON(ce->pin_count == 0);
+
+	if (--ce->pin_count)
+		return;
+
+	__execlists_context_unpin(ce);
+}
+
 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
 {
 	unsigned int flags;
@@ -1300,21 +1332,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
 	return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
 }
 
-static struct intel_ring *
-execlists_context_pin(struct intel_engine_cs *engine,
-		      struct i915_gem_context *ctx)
+static struct intel_context *
+__execlists_context_pin(struct intel_engine_cs *engine,
+			struct i915_gem_context *ctx,
+			struct intel_context *ce)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
 	void *vaddr;
 	int ret;
 
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-	if (likely(ce->pin_count++))
-		goto out;
-	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
-
-	ret = execlists_context_deferred_alloc(ctx, engine);
+	ret = execlists_context_deferred_alloc(ctx, engine, ce);
 	if (ret)
 		goto err;
 	GEM_BUG_ON(!ce->state);
@@ -1333,7 +1359,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
 	if (ret)
 		goto unpin_map;
 
-	intel_lr_context_descriptor_update(ctx, engine);
+	intel_lr_context_descriptor_update(ctx, engine, ce);
 
 	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
@@ -1342,8 +1368,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
 
 	ce->state->obj->pin_global++;
 	i915_gem_context_get(ctx);
-out:
-	return ce->ring;
+	return ce;
 
 unpin_map:
 	i915_gem_object_unpin_map(ce->state->obj);
@@ -1354,33 +1379,33 @@ execlists_context_pin(struct intel_engine_cs *engine,
 	return ERR_PTR(ret);
 }
 
-static void execlists_context_unpin(struct intel_engine_cs *engine,
-				    struct i915_gem_context *ctx)
+static const struct intel_context_ops execlists_context_ops = {
+	.unpin = execlists_context_unpin,
+	.destroy = execlists_context_destroy,
+};
+
+static struct intel_context *
+execlists_context_pin(struct intel_engine_cs *engine,
+		      struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-	GEM_BUG_ON(ce->pin_count == 0);
-
-	if (--ce->pin_count)
-		return;
 
-	intel_ring_unpin(ce->ring);
+	if (likely(ce->pin_count++))
+		return ce;
+	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
-	ce->state->obj->pin_global--;
-	i915_gem_object_unpin_map(ce->state->obj);
-	i915_vma_unpin(ce->state);
+	ce->ops = &execlists_context_ops;
 
-	i915_gem_context_put(ctx);
+	return __execlists_context_pin(engine, ctx, ce);
 }
 
 static int execlists_request_alloc(struct i915_request *request)
 {
-	struct intel_engine_cs *engine = request->engine;
-	struct intel_context *ce = &request->gem_context->engine[engine->id];
-	int ret;
+	int err;
 
-	GEM_BUG_ON(!ce->pin_count);
+	GEM_BUG_ON(!request->hw_context->pin_count);
 
 	/* Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
@@ -1388,9 +1413,9 @@ static int execlists_request_alloc(struct i915_request *request)
 	 */
 	request->reserved_space += EXECLISTS_REQUEST_SIZE;
 
-	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
-	if (ret)
-		return ret;
+	err = intel_ring_wait_for_space(request->ring, request->reserved_space);
+	if (err)
+		return err;
 
 	/* Note that after this point, we have committed to using
 	 * this request as it is being used to both track the
@@ -1780,8 +1805,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 			      struct i915_request *request)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
-	struct intel_context *ce;
 	unsigned long flags;
+	u32 *regs;
 
 	GEM_TRACE("%s request global=%x, current=%d\n",
 		  engine->name, request ? request->global_seqno : 0,
@@ -1831,14 +1856,13 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 	 * future request will be after userspace has had the opportunity
 	 * to recreate its own state.
 	 */
-	ce = &request->gem_context->engine[engine->id];
-	execlists_init_reg_state(ce->lrc_reg_state,
-				 request->gem_context, engine, ce->ring);
+	regs = request->hw_context->lrc_reg_state;
+	execlists_init_reg_state(regs,
+				 request->gem_context, engine, request->ring);
 
 	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
-	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
-		i915_ggtt_offset(ce->ring->vma);
-	ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
+	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
+	regs[CTX_RING_HEAD + 1] = request->postfix;
 
 	request->ring->head = request->postfix;
 	intel_ring_update_space(request->ring);
@@ -2176,8 +2200,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->reset_hw = reset_common_ring;
 
 	engine->context_pin = execlists_context_pin;
-	engine->context_unpin = execlists_context_unpin;
-
 	engine->request_alloc = execlists_request_alloc;
 
 	engine->emit_flush = gen8_emit_flush;
@@ -2272,9 +2294,13 @@ static int logical_ring_init(struct intel_engine_cs *engine)
 	}
 
 	engine->execlists.preempt_complete_status = ~0u;
-	if (engine->i915->preempt_context)
+	if (engine->i915->preempt_context) {
+		struct intel_context *ce =
+			to_intel_context(engine->i915->preempt_context, engine);
+
 		engine->execlists.preempt_complete_status =
-			upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
+			upper_32_bits(ce->lrc_desc);
+	}
 
 	return 0;
 
@@ -2408,7 +2434,7 @@ static void execlists_init_reg_state(u32 *regs,
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
 	u32 base = engine->mmio_base;
-	bool rcs = engine->id == RCS;
+	bool rcs = engine->class == RENDER_CLASS;
 
 	/* A context is actually a big batch buffer with several
 	 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
@@ -2553,10 +2579,10 @@ populate_lr_context(struct i915_gem_context *ctx,
 }
 
 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
-					    struct intel_engine_cs *engine)
+					    struct intel_engine_cs *engine,
+					    struct intel_context *ce)
 {
 	struct drm_i915_gem_object *ctx_obj;
-	struct intel_context *ce = &ctx->engine[engine->id];
 	struct i915_vma *vma;
 	uint32_t context_size;
 	struct intel_ring *ring;
@@ -2627,7 +2653,8 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
 	 */
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
 		for_each_engine(engine, dev_priv, id) {
-			struct intel_context *ce = &ctx->engine[engine->id];
+			struct intel_context *ce =
+				to_intel_context(ctx, engine);
 			u32 *reg;
 
 			if (!ce->state)
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 59d7b86012e9..1593194e930c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -104,11 +104,4 @@ struct i915_gem_context;
 
 void intel_lr_context_resume(struct drm_i915_private *dev_priv);
 
-static inline uint64_t
-intel_lr_context_descriptor(struct i915_gem_context *ctx,
-			    struct intel_engine_cs *engine)
-{
-	return ctx->engine[engine->id].lrc_desc;
-}
-
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3ea8eb5d49f5..9f526afa11ed 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -558,8 +558,7 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 	 */
 	if (request) {
 		struct drm_i915_private *dev_priv = request->i915;
-		struct intel_context *ce =
-			&request->gem_context->engine[engine->id];
+		struct intel_context *ce = request->hw_context;
 		struct i915_hw_ppgtt *ppgtt;
 
 		if (ce->state) {
@@ -1164,9 +1163,33 @@ intel_ring_free(struct intel_ring *ring)
 	kfree(ring);
 }
 
-static int context_pin(struct i915_gem_context *ctx)
+static void intel_ring_context_destroy(struct intel_context *ce)
 {
-	struct i915_vma *vma = ctx->engine[RCS].state;
+	GEM_BUG_ON(ce->pin_count);
+
+	if (ce->state)
+		__i915_gem_object_release_unless_active(ce->state->obj);
+}
+
+static void intel_ring_context_unpin(struct intel_context *ce)
+{
+	lockdep_assert_held(&ce->gem_context->i915->drm.struct_mutex);
+	GEM_BUG_ON(ce->pin_count == 0);
+
+	if (--ce->pin_count)
+		return;
+
+	if (ce->state) {
+		ce->state->obj->pin_global--;
+		i915_vma_unpin(ce->state);
+	}
+
+	i915_gem_context_put(ce->gem_context);
+}
+
+static int __context_pin(struct intel_context *ce)
+{
+	struct i915_vma *vma = ce->state;
 	int ret;
 
 	/*
@@ -1253,25 +1276,19 @@ alloc_context_vma(struct intel_engine_cs *engine)
 	return ERR_PTR(err);
 }
 
-static struct intel_ring *
-intel_ring_context_pin(struct intel_engine_cs *engine,
-		       struct i915_gem_context *ctx)
+static struct intel_context *
+__ring_context_pin(struct intel_engine_cs *engine,
+		   struct i915_gem_context *ctx,
+		   struct intel_context *ce)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
-	int ret;
-
-	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-
-	if (likely(ce->pin_count++))
-		goto out;
-	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
+	int err;
 
 	if (!ce->state && engine->context_size) {
 		struct i915_vma *vma;
 
 		vma = alloc_context_vma(engine);
 		if (IS_ERR(vma)) {
-			ret = PTR_ERR(vma);
+			err = PTR_ERR(vma);
 			goto err;
 		}
 
@@ -1279,8 +1296,8 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
 	}
 
 	if (ce->state) {
-		ret = context_pin(ctx);
-		if (ret)
+		err = __context_pin(ce);
+		if (err)
 			goto err;
 
 		ce->state->obj->pin_global++;
@@ -1288,32 +1305,37 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
 
 	i915_gem_context_get(ctx);
 
-out:
 	/* One ringbuffer to rule them all */
-	return engine->buffer;
+	GEM_BUG_ON(!engine->buffer);
+	ce->ring = engine->buffer;
+
+	return ce;
 
 err:
 	ce->pin_count = 0;
-	return ERR_PTR(ret);
+	return ERR_PTR(err);
 }
 
-static void intel_ring_context_unpin(struct intel_engine_cs *engine,
-				     struct i915_gem_context *ctx)
+static const struct intel_context_ops ring_context_ops = {
+	.unpin = intel_ring_context_unpin,
+	.destroy = intel_ring_context_destroy,
+};
+
+static struct intel_context *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+		       struct i915_gem_context *ctx)
 {
-	struct intel_context *ce = &ctx->engine[engine->id];
+	struct intel_context *ce = to_intel_context(ctx, engine);
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
-	GEM_BUG_ON(ce->pin_count == 0);
 
-	if (--ce->pin_count)
-		return;
+	if (likely(ce->pin_count++))
+		return ce;
+	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
 
-	if (ce->state) {
-		ce->state->obj->pin_global--;
-		i915_vma_unpin(ce->state);
-	}
+	ce->ops = &ring_context_ops;
 
-	i915_gem_context_put(ctx);
+	return __ring_context_pin(engine, ctx, ce);
 }
 
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
@@ -1323,10 +1345,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_engine_setup_common(engine);
 
-	err = intel_engine_init_common(engine);
-	if (err)
-		goto err;
-
 	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
 	if (IS_ERR(ring)) {
 		err = PTR_ERR(ring);
@@ -1341,8 +1359,14 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	GEM_BUG_ON(engine->buffer);
 	engine->buffer = ring;
 
+	err = intel_engine_init_common(engine);
+	if (err)
+		goto err_unpin;
+
 	return 0;
 
+err_unpin:
+	intel_ring_unpin(ring);
 err_ring:
 	intel_ring_free(ring);
 err:
@@ -1428,7 +1452,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 
 	*cs++ = MI_NOOP;
 	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(rq->gem_context->engine[RCS].state) | flags;
+	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1519,7 +1543,7 @@ static int switch_context(struct i915_request *rq)
 		hw_flags = MI_FORCE_RESTORE;
 	}
 
-	if (to_ctx->engine[engine->id].state &&
+	if (rq->hw_context->state &&
 	    (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
 		GEM_BUG_ON(engine->id != RCS);
 
@@ -1567,7 +1591,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!request->gem_context->engine[request->engine->id].pin_count);
+	GEM_BUG_ON(!request->hw_context->pin_count);
 
 	/* Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
@@ -1994,8 +2018,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 	engine->reset_hw = reset_ring_common;
 
 	engine->context_pin = intel_ring_context_pin;
-	engine->context_unpin = intel_ring_context_unpin;
-
 	engine->request_alloc = ring_request_alloc;
 
 	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 584b7f246374..10713ce3de2a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -427,10 +427,9 @@ struct intel_engine_cs {
 
 	void		(*set_default_submission)(struct intel_engine_cs *engine);
 
-	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
-					  struct i915_gem_context *ctx);
-	void		(*context_unpin)(struct intel_engine_cs *engine,
-					 struct i915_gem_context *ctx);
+	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
+					     struct i915_gem_context *ctx);
+
 	int		(*request_alloc)(struct i915_request *rq);
 	int		(*init_context)(struct i915_request *rq);
 
@@ -546,7 +545,7 @@ struct intel_engine_cs {
 	 * to the kernel context and trash it as the save may not happen
 	 * before the hardware is powered down.
 	 */
-	struct i915_gem_context *last_retired_context;
+	struct intel_context *last_retired_context;
 
 	/* We track the current MI_SET_CONTEXT in order to eliminate
 	 * redundant context switches. This presumes that requests are not
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index 501becc47c0c..8904f1ce64e3 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915,
 	     const char *name)
 {
 	struct i915_gem_context *ctx;
+	unsigned int n;
 	int ret;
 
 	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915,
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
 
+	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
+		struct intel_context *ce = &ctx->__engine[n];
+
+		ce->gem_context = ctx;
+	}
+
 	ret = ida_simple_get(&i915->contexts.hw_ida,
 			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
 	if (ret < 0)
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index 78a89efa1119..fecfe061d693 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -67,18 +67,37 @@ static void hw_delay_complete(struct timer_list *t)
 	spin_unlock(&engine->hw_lock);
 }
 
-static struct intel_ring *
-mock_context_pin(struct intel_engine_cs *engine,
-		 struct i915_gem_context *ctx)
+static void mock_context_unpin(struct intel_context *ce)
+{
+	if (--ce->pin_count)
+		return;
+
+	i915_gem_context_put(ce->gem_context);
+}
+
+static void mock_context_destroy(struct intel_context *ce)
 {
-	i915_gem_context_get(ctx);
-	return engine->buffer;
+	GEM_BUG_ON(ce->pin_count);
 }
 
-static void mock_context_unpin(struct intel_engine_cs *engine,
-			       struct i915_gem_context *ctx)
+static const struct intel_context_ops mock_context_ops = {
+	.unpin = mock_context_unpin,
+	.destroy = mock_context_destroy,
+};
+
+static struct intel_context *
+mock_context_pin(struct intel_engine_cs *engine,
+		 struct i915_gem_context *ctx)
 {
-	i915_gem_context_put(ctx);
+	struct intel_context *ce = to_intel_context(ctx, engine);
+
+	if (!ce->pin_count++) {
+		i915_gem_context_get(ctx);
+		ce->ring = engine->buffer;
+		ce->ops = &mock_context_ops;
+	}
+
+	return ce;
 }
 
 static int mock_request_alloc(struct i915_request *request)
@@ -168,7 +187,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 	engine->base.status_page.page_addr = (void *)(engine + 1);
 
 	engine->base.context_pin = mock_context_pin;
-	engine->base.context_unpin = mock_context_unpin;
 	engine->base.request_alloc = mock_request_alloc;
 	engine->base.emit_flush = mock_emit_flush;
 	engine->base.emit_breadcrumb = mock_emit_breadcrumb;
@@ -213,11 +231,13 @@ void mock_engine_free(struct intel_engine_cs *engine)
 {
 	struct mock_engine *mock =
 		container_of(engine, typeof(*mock), base);
+	struct intel_context *ce;
 
 	GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
-	if (engine->last_retired_context)
-		engine->context_unpin(engine, engine->last_retired_context);
+	ce = fetch_and_zero(&engine->last_retired_context);
+	if (ce)
+		intel_context_unpin(ce);
 
 	intel_engine_fini_breadcrumbs(engine);
 
-- 
2.17.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 8+ messages in thread

* ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/3] drm/i915: Move request->ctx aside
  2018-04-19 11:58 [PATCH v2 1/3] drm/i915: Move request->ctx aside Chris Wilson
  2018-04-19 11:58 ` [PATCH v2 2/3] drm/i915: Move fiddling with engine->last_retired_context Chris Wilson
  2018-04-19 11:58 ` [PATCH v2 3/3] drm/i915: Store a pointer to intel_context in i915_request Chris Wilson
@ 2018-04-19 12:20 ` Patchwork
  2018-04-19 12:32 ` ✓ Fi.CI.BAT: success " Patchwork
  2018-04-19 16:49 ` ✗ Fi.CI.IGT: failure " Patchwork
  4 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2018-04-19 12:20 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/3] drm/i915: Move request->ctx aside
URL   : https://patchwork.freedesktop.org/series/41964/
State : warning

== Summary ==

$ dim sparse origin/drm-tip
Commit: drm/i915: Move request->ctx aside
Okay!

Commit: drm/i915: Move fiddling with engine->last_retired_context
Okay!

Commit: drm/i915: Store a pointer to intel_context in i915_request
-drivers/gpu/drm/i915/selftests/../i915_drv.h:2208:33: warning: constant 0xffffea0000000000 is so big it is unsigned long
-drivers/gpu/drm/i915/selftests/../i915_drv.h:3656:16: warning: expression using sizeof(void)
+drivers/gpu/drm/i915/selftests/../i915_drv.h:2209:33: warning: constant 0xffffea0000000000 is so big it is unsigned long
+drivers/gpu/drm/i915/selftests/../i915_drv.h:3657:16: warning: expression using sizeof(void)

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 3/3] drm/i915: Store a pointer to intel_context in i915_request
  2018-04-19 11:58 ` [PATCH v2 3/3] drm/i915: Store a pointer to intel_context in i915_request Chris Wilson
@ 2018-04-19 12:29   ` Tvrtko Ursulin
  2018-04-19 12:35     ` Chris Wilson
  0 siblings, 1 reply; 8+ messages in thread
From: Tvrtko Ursulin @ 2018-04-19 12:29 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 19/04/2018 12:58, Chris Wilson wrote:
> To ease the frequent and ugly pointer dance of
> &request->gem_context->engine[request->engine->id] during request
> submission, store that pointer as request->hw_context. One major
> advantage that we will exploit later is that this decouples the logical
> context state from the engine itself.
> 
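For reference, the pointer dance in question, before and after (a
minimal illustration, reusing the names from the patch):

	/* before: dig the per-engine state out of the GEM context */
	struct intel_context *ce =
		&rq->gem_context->engine[rq->engine->id];

	/* after: the request carries the pointer directly */
	struct intel_context *ce = rq->hw_context;
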
> v2: Set mock_context->ops so we don't crash and burn in selftests.
>      Cleanups from Tvrtko.

Which ones? I have to re-read it all? :I

> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/gvt/mmio_context.c       |   6 +-
>   drivers/gpu/drm/i915/gvt/mmio_context.h       |   2 +-
>   drivers/gpu/drm/i915/gvt/scheduler.c          | 141 ++++++----------
>   drivers/gpu/drm/i915/gvt/scheduler.h          |   1 -
>   drivers/gpu/drm/i915/i915_debugfs.c           |  20 ++-
>   drivers/gpu/drm/i915/i915_drv.h               |   1 +
>   drivers/gpu/drm/i915/i915_gem.c               |  14 +-
>   drivers/gpu/drm/i915/i915_gem_context.c       |  23 +--
>   drivers/gpu/drm/i915/i915_gem_context.h       |  29 +++-
>   drivers/gpu/drm/i915/i915_gpu_error.c         |   2 +-
>   drivers/gpu/drm/i915/i915_perf.c              |  28 ++--
>   drivers/gpu/drm/i915/i915_request.c           |  23 ++-
>   drivers/gpu/drm/i915/i915_request.h           |   3 +-
>   drivers/gpu/drm/i915/intel_engine_cs.c        |  55 ++++---
>   drivers/gpu/drm/i915/intel_guc_ads.c          |   2 +-
>   drivers/gpu/drm/i915/intel_guc_submission.c   |  15 +-
>   drivers/gpu/drm/i915/intel_lrc.c              | 151 +++++++++++-------
>   drivers/gpu/drm/i915/intel_lrc.h              |   7 -
>   drivers/gpu/drm/i915/intel_ringbuffer.c       | 104 +++++++-----
>   drivers/gpu/drm/i915/intel_ringbuffer.h       |   9 +-
>   drivers/gpu/drm/i915/selftests/mock_context.c |   7 +
>   drivers/gpu/drm/i915/selftests/mock_engine.c  |  42 +++--
>   22 files changed, 377 insertions(+), 308 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
> index a5bac83d53a9..708170e61625 100644
> --- a/drivers/gpu/drm/i915/gvt/mmio_context.c
> +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
> @@ -446,9 +446,9 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
>   
>   #define CTX_CONTEXT_CONTROL_VAL	0x03
>   
> -bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
> +bool is_inhibit_context(struct intel_context *ce)
>   {
> -	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
> +	const u32 *reg_state = ce->lrc_reg_state;
>   	u32 inhibit_mask =
>   		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
>   
> @@ -501,7 +501,7 @@ static void switch_mmio(struct intel_vgpu *pre,
>   			 * itself.
>   			 */
>   			if (mmio->in_context &&
> -			    !is_inhibit_context(s->shadow_ctx, ring_id))
> +			    !is_inhibit_context(&s->shadow_ctx->__engine[ring_id]))
>   				continue;
>   
>   			if (mmio->mask)
> diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
> index 0439eb8057a8..5c3b9ff9f96a 100644
> --- a/drivers/gpu/drm/i915/gvt/mmio_context.h
> +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
> @@ -49,7 +49,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
>   
>   void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
>   
> -bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
> +bool is_inhibit_context(struct intel_context *ce);
>   
>   int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
>   				       struct i915_request *req);
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index f64cccb2e793..c2e440200b0c 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -54,11 +54,8 @@ static void set_context_pdp_root_pointer(
>   
>   static void update_shadow_pdps(struct intel_vgpu_workload *workload)
>   {
> -	struct intel_vgpu *vgpu = workload->vgpu;
> -	int ring_id = workload->ring_id;
> -	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
>   	struct drm_i915_gem_object *ctx_obj =
> -		shadow_ctx->engine[ring_id].state->obj;
> +		workload->req->hw_context->state->obj;
>   	struct execlist_ring_context *shadow_ring_context;
>   	struct page *page;
>   
> @@ -128,9 +125,8 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
>   	struct intel_vgpu *vgpu = workload->vgpu;
>   	struct intel_gvt *gvt = vgpu->gvt;
>   	int ring_id = workload->ring_id;
> -	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
>   	struct drm_i915_gem_object *ctx_obj =
> -		shadow_ctx->engine[ring_id].state->obj;
> +		workload->req->hw_context->state->obj;
>   	struct execlist_ring_context *shadow_ring_context;
>   	struct page *page;
>   	void *dst;
> @@ -280,10 +276,8 @@ static int shadow_context_status_change(struct notifier_block *nb,
>   	return NOTIFY_OK;
>   }
>   
> -static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
> -		struct intel_engine_cs *engine)
> +static void shadow_context_descriptor_update(struct intel_context *ce)
>   {
> -	struct intel_context *ce = &ctx->engine[engine->id];
>   	u64 desc = 0;
>   
>   	desc = ce->lrc_desc;
> @@ -292,7 +286,7 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
>   	 * like GEN8_CTX_* cached in desc_template
>   	 */
>   	desc &= U64_MAX << 12;
> -	desc |= ctx->desc_template & ((1ULL << 12) - 1);
> +	desc |= ce->gem_context->desc_template & ((1ULL << 12) - 1);
>   
>   	ce->lrc_desc = desc;
>   }
> @@ -300,12 +294,11 @@ static void shadow_context_descriptor_update(struct i915_gem_context *ctx,
>   static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
>   {
>   	struct intel_vgpu *vgpu = workload->vgpu;
> +	struct i915_request *req = workload->req;
>   	void *shadow_ring_buffer_va;
>   	u32 *cs;
> -	struct i915_request *req = workload->req;
>   
> -	if (IS_KABYLAKE(req->i915) &&
> -	    is_inhibit_context(req->gem_context, req->engine->id))
> +	if (IS_KABYLAKE(req->i915) && is_inhibit_context(req->hw_context))
>   		intel_vgpu_restore_inhibit_context(vgpu, req);
>   
>   	/* allocate shadow ring buffer */
> @@ -353,60 +346,56 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
>   	struct intel_vgpu_submission *s = &vgpu->submission;
>   	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
>   	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
> -	int ring_id = workload->ring_id;
> -	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
> -	struct intel_ring *ring;
> +	struct intel_engine_cs *engine = dev_priv->engine[workload->ring_id];
> +	struct intel_context *ce;
>   	int ret;
>   
>   	lockdep_assert_held(&dev_priv->drm.struct_mutex);
>   
> -	if (workload->shadowed)
> +	if (workload->req)
>   		return 0;
>   
> +	/* Pin the shadow context by gvt even though it will be pinned
> +	 * when i915 allocates the request. That is because gvt will
> +	 * update the guest context from the shadow context when the
> +	 * workload is completed, and at that moment i915 may have
> +	 * already unpinned the shadow context, making the shadow_ctx
> +	 * pages invalid. So gvt needs to pin it itself. After updating
> +	 * the guest context, gvt can unpin the shadow_ctx safely.
> +	 */
> +	ce = intel_context_pin(shadow_ctx, engine);
> +	if (IS_ERR(ce)) {
> +		gvt_vgpu_err("fail to pin shadow context\n");
> +		return PTR_ERR(ce);
> +	}
> +
>   	shadow_ctx->desc_template &= ~(0x3 << GEN8_CTX_ADDRESSING_MODE_SHIFT);
>   	shadow_ctx->desc_template |= workload->ctx_desc.addressing_mode <<
>   				    GEN8_CTX_ADDRESSING_MODE_SHIFT;
>   
> -	if (!test_and_set_bit(ring_id, s->shadow_ctx_desc_updated))
> -		shadow_context_descriptor_update(shadow_ctx,
> -					dev_priv->engine[ring_id]);
> +	if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
> +		shadow_context_descriptor_update(ce);
>   
>   	ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
>   	if (ret)
> -		goto err_scan;
> +		goto err_unpin;
>   
>   	if ((workload->ring_id == RCS) &&
>   	    (workload->wa_ctx.indirect_ctx.size != 0)) {
>   		ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
>   		if (ret)
> -			goto err_scan;
> -	}
> -
> -	/* pin shadow context by gvt even the shadow context will be pinned
> -	 * when i915 alloc request. That is because gvt will update the guest
> -	 * context from shadow context when workload is completed, and at that
> -	 * moment, i915 may already unpined the shadow context to make the
> -	 * shadow_ctx pages invalid. So gvt need to pin itself. After update
> -	 * the guest context, gvt can unpin the shadow_ctx safely.
> -	 */
> -	ring = engine->context_pin(engine, shadow_ctx);
> -	if (IS_ERR(ring)) {
> -		ret = PTR_ERR(ring);
> -		gvt_vgpu_err("fail to pin shadow context\n");
> -		goto err_shadow;
> +			goto err_shadow;
>   	}
>   
>   	ret = populate_shadow_context(workload);
>   	if (ret)
> -		goto err_unpin;
> -	workload->shadowed = true;
> +		goto err_shadow;
> +
>   	return 0;
>   
> -err_unpin:
> -	engine->context_unpin(engine, shadow_ctx);
>   err_shadow:
>   	release_shadow_wa_ctx(&workload->wa_ctx);
> -err_scan:
> +err_unpin:
> +	intel_context_unpin(ce);
>   	return ret;
>   }
>   
> @@ -414,7 +403,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
>   {
>   	int ring_id = workload->ring_id;
>   	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
> -	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
>   	struct i915_request *rq;
>   	struct intel_vgpu *vgpu = workload->vgpu;
>   	struct intel_vgpu_submission *s = &vgpu->submission;
> @@ -437,7 +425,6 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
>   	return 0;
>   
>   err_unpin:
> -	engine->context_unpin(engine, shadow_ctx);
>   	release_shadow_wa_ctx(&workload->wa_ctx);
>   	return ret;
>   }
> @@ -495,21 +482,13 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
>   	return ret;
>   }
>   
> -static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
> +static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
>   {
> -	struct intel_vgpu_workload *workload = container_of(wa_ctx,
> -					struct intel_vgpu_workload,
> -					wa_ctx);
> -	int ring_id = workload->ring_id;
> -	struct intel_vgpu_submission *s = &workload->vgpu->submission;
> -	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
> -	struct drm_i915_gem_object *ctx_obj =
> -		shadow_ctx->engine[ring_id].state->obj;
> -	struct execlist_ring_context *shadow_ring_context;
> -	struct page *page;
> -
> -	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
> -	shadow_ring_context = kmap_atomic(page);
> +	struct intel_vgpu_workload *workload =
> +		container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
> +	struct i915_request *rq = workload->req;
> +	struct execlist_ring_context *shadow_ring_context =
> +		(struct execlist_ring_context *)rq->hw_context->lrc_reg_state;
>   
>   	shadow_ring_context->bb_per_ctx_ptr.val =
>   		(shadow_ring_context->bb_per_ctx_ptr.val &
> @@ -517,9 +496,6 @@ static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
>   	shadow_ring_context->rcs_indirect_ctx.val =
>   		(shadow_ring_context->rcs_indirect_ctx.val &
>   		(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
> -
> -	kunmap_atomic(shadow_ring_context);
> -	return 0;
>   }
>   
>   static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
> @@ -648,12 +624,9 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
>   static int dispatch_workload(struct intel_vgpu_workload *workload)
>   {
>   	struct intel_vgpu *vgpu = workload->vgpu;
> -	struct intel_vgpu_submission *s = &vgpu->submission;
> -	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
>   	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
>   	int ring_id = workload->ring_id;
> -	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
> -	int ret = 0;
> +	int ret;
>   
>   	gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
>   		ring_id, workload);
> @@ -665,10 +638,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
>   		goto out;
>   
>   	ret = prepare_workload(workload);
> -	if (ret) {
> -		engine->context_unpin(engine, shadow_ctx);
> -		goto out;
> -	}
>   
>   out:
>   	if (ret)
> @@ -743,27 +712,23 @@ static struct intel_vgpu_workload *pick_next_workload(
>   
>   static void update_guest_context(struct intel_vgpu_workload *workload)
>   {
> +	struct i915_request *rq = workload->req;
>   	struct intel_vgpu *vgpu = workload->vgpu;
>   	struct intel_gvt *gvt = vgpu->gvt;
> -	struct intel_vgpu_submission *s = &vgpu->submission;
> -	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
> -	int ring_id = workload->ring_id;
> -	struct drm_i915_gem_object *ctx_obj =
> -		shadow_ctx->engine[ring_id].state->obj;
> +	struct drm_i915_gem_object *ctx_obj = rq->hw_context->state->obj;
>   	struct execlist_ring_context *shadow_ring_context;
>   	struct page *page;
>   	void *src;
>   	unsigned long context_gpa, context_page_num;
>   	int i;
>   
> -	gvt_dbg_sched("ring id %d workload lrca %x\n", ring_id,
> -			workload->ctx_desc.lrca);
> -
> -	context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
> +	gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
> +		      workload->ctx_desc.lrca);
>   
> +	context_page_num = rq->engine->context_size;
>   	context_page_num = context_page_num >> PAGE_SHIFT;
>   
> -	if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS)
> +	if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS)
>   		context_page_num = 19;
>   
>   	i = 2;
> @@ -836,6 +801,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>   		scheduler->current_workload[ring_id];
>   	struct intel_vgpu *vgpu = workload->vgpu;
>   	struct intel_vgpu_submission *s = &vgpu->submission;
> +	struct i915_request *rq;
>   	int event;
>   
>   	mutex_lock(&gvt->lock);
> @@ -844,11 +810,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>   	 * switch to make sure request is completed.
>   	 * For the workload w/o request, directly complete the workload.
>   	 */
> -	if (workload->req) {
> -		struct drm_i915_private *dev_priv =
> -			workload->vgpu->gvt->dev_priv;
> -		struct intel_engine_cs *engine =
> -			dev_priv->engine[workload->ring_id];
> +	rq = fetch_and_zero(&workload->req);
> +	if (rq) {
>   		wait_event(workload->shadow_ctx_status_wq,
>   			   !atomic_read(&workload->shadow_ctx_active));
>   
> @@ -864,8 +827,6 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>   				workload->status = 0;
>   		}
>   
> -		i915_request_put(fetch_and_zero(&workload->req));
> -
>   		if (!workload->status && !(vgpu->resetting_eng &
>   					   ENGINE_MASK(ring_id))) {
>   			update_guest_context(workload);
> @@ -874,10 +835,13 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
>   					 INTEL_GVT_EVENT_MAX)
>   				intel_vgpu_trigger_virtual_event(vgpu, event);
>   		}
> -		mutex_lock(&dev_priv->drm.struct_mutex);
> +
>   		/* unpin shadow ctx as the shadow_ctx update is done */
> -		engine->context_unpin(engine, s->shadow_ctx);
> -		mutex_unlock(&dev_priv->drm.struct_mutex);
> +		mutex_lock(&rq->i915->drm.struct_mutex);
> +		intel_context_unpin(rq->hw_context);
> +		mutex_unlock(&rq->i915->drm.struct_mutex);
> +
> +		i915_request_put(rq);
>   	}
>   
>   	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
> @@ -1251,7 +1215,6 @@ alloc_workload(struct intel_vgpu *vgpu)
>   	atomic_set(&workload->shadow_ctx_active, 0);
>   
>   	workload->status = -EINPROGRESS;
> -	workload->shadowed = false;
>   	workload->vgpu = vgpu;
>   
>   	return workload;
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
> index 486ed57a4ad1..b0e04017d7a2 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.h
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.h
> @@ -83,7 +83,6 @@ struct intel_vgpu_workload {
>   	struct i915_request *req;
>   	/* if this workload has been dispatched to i915? */
>   	bool dispatched;
> -	bool shadowed;
>   	int status;
>   
>   	struct intel_vgpu_mm *shadow_mm;
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 792f69e44ba5..fd202185d6e5 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -377,16 +377,19 @@ static void print_batch_pool_stats(struct seq_file *m,
>   	print_file_stats(m, "[k]batch pool", stats);
>   }
>   
> -static int per_file_ctx_stats(int id, void *ptr, void *data)
> +static int per_file_ctx_stats(int idx, void *ptr, void *data)
>   {
>   	struct i915_gem_context *ctx = ptr;
> -	int n;
> +	struct intel_engine_cs *engine;
> +	enum intel_engine_id id;
> +
> +	for_each_engine(engine, ctx->i915, id) {
> +		struct intel_context *ce = to_intel_context(ctx, engine);
>   
> -	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
> -		if (ctx->engine[n].state)
> -			per_file_stats(0, ctx->engine[n].state->obj, data);
> -		if (ctx->engine[n].ring)
> -			per_file_stats(0, ctx->engine[n].ring->vma->obj, data);
> +		if (ce->state)
> +			per_file_stats(0, ce->state->obj, data);
> +		if (ce->ring)
> +			per_file_stats(0, ce->ring->vma->obj, data);
>   	}
>   
>   	return 0;
> @@ -1960,7 +1963,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
>   		seq_putc(m, '\n');
>   
>   		for_each_engine(engine, dev_priv, id) {
> -			struct intel_context *ce = &ctx->engine[engine->id];
> +			struct intel_context *ce =
> +				to_intel_context(ctx, engine);
>   
>   			seq_printf(m, "%s: ", engine->name);
>   			if (ce->state)
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 028691108125..8907f71b900a 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1945,6 +1945,7 @@ struct drm_i915_private {
>   			 */
>   			struct i915_perf_stream *exclusive_stream;
>   
> +			struct intel_context *pinned_ctx;
>   			u32 specific_ctx_id;
>   
>   			struct hrtimer poll_check_timer;
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 4dba735505d4..e186e9194fad 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -3227,14 +3227,14 @@ void i915_gem_reset(struct drm_i915_private *dev_priv,
>   	i915_retire_requests(dev_priv);
>   
>   	for_each_engine(engine, dev_priv, id) {
> -		struct i915_gem_context *ctx;
> +		struct intel_context *ce;
>   
>   		i915_gem_reset_engine(engine,
>   				      engine->hangcheck.active_request,
>   				      stalled_mask & ENGINE_MASK(id));
> -		ctx = fetch_and_zero(&engine->last_retired_context);
> -		if (ctx)
> -			engine->context_unpin(engine, ctx);
> +		ce = fetch_and_zero(&engine->last_retired_context);
> +		if (ce)
> +			intel_context_unpin(ce);
>   
>   		/*
>   		 * Ostensibily, we always want a context loaded for powersaving,
> @@ -4948,13 +4948,13 @@ void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
>   
>   static void assert_kernel_context_is_current(struct drm_i915_private *i915)
>   {
> -	struct i915_gem_context *kernel_context = i915->kernel_context;
> +	struct i915_gem_context *kctx = i915->kernel_context;
>   	struct intel_engine_cs *engine;
>   	enum intel_engine_id id;
>   
>   	for_each_engine(engine, i915, id) {
>   		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
> -		GEM_BUG_ON(engine->last_retired_context != kernel_context);
> +		GEM_BUG_ON(engine->last_retired_context->gem_context != kctx);
>   	}
>   }
>   
> @@ -5291,7 +5291,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
>   	for_each_engine(engine, i915, id) {
>   		struct i915_vma *state;
>   
> -		state = ctx->engine[id].state;
> +		state = to_intel_context(ctx, engine)->state;
>   		if (!state)
>   			continue;
>   
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 2fe779cab298..bb68f7381ff2 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -117,24 +117,18 @@ static void lut_close(struct i915_gem_context *ctx)
>   
>   static void i915_gem_context_free(struct i915_gem_context *ctx)
>   {
> -	int i;
> +	unsigned int n;
>   
>   	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
>   	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
>   
>   	i915_ppgtt_put(ctx->ppgtt);
>   
> -	for (i = 0; i < I915_NUM_ENGINES; i++) {
> -		struct intel_context *ce = &ctx->engine[i];
> +	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
> +		struct intel_context *ce = &ctx->__engine[n];
>   
> -		if (!ce->state)
> -			continue;
> -
> -		WARN_ON(ce->pin_count);
> -		if (ce->ring)
> -			intel_ring_free(ce->ring);
> -
> -		__i915_gem_object_release_unless_active(ce->state->obj);
> +		if (ce->ops)
> +			ce->ops->destroy(ce);

"if (ce->ops)" means "has this context ever been used/pinned. I'd put a 
comment to that effect.
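
Something like this perhaps (wording illustrative only):

	/*
	 * ce->ops is only installed on first pin, so an unset ops
	 * means this engine's context was never used and there is
	 * nothing to destroy.
	 */
	if (ce->ops)
		ce->ops->destroy(ce);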

>   	}
>   
>   	kfree(ctx->name);
> @@ -266,6 +260,7 @@ __create_hw_context(struct drm_i915_private *dev_priv,
>   		    struct drm_i915_file_private *file_priv)
>   {
>   	struct i915_gem_context *ctx;
> +	unsigned int n;
>   	int ret;
>   
>   	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
> @@ -283,6 +278,12 @@ __create_hw_context(struct drm_i915_private *dev_priv,
>   	ctx->i915 = dev_priv;
>   	ctx->sched.priority = I915_PRIORITY_NORMAL;
>   
> +	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
> +		struct intel_context *ce = &ctx->__engine[n];
> +
> +		ce->gem_context = ctx;
> +	}

Or I'd just set the ops here and call them unconditionally on free,
letting them do the right thing internally. That would be a more
symmetrical design, I think.
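
A sketch of that alternative, assuming a hypothetical null_context_ops
whose destroy() simply tolerates a never-pinned context:

	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
		struct intel_context *ce = &ctx->__engine[n];

		ce->gem_context = ctx;
		/* replaced by the real backend ops on first pin */
		ce->ops = &null_context_ops;
	}

i915_gem_context_free() could then call ce->ops->destroy(ce)
unconditionally for each engine.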

> +
>   	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
>   	INIT_LIST_HEAD(&ctx->handles_list);
>   
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
> index b12a8a8c5af9..48a11a98e617 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.h
> +++ b/drivers/gpu/drm/i915/i915_gem_context.h
> @@ -45,6 +45,11 @@ struct intel_ring;
>   
>   #define DEFAULT_CONTEXT_HANDLE 0
>   
> +struct intel_context_ops {
> +	void (*unpin)(struct intel_context *ce);
> +	void (*destroy)(struct intel_context *ce);
> +};
> +
>   /**
>    * struct i915_gem_context - client state
>    *
> @@ -144,12 +149,15 @@ struct i915_gem_context {
>   
>   	/** engine: per-engine logical HW state */
>   	struct intel_context {
> +		struct i915_gem_context *gem_context;
>   		struct i915_vma *state;
>   		struct intel_ring *ring;
>   		u32 *lrc_reg_state;
>   		u64 lrc_desc;
>   		int pin_count;
> -	} engine[I915_NUM_ENGINES];
> +
> +		const struct intel_context_ops *ops;
> +	} __engine[I915_NUM_ENGINES];
>   
>   	/** ring_size: size for allocating the per-engine ring buffer */
>   	u32 ring_size;
> @@ -256,6 +264,25 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx)
>   	return !ctx->file_priv;
>   }
>   
> +static inline struct intel_context *
> +to_intel_context(struct i915_gem_context *ctx,
> +		 const struct intel_engine_cs *engine)
> +{
> +	return &ctx->__engine[engine->id];
> +}
> +
> +static inline struct intel_context *
> +intel_context_pin(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
> +{
> +	return engine->context_pin(engine, ctx);
> +}
> +
> +static inline void intel_context_unpin(struct intel_context *ce)
> +{
> +	GEM_BUG_ON(!ce->ops);

This GEM_BUG_ON would then also go away, being redundant with the
pin_count underflow check in unpin.
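
That is (a sketch):

	static inline void intel_context_unpin(struct intel_context *ce)
	{
		/* ce->ops is always valid once installed at creation */
		ce->ops->unpin(ce);
	}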

> +	ce->ops->unpin(ce);
> +}
> +
>   /* i915_gem_context.c */
>   int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv);
>   void i915_gem_contexts_lost(struct drm_i915_private *dev_priv);
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 269574b7254c..4801047d4d6a 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -1474,7 +1474,7 @@ static void gem_record_rings(struct i915_gpu_state *error)
>   
>   			ee->ctx =
>   				i915_error_object_create(i915,
> -							 ctx->engine[i].state);
> +							 request->hw_context->state);
>   
>   			error->simulated |=
>   				i915_gem_context_no_error_capture(ctx);
> diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
> index bfc906cd4e5e..11afab651050 100644
> --- a/drivers/gpu/drm/i915/i915_perf.c
> +++ b/drivers/gpu/drm/i915/i915_perf.c
> @@ -1221,7 +1221,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
>   		dev_priv->perf.oa.specific_ctx_id = stream->ctx->hw_id;
>   	} else {
>   		struct intel_engine_cs *engine = dev_priv->engine[RCS];
> -		struct intel_ring *ring;
> +		struct intel_context *ce;
>   		int ret;
>   
>   		ret = i915_mutex_lock_interruptible(&dev_priv->drm);
> @@ -1234,19 +1234,19 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
>   		 *
>   		 * NB: implied RCS engine...
>   		 */
> -		ring = engine->context_pin(engine, stream->ctx);
> +		ce = intel_context_pin(stream->ctx, engine);
>   		mutex_unlock(&dev_priv->drm.struct_mutex);
> -		if (IS_ERR(ring))
> -			return PTR_ERR(ring);
> +		if (IS_ERR(ce))
> +			return PTR_ERR(ce);
>   
> +		dev_priv->perf.oa.pinned_ctx = ce;
>   
>   		/*
>   		 * Explicitly track the ID (instead of calling
>   		 * i915_ggtt_offset() on the fly) considering the difference
>   		 * with gen8+ and execlists
>   		 */
> -		dev_priv->perf.oa.specific_ctx_id =
> -			i915_ggtt_offset(stream->ctx->engine[engine->id].state);
> +		dev_priv->perf.oa.specific_ctx_id = i915_ggtt_offset(ce->state);
>   	}
>   
>   	return 0;
> @@ -1262,17 +1262,14 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
>   static void oa_put_render_ctx_id(struct i915_perf_stream *stream)
>   {
>   	struct drm_i915_private *dev_priv = stream->dev_priv;
> +	struct intel_context *ce;
>   
> -	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
> -		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
> -	} else {
> -		struct intel_engine_cs *engine = dev_priv->engine[RCS];
> +	dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
>   
> +	ce = fetch_and_zero(&dev_priv->perf.oa.pinned_ctx);
> +	if (ce) {
>   		mutex_lock(&dev_priv->drm.struct_mutex);
> -
> -		dev_priv->perf.oa.specific_ctx_id = INVALID_CTX_ID;
> -		engine->context_unpin(engine, stream->ctx);
> -
> +		intel_context_unpin(ce);
>   		mutex_unlock(&dev_priv->drm.struct_mutex);
>   	}
>   }
> @@ -1759,6 +1756,7 @@ static int gen8_switch_to_updated_kernel_context(struct drm_i915_private *dev_pr
>   static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
>   				       const struct i915_oa_config *oa_config)
>   {
> +	struct intel_engine_cs *engine = dev_priv->engine[RCS];
>   	struct i915_gem_context *ctx;
>   	int ret;
>   	unsigned int wait_flags = I915_WAIT_LOCKED;
> @@ -1789,7 +1787,7 @@ static int gen8_configure_all_contexts(struct drm_i915_private *dev_priv,
>   
>   	/* Update all contexts now that we've stalled the submission. */
>   	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
> -		struct intel_context *ce = &ctx->engine[RCS];
> +		struct intel_context *ce = to_intel_context(ctx, engine);
>   		u32 *regs;
>   
>   		/* OA settings will be set upon first use */
> diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
> index f913e56604ea..da97ce4e18a9 100644
> --- a/drivers/gpu/drm/i915/i915_request.c
> +++ b/drivers/gpu/drm/i915/i915_request.c
> @@ -410,8 +410,8 @@ static void i915_request_retire(struct i915_request *request)
>   	 * the subsequent request.
>   	 */
>   	if (engine->last_retired_context)
> -		engine->context_unpin(engine, engine->last_retired_context);
> -	engine->last_retired_context = request->gem_context;
> +		intel_context_unpin(engine->last_retired_context);
> +	engine->last_retired_context = request->hw_context;
>   
>   	spin_lock_irq(&request->lock);
>   	if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags))
> @@ -613,7 +613,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
>   {
>   	struct drm_i915_private *i915 = engine->i915;
>   	struct i915_request *rq;
> -	struct intel_ring *ring;
> +	struct intel_context *ce;
>   	int ret;
>   
>   	lockdep_assert_held(&i915->drm.struct_mutex);
> @@ -637,16 +637,15 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
>   	 * GGTT space, so do this first before we reserve a seqno for
>   	 * ourselves.
>   	 */
> -	ring = engine->context_pin(engine, ctx);
> -	if (IS_ERR(ring))
> -		return ERR_CAST(ring);
> -	GEM_BUG_ON(!ring);
> +	ce = intel_context_pin(ctx, engine);
> +	if (IS_ERR(ce))
> +		return ERR_CAST(ce);
>   
>   	ret = reserve_engine(engine);
>   	if (ret)
>   		goto err_unpin;
>   
> -	ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
> +	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
>   	if (ret)
>   		goto err_unreserve;
>   
> @@ -733,7 +732,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
>   	rq->i915 = i915;
>   	rq->engine = engine;
>   	rq->gem_context = ctx;
> -	rq->ring = ring;
> +	rq->hw_context = ce;
> +	rq->ring = ce->ring;
>   
>   	/* No zalloc, must clear what we need by hand */
>   	rq->global_seqno = 0;
> @@ -786,7 +786,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
>   err_unreserve:
>   	unreserve_engine(engine);
>   err_unpin:
> -	engine->context_unpin(engine, ctx);
> +	intel_context_unpin(ce);
>   	return ERR_PTR(ret);
>   }
>   
> @@ -972,7 +972,6 @@ i915_request_await_object(struct i915_request *to,
>   void __i915_request_add(struct i915_request *request, bool flush_caches)
>   {
>   	struct intel_engine_cs *engine = request->engine;
> -	struct intel_ring *ring = request->ring;
>   	struct intel_timeline *timeline = request->timeline;
>   	struct i915_request *prev;
>   	u32 *cs;
> @@ -1048,7 +1047,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
>   	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
>   	i915_gem_active_set(&timeline->last_request, request);
>   
> -	list_add_tail(&request->ring_link, &ring->request_list);
> +	list_add_tail(&request->ring_link, &request->ring->request_list);
>   	request->emitted_jiffies = jiffies;
>   
>   	/*
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 6a029b3f0a88..7ea939f974ea 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -92,8 +92,9 @@ struct i915_request {
>   	 * i915_request_free() will then decrement the refcount on the
>   	 * context.
>   	 */
> -	struct i915_gem_context *gem_context;
>   	struct intel_engine_cs *engine;
> +	struct i915_gem_context *gem_context;
> +	struct intel_context *hw_context;
>   	struct intel_ring *ring;
>   	struct intel_timeline *timeline;
>   	struct intel_signal_node signaling;
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index b26867eed4ca..5070cfae5058 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -665,6 +665,12 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
>   	return 0;
>   }
>   
> +static void __intel_context_unpin(struct i915_gem_context *ctx,
> +				  struct intel_engine_cs *engine)
> +{
> +	intel_context_unpin(to_intel_context(ctx, engine));
> +}
> +
>   /**
>    * intel_engines_init_common - initialize cengine state which might require hw access
>    * @engine: Engine to initialize.
> @@ -678,7 +684,8 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
>    */
>   int intel_engine_init_common(struct intel_engine_cs *engine)
>   {
> -	struct intel_ring *ring;
> +	struct drm_i915_private *i915 = engine->i915;
> +	struct intel_context *ce;
>   	int ret;
>   
>   	engine->set_default_submission(engine);
> @@ -690,19 +697,18 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
>   	 * be available. To avoid this we always pin the default
>   	 * context.
>   	 */
> -	ring = engine->context_pin(engine, engine->i915->kernel_context);
> -	if (IS_ERR(ring))
> -		return PTR_ERR(ring);
> +	ce = intel_context_pin(i915->kernel_context, engine);
> +	if (IS_ERR(ce))
> +		return PTR_ERR(ce);
>   
>   	/*
>   	 * Similarly the preempt context must always be available so that
>   	 * we can interrupt the engine at any time.
>   	 */
> -	if (engine->i915->preempt_context) {
> -		ring = engine->context_pin(engine,
> -					   engine->i915->preempt_context);
> -		if (IS_ERR(ring)) {
> -			ret = PTR_ERR(ring);
> +	if (i915->preempt_context) {
> +		ce = intel_context_pin(i915->preempt_context, engine);
> +		if (IS_ERR(ce)) {
> +			ret = PTR_ERR(ce);
>   			goto err_unpin_kernel;
>   		}
>   	}
> @@ -711,7 +717,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
>   	if (ret)
>   		goto err_unpin_preempt;
>   
> -	if (HWS_NEEDS_PHYSICAL(engine->i915))
> +	if (HWS_NEEDS_PHYSICAL(i915))
>   		ret = init_phys_status_page(engine);
>   	else
>   		ret = init_status_page(engine);
> @@ -723,10 +729,11 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
>   err_breadcrumbs:
>   	intel_engine_fini_breadcrumbs(engine);
>   err_unpin_preempt:
> -	if (engine->i915->preempt_context)
> -		engine->context_unpin(engine, engine->i915->preempt_context);
> +	if (i915->preempt_context)
> +		__intel_context_unpin(i915->preempt_context, engine);
> +
>   err_unpin_kernel:
> -	engine->context_unpin(engine, engine->i915->kernel_context);
> +	__intel_context_unpin(i915->kernel_context, engine);
>   	return ret;
>   }
>   
> @@ -739,6 +746,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
>    */
>   void intel_engine_cleanup_common(struct intel_engine_cs *engine)
>   {
> +	struct drm_i915_private *i915 = engine->i915;
> +
>   	intel_engine_cleanup_scratch(engine);
>   
>   	if (HWS_NEEDS_PHYSICAL(engine->i915))
> @@ -753,9 +762,9 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
>   	if (engine->default_state)
>   		i915_gem_object_put(engine->default_state);
>   
> -	if (engine->i915->preempt_context)
> -		engine->context_unpin(engine, engine->i915->preempt_context);
> -	engine->context_unpin(engine, engine->i915->kernel_context);
> +	if (i915->preempt_context)
> +		__intel_context_unpin(i915->preempt_context, engine);
> +	__intel_context_unpin(i915->kernel_context, engine);
>   }
>   
>   u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
> @@ -997,8 +1006,8 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
>    */
>   bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
>   {
> -	const struct i915_gem_context * const kernel_context =
> -		engine->i915->kernel_context;
> +	const struct intel_context *kernel_context =
> +		to_intel_context(engine->i915->kernel_context, engine);
>   	struct i915_request *rq;
>   
>   	lockdep_assert_held(&engine->i915->drm.struct_mutex);
> @@ -1010,7 +1019,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
>   	 */
>   	rq = __i915_gem_active_peek(&engine->timeline->last_request);
>   	if (rq)
> -		return rq->gem_context == kernel_context;
> +		return rq->hw_context == kernel_context;
>   	else
>   		return engine->last_retired_context == kernel_context;
>   }
> @@ -1095,16 +1104,16 @@ void intel_engines_unpark(struct drm_i915_private *i915)
>    */
>   void intel_engine_lost_context(struct intel_engine_cs *engine)
>   {
> -	struct i915_gem_context *ctx;
> +	struct intel_context *ce;
>   
>   	lockdep_assert_held(&engine->i915->drm.struct_mutex);
>   
>   	engine->legacy_active_context = NULL;
>   	engine->legacy_active_ppgtt = NULL;
>   
> -	ctx = fetch_and_zero(&engine->last_retired_context);
> -	if (ctx)
> -		engine->context_unpin(engine, ctx);
> +	ce = fetch_and_zero(&engine->last_retired_context);
> +	if (ce)
> +		intel_context_unpin(ce);
>   }
>   
>   bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
> diff --git a/drivers/gpu/drm/i915/intel_guc_ads.c b/drivers/gpu/drm/i915/intel_guc_ads.c
> index 334cb5202e1c..b8b4d346dd4d 100644
> --- a/drivers/gpu/drm/i915/intel_guc_ads.c
> +++ b/drivers/gpu/drm/i915/intel_guc_ads.c
> @@ -121,7 +121,7 @@ int intel_guc_ads_create(struct intel_guc *guc)
>   	 * to find it. Note that we have to skip our header (1 page),
>   	 * because our GuC shared data is there.
>   	 */
> -	kernel_ctx_vma = dev_priv->kernel_context->engine[RCS].state;
> +	kernel_ctx_vma = dev_priv->kernel_context->__engine[RCS].state;
>   	blob->ads.golden_context_lrca =
>   		intel_guc_ggtt_offset(guc, kernel_ctx_vma) + skipped_offset;
>   
> diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
> index 8527fa1f5c3e..2fe3f9daa56d 100644
> --- a/drivers/gpu/drm/i915/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/intel_guc_submission.c
> @@ -362,7 +362,7 @@ static void guc_stage_desc_init(struct intel_guc *guc,
>   	desc->db_id = client->doorbell_id;
>   
>   	for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
> -		struct intel_context *ce = &ctx->engine[engine->id];
> +		struct intel_context *ce = to_intel_context(ctx, engine);
>   		u32 guc_engine_id = engine->guc_id;
>   		struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
>   
> @@ -511,9 +511,7 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
>   {
>   	struct intel_guc_client *client = guc->execbuf_client;
>   	struct intel_engine_cs *engine = rq->engine;
> -	u32 ctx_desc =
> -		lower_32_bits(intel_lr_context_descriptor(rq->gem_context,
> -							  engine));
> +	u32 ctx_desc = lower_32_bits(rq->hw_context->lrc_desc);
>   	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
>   
>   	spin_lock(&client->wq_lock);
> @@ -551,8 +549,8 @@ static void inject_preempt_context(struct work_struct *work)
>   					     preempt_work[engine->id]);
>   	struct intel_guc_client *client = guc->preempt_client;
>   	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
> -	u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
> -								 engine));
> +	u32 ctx_desc = lower_32_bits(to_intel_context(client->owner,
> +						      engine)->lrc_desc);
>   	u32 data[7];
>   
>   	/*
> @@ -708,7 +706,7 @@ static void guc_dequeue(struct intel_engine_cs *engine)
>   		struct i915_request *rq, *rn;
>   
>   		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
> -			if (last && rq->gem_context != last->gem_context) {
> +			if (last && rq->hw_context != last->hw_context) {
>   				if (port == last_port) {
>   					__list_del_many(&p->requests,
>   							&rq->sched.link);
> @@ -991,7 +989,8 @@ static void guc_fill_preempt_context(struct intel_guc *guc)
>   	enum intel_engine_id id;
>   
>   	for_each_engine(engine, dev_priv, id) {
> -		struct intel_context *ce = &client->owner->engine[id];
> +		struct intel_context *ce =
> +			to_intel_context(client->owner, engine);
>   		u32 addr = intel_hws_preempt_done_address(engine);
>   		u32 *cs;
>   
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 0777226e65a6..a3de8522fbc2 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -164,7 +164,8 @@
>   #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
>   
>   static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
> -					    struct intel_engine_cs *engine);
> +					    struct intel_engine_cs *engine,
> +					    struct intel_context *ce);
>   static void execlists_init_reg_state(u32 *reg_state,
>   				     struct i915_gem_context *ctx,
>   				     struct intel_engine_cs *engine,
> @@ -221,9 +222,9 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
>    */
>   static void
>   intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
> -				   struct intel_engine_cs *engine)
> +				   struct intel_engine_cs *engine,
> +				   struct intel_context *ce)
>   {
> -	struct intel_context *ce = &ctx->engine[engine->id];
>   	u64 desc;
>   
>   	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH)));
> @@ -414,7 +415,7 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
>   
>   static u64 execlists_update_context(struct i915_request *rq)
>   {
> -	struct intel_context *ce = &rq->gem_context->engine[rq->engine->id];
> +	struct intel_context *ce = rq->hw_context;
>   	struct i915_hw_ppgtt *ppgtt =
>   		rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
>   	u32 *reg_state = ce->lrc_reg_state;
> @@ -491,14 +492,14 @@ static void execlists_submit_ports(struct intel_engine_cs *engine)
>   	execlists_clear_active(execlists, EXECLISTS_ACTIVE_HWACK);
>   }
>   
> -static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
> +static bool ctx_single_port_submission(const struct intel_context *ce)
>   {
>   	return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
> -		i915_gem_context_force_single_submission(ctx));
> +		i915_gem_context_force_single_submission(ce->gem_context));
>   }
>   
> -static bool can_merge_ctx(const struct i915_gem_context *prev,
> -			  const struct i915_gem_context *next)
> +static bool can_merge_ctx(const struct intel_context *prev,
> +			  const struct intel_context *next)
>   {
>   	if (prev != next)
>   		return false;
> @@ -523,7 +524,7 @@ static void inject_preempt_context(struct intel_engine_cs *engine)
>   {
>   	struct intel_engine_execlists *execlists = &engine->execlists;
>   	struct intel_context *ce =
> -		&engine->i915->preempt_context->engine[engine->id];
> +		to_intel_context(engine->i915->preempt_context, engine);
>   	unsigned int n;
>   
>   	GEM_BUG_ON(execlists->preempt_complete_status !=
> @@ -666,8 +667,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   			 * second request, and so we never need to tell the
>   			 * hardware about the first.
>   			 */
> -			if (last && !can_merge_ctx(rq->gem_context,
> -						   last->gem_context)) {
> +			if (last &&
> +			    !can_merge_ctx(rq->hw_context, last->hw_context)) {
>   				/*
>   				 * If we are on the second port and cannot
>   				 * combine this request with the last, then we
> @@ -686,14 +687,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   				 * the same context (even though a different
>   				 * request) to the second port.
>   				 */
> -				if (ctx_single_port_submission(last->gem_context) ||
> -				    ctx_single_port_submission(rq->gem_context)) {
> +				if (ctx_single_port_submission(last->hw_context) ||
> +				    ctx_single_port_submission(rq->hw_context)) {
>   					__list_del_many(&p->requests,
>   							&rq->sched.link);
>   					goto done;
>   				}
>   
> -				GEM_BUG_ON(last->gem_context == rq->gem_context);
> +				GEM_BUG_ON(last->hw_context == rq->hw_context);
>   
>   				if (submit)
>   					port_assign(port, last);
> @@ -1277,6 +1278,37 @@ static void execlists_schedule(struct i915_request *request,
>   	spin_unlock_irq(&engine->timeline->lock);
>   }
>   
> +static void execlists_context_destroy(struct intel_context *ce)
> +{
> +	GEM_BUG_ON(!ce->state);
> +	GEM_BUG_ON(ce->pin_count);
> +
> +	intel_ring_free(ce->ring);
> +	__i915_gem_object_release_unless_active(ce->state->obj);
> +}
> +
> +static void __execlists_context_unpin(struct intel_context *ce)
> +{
> +	intel_ring_unpin(ce->ring);
> +
> +	ce->state->obj->pin_global--;
> +	i915_gem_object_unpin_map(ce->state->obj);
> +	i915_vma_unpin(ce->state);
> +
> +	i915_gem_context_put(ce->gem_context);
> +}
> +
> +static void execlists_context_unpin(struct intel_context *ce)
> +{
> +	lockdep_assert_held(&ce->gem_context->i915->drm.struct_mutex);
> +	GEM_BUG_ON(ce->pin_count == 0);
> +
> +	if (--ce->pin_count)
> +		return;
> +
> +	__execlists_context_unpin(ce);
> +}
> +
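
Two things worth calling out here for readers: pin_count is a plain
counter serialized by struct_mutex (hence the lockdep assert) rather
than an atomic, and routing unpin through ce->ops means callers no
longer need the engine back-pointer. The usage contract, condensed from
the i915_request_alloc() hunk above:

	ce = intel_context_pin(ctx, engine);	/* first pin maps the state */
	if (IS_ERR(ce))
		return ERR_CAST(ce);
	...
	intel_context_unpin(ce);		/* last unpin releases it */
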
>   static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
>   {
>   	unsigned int flags;
> @@ -1300,21 +1332,15 @@ static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
>   	return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
>   }
>   
> -static struct intel_ring *
> -execlists_context_pin(struct intel_engine_cs *engine,
> -		      struct i915_gem_context *ctx)
> +static struct intel_context *
> +__execlists_context_pin(struct intel_engine_cs *engine,
> +			struct i915_gem_context *ctx,
> +			struct intel_context *ce)
>   {
> -	struct intel_context *ce = &ctx->engine[engine->id];
>   	void *vaddr;
>   	int ret;
>   
> -	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> -
> -	if (likely(ce->pin_count++))
> -		goto out;
> -	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
> -
> -	ret = execlists_context_deferred_alloc(ctx, engine);
> +	ret = execlists_context_deferred_alloc(ctx, engine, ce);
>   	if (ret)
>   		goto err;
>   	GEM_BUG_ON(!ce->state);
> @@ -1333,7 +1359,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
>   	if (ret)
>   		goto unpin_map;
>   
> -	intel_lr_context_descriptor_update(ctx, engine);
> +	intel_lr_context_descriptor_update(ctx, engine, ce);
>   
>   	ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
>   	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
> @@ -1342,8 +1368,7 @@ execlists_context_pin(struct intel_engine_cs *engine,
>   
>   	ce->state->obj->pin_global++;
>   	i915_gem_context_get(ctx);
> -out:
> -	return ce->ring;
> +	return ce;
>   
>   unpin_map:
>   	i915_gem_object_unpin_map(ce->state->obj);
> @@ -1354,33 +1379,33 @@ execlists_context_pin(struct intel_engine_cs *engine,
>   	return ERR_PTR(ret);
>   }
>   
> -static void execlists_context_unpin(struct intel_engine_cs *engine,
> -				    struct i915_gem_context *ctx)
> +static const struct intel_context_ops execlists_context_ops = {
> +	.unpin = execlists_context_unpin,
> +	.destroy = execlists_context_destroy,
> +};
> +
> +static struct intel_context *
> +execlists_context_pin(struct intel_engine_cs *engine,
> +		      struct i915_gem_context *ctx)
>   {
> -	struct intel_context *ce = &ctx->engine[engine->id];
> +	struct intel_context *ce = to_intel_context(ctx, engine);
>   
>   	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> -	GEM_BUG_ON(ce->pin_count == 0);
> -
> -	if (--ce->pin_count)
> -		return;
>   
> -	intel_ring_unpin(ce->ring);
> +	if (likely(ce->pin_count++))
> +		return ce;
> +	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
>   
> -	ce->state->obj->pin_global--;
> -	i915_gem_object_unpin_map(ce->state->obj);
> -	i915_vma_unpin(ce->state);
> +	ce->ops = &execlists_context_ops;
>   
> -	i915_gem_context_put(ctx);
> +	return __execlists_context_pin(engine, ctx, ce);
>   }
>   
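
The ce->ops assignment on the first-pin path above is what makes the
new intel_context_unpin() dispatch work. The i915_gem_context.h side is
not quoted in this mail, but presumably the entry points reduce to
something like this (my reconstruction, not verbatim from the patch):

	static inline struct intel_context *
	intel_context_pin(struct i915_gem_context *ctx,
			  struct intel_engine_cs *engine)
	{
		return engine->context_pin(engine, ctx);
	}

	static inline void intel_context_unpin(struct intel_context *ce)
	{
		ce->ops->unpin(ce);
	}

i.e. pinning still goes through the engine vfunc, while unpin (and
destroy) go through the per-context ops installed here.
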
>   static int execlists_request_alloc(struct i915_request *request)
>   {
> -	struct intel_engine_cs *engine = request->engine;
> -	struct intel_context *ce = &request->gem_context->engine[engine->id];
> -	int ret;
> +	int err;

This one, for instance - the ret to err rename is one of the unrelated
cleanups I would rather see split out.

>   
> -	GEM_BUG_ON(!ce->pin_count);
> +	GEM_BUG_ON(!request->hw_context->pin_count);
>   
>   	/* Flush enough space to reduce the likelihood of waiting after
>   	 * we start building the request - in which case we will just
> @@ -1388,9 +1413,9 @@ static int execlists_request_alloc(struct i915_request *request)
>   	 */
>   	request->reserved_space += EXECLISTS_REQUEST_SIZE;
>   
> -	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
> -	if (ret)
> -		return ret;
> +	err = intel_ring_wait_for_space(request->ring, request->reserved_space);
> +	if (err)
> +		return err;
>   
>   	/* Note that after this point, we have committed to using
>   	 * this request as it is being used to both track the
> @@ -1780,8 +1805,8 @@ static void reset_common_ring(struct intel_engine_cs *engine,
>   			      struct i915_request *request)
>   {
>   	struct intel_engine_execlists * const execlists = &engine->execlists;
> -	struct intel_context *ce;
>   	unsigned long flags;
> +	u32 *regs;
>   
>   	GEM_TRACE("%s request global=%x, current=%d\n",
>   		  engine->name, request ? request->global_seqno : 0,
> @@ -1831,14 +1856,13 @@ static void reset_common_ring(struct intel_engine_cs *engine,
>   	 * future request will be after userspace has had the opportunity
>   	 * to recreate its own state.
>   	 */
> -	ce = &request->gem_context->engine[engine->id];
> -	execlists_init_reg_state(ce->lrc_reg_state,
> -				 request->gem_context, engine, ce->ring);
> +	regs = request->hw_context->lrc_reg_state;
> +	execlists_init_reg_state(regs,
> +				 request->gem_context, engine, request->ring);
>   
>   	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
> -	ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
> -		i915_ggtt_offset(ce->ring->vma);
> -	ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
> +	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
> +	regs[CTX_RING_HEAD + 1] = request->postfix;
>   
>   	request->ring->head = request->postfix;
>   	intel_ring_update_space(request->ring);
> @@ -2176,8 +2200,6 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>   	engine->reset_hw = reset_common_ring;
>   
>   	engine->context_pin = execlists_context_pin;
> -	engine->context_unpin = execlists_context_unpin;
> -
>   	engine->request_alloc = execlists_request_alloc;
>   
>   	engine->emit_flush = gen8_emit_flush;
> @@ -2272,9 +2294,13 @@ static int logical_ring_init(struct intel_engine_cs *engine)
>   	}
>   
>   	engine->execlists.preempt_complete_status = ~0u;
> -	if (engine->i915->preempt_context)
> +	if (engine->i915->preempt_context) {
> +		struct intel_context *ce =
> +			to_intel_context(engine->i915->preempt_context, engine);
> +
>   		engine->execlists.preempt_complete_status =
> -			upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc);
> +			upper_32_bits(ce->lrc_desc);
> +	}
>   
>   	return 0;
>   
> @@ -2408,7 +2434,7 @@ static void execlists_init_reg_state(u32 *regs,
>   	struct drm_i915_private *dev_priv = engine->i915;
>   	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
>   	u32 base = engine->mmio_base;
> -	bool rcs = engine->id == RCS;
> +	bool rcs = engine->class == RENDER_CLASS;

And this one would make me happier if it weren't in this patch. But OK.

>   
>   	/* A context is actually a big batch buffer with several
>   	 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
> @@ -2553,10 +2579,10 @@ populate_lr_context(struct i915_gem_context *ctx,
>   }
>   
>   static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
> -					    struct intel_engine_cs *engine)
> +					    struct intel_engine_cs *engine,
> +					    struct intel_context *ce)
>   {
>   	struct drm_i915_gem_object *ctx_obj;
> -	struct intel_context *ce = &ctx->engine[engine->id];
>   	struct i915_vma *vma;
>   	uint32_t context_size;
>   	struct intel_ring *ring;
> @@ -2627,7 +2653,8 @@ void intel_lr_context_resume(struct drm_i915_private *dev_priv)
>   	 */
>   	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
>   		for_each_engine(engine, dev_priv, id) {
> -			struct intel_context *ce = &ctx->engine[engine->id];
> +			struct intel_context *ce =
> +				to_intel_context(ctx, engine);
>   			u32 *reg;
>   
>   			if (!ce->state)
> diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
> index 59d7b86012e9..1593194e930c 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.h
> +++ b/drivers/gpu/drm/i915/intel_lrc.h
> @@ -104,11 +104,4 @@ struct i915_gem_context;
>   
>   void intel_lr_context_resume(struct drm_i915_private *dev_priv);
>   
> -static inline uint64_t
> -intel_lr_context_descriptor(struct i915_gem_context *ctx,
> -			    struct intel_engine_cs *engine)
> -{
> -	return ctx->engine[engine->id].lrc_desc;
> -}
> -
>   #endif /* _INTEL_LRC_H_ */
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 3ea8eb5d49f5..9f526afa11ed 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -558,8 +558,7 @@ static void reset_ring_common(struct intel_engine_cs *engine,
>   	 */
>   	if (request) {
>   		struct drm_i915_private *dev_priv = request->i915;
> -		struct intel_context *ce =
> -			&request->gem_context->engine[engine->id];
> +		struct intel_context *ce = request->hw_context;
>   		struct i915_hw_ppgtt *ppgtt;
>   
>   		if (ce->state) {
> @@ -1164,9 +1163,33 @@ intel_ring_free(struct intel_ring *ring)
>   	kfree(ring);
>   }
>   
> -static int context_pin(struct i915_gem_context *ctx)
> +static void intel_ring_context_destroy(struct intel_context *ce)
>   {
> -	struct i915_vma *vma = ctx->engine[RCS].state;
> +	GEM_BUG_ON(ce->pin_count);
> +
> +	if (ce->state)
> +		__i915_gem_object_release_unless_active(ce->state->obj);
> +}
> +
> +static void intel_ring_context_unpin(struct intel_context *ce)
> +{
> +	lockdep_assert_held(&ce->gem_context->i915->drm.struct_mutex);
> +	GEM_BUG_ON(ce->pin_count == 0);
> +
> +	if (--ce->pin_count)
> +		return;
> +
> +	if (ce->state) {
> +		ce->state->obj->pin_global--;
> +		i915_vma_unpin(ce->state);
> +	}
> +
> +	i915_gem_context_put(ce->gem_context);
> +}
> +
> +static int __context_pin(struct intel_context *ce)
> +{
> +	struct i915_vma *vma = ce->state;
>   	int ret;
>   
>   	/*
> @@ -1253,25 +1276,19 @@ alloc_context_vma(struct intel_engine_cs *engine)
>   	return ERR_PTR(err);
>   }
>   
> -static struct intel_ring *
> -intel_ring_context_pin(struct intel_engine_cs *engine,
> -		       struct i915_gem_context *ctx)
> +static struct intel_context *
> +__ring_context_pin(struct intel_engine_cs *engine,
> +		   struct i915_gem_context *ctx,
> +		   struct intel_context *ce)
>   {
> -	struct intel_context *ce = &ctx->engine[engine->id];
> -	int ret;
> -
> -	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> -
> -	if (likely(ce->pin_count++))
> -		goto out;
> -	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
> +	int err;
>   
>   	if (!ce->state && engine->context_size) {
>   		struct i915_vma *vma;
>   
>   		vma = alloc_context_vma(engine);
>   		if (IS_ERR(vma)) {
> -			ret = PTR_ERR(vma);
> +			err = PTR_ERR(vma);
>   			goto err;
>   		}
>   
> @@ -1279,8 +1296,8 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
>   	}
>   
>   	if (ce->state) {
> -		ret = context_pin(ctx);
> -		if (ret)
> +		err = __context_pin(ce);
> +		if (err)
>   			goto err;
>   
>   		ce->state->obj->pin_global++;
> @@ -1288,32 +1305,37 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
>   
>   	i915_gem_context_get(ctx);
>   
> -out:
>   	/* One ringbuffer to rule them all */
> -	return engine->buffer;
> +	GEM_BUG_ON(!engine->buffer);
> +	ce->ring = engine->buffer;
> +
> +	return ce;
>   
>   err:
>   	ce->pin_count = 0;
> -	return ERR_PTR(ret);
> +	return ERR_PTR(err);
>   }
>   
> -static void intel_ring_context_unpin(struct intel_engine_cs *engine,
> -				     struct i915_gem_context *ctx)
> +static const struct intel_context_ops ring_context_ops = {
> +	.unpin = intel_ring_context_unpin,
> +	.destroy = intel_ring_context_destroy,
> +};
> +
> +static struct intel_context *
> +intel_ring_context_pin(struct intel_engine_cs *engine,
> +		       struct i915_gem_context *ctx)
>   {
> -	struct intel_context *ce = &ctx->engine[engine->id];
> +	struct intel_context *ce = to_intel_context(ctx, engine);
>   
>   	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
> -	GEM_BUG_ON(ce->pin_count == 0);
>   
> -	if (--ce->pin_count)
> -		return;
> +	if (likely(ce->pin_count++))
> +		return ce;
> +	GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
>   
> -	if (ce->state) {
> -		ce->state->obj->pin_global--;
> -		i915_vma_unpin(ce->state);
> -	}
> +	ce->ops = &ring_context_ops;
>   
> -	i915_gem_context_put(ctx);
> +	return __ring_context_pin(engine, ctx, ce);
>   }
>   
>   static int intel_init_ring_buffer(struct intel_engine_cs *engine)
> @@ -1323,10 +1345,6 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
>   
>   	intel_engine_setup_common(engine);
>   
> -	err = intel_engine_init_common(engine);
> -	if (err)
> -		goto err;
> -
>   	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
>   	if (IS_ERR(ring)) {
>   		err = PTR_ERR(ring);
> @@ -1341,8 +1359,14 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
>   	GEM_BUG_ON(engine->buffer);
>   	engine->buffer = ring;
>   
> +	err = intel_engine_init_common(engine);
> +	if (err)
> +		goto err_unpin;
> +
>   	return 0;
>   
> +err_unpin:
> +	intel_ring_unpin(ring);
>   err_ring:
>   	intel_ring_free(ring);
>   err:
> @@ -1428,7 +1452,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
>   
>   	*cs++ = MI_NOOP;
>   	*cs++ = MI_SET_CONTEXT;
> -	*cs++ = i915_ggtt_offset(rq->gem_context->engine[RCS].state) | flags;
> +	*cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
>   	/*
>   	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
>   	 * WaMiSetContext_Hang:snb,ivb,vlv
> @@ -1519,7 +1543,7 @@ static int switch_context(struct i915_request *rq)
>   		hw_flags = MI_FORCE_RESTORE;
>   	}
>   
> -	if (to_ctx->engine[engine->id].state &&
> +	if (rq->hw_context->state &&
>   	    (to_ctx != from_ctx || hw_flags & MI_FORCE_RESTORE)) {
>   		GEM_BUG_ON(engine->id != RCS);
>   
> @@ -1567,7 +1591,7 @@ static int ring_request_alloc(struct i915_request *request)
>   {
>   	int ret;
>   
> -	GEM_BUG_ON(!request->gem_context->engine[request->engine->id].pin_count);
> +	GEM_BUG_ON(!request->hw_context->pin_count);
>   
>   	/* Flush enough space to reduce the likelihood of waiting after
>   	 * we start building the request - in which case we will just
> @@ -1994,8 +2018,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
>   	engine->reset_hw = reset_ring_common;
>   
>   	engine->context_pin = intel_ring_context_pin;
> -	engine->context_unpin = intel_ring_context_unpin;
> -
>   	engine->request_alloc = ring_request_alloc;
>   
>   	engine->emit_breadcrumb = i9xx_emit_breadcrumb;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 584b7f246374..10713ce3de2a 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -427,10 +427,9 @@ struct intel_engine_cs {
>   
>   	void		(*set_default_submission)(struct intel_engine_cs *engine);
>   
> -	struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
> -					  struct i915_gem_context *ctx);
> -	void		(*context_unpin)(struct intel_engine_cs *engine,
> -					 struct i915_gem_context *ctx);
> +	struct intel_context *(*context_pin)(struct intel_engine_cs *engine,
> +					     struct i915_gem_context *ctx);
> +
>   	int		(*request_alloc)(struct i915_request *rq);
>   	int		(*init_context)(struct i915_request *rq);
>   
> @@ -546,7 +545,7 @@ struct intel_engine_cs {
>   	 * to the kernel context and trash it as the save may not happen
>   	 * before the hardware is powered down.
>   	 */
> -	struct i915_gem_context *last_retired_context;
> +	struct intel_context *last_retired_context;
>   
>   	/* We track the current MI_SET_CONTEXT in order to eliminate
>   	 * redudant context switches. This presumes that requests are not
> diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
> index 501becc47c0c..8904f1ce64e3 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_context.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_context.c
> @@ -30,6 +30,7 @@ mock_context(struct drm_i915_private *i915,
>   	     const char *name)
>   {
>   	struct i915_gem_context *ctx;
> +	unsigned int n;
>   	int ret;
>   
>   	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
> @@ -43,6 +44,12 @@ mock_context(struct drm_i915_private *i915,
>   	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
>   	INIT_LIST_HEAD(&ctx->handles_list);
>   
> +	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++) {
> +		struct intel_context *ce = &ctx->__engine[n];
> +
> +		ce->gem_context = ctx;
> +	}
> +
>   	ret = ida_simple_get(&i915->contexts.hw_ida,
>   			     0, MAX_CONTEXT_HW_ID, GFP_KERNEL);
>   	if (ret < 0)
> diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
> index 78a89efa1119..fecfe061d693 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_engine.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
> @@ -67,18 +67,37 @@ static void hw_delay_complete(struct timer_list *t)
>   	spin_unlock(&engine->hw_lock);
>   }
>   
> -static struct intel_ring *
> -mock_context_pin(struct intel_engine_cs *engine,
> -		 struct i915_gem_context *ctx)
> +static void mock_context_unpin(struct intel_context *ce)
> +{
> +	if (--ce->pin_count)
> +		return;
> +
> +	i915_gem_context_put(ce->gem_context);
> +}
> +
> +static void mock_context_destroy(struct intel_context *ce)
>   {
> -	i915_gem_context_get(ctx);
> -	return engine->buffer;
> +	GEM_BUG_ON(ce->pin_count);
>   }
>   
> -static void mock_context_unpin(struct intel_engine_cs *engine,
> -			       struct i915_gem_context *ctx)
> +static const struct intel_context_ops mock_context_ops = {
> +	.unpin = mock_context_unpin,
> +	.destroy = mock_context_destroy,
> +};
> +
> +static struct intel_context *
> +mock_context_pin(struct intel_engine_cs *engine,
> +		 struct i915_gem_context *ctx)
>   {
> -	i915_gem_context_put(ctx);
> +	struct intel_context *ce = to_intel_context(ctx, engine);
> +
> +	if (!ce->pin_count++) {
> +		i915_gem_context_get(ctx);
> +		ce->ring = engine->buffer;
> +		ce->ops = &mock_context_ops;
> +	}
> +
> +	return ce;
>   }
>   
>   static int mock_request_alloc(struct i915_request *request)
> @@ -168,7 +187,6 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
>   	engine->base.status_page.page_addr = (void *)(engine + 1);
>   
>   	engine->base.context_pin = mock_context_pin;
> -	engine->base.context_unpin = mock_context_unpin;
>   	engine->base.request_alloc = mock_request_alloc;
>   	engine->base.emit_flush = mock_emit_flush;
>   	engine->base.emit_breadcrumb = mock_emit_breadcrumb;
> @@ -213,11 +231,13 @@ void mock_engine_free(struct intel_engine_cs *engine)
>   {
>   	struct mock_engine *mock =
>   		container_of(engine, typeof(*mock), base);
> +	struct intel_context *ce;
>   
>   	GEM_BUG_ON(timer_pending(&mock->hw_delay));
>   
> -	if (engine->last_retired_context)
> -		engine->context_unpin(engine, engine->last_retired_context);
> +	ce = fetch_and_zero(&engine->last_retired_context);
> +	if (ce)
> +		intel_context_unpin(ce);
>   
>   	intel_engine_fini_breadcrumbs(engine);
>   
> 

Looks OK and I can give r-b for the non-GVT parts, being 6/10 happy
with it.

5/10 would be meh, so six because there are aspects which make the
design cleaner. But it is not fully clean, so only six.
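
To illustrate the cleaner aspect, the per-callsite pointer dance goes
away; a rough before/after condensed from the hunks above:

	/* before */
	struct intel_context *ce =
		&rq->gem_context->engine[rq->engine->id];
	...
	engine->context_unpin(engine, rq->gem_context);

	/* after */
	struct intel_context *ce = rq->hw_context;
	...
	intel_context_unpin(ce);

so callers stop caring how a gem_context maps onto per-engine state.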

I'd be 7/10 happy if the unrelated cleanups were left out - at least
two of them I can remember (the ret to err rename and the engine->class
check). And 8/10 if the ringbuffer init ordering fix were a separate
patch. More symmetrical constructors/destructors would be 9/10. And
10/10 if there weren't so many in-flight streams of work this will
conflict with.

Hope this makes my position clear. :)))

Regards,

Tvrtko

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 8+ messages in thread

* ✓ Fi.CI.BAT: success for series starting with [v2,1/3] drm/i915: Move request->ctx aside
  2018-04-19 11:58 [PATCH v2 1/3] drm/i915: Move request->ctx aside Chris Wilson
                   ` (2 preceding siblings ...)
  2018-04-19 12:20 ` ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/3] drm/i915: Move request->ctx aside Patchwork
@ 2018-04-19 12:32 ` Patchwork
  2018-04-19 16:49 ` ✗ Fi.CI.IGT: failure " Patchwork
  4 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2018-04-19 12:32 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/3] drm/i915: Move request->ctx aside
URL   : https://patchwork.freedesktop.org/series/41964/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_4068 -> Patchwork_8752 =

== Summary - SUCCESS ==

  No regressions found.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/41964/revisions/1/mbox/

== Known issues ==

  Here are the changes found in Patchwork_8752 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@kms_chamelium@hdmi-hpd-fast:
      fi-kbl-7500u:       SKIP -> FAIL (fdo#103841, fdo#102672)

    
    ==== Possible fixes ====

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b:
      fi-ivb-3520m:       DMESG-WARN (fdo#106084) -> PASS

    
  fdo#102672 https://bugs.freedesktop.org/show_bug.cgi?id=102672
  fdo#103841 https://bugs.freedesktop.org/show_bug.cgi?id=103841
  fdo#106084 https://bugs.freedesktop.org/show_bug.cgi?id=106084


== Participating hosts (34 -> 31) ==

  Missing    (3): fi-ctg-p8600 fi-ilk-m540 fi-skl-6700hq 


== Build changes ==

    * Linux: CI_DRM_4068 -> Patchwork_8752

  CI_DRM_4068: 28fecc12e5c2b1beb9ab89e3616266d5d5e58e3d @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4441: 83ba5b7d3bde48b383df41792fc9c955a5a23bdb @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_8752: 85da0887e43c97fe443c440052e124aecbcb704a @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4441: e60d247eb359f044caf0c09904da14e39d7adca1 @ git://anongit.freedesktop.org/piglit


== Linux commits ==

85da0887e43c drm/i915: Store a pointer to intel_context in i915_request
a7b57f6eb191 drm/i915: Move fiddling with engine->last_retired_context
d2cce053130b drm/i915: Move request->ctx aside

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8752/issues.html

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v2 3/3] drm/i915: Store a pointer to intel_context in i915_request
  2018-04-19 12:29   ` Tvrtko Ursulin
@ 2018-04-19 12:35     ` Chris Wilson
  0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2018-04-19 12:35 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2018-04-19 13:29:42)
> 
> On 19/04/2018 12:58, Chris Wilson wrote:
> > To ease the frequent and ugly pointer dance of
> > &request->gem_context->engine[request->engine->id] during request
> > submission, store that pointer as request->hw_context. One major
> > advantage that we will exploit later is that this decouples the logical
> > context state from the engine itself.
> > 
> > v2: Set mock_context->ops so we don't crash and burn in selftests.
> >      Cleanups from Tvrtko.
> 
> Which ones? I have to re-read it all? :I

All but fixing engine->class and the semantics differences between
hw_context and gem_context. Other than that I'm seeing if the gvt guys
even read the ml ;)
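
(For anyone skimming: the semantics difference is that rq->gem_context
stays the GEM/uABI context the request was submitted with, while
rq->hw_context is the per-engine slice of it owning the pinned ring and
register state. Roughly, after this series:

	struct intel_context *ce =
		to_intel_context(rq->gem_context, rq->engine);
	/* ce == rq->hw_context for the lifetime of the request */

so the old engine[] lookups collapse to a single pointer chase.)
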
-Chris

^ permalink raw reply	[flat|nested] 8+ messages in thread

* ✗ Fi.CI.IGT: failure for series starting with [v2,1/3] drm/i915: Move request->ctx aside
  2018-04-19 11:58 [PATCH v2 1/3] drm/i915: Move request->ctx aside Chris Wilson
                   ` (3 preceding siblings ...)
  2018-04-19 12:32 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2018-04-19 16:49 ` Patchwork
  4 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2018-04-19 16:49 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [v2,1/3] drm/i915: Move request->ctx aside
URL   : https://patchwork.freedesktop.org/series/41964/
State : failure

== Summary ==

= CI Bug Log - changes from CI_DRM_4068_full -> Patchwork_8752_full =

== Summary - FAILURE ==

  Serious unknown changes coming with Patchwork_8752_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_8752_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/41964/revisions/1/mbox/

== Possible new issues ==

  Here are the unknown changes that may have been introduced in Patchwork_8752_full:

  === IGT changes ===

    ==== Possible regressions ====

    igt@gem_eio@wait-1us:
      shard-glk:          PASS -> FAIL

    
    ==== Warnings ====

    igt@gem_mmap_wc@set-cache-level:
      shard-glk:          PASS -> SKIP +95

    igt@gem_mocs_settings@mocs-rc6-dirty-render:
      shard-kbl:          SKIP -> PASS +2

    igt@gem_mocs_settings@mocs-rc6-vebox:
      shard-kbl:          PASS -> SKIP +2

    igt@kms_mmap_write_crc:
      shard-glk:          SKIP -> PASS +94

    
== Known issues ==

  Here are the changes found in Patchwork_8752_full that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@kms_cursor_legacy@flip-vs-cursor-legacy:
      shard-hsw:          PASS -> FAIL (fdo#102670)

    igt@kms_flip@flip-vs-modeset-vs-hang-interruptible:
      shard-kbl:          PASS -> DMESG-WARN (fdo#105602, fdo#103558, fdo#103313) +1

    igt@kms_flip@modeset-vs-vblank-race-interruptible:
      shard-glk:          PASS -> FAIL (fdo#103060)

    igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-pgflip-blt:
      shard-kbl:          PASS -> DMESG-WARN (fdo#105602, fdo#103558) +20

    
    ==== Possible fixes ====

    igt@drv_suspend@forcewake:
      shard-kbl:          INCOMPLETE (fdo#103665) -> PASS

    igt@kms_flip@2x-wf_vblank-ts-check:
      shard-hsw:          FAIL (fdo#100368) -> PASS +1

    igt@kms_flip@absolute-wf_vblank-interruptible:
      shard-glk:          FAIL (fdo#106087) -> PASS

    igt@kms_flip@flip-vs-absolute-wf_vblank-interruptible:
      shard-glk:          FAIL (fdo#100368) -> SKIP

    igt@kms_hdmi_inject@inject-audio:
      shard-glk:          FAIL (fdo#102370) -> PASS

    igt@kms_setmode@basic:
      shard-glk:          FAIL (fdo#99912) -> PASS

    
  fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
  fdo#102370 https://bugs.freedesktop.org/show_bug.cgi?id=102370
  fdo#102670 https://bugs.freedesktop.org/show_bug.cgi?id=102670
  fdo#103060 https://bugs.freedesktop.org/show_bug.cgi?id=103060
  fdo#103313 https://bugs.freedesktop.org/show_bug.cgi?id=103313
  fdo#103558 https://bugs.freedesktop.org/show_bug.cgi?id=103558
  fdo#103665 https://bugs.freedesktop.org/show_bug.cgi?id=103665
  fdo#105602 https://bugs.freedesktop.org/show_bug.cgi?id=105602
  fdo#106087 https://bugs.freedesktop.org/show_bug.cgi?id=106087
  fdo#99912 https://bugs.freedesktop.org/show_bug.cgi?id=99912


== Participating hosts (6 -> 5) ==

  Missing    (1): shard-glkb 


== Build changes ==

    * Linux: CI_DRM_4068 -> Patchwork_8752

  CI_DRM_4068: 28fecc12e5c2b1beb9ab89e3616266d5d5e58e3d @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_4441: 83ba5b7d3bde48b383df41792fc9c955a5a23bdb @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_8752: 85da0887e43c97fe443c440052e124aecbcb704a @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4441: e60d247eb359f044caf0c09904da14e39d7adca1 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_8752/shards.html

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2018-04-19 16:49 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-04-19 11:58 [PATCH v2 1/3] drm/i915: Move request->ctx aside Chris Wilson
2018-04-19 11:58 ` [PATCH v2 2/3] drm/i915: Move fiddling with engine->last_retired_context Chris Wilson
2018-04-19 11:58 ` [PATCH v2 3/3] drm/i915: Store a pointer to intel_context in i915_request Chris Wilson
2018-04-19 12:29   ` Tvrtko Ursulin
2018-04-19 12:35     ` Chris Wilson
2018-04-19 12:20 ` ✗ Fi.CI.SPARSE: warning for series starting with [v2,1/3] drm/i915: Move request->ctx aside Patchwork
2018-04-19 12:32 ` ✓ Fi.CI.BAT: success " Patchwork
2018-04-19 16:49 ` ✗ Fi.CI.IGT: failure " Patchwork
