* Unify request construction
@ 2016-07-20 13:11 Chris Wilson
From: Chris Wilson @ 2016-07-20 13:11 UTC (permalink / raw)
  To: intel-gfx

This series deduplicates a lot of the request construction code, so that
the bugs in it only have to be fixed once.
-Chris



* [PATCH 01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
  2016-07-20 13:11 Unify request construction Chris Wilson
@ 2016-07-20 13:11 ` Chris Wilson
From: Chris Wilson @ 2016-07-20 13:11 UTC (permalink / raw)
  To: intel-gfx

Both perform the same actions with more or less indirection, so just
unify the code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  54 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  53 ++--
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  62 ++---
 drivers/gpu/drm/i915/intel_display.c       |  80 +++---
 drivers/gpu/drm/i915/intel_lrc.c           | 184 +++++++-------
 drivers/gpu/drm/i915/intel_lrc.h           |  26 --
 drivers/gpu/drm/i915/intel_mocs.c          |  38 ++-
 drivers/gpu/drm/i915/intel_overlay.c       |  50 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 387 +++++++++++++++--------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  25 +-
 10 files changed, 463 insertions(+), 496 deletions(-)
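
As background for the unification: both intel_logical_ring_emit() and
intel_ring_emit() boil down to writing a dword at the ringbuffer's
software tail and bumping it, which is why one set of helpers taking
struct intel_ringbuffer can serve the legacy and execlists paths alike.
A minimal sketch follows; the struct layout and field names (vaddr,
tail, size) are assumptions made for the example, not the literal
definitions in intel_ringbuffer.h.

#include <linux/types.h>	/* u32 */

struct sketch_ringbuffer {		/* fields assumed for illustration */
	void *vaddr;			/* CPU mapping of the ring pages */
	u32 tail;			/* software tail offset, in bytes */
	u32 size;			/* ring size, a power of two */
};

static inline void sketch_ring_emit(struct sketch_ringbuffer *ring, u32 data)
{
	/* Write one dword at the current software tail and advance it. */
	*(u32 *)(ring->vaddr + ring->tail) = data;
	ring->tail += sizeof(u32);
}

static inline void sketch_ring_advance(struct sketch_ringbuffer *ring)
{
	/* Only the software tail wraps here; the hardware tail register
	 * is written later, when the request is actually submitted.
	 */
	ring->tail &= ring->size - 1;
}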

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index bd13d084e19c..b6d10bd763a0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -552,7 +552,7 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -567,7 +567,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(dev_priv)) {
-		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = req->engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
@@ -589,64 +589,64 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(engine,
+			intel_ring_emit(ring,
 					MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_engine(signaller, dev_priv) {
-				if (signaller == engine)
+				if (signaller == req->engine)
 					continue;
 
-				intel_ring_emit_reg(engine,
+				intel_ring_emit_reg(ring,
 						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(engine,
+				intel_ring_emit(ring,
 						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 		}
 	}
 
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_SET_CONTEXT);
-	intel_ring_emit(engine,
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_SET_CONTEXT);
+	intel_ring_emit(ring,
 			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(engine,
+			intel_ring_emit(ring,
 					MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_engine(signaller, dev_priv) {
-				if (signaller == engine)
+				if (signaller == req->engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(engine, last_reg);
-				intel_ring_emit(engine,
+				intel_ring_emit_reg(ring, last_reg);
+				intel_ring_emit(ring,
 						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(engine,
+			intel_ring_emit(ring,
 					MI_STORE_REGISTER_MEM |
 					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(engine, last_reg);
-			intel_ring_emit(engine, engine->scratch.gtt_offset);
-			intel_ring_emit(engine, MI_NOOP);
+			intel_ring_emit_reg(ring, last_reg);
+			intel_ring_emit(ring, req->engine->scratch.gtt_offset);
+			intel_ring_emit(ring, MI_NOOP);
 		}
-		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	}
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return ret;
 }
@@ -654,7 +654,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
 	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int i, ret;
 
 	if (!remap_info)
@@ -669,13 +669,13 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice)
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-		intel_ring_emit(engine, remap_info[i]);
+		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+		intel_ring_emit(ring, remap_info[i]);
 	}
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6482ec24ff3b..e2c4d99a1e7f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1171,14 +1171,12 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 }
 
 static int
-i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct drm_i915_gem_request *req)
+i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
+	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -1188,12 +1186,12 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 		return ret;
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(engine, 0);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1256,9 +1254,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas)
 {
-	struct drm_device *dev = params->dev;
-	struct intel_engine_cs *engine = params->engine;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
 	u32 instp_mask;
@@ -1272,34 +1268,31 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
-	     "%s didn't clear reload\n", engine->name);
-
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
 	switch (instp_mode) {
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
+		if (instp_mode != 0 && params->engine->id != RCS) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
 
 		if (instp_mode != dev_priv->relative_constants_mode) {
-			if (INTEL_INFO(dev)->gen < 4) {
+			if (INTEL_INFO(dev_priv)->gen < 4) {
 				DRM_DEBUG("no rel constants on pre-gen4\n");
 				return -EINVAL;
 			}
 
-			if (INTEL_INFO(dev)->gen > 5 &&
+			if (INTEL_INFO(dev_priv)->gen > 5 &&
 			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
 				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
 				return -EINVAL;
 			}
 
 			/* The HW changed the meaning on this bit on gen6 */
-			if (INTEL_INFO(dev)->gen >= 6)
+			if (INTEL_INFO(dev_priv)->gen >= 6)
 				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
 		}
 		break;
@@ -1308,23 +1301,25 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	if (engine == &dev_priv->engine[RCS] &&
+	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
+		struct intel_ringbuffer *ring = params->request->ringbuf;
+
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, INSTPM);
-		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, params->request);
+		ret = i915_reset_gen7_sol_offsets(params->request);
 		if (ret)
 			return ret;
 	}
@@ -1336,9 +1331,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (exec_len == 0)
 		exec_len = params->batch_obj->base.size;
 
-	ret = engine->dispatch_execbuffer(params->request,
-					exec_start, exec_len,
-					params->dispatch_flags);
+	ret = params->engine->dispatch_execbuffer(params->request,
+						  exec_start, exec_len,
+						  params->dispatch_flags);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 30da543e1bdf..abc439be2049 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	BUG_ON(entry >= 4);
@@ -678,13 +678,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(engine, upper_32_bits(addr));
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(engine, lower_32_bits(addr));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(req->engine, entry));
+	intel_ring_emit(ring, upper_32_bits(addr));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(req->engine, entry));
+	intel_ring_emit(ring, lower_32_bits(addr));
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1660,11 +1660,13 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = req->engine->flush(req,
+				 I915_GEM_GPU_DOMAINS,
+				 I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1672,13 +1674,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(engine, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(engine, get_pd_offset(ppgtt));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(req->engine));
+	intel_ring_emit(ring, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(req->engine));
+	intel_ring_emit(ring, get_pd_offset(ppgtt));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1686,11 +1688,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = req->engine->flush(req,
+				 I915_GEM_GPU_DOMAINS,
+				 I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1698,17 +1702,19 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(engine, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(engine, get_pd_offset(ppgtt));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(req->engine));
+	intel_ring_emit(ring, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(req->engine));
+	intel_ring_emit(ring, get_pd_offset(ppgtt));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
-	if (engine->id != RCS) {
-		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	if (req->engine->id != RCS) {
+		ret = req->engine->flush(req,
+					 I915_GEM_GPU_DOMAINS,
+					 I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 78beb7e9d384..d18ed32e6a31 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11123,7 +11123,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11139,13 +11139,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(engine, 0); /* aux display base address, unused */
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, 0); /* aux display base address, unused */
 
 	return 0;
 }
@@ -11157,7 +11157,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11170,13 +11170,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, MI_NOOP);
 
 	return 0;
 }
@@ -11188,7 +11188,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11202,10 +11202,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
 			obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11214,7 +11214,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(engine, pf | pipesrc);
+	intel_ring_emit(ring, pf | pipesrc);
 
 	return 0;
 }
@@ -11226,7 +11226,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11236,10 +11236,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11249,7 +11249,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(engine, pf | pipesrc);
+	intel_ring_emit(ring, pf | pipesrc);
 
 	return 0;
 }
@@ -11261,7 +11261,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -11282,7 +11282,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	}
 
 	len = 4;
-	if (engine->id == RCS) {
+	if (req->engine->id == RCS) {
 		len += 6;
 		/*
 		 * On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11320,30 +11320,30 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
-	if (engine->id == RCS) {
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, DERRMR);
-		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+	if (req->engine->id == RCS) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, DERRMR);
+		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					  DERRMR_PIPEB_PRI_FLIP_DONE |
 					  DERRMR_PIPEC_PRI_FLIP_DONE));
 		if (IS_GEN8(dev))
-			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		else
-			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
 					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(engine, DERRMR);
-		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
+		intel_ring_emit_reg(ring, DERRMR);
+		intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256);
 		if (IS_GEN8(dev)) {
-			intel_ring_emit(engine, 0);
-			intel_ring_emit(engine, MI_NOOP);
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring, MI_NOOP);
 		}
 	}
 
-	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(engine, (MI_NOOP));
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, (MI_NOOP));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 439aeab807b1..8bf2ea5a2de3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -773,7 +773,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *engine = request->engine;
 
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_advance(ringbuf);
 	request->tail = ringbuf->tail;
 
 	/*
@@ -782,9 +782,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	 *
 	 * Caller must reserve WA_TAIL_DWORDS for us!
 	 */
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	/* We keep the previous context alive until we retire the following
 	 * request. This ensures that any the context object is still pinned
@@ -868,11 +868,11 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 		if (ret)
 			return ret;
 
-		intel_logical_ring_emit(ringbuf, MI_NOOP);
-		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_logical_ring_emit_reg(ringbuf, INSTPM);
-		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-		intel_logical_ring_advance(ringbuf);
+		intel_ring_emit(ringbuf, MI_NOOP);
+		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ringbuf, INSTPM);
+		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ringbuf);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -1045,14 +1045,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
-		intel_logical_ring_emit(ringbuf, w->reg[i].value);
+		intel_ring_emit_reg(ringbuf, w->reg[i].addr);
+		intel_ring_emit(ringbuf, w->reg[i].value);
 	}
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ringbuf, MI_NOOP);
 
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_advance(ringbuf);
 
 	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
@@ -1546,8 +1546,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
 
@@ -1555,20 +1554,18 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_logical_ring_emit_reg(ringbuf,
-					    GEN8_RING_PDP_UDW(engine, i));
-		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_logical_ring_emit_reg(ringbuf,
-					    GEN8_RING_PDP_LDW(engine, i));
-		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(req->engine, i));
+		intel_ring_emit(ring, upper_32_bits(pd_daddr));
+		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(req->engine, i));
+		intel_ring_emit(ring, lower_32_bits(pd_daddr));
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1576,7 +1573,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -1603,14 +1600,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
-				(ppgtt<<8) |
-				(dispatch_flags & I915_DISPATCH_RS ?
-				 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
-	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
+			(ppgtt<<8) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1633,9 +1630,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *engine = ringbuf->engine;
-	struct drm_i915_private *dev_priv = request->i915;
+	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_engine_cs *engine = ring->engine;
 	uint32_t cmd;
 	int ret;
 
@@ -1654,17 +1650,17 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (engine == &dev_priv->engine[VCS])
+		if (engine->id == VCS)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_logical_ring_emit(ringbuf, cmd);
-	intel_logical_ring_emit(ringbuf,
-				I915_GEM_HWS_SCRATCH_ADDR |
-				MI_FLUSH_DW_USE_GTT);
-	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
-	intel_logical_ring_emit(ringbuf, 0); /* value */
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring,
+			I915_GEM_HWS_SCRATCH_ADDR |
+			MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, 0); /* upper addr */
+	intel_ring_emit(ring, 0); /* value */
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1673,8 +1669,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *engine = ringbuf->engine;
+	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 flags = 0;
@@ -1725,40 +1721,40 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		return ret;
 
 	if (vf_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
 	if (dc_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-	intel_logical_ring_emit(ringbuf, flags);
-	intel_logical_ring_emit(ringbuf, scratch_addr);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, 0);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
 
 	if (dc_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1787,7 +1783,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1797,21 +1793,20 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-	intel_logical_ring_emit(ringbuf,
-				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
-	intel_logical_ring_emit(ringbuf,
-				intel_hws_seqno_address(request->engine) |
-				MI_FLUSH_DW_USE_GTT);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, request->fence.seqno);
-	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+	intel_ring_emit(ring,
+			intel_hws_seqno_address(request->engine) |
+			MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, request->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
 }
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
@@ -1825,19 +1820,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-	intel_logical_ring_emit(ringbuf,
-				(PIPE_CONTROL_GLOBAL_GTT_IVB |
-				 PIPE_CONTROL_CS_STALL |
-				 PIPE_CONTROL_QW_WRITE));
-	intel_logical_ring_emit(ringbuf,
-				intel_hws_seqno_address(request->engine));
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring,
+			(PIPE_CONTROL_GLOBAL_GTT_IVB |
+			 PIPE_CONTROL_CS_STALL |
+			 PIPE_CONTROL_QW_WRITE));
+	intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(request));
 	/* We're thrashing one dword of HWS. */
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index aa3ac023aa73..9d58cd332bc3 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -73,32 +73,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
 int intel_engines_init(struct drm_device *dev);
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-/**
- * intel_logical_ring_advance() - advance the ringbuffer tail
- * @ringbuf: Ringbuffer to advance.
- *
- * The tail is only updated in our logical ringbuffer struct.
- */
-static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
-{
-	__intel_ringbuffer_advance(ringbuf);
-}
-
-/**
- * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
- * @ringbuf: Ringbuffer to write to.
- * @data: DWORD to write.
- */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
-					   u32 data)
-{
-	__intel_ringbuffer_emit(ringbuf, data);
-}
-static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
-					       i915_reg_t reg)
-{
-	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
-}
 
 /* Logical Ring Contexts */
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 2280c329d37f..2743424f2746 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -288,14 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf,
-				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_logical_ring_emit_reg(ringbuf,
-					    mocs_register(engine, index));
-		intel_logical_ring_emit(ringbuf,
-					table->table[index].control_value);
+		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
+		intel_ring_emit(ringbuf, table->table[index].control_value);
 	}
 
 	/*
@@ -307,14 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_logical_ring_emit_reg(ringbuf,
-					    mocs_register(engine, index));
-		intel_logical_ring_emit(ringbuf,
-					table->table[0].control_value);
+		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
+		intel_ring_emit(ringbuf, table->table[0].control_value);
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
@@ -352,19 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf,
+	intel_ring_emit(ringbuf,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf,
-					l3cc_combine(table, 2*i, 2*i+1));
+		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1));
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
 		i++;
 	}
 
@@ -374,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
+		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 8654a323722e..92722e614955 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -235,6 +235,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
+	struct intel_ringbuffer *ring;
 	int ret;
 
 	WARN_ON(overlay->active);
@@ -252,11 +253,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	overlay->active = true;
 
-	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	ring = req->ringbuf;
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -268,6 +270,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
+	struct intel_ringbuffer *ring;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -292,9 +295,10 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 		return ret;
 	}
 
-	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(engine, flip_addr);
-	intel_ring_advance(engine);
+	ring = req->ringbuf;
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_advance(ring);
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
@@ -336,6 +340,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
+	struct intel_ringbuffer *ring;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -357,24 +362,25 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 		return ret;
 	}
 
+	ring = req->ringbuf;
 	/* wait for overlay to go idle */
-	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(engine, flip_addr);
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
 	if (IS_I830(dev_priv)) {
 		/* Workaround: Don't disable the overlay fully, since otherwise
 		 * it dies on the next OVERLAY_ON cmd. */
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
 	} else {
-		intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-		intel_ring_emit(engine, flip_addr);
-		intel_ring_emit(engine,
+		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(ring, flip_addr);
+		intel_ring_emit(ring,
 				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
@@ -420,6 +426,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
+		struct intel_ringbuffer *ring;
 
 		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req))
@@ -431,10 +438,11 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 			return ret;
 		}
 
-		intel_ring_emit(engine,
+		ring = req->ringbuf;
+		intel_ring_emit(ring,
 				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b844e6984ae7..da8134d43b26 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -58,7 +58,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
 					    ringbuf->tail, ringbuf->size);
 }
 
-static void __intel_ring_advance(struct intel_engine_cs *engine)
+static void __intel_engine_submit(struct intel_engine_cs *engine)
 {
 	struct intel_ringbuffer *ringbuf = engine->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
@@ -70,7 +70,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 cmd;
 	int ret;
 
@@ -85,9 +85,9 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -97,7 +97,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 cmd;
 	int ret;
 
@@ -129,23 +129,20 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 	 * are flushed at any MI_FLUSH.
 	 */
 
-	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
-		cmd &= ~MI_NO_WRITE_FLUSH;
-	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+	cmd = MI_FLUSH;
+	if (invalidate_domains) {
 		cmd |= MI_EXE_FLUSH;
-
-	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-	    (IS_G4X(req->i915) || IS_GEN5(req->i915)))
-		cmd |= MI_INVALIDATE_ISP;
+		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
+			cmd |= MI_INVALIDATE_ISP;
+	}
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -190,34 +187,35 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 scratch_addr =
+		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
 			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(engine, 0); /* low dword */
-	intel_ring_emit(engine, 0); /* high dword */
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0); /* low dword */
+	intel_ring_emit(ring, 0); /* high dword */
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -226,9 +224,10 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 scratch_addr =
+		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -266,11 +265,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(engine, flags);
-	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -278,19 +277,20 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
-			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring,
+			PIPE_CONTROL_CS_STALL |
+			PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -299,9 +299,10 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 scratch_addr =
+		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/*
@@ -350,11 +351,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(engine, flags);
-	intel_ring_emit(engine, scratch_addr);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -363,20 +364,20 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(engine, flags);
-	intel_ring_emit(engine, scratch_addr);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -385,8 +386,8 @@ static int
 gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	u32 flags = 0;
 	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 flags = 0;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -679,14 +680,14 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct i915_workarounds *w = &req->i915->workarounds;
 	int ret, i;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
+	req->engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -695,16 +696,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(engine, w->reg[i].addr);
-		intel_ring_emit(engine, w->reg[i].value);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
 	}
-	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
+	req->engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -1323,7 +1324,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_engine_cs *signaller = signaller_req->engine;
+	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1338,20 +1339,23 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+		u64 gtt_offset =
+			signaller_req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
 		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
-					   PIPE_CONTROL_QW_WRITE |
-					   PIPE_CONTROL_CS_STALL);
+		intel_ring_emit(signaller,
+				PIPE_CONTROL_GLOBAL_GTT_IVB |
+				PIPE_CONTROL_QW_WRITE |
+				PIPE_CONTROL_CS_STALL);
 		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
 		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
 		intel_ring_emit(signaller, signaller_req->fence.seqno);
 		intel_ring_emit(signaller, 0);
-		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-					   MI_SEMAPHORE_TARGET(waiter->hw_id));
+		intel_ring_emit(signaller,
+				MI_SEMAPHORE_SIGNAL |
+				MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
 
@@ -1362,7 +1366,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_engine_cs *signaller = signaller_req->engine;
+	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1377,18 +1381,21 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+		u64 gtt_offset =
+			signaller_req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
-					   MI_FLUSH_DW_OP_STOREDW);
-		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
-					   MI_FLUSH_DW_USE_GTT);
+		intel_ring_emit(signaller,
+				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+		intel_ring_emit(signaller,
+				lower_32_bits(gtt_offset) |
+				MI_FLUSH_DW_USE_GTT);
 		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
 		intel_ring_emit(signaller, signaller_req->fence.seqno);
-		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-					   MI_SEMAPHORE_TARGET(waiter->hw_id));
+		intel_ring_emit(signaller,
+				MI_SEMAPHORE_SIGNAL |
+				MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
 
@@ -1398,7 +1405,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_engine_cs *signaller = signaller_req->engine;
+	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
@@ -1414,7 +1421,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_engine_id(useless, dev_priv, id) {
-		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
+		i915_reg_t mbox_reg =
+			signaller_req->engine->semaphore.mbox.signal[id];
 
 		if (i915_mmio_reg_valid(mbox_reg)) {
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
@@ -1441,23 +1449,22 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
-	if (engine->semaphore.signal)
-		ret = engine->semaphore.signal(req, 4);
+	if (req->engine->semaphore.signal)
+		ret = req->engine->semaphore.signal(req, 4);
 	else
 		ret = intel_ring_begin(req, 4);
 
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(engine, req->fence.seqno);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	__intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, req->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	__intel_engine_submit(req->engine);
 
 	return 0;
 }
@@ -1466,6 +1473,7 @@ static int
 gen8_render_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1475,18 +1483,18 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
-				 PIPE_CONTROL_CS_STALL |
-				 PIPE_CONTROL_QW_WRITE));
-	intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+			       PIPE_CONTROL_CS_STALL |
+			       PIPE_CONTROL_QW_WRITE));
+	intel_ring_emit(ring, intel_hws_seqno_address(engine));
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	/* We're thrashing one dword of HWS. */
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	intel_ring_emit(engine, MI_NOOP);
-	__intel_ring_advance(engine);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
+	__intel_engine_submit(engine);
 
 	return 0;
 }
@@ -1510,9 +1518,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->engine;
+	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
-	u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
@@ -1544,11 +1552,11 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->engine;
+	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
 	int ret;
 
 	/* Throughout all of the GEM code, seqno passed implies our current
@@ -1678,35 +1686,34 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_FLUSH);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 	return 0;
 }
 
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(engine, req->fence.seqno);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	__intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, req->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	__intel_engine_submit(req->engine);
 
 	return 0;
 }
@@ -1773,20 +1780,20 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1800,8 +1807,8 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
-	u32 cs_offset = engine->scratch.gtt_offset;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -1809,13 +1816,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(engine, cs_offset);
-	intel_ring_emit(engine, 0xdeadbeef);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(ring, cs_offset);
+	intel_ring_emit(ring, 0xdeadbeef);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
@@ -1829,17 +1836,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(engine,
+		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(ring,
 				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(engine, cs_offset);
-		intel_ring_emit(engine, 4096);
-		intel_ring_emit(engine, offset);
+		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
 
-		intel_ring_emit(engine, MI_FLUSH);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		/* ... and execute it. */
 		offset = cs_offset;
@@ -1849,10 +1856,10 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					  0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1862,17 +1869,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					  0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2404,8 +2411,9 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	struct intel_ringbuffer *ring = req->ringbuf;
+	int num_dwords =
+		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2417,9 +2425,9 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 		return ret;
 
 	while (num_dwords--)
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2510,7 +2518,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	uint32_t cmd;
 	int ret;
 
@@ -2538,17 +2546,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	if (invalidate & I915_GEM_GPU_DOMAINS)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(engine, 0); /* upper addr */
-		intel_ring_emit(engine, 0); /* value */
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
 	} else  {
-		intel_ring_emit(engine, 0);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 	return 0;
 }
 
@@ -2557,8 +2564,8 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
-	bool ppgtt = USES_PPGTT(engine->dev) &&
+	struct intel_ringbuffer *ring = req->ringbuf;
+	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -2567,13 +2574,13 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(engine, lower_32_bits(offset));
-	intel_ring_emit(engine, upper_32_bits(offset));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2583,22 +2590,22 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2608,20 +2615,20 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2631,7 +2638,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	uint32_t cmd;
 	int ret;
 
@@ -2658,17 +2665,17 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	 */
 	if (invalidate & I915_GEM_DOMAIN_RENDER)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine,
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(engine, 0); /* upper addr */
-		intel_ring_emit(engine, 0); /* value */
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
 	} else  {
-		intel_ring_emit(engine, 0);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 05bab8bda63d..427fb19a7a2e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -454,32 +454,21 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void __intel_ringbuffer_emit(struct intel_ringbuffer *rb,
-					   u32 data)
+static inline void intel_ring_emit(struct intel_ringbuffer *ring, u32 data)
 {
-	*(uint32_t *)(rb->vaddr + rb->tail) = data;
-	rb->tail += 4;
+	*(uint32_t *)(ring->vaddr + ring->tail) = data;
+	ring->tail += 4;
 }
 
-static inline void __intel_ringbuffer_advance(struct intel_ringbuffer *rb)
-{
-	rb->tail &= rb->size - 1;
-}
-
-static inline void intel_ring_emit(struct intel_engine_cs *engine, u32 data)
-{
-	__intel_ringbuffer_emit(engine->buffer, data);
-}
-
-static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
+static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring,
 				       i915_reg_t reg)
 {
-	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
+	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
 }
 
-static inline void intel_ring_advance(struct intel_engine_cs *engine)
+static inline void intel_ring_advance(struct intel_ringbuffer *ring)
 {
-	__intel_ringbuffer_advance(engine->buffer);
+	ring->tail &= ring->size - 1;
 }
 
 int __intel_ring_space(int head, int tail, int size);
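
For reference, the pattern that every legacy emitter follows after this
change (an illustrative sketch only, not part of the patch; the wrapper
name emit_two_noops is invented):

	static int emit_two_noops(struct drm_i915_gem_request *req)
	{
		struct intel_ringbuffer *ring = req->ringbuf;
		int ret;

		/* Space is still reserved against the request... */
		ret = intel_ring_begin(req, 2);
		if (ret)
			return ret;

		/* ...but the dwords are written through the ringbuffer. */
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		return 0;
	}
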
-- 
2.8.1


* [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring
  2016-07-20 13:11 Unify request construction Chris Wilson
  2016-07-20 13:11 ` [PATCH 01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
@ 2016-07-20 13:11 ` Chris Wilson
  2016-07-20 14:12   ` Dave Gordon
  2016-07-21 11:28   ` [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring Joonas Lahtinen
  2016-07-20 13:11 ` [PATCH 03/18] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs Chris Wilson
                   ` (19 subsequent siblings)
  21 siblings, 2 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:11 UTC (permalink / raw)
  To: intel-gfx

Now that we have disambiguated ring and engine, we can use the clearer
and more consistent name for the intel_ringbuffer pointer in the
request.
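
For example (sketch only, not taken from the diff below), a caller that
previously read

	struct intel_ringbuffer *ring = req->ringbuf;

now reads

	struct intel_ringbuffer *ring = req->ring;

and the struct itself is renamed to intel_ring later in the series.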

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  4 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  4 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  6 +-
 drivers/gpu/drm/i915/i915_gem_request.c    | 16 +++---
 drivers/gpu/drm/i915/i915_gem_request.h    |  2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      | 20 +++----
 drivers/gpu/drm/i915/intel_display.c       | 10 ++--
 drivers/gpu/drm/i915/intel_lrc.c           | 57 +++++++++---------
 drivers/gpu/drm/i915/intel_mocs.c          | 36 ++++++------
 drivers/gpu/drm/i915/intel_overlay.c       |  8 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 92 +++++++++++++++---------------
 11 files changed, 126 insertions(+), 129 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index b6d10bd763a0..16138c4ff7db 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -552,7 +552,7 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -654,7 +654,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
 	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int i, ret;
 
 	if (!remap_info)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e2c4d99a1e7f..501a1751d432 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1173,7 +1173,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret, i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
@@ -1303,7 +1303,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ringbuffer *ring = params->request->ringbuf;
+		struct intel_ringbuffer *ring = params->request->ring;
 
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index abc439be2049..a48329baf432 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	BUG_ON(entry >= 4);
@@ -1660,7 +1660,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1688,7 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 60a3a343b3a8..0f415606a383 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -170,7 +170,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	 * Note this requires that we are always called in request
 	 * completion order.
 	 */
-	request->ringbuf->last_retired_head = request->postfix;
+	request->ring->last_retired_head = request->postfix;
 
 	i915_gem_request_remove_from_client(request);
 
@@ -425,7 +425,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 			bool flush_caches)
 {
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
 	u32 request_start;
 	u32 reserved_tail;
 	int ret;
@@ -434,14 +434,14 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		return;
 
 	engine = request->engine;
-	ringbuf = request->ringbuf;
+	ring = request->ring;
 
 	/*
 	 * To ensure that this call will not fail, space for its emissions
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
-	request_start = intel_ring_get_tail(ringbuf);
+	request_start = intel_ring_get_tail(ring);
 	reserved_tail = request->reserved_space;
 	request->reserved_space = 0;
 
@@ -488,21 +488,21 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request->postfix = intel_ring_get_tail(ringbuf);
+	request->postfix = intel_ring_get_tail(ring);
 
 	if (i915.enable_execlists) {
 		ret = engine->emit_request(request);
 	} else {
 		ret = engine->add_request(request);
 
-		request->tail = intel_ring_get_tail(ringbuf);
+		request->tail = intel_ring_get_tail(ring);
 	}
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
 	/* Sanity check that the reserved size was large enough. */
-	ret = intel_ring_get_tail(ringbuf) - request_start;
+	ret = intel_ring_get_tail(ring) - request_start;
 	if (ret < 0)
-		ret += ringbuf->size;
+		ret += ring->size;
 	WARN_ONCE(ret > reserved_tail,
 		  "Not enough space reserved (%d bytes) "
 		  "for adding the request (%d bytes)\n",
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index e06e81f459df..68868d825d9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -61,7 +61,7 @@ struct drm_i915_gem_request {
 	 */
 	struct i915_gem_context *ctx;
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
 	struct intel_signal_node signaling;
 
 	/** GEM sequence number associated with the previous request,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 4d39c7284605..09997c6adcd2 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1091,7 +1091,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		request = i915_gem_find_active_request(engine);
 		if (request) {
 			struct i915_address_space *vm;
-			struct intel_ringbuffer *rb;
+			struct intel_ringbuffer *ring;
 
 			vm = request->ctx->ppgtt ?
 				&request->ctx->ppgtt->base : &ggtt->base;
@@ -1108,7 +1108,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			if (HAS_BROKEN_CS_TLB(dev_priv))
 				error->ring[i].wa_batchbuffer =
 					i915_error_ggtt_object_create(dev_priv,
-							     engine->scratch.obj);
+								      engine->scratch.obj);
 
 			if (request->pid) {
 				struct task_struct *task;
@@ -1125,23 +1125,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			error->simulated |=
 				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
 
-			rb = request->ringbuf;
-			error->ring[i].cpu_ring_head = rb->head;
-			error->ring[i].cpu_ring_tail = rb->tail;
+			ring = request->ring;
+			error->ring[i].cpu_ring_head = ring->head;
+			error->ring[i].cpu_ring_tail = ring->tail;
 			error->ring[i].ringbuffer =
 				i915_error_ggtt_object_create(dev_priv,
-							      rb->obj);
+							      ring->obj);
 		}
 
 		error->ring[i].hws_page =
 			i915_error_ggtt_object_create(dev_priv,
 						      engine->status_page.obj);
 
-		if (engine->wa_ctx.obj) {
-			error->ring[i].wa_ctx =
-				i915_error_ggtt_object_create(dev_priv,
-							      engine->wa_ctx.obj);
-		}
+		error->ring[i].wa_ctx =
+			i915_error_ggtt_object_create(dev_priv,
+						      engine->wa_ctx.obj);
 
 		i915_gem_record_active_context(engine, error, &error->ring[i]);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d18ed32e6a31..d1932840a268 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11123,7 +11123,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11157,7 +11157,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11188,7 +11188,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11226,7 +11226,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11261,7 +11261,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8bf2ea5a2de3..c3542eb338ca 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -714,7 +714,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 			return ret;
 	}
 
-	request->ringbuf = ce->ringbuf;
+	request->ring = ce->ringbuf;
 
 	if (i915.enable_guc_submission) {
 		/*
@@ -770,11 +770,11 @@ err_unpin:
 static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 
-	intel_ring_advance(ringbuf);
-	request->tail = ringbuf->tail;
+	intel_ring_advance(ring);
+	request->tail = ring->tail;
 
 	/*
 	 * Here we add two extra NOOPs as padding to avoid
@@ -782,9 +782,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	 *
 	 * Caller must reserve WA_TAIL_DWORDS for us!
 	 */
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	/* We keep the previous context alive until we retire the following
 	 * request. This ensures that any the context object is still pinned
@@ -821,7 +821,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	struct drm_device       *dev = params->dev;
 	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
+	struct intel_ringbuffer *ring = params->request->ring;
 	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
@@ -833,7 +833,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
+		if (instp_mode != 0 && engine->id != RCS) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -862,17 +862,17 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	if (engine == &dev_priv->engine[RCS] &&
+	if (engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(ringbuf, MI_NOOP);
-		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ringbuf, INSTPM);
-		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ringbuf);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -1030,7 +1030,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
@@ -1045,14 +1045,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ringbuf, w->reg[i].addr);
-		intel_ring_emit(ringbuf, w->reg[i].value);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
 	}
-	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(ringbuf);
+	intel_ring_advance(ring);
 
 	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
@@ -1546,7 +1546,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
 
@@ -1573,7 +1573,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -1630,8 +1630,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
-	struct intel_engine_cs *engine = ring->engine;
+	struct intel_ringbuffer *ring = request->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -1650,7 +1649,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (engine->id == VCS)
+		if (request->engine->id == VCS)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
@@ -1669,7 +1668,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
@@ -1783,7 +1782,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1806,7 +1805,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 2743424f2746..fe63c7e79fb1 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
 	int ret;
@@ -288,11 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
-		intel_ring_emit(ringbuf, table->table[index].control_value);
+		intel_ring_emit_reg(ring, mocs_register(engine, index));
+		intel_ring_emit(ring, table->table[index].control_value);
 	}
 
 	/*
@@ -304,12 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
-		intel_ring_emit(ringbuf, table->table[0].control_value);
+		intel_ring_emit_reg(ring, mocs_register(engine, index));
+		intel_ring_emit(ring, table->table[0].control_value);
 	}
 
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -336,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	unsigned int i;
 	int ret;
 
@@ -347,18 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf,
+	intel_ring_emit(ring,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
 		i++;
 	}
 
@@ -368,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
 	}
 
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 92722e614955..84b8f74bd13c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -253,7 +253,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	overlay->active = true;
 
-	ring = req->ringbuf;
+	ring = req->ring;
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
 	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -295,7 +295,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 		return ret;
 	}
 
-	ring = req->ringbuf;
+	ring = req->ring;
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
 	intel_ring_advance(ring);
@@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 		return ret;
 	}
 
-	ring = req->ringbuf;
+	ring = req->ring;
 	/* wait for overlay to go idle */
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
@@ -438,7 +438,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 			return ret;
 		}
 
-		ring = req->ringbuf;
+		ring = req->ring;
 		intel_ring_emit(ring,
 				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 		intel_ring_emit(ring, MI_NOOP);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index da8134d43b26..ac51e4885046 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -70,7 +70,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -97,7 +97,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -187,7 +187,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -224,7 +224,7 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -277,7 +277,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -299,7 +299,7 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -364,7 +364,7 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -680,7 +680,7 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 	int ret, i;
 
@@ -1324,7 +1324,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1366,7 +1366,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1405,7 +1405,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
@@ -1449,7 +1449,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	if (req->engine->semaphore.signal)
@@ -1473,7 +1473,7 @@ static int
 gen8_render_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1518,7 +1518,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+	struct intel_ringbuffer *waiter = waiter_req->ring;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
 	struct i915_hw_ppgtt *ppgtt;
@@ -1552,7 +1552,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+	struct intel_ringbuffer *waiter = waiter_req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
@@ -1686,7 +1686,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1702,7 +1702,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -1780,7 +1780,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1807,7 +1807,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
@@ -1869,7 +1869,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2297,7 +2297,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 	 */
 	request->reserved_space += LEGACY_REQUEST_SIZE;
 
-	request->ringbuf = request->engine->buffer;
+	request->ring = request->engine->buffer;
 
 	ret = intel_ring_begin(request, 0);
 	if (ret)
@@ -2309,12 +2309,12 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *target;
 
-	intel_ring_update_space(ringbuf);
-	if (ringbuf->space >= bytes)
+	intel_ring_update_space(ring);
+	if (ring->space >= bytes)
 		return 0;
 
 	/*
@@ -2336,12 +2336,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 		 * from multiple ringbuffers. Here, we must ignore any that
 		 * aren't from the ringbuffer we're considering.
 		 */
-		if (target->ringbuf != ringbuf)
+		if (target->ring != ring)
 			continue;
 
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ringbuf->tail,
-					   ringbuf->size);
+		space = __intel_ring_space(target->postfix, ring->tail,
+					   ring->size);
 		if (space >= bytes)
 			break;
 	}
@@ -2354,9 +2354,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	int remain_actual = ringbuf->size - ringbuf->tail;
-	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	struct intel_ringbuffer *ring = req->ring;
+	int remain_actual = ring->size - ring->tail;
+	int remain_usable = ring->effective_size - ring->tail;
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
@@ -2383,35 +2383,35 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		wait_bytes = total_bytes;
 	}
 
-	if (wait_bytes > ringbuf->space) {
+	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
 
-		intel_ring_update_space(ringbuf);
-		if (unlikely(ringbuf->space < wait_bytes))
+		intel_ring_update_space(ring);
+		if (unlikely(ring->space < wait_bytes))
 			return -EAGAIN;
 	}
 
 	if (unlikely(need_wrap)) {
-		GEM_BUG_ON(remain_actual > ringbuf->space);
-		GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
+		GEM_BUG_ON(remain_actual > ring->space);
+		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
 
 		/* Fill the tail with MI_NOOP */
-		memset(ringbuf->vaddr + ringbuf->tail, 0, remain_actual);
-		ringbuf->tail = 0;
-		ringbuf->space -= remain_actual;
+		memset(ring->vaddr + ring->tail, 0, remain_actual);
+		ring->tail = 0;
+		ring->space -= remain_actual;
 	}
 
-	ringbuf->space -= bytes;
-	GEM_BUG_ON(ringbuf->space < 0);
+	ring->space -= bytes;
+	GEM_BUG_ON(ring->space < 0);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int num_dwords =
 		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
@@ -2518,7 +2518,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2564,7 +2564,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
@@ -2590,7 +2590,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2615,7 +2615,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2638,7 +2638,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
-- 
2.8.1


* [PATCH 03/18] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs
  2016-07-20 13:11 Unify request construction Chris Wilson
  2016-07-20 13:11 ` [PATCH 01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
  2016-07-20 13:11 ` [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
@ 2016-07-20 13:11 ` Chris Wilson
  2016-07-20 14:23   ` Dave Gordon
  2016-07-21 11:32   ` Joonas Lahtinen
  2016-07-20 13:11 ` [PATCH 04/18] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
                   ` (18 subsequent siblings)
  21 siblings, 2 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:11 UTC (permalink / raw)
  To: intel-gfx

Having ringbuf->ring point to an engine is confusing, so rename it once
again to ring->engine.
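
Roughly, the change to the backpointer looks like this (a hypothetical
fragment for illustration only; the other members are elided and the
exact layout is not shown in this patch):

	struct intel_ringbuffer {
		/* ... other members unchanged ... */
		struct intel_engine_cs *engine; /* previously named 'ring' */
	};

so that, combined with the earlier renames, the access reads ring->engine
rather than ringbuf->ring.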

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ac51e4885046..3cfbfe40f6e8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2171,7 +2171,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
 	i915_gem_context_put(ctx);
 }
 
-static int intel_init_ring_buffer(struct intel_engine_cs *engine)
+static int intel_init_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct intel_ringbuffer *ringbuf;
@@ -2868,7 +2868,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 	engine->init_hw = init_render_ring;
 	engine->cleanup = render_ring_cleanup;
 
-	ret = intel_init_ring_buffer(engine);
+	ret = intel_init_engine(engine);
 	if (ret)
 		return ret;
 
@@ -2907,7 +2907,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
 	}
 
-	return intel_init_ring_buffer(engine);
+	return intel_init_engine(engine);
 }
 
 /**
@@ -2921,7 +2921,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
 
 	engine->flush = gen6_bsd_ring_flush;
 
-	return intel_init_ring_buffer(engine);
+	return intel_init_engine(engine);
 }
 
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
@@ -2934,7 +2934,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) < 8)
 		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
-	return intel_init_ring_buffer(engine);
+	return intel_init_engine(engine);
 }
 
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
@@ -2951,7 +2951,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 		engine->irq_disable = hsw_vebox_irq_disable;
 	}
 
-	return intel_init_ring_buffer(engine);
+	return intel_init_engine(engine);
 }
 
 int
-- 
2.8.1


* [PATCH 04/18] drm/i915: Rename intel_context[engine].ringbuf
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (2 preceding siblings ...)
  2016-07-20 13:11 ` [PATCH 03/18] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs Chris Wilson
@ 2016-07-20 13:11 ` Chris Wilson
  2016-07-21 11:43   ` Joonas Lahtinen
  2016-07-20 13:11 ` [PATCH 05/18] drm/i915: Rename struct intel_ringbuffer to struct intel_ring Chris Wilson
                   ` (17 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:11 UTC (permalink / raw)
  To: intel-gfx

Perform s/ringbuf/ring/ on the context struct for consistency with the
ring/engine split.
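
After this patch, per-engine context state is reached as, for example
(sketch only, mirroring the hunks below):

	struct intel_context *ce = &ctx->engine[engine->id];
	struct intel_ringbuffer *ring = ce->ring; /* was ce->ringbuf */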

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  8 ++++----
 drivers/gpu/drm/i915/i915_drv.h            |  2 +-
 drivers/gpu/drm/i915/i915_gem_context.c    |  4 ++--
 drivers/gpu/drm/i915/i915_guc_submission.c |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 33 ++++++++++++++----------------
 5 files changed, 23 insertions(+), 26 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9aa62c5b5f65..bde68741809b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -425,8 +425,8 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
 	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
 		if (ctx->engine[n].state)
 			per_file_stats(0, ctx->engine[n].state, data);
-		if (ctx->engine[n].ringbuf)
-			per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+		if (ctx->engine[n].ring)
+			per_file_stats(0, ctx->engine[n].ring->obj, data);
 	}
 
 	return 0;
@@ -2066,8 +2066,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			seq_putc(m, ce->initialised ? 'I' : 'i');
 			if (ce->state)
 				describe_obj(m, ce->state);
-			if (ce->ringbuf)
-				describe_ctx_ringbuf(m, ce->ringbuf);
+			if (ce->ring)
+				describe_ctx_ringbuf(m, ce->ring);
 			seq_putc(m, '\n');
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0f408ada1c65..87e06a6a797a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -894,7 +894,7 @@ struct i915_gem_context {
 
 	struct intel_context {
 		struct drm_i915_gem_object *state;
-		struct intel_ringbuffer *ringbuf;
+		struct intel_ringbuffer *ring;
 		struct i915_vma *lrc_vma;
 		uint32_t *lrc_reg_state;
 		u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 16138c4ff7db..c8bf7b8e959f 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -173,8 +173,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
 			continue;
 
 		WARN_ON(ce->pin_count);
-		if (ce->ringbuf)
-			intel_ringbuffer_free(ce->ringbuf);
+		if (ce->ring)
+			intel_ringbuffer_free(ce->ring);
 
 		i915_gem_object_put(ce->state);
 	}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 01c1c1671811..eccd34832fe6 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -363,7 +363,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ce->ringbuf->obj;
+		obj = ce->ring->obj;
 		gfx_addr = i915_gem_obj_ggtt_offset(obj);
 
 		lrc->ring_begin = gfx_addr;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c3542eb338ca..7bc1d0c92799 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -482,11 +482,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
 		 * resubmit the request. See gen8_emit_request() for where we
 		 * prepare the padding after the end of the request.
 		 */
-		struct intel_ringbuffer *ringbuf;
-
-		ringbuf = req0->ctx->engine[engine->id].ringbuf;
 		req0->tail += 8;
-		req0->tail &= ringbuf->size - 1;
+		req0->tail &= req0->ring->size - 1;
 	}
 
 	execlists_submit_requests(req0, req1);
@@ -714,7 +711,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 			return ret;
 	}
 
-	request->ring = ce->ringbuf;
+	request->ring = ce->ring;
 
 	if (i915.enable_guc_submission) {
 		/*
@@ -976,14 +973,14 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
+	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ring);
 	if (ret)
 		goto unpin_map;
 
 	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
 	intel_lr_context_descriptor_update(ctx, engine);
 
-	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
+	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
 	ce->lrc_reg_state = lrc_reg_state;
 	ce->state->dirty = true;
 
@@ -1014,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	if (--ce->pin_count)
 		return;
 
-	intel_unpin_ringbuffer_obj(ce->ringbuf);
+	intel_unpin_ringbuffer_obj(ce->ring);
 
 	i915_gem_object_unpin_map(ce->state);
 	i915_gem_object_ggtt_unpin(ce->state);
@@ -2338,7 +2335,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	struct drm_i915_gem_object *ctx_obj;
 	struct intel_context *ce = &ctx->engine[engine->id];
 	uint32_t context_size;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
 	int ret;
 
 	WARN_ON(ce->state);
@@ -2354,29 +2351,29 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		return PTR_ERR(ctx_obj);
 	}
 
-	ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
-	if (IS_ERR(ringbuf)) {
-		ret = PTR_ERR(ringbuf);
+	ring = intel_engine_create_ringbuffer(engine, ctx->ring_size);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
 		goto error_deref_obj;
 	}
 
-	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
+	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
 		goto error_ringbuf;
 	}
 
-	ce->ringbuf = ringbuf;
+	ce->ring = ring;
 	ce->state = ctx_obj;
 	ce->initialised = engine->init_context == NULL;
 
 	return 0;
 
 error_ringbuf:
-	intel_ringbuffer_free(ringbuf);
+	intel_ringbuffer_free(ring);
 error_deref_obj:
 	i915_gem_object_put(ctx_obj);
-	ce->ringbuf = NULL;
+	ce->ring = NULL;
 	ce->state = NULL;
 	return ret;
 }
@@ -2407,7 +2404,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 
 		i915_gem_object_unpin_map(ctx_obj);
 
-		ce->ringbuf->head = 0;
-		ce->ringbuf->tail = 0;
+		ce->ring->head = 0;
+		ce->ring->tail = 0;
 	}
 }
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 05/18] drm/i915: Rename struct intel_ringbuffer to struct intel_ring
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (3 preceding siblings ...)
  2016-07-20 13:11 ` [PATCH 04/18] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
@ 2016-07-20 13:11 ` Chris Wilson
  2016-07-21 11:59   ` Joonas Lahtinen
  2016-07-20 13:11 ` [PATCH 06/18] drm/i915: Rename residual ringbuf parameters Chris Wilson
                   ` (16 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:11 UTC (permalink / raw)
  To: intel-gfx

The state stored in this struct is not only the information about the
buffer object, but also the ring used to communicate with the hardware.
Calling it a buffer is overly specific and, for me at least, too easily
conflated with the notion of buffer objects themselves.
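
For illustration only (a sketch assembled from the call sites touched below,
with the surrounding function elided), a typical emission sequence reads
naturally with the new name:

    struct intel_ring *ring = req->ring;
    int ret;

    ret = intel_ring_begin(req, 2);
    if (ret)
        return ret;

    intel_ring_emit(ring, MI_FLUSH);
    intel_ring_emit(ring, MI_NOOP);
    intel_ring_advance(ring);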

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  11 ++-
 drivers/gpu/drm/i915/i915_drv.h            |   4 +-
 drivers/gpu/drm/i915/i915_gem.c            |  16 ++--
 drivers/gpu/drm/i915/i915_gem_context.c    |   6 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   6 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |   6 +-
 drivers/gpu/drm/i915/i915_gem_request.c    |   6 +-
 drivers/gpu/drm/i915/i915_gem_request.h    |   2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      |   8 +-
 drivers/gpu/drm/i915/i915_irq.c            |  14 ++--
 drivers/gpu/drm/i915/intel_display.c       |  10 +--
 drivers/gpu/drm/i915/intel_engine_cs.c     |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c           |  34 ++++----
 drivers/gpu/drm/i915/intel_mocs.c          |   4 +-
 drivers/gpu/drm/i915/intel_overlay.c       |   8 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 128 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  51 ++++++------
 17 files changed, 157 insertions(+), 159 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bde68741809b..dccc72d63dd0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1419,7 +1419,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	intel_runtime_pm_get(dev_priv);
 
 	for_each_engine_id(engine, dev_priv, id) {
-		acthd[id] = intel_ring_get_active_head(engine);
+		acthd[id] = intel_engine_get_active_head(engine);
 		seqno[id] = intel_engine_get_seqno(engine);
 	}
 
@@ -2017,12 +2017,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void describe_ctx_ringbuf(struct seq_file *m,
-				 struct intel_ringbuffer *ringbuf)
+static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 {
 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
-		   ringbuf->space, ringbuf->head, ringbuf->tail,
-		   ringbuf->last_retired_head);
+		   ring->space, ring->head, ring->tail,
+		   ring->last_retired_head);
 }
 
 static int i915_context_status(struct seq_file *m, void *unused)
@@ -2067,7 +2066,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			if (ce->state)
 				describe_obj(m, ce->state);
 			if (ce->ring)
-				describe_ctx_ringbuf(m, ce->ring);
+				describe_ctx_ring(m, ce->ring);
 			seq_putc(m, '\n');
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 87e06a6a797a..f32ec6db5bfa 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -518,7 +518,7 @@ struct drm_i915_error_state {
 		bool waiting;
 		int num_waiters;
 		int hangcheck_score;
-		enum intel_ring_hangcheck_action hangcheck_action;
+		enum intel_engine_hangcheck_action hangcheck_action;
 		int num_requests;
 
 		/* our own tracking of ring head and tail */
@@ -894,7 +894,7 @@ struct i915_gem_context {
 
 	struct intel_context {
 		struct drm_i915_gem_object *state;
-		struct intel_ringbuffer *ring;
+		struct intel_ring *ring;
 		struct i915_vma *lrc_vma;
 		uint32_t *lrc_reg_state;
 		u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 40047eb48826..95dbcfd94a80 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2486,7 +2486,7 @@ static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
 
 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
-	struct intel_ringbuffer *buffer;
+	struct intel_ring *ring;
 
 	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
@@ -2502,7 +2502,7 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	 * (lockless) lookup doesn't try and wait upon the request as we
 	 * reset it.
 	 */
-	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+	intel_engine_init_seqno(engine, engine->last_submitted_seqno);
 
 	/*
 	 * Clear the execlists queue up before freeing the requests, as those
@@ -2541,9 +2541,9 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	 * upon reset is less than when we start. Do one more pass over
 	 * all the ringbuffers to reset last_retired_head.
 	 */
-	list_for_each_entry(buffer, &engine->buffers, link) {
-		buffer->last_retired_head = buffer->tail;
-		intel_ring_update_space(buffer);
+	list_for_each_entry(ring, &engine->buffers, link) {
+		ring->last_retired_head = ring->tail;
+		intel_ring_update_space(ring);
 	}
 
 	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
@@ -2867,7 +2867,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 		i915_gem_object_retire_request(obj, from_req);
 	} else {
-		int idx = intel_ring_sync_index(from, to);
+		int idx = intel_engine_sync_index(from, to);
 		u32 seqno = i915_gem_request_get_seqno(from_req);
 
 		WARN_ON(!to_req);
@@ -4567,8 +4567,8 @@ int i915_gem_init(struct drm_device *dev)
 
 	if (!i915.enable_execlists) {
 		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
-		dev_priv->gt.stop_engine = intel_stop_engine;
+		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
+		dev_priv->gt.stop_engine = intel_engine_stop;
 	} else {
 		dev_priv->gt.execbuf_submit = intel_execlists_submission;
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c8bf7b8e959f..d9b861b856dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -174,7 +174,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
 		WARN_ON(ce->pin_count);
 		if (ce->ring)
-			intel_ringbuffer_free(ce->ring);
+			intel_ring_free(ce->ring);
 
 		i915_gem_object_put(ce->state);
 	}
@@ -552,7 +552,7 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
@@ -654,7 +654,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
 	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int i, ret;
 
 	if (!remap_info)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 501a1751d432..12adfec2d6a9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1001,7 +1001,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(req);
+	return intel_engine_invalidate_all_caches(req);
 }
 
 static bool
@@ -1173,7 +1173,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret, i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
@@ -1303,7 +1303,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ringbuffer *ring = params->request->ring;
+		struct intel_ring *ring = params->request->ring;
 
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a48329baf432..01b825169164 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	BUG_ON(entry >= 4);
@@ -1660,7 +1660,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1688,7 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 0f415606a383..54b27369225a 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -244,7 +244,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
 	/* Finally reset hw state */
 	for_each_engine(engine, dev_priv)
-		intel_ring_init_seqno(engine, seqno);
+		intel_engine_init_seqno(engine, seqno);
 
 	return 0;
 }
@@ -425,7 +425,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 			bool flush_caches)
 {
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	u32 request_start;
 	u32 reserved_tail;
 	int ret;
@@ -456,7 +456,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		if (i915.enable_execlists)
 			ret = logical_ring_flush_all_caches(request);
 		else
-			ret = intel_ring_flush_all_caches(request);
+			ret = intel_engine_flush_all_caches(request);
 		/* Not allowed to fail! */
 		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 68868d825d9d..382ca5a163eb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -61,7 +61,7 @@ struct drm_i915_gem_request {
 	 */
 	struct i915_gem_context *ctx;
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	struct intel_signal_node signaling;
 
 	/** GEM sequence number associated with the previous request,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 09997c6adcd2..2fbe81d51af1 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -221,7 +221,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 	}
 }
 
-static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
 {
 	switch (a) {
 	case HANGCHECK_IDLE:
@@ -882,7 +882,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 		signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
 				/ 4;
 		tmp = error->semaphore_obj->pages[0];
-		idx = intel_ring_sync_index(engine, to);
+		idx = intel_engine_sync_index(engine, to);
 
 		ering->semaphore_mboxes[idx] = tmp[signal_offset];
 		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
@@ -983,7 +983,7 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
 
 	ering->waiting = intel_engine_has_waiter(engine);
 	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
-	ering->acthd = intel_ring_get_active_head(engine);
+	ering->acthd = intel_engine_get_active_head(engine);
 	ering->seqno = intel_engine_get_seqno(engine);
 	ering->last_seqno = engine->last_submitted_seqno;
 	ering->start = I915_READ_START(engine);
@@ -1091,7 +1091,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		request = i915_gem_find_active_request(engine);
 		if (request) {
 			struct i915_address_space *vm;
-			struct intel_ringbuffer *ring;
+			struct intel_ring *ring;
 
 			vm = request->ctx->ppgtt ?
 				&request->ctx->ppgtt->base : &ggtt->base;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7104dc1463eb..5903111db718 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2993,7 +2993,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
 	return stuck;
 }
 
-static enum intel_ring_hangcheck_action
+static enum intel_engine_hangcheck_action
 head_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
 	if (acthd != engine->hangcheck.acthd) {
@@ -3011,11 +3011,11 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 	return HANGCHECK_HUNG;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *engine, u64 acthd)
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	enum intel_ring_hangcheck_action ha;
+	enum intel_engine_hangcheck_action ha;
 	u32 tmp;
 
 	ha = head_stuck(engine, acthd);
@@ -3124,7 +3124,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		if (engine->irq_seqno_barrier)
 			engine->irq_seqno_barrier(engine);
 
-		acthd = intel_ring_get_active_head(engine);
+		acthd = intel_engine_get_active_head(engine);
 		seqno = intel_engine_get_seqno(engine);
 
 		/* Reset stuck interrupts between batch advances */
@@ -3154,8 +3154,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 				 * being repeatedly kicked and so responsible
 				 * for stalling the machine.
 				 */
-				engine->hangcheck.action = ring_stuck(engine,
-								      acthd);
+				engine->hangcheck.action =
+					engine_stuck(engine, acthd);
 
 				switch (engine->hangcheck.action) {
 				case HANGCHECK_IDLE:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d1932840a268..bff172c45ff7 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11123,7 +11123,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11157,7 +11157,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11188,7 +11188,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11226,7 +11226,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11261,7 +11261,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index f4a35ec78481..f00bd55fe582 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -154,7 +154,7 @@ cleanup:
 		if (i915.enable_execlists)
 			intel_logical_ring_cleanup(&dev_priv->engine[i]);
 		else
-			intel_cleanup_engine(&dev_priv->engine[i]);
+			intel_engine_cleanup(&dev_priv->engine[i]);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7bc1d0c92799..5b9f98f6ed87 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -767,7 +767,7 @@ err_unpin:
 static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 
 	intel_ring_advance(ring);
@@ -818,7 +818,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	struct drm_device       *dev = params->dev;
 	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ring = params->request->ring;
+	struct intel_ring *ring = params->request->ring;
 	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
@@ -973,7 +973,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ring);
+	ret = intel_pin_and_map_ring(dev_priv, ce->ring);
 	if (ret)
 		goto unpin_map;
 
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	if (--ce->pin_count)
 		return;
 
-	intel_unpin_ringbuffer_obj(ce->ring);
+	intel_unpin_ring(ce->ring);
 
 	i915_gem_object_unpin_map(ce->state);
 	i915_gem_object_ggtt_unpin(ce->state);
@@ -1027,7 +1027,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
@@ -1543,7 +1543,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
 
@@ -1570,7 +1570,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -1627,8 +1627,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
-	struct intel_ringbuffer *ring = request->ring;
-	uint32_t cmd;
+	struct intel_ring *ring = request->ring;
+	u32 cmd;
 	int ret;
 
 	ret = intel_ring_begin(request, 4);
@@ -1665,7 +1665,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
@@ -1779,7 +1779,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1802,7 +1802,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
@@ -2154,7 +2154,7 @@ static int
 populate_lr_context(struct i915_gem_context *ctx,
 		    struct drm_i915_gem_object *ctx_obj,
 		    struct intel_engine_cs *engine,
-		    struct intel_ringbuffer *ringbuf)
+		    struct intel_ring *ring)
 {
 	struct drm_i915_private *dev_priv = ctx->i915;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
@@ -2207,7 +2207,7 @@ populate_lr_context(struct i915_gem_context *ctx,
 		       RING_START(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
 		       RING_CTL(engine->mmio_base),
-		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
 		       RING_BBADDR_UDW(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2335,7 +2335,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	struct drm_i915_gem_object *ctx_obj;
 	struct intel_context *ce = &ctx->engine[engine->id];
 	uint32_t context_size;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(ce->state);
@@ -2351,7 +2351,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		return PTR_ERR(ctx_obj);
 	}
 
-	ring = intel_engine_create_ringbuffer(engine, ctx->ring_size);
+	ring = intel_engine_create_ring(engine, ctx->ring_size);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
@@ -2370,7 +2370,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	return 0;
 
 error_ringbuf:
-	intel_ringbuffer_free(ring);
+	intel_ring_free(ring);
 error_deref_obj:
 	i915_gem_object_put(ctx_obj);
 	ce->ring = NULL;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index fe63c7e79fb1..58db0e330ee6 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
 	int ret;
@@ -336,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	unsigned int i;
 	int ret;
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 84b8f74bd13c..a5071e281088 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -235,7 +235,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(overlay->active);
@@ -270,7 +270,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -340,7 +340,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -426,7 +426,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ringbuffer *ring;
+		struct intel_ring *ring;
 
 		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req))
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3cfbfe40f6e8..9aaf81ba66c8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -47,7 +47,7 @@ int __intel_ring_space(int head, int tail, int size)
 	return space - I915_RING_FREE_SPACE;
 }
 
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+void intel_ring_update_space(struct intel_ring *ringbuf)
 {
 	if (ringbuf->last_retired_head != -1) {
 		ringbuf->head = ringbuf->last_retired_head;
@@ -60,9 +60,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
 
 static void __intel_engine_submit(struct intel_engine_cs *engine)
 {
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	ringbuf->tail &= ringbuf->size - 1;
-	engine->write_tail(engine, ringbuf->tail);
+	struct intel_ring *ring = engine->buffer;
+
+	ring->tail &= ring->size - 1;
+	engine->write_tail(engine, ring->tail);
 }
 
 static int
@@ -70,7 +71,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -97,7 +98,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -187,7 +188,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -224,7 +225,7 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -277,7 +278,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -299,7 +300,7 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -364,7 +365,7 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -427,7 +428,7 @@ static void ring_write_tail(struct intel_engine_cs *engine,
 	I915_WRITE_TAIL(engine, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	u64 acthd;
@@ -553,8 +554,8 @@ static bool stop_ring(struct intel_engine_cs *engine)
 static int init_ring_common(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	struct drm_i915_gem_object *obj = ringbuf->obj;
+	struct intel_ring *ring = engine->buffer;
+	struct drm_i915_gem_object *obj = ring->obj;
 	int ret = 0;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -604,7 +605,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	(void)I915_READ_HEAD(engine);
 
 	I915_WRITE_CTL(engine,
-			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -623,10 +624,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
 		goto out;
 	}
 
-	ringbuf->last_retired_head = -1;
-	ringbuf->head = I915_READ_HEAD(engine);
-	ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
-	intel_ring_update_space(ringbuf);
+	ring->last_retired_head = -1;
+	ring->head = I915_READ_HEAD(engine);
+	ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
+	intel_ring_update_space(ring);
 
 	intel_engine_init_hangcheck(engine);
 
@@ -680,7 +681,7 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 	int ret, i;
 
@@ -688,7 +689,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 		return 0;
 
 	req->engine->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(req);
+	ret = intel_engine_flush_all_caches(req);
 	if (ret)
 		return ret;
 
@@ -706,7 +707,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	intel_ring_advance(ring);
 
 	req->engine->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(req);
+	ret = intel_engine_flush_all_caches(req);
 	if (ret)
 		return ret;
 
@@ -1324,7 +1325,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_ringbuffer *signaller = signaller_req->ring;
+	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1366,7 +1367,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_ringbuffer *signaller = signaller_req->ring;
+	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1405,7 +1406,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_ringbuffer *signaller = signaller_req->ring;
+	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
@@ -1449,7 +1450,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	if (req->engine->semaphore.signal)
@@ -1473,7 +1474,7 @@ static int
 gen8_render_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1518,7 +1519,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ring;
+	struct intel_ring *waiter = waiter_req->ring;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
 	struct i915_hw_ppgtt *ppgtt;
@@ -1552,7 +1553,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ring;
+	struct intel_ring *waiter = waiter_req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
@@ -1686,7 +1687,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1702,7 +1703,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -1780,7 +1781,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1807,7 +1808,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
@@ -1869,7 +1870,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1977,7 +1978,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ring(struct intel_ring *ringbuf)
 {
 	GEM_BUG_ON(!ringbuf->vma);
 	GEM_BUG_ON(!ringbuf->vaddr);
@@ -1992,8 +1993,8 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 	ringbuf->vma = NULL;
 }
 
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-				     struct intel_ringbuffer *ringbuf)
+int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
+			   struct intel_ring *ringbuf)
 {
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -2045,14 +2046,14 @@ err_unpin:
 	return ret;
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
 {
 	i915_gem_object_put(ringbuf->obj);
 	ringbuf->obj = NULL;
 }
 
 static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ringbuffer *ringbuf)
+				      struct intel_ring *ringbuf)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -2072,10 +2073,10 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 {
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -2113,7 +2114,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 }
 
 void
-intel_ringbuffer_free(struct intel_ringbuffer *ring)
+intel_ring_free(struct intel_ring *ring)
 {
 	intel_destroy_ringbuffer_obj(ring);
 	list_del(&ring->link);
@@ -2174,7 +2175,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
 static int intel_init_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ring *ringbuf;
 	int ret;
 
 	WARN_ON(engine->buffer);
@@ -2199,7 +2200,7 @@ static int intel_init_engine(struct intel_engine_cs *engine)
 	if (ret)
 		goto error;
 
-	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
+	ringbuf = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
 	if (IS_ERR(ringbuf)) {
 		ret = PTR_ERR(ringbuf);
 		goto error;
@@ -2217,7 +2218,7 @@ static int intel_init_engine(struct intel_engine_cs *engine)
 			goto error;
 	}
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
+	ret = intel_pin_and_map_ring(dev_priv, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
 				engine->name, ret);
@@ -2228,11 +2229,11 @@ static int intel_init_engine(struct intel_engine_cs *engine)
 	return 0;
 
 error:
-	intel_cleanup_engine(engine);
+	intel_engine_cleanup(engine);
 	return ret;
 }
 
-void intel_cleanup_engine(struct intel_engine_cs *engine)
+void intel_engine_cleanup(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv;
 
@@ -2242,11 +2243,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 	dev_priv = engine->i915;
 
 	if (engine->buffer) {
-		intel_stop_engine(engine);
+		intel_engine_stop(engine);
 		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-		intel_unpin_ringbuffer_obj(engine->buffer);
-		intel_ringbuffer_free(engine->buffer);
+		intel_unpin_ring(engine->buffer);
+		intel_ring_free(engine->buffer);
 		engine->buffer = NULL;
 	}
 
@@ -2309,7 +2310,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *target;
 
@@ -2354,7 +2355,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
 	int remain_usable = ring->effective_size - ring->tail;
 	int bytes = num_dwords * sizeof(u32);
@@ -2411,7 +2412,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int num_dwords =
 		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
@@ -2432,7 +2433,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
@@ -2518,7 +2519,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2564,7 +2565,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
@@ -2590,7 +2591,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2615,7 +2616,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2638,7 +2639,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2955,7 +2956,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 }
 
 int
-intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
+intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
@@ -2974,7 +2975,7 @@ intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 }
 
 int
-intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
+intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	uint32_t flush_domains;
@@ -2994,8 +2995,7 @@ intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-void
-intel_stop_engine(struct intel_engine_cs *engine)
+void intel_engine_stop(struct intel_engine_cs *engine)
 {
 	int ret;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 427fb19a7a2e..91d0aea695b2 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -62,7 +62,7 @@ struct  intel_hw_status_page {
 	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
 	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
-enum intel_ring_hangcheck_action {
+enum intel_engine_hangcheck_action {
 	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
 	HANGCHECK_ACTIVE,
@@ -72,17 +72,17 @@ enum intel_ring_hangcheck_action {
 
 #define HANGCHECK_SCORE_RING_HUNG 31
 
-struct intel_ring_hangcheck {
+struct intel_engine_hangcheck {
 	u64 acthd;
 	unsigned long user_interrupts;
 	u32 seqno;
 	int score;
-	enum intel_ring_hangcheck_action action;
+	enum intel_engine_hangcheck_action action;
 	int deadlock;
 	u32 instdone[I915_NUM_INSTDONE_REG];
 };
 
-struct intel_ringbuffer {
+struct intel_ring {
 	struct drm_i915_gem_object *obj;
 	void *vaddr;
 	struct i915_vma *vma;
@@ -149,7 +149,7 @@ struct intel_engine_cs {
 	u64 fence_context;
 	u32		mmio_base;
 	unsigned int irq_shift;
-	struct intel_ringbuffer *buffer;
+	struct intel_ring *buffer;
 	struct list_head buffers;
 
 	/* Rather than have every client wait upon all user interrupts,
@@ -329,7 +329,7 @@ struct intel_engine_cs {
 
 	struct i915_gem_context *last_context;
 
-	struct intel_ring_hangcheck hangcheck;
+	struct intel_engine_hangcheck hangcheck;
 
 	struct {
 		struct drm_i915_gem_object *obj;
@@ -376,8 +376,8 @@ intel_engine_flag(const struct intel_engine_cs *engine)
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *engine,
-		      struct intel_engine_cs *other)
+intel_engine_sync_index(struct intel_engine_cs *engine,
+			struct intel_engine_cs *other)
 {
 	int idx;
 
@@ -439,45 +439,44 @@ intel_write_status_page(struct intel_engine_cs *engine,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-				     struct intel_ringbuffer *ringbuf);
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-void intel_ringbuffer_free(struct intel_ringbuffer *ring);
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
+			   struct intel_ring *ring);
+void intel_unpin_ring(struct intel_ring *ring);
+void intel_ring_free(struct intel_ring *ring);
 
-void intel_stop_engine(struct intel_engine_cs *engine);
-void intel_cleanup_engine(struct intel_engine_cs *engine);
+void intel_engine_stop(struct intel_engine_cs *engine);
+void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ringbuffer *ring, u32 data)
+static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
 {
 	*(uint32_t *)(ring->vaddr + ring->tail) = data;
 	ring->tail += 4;
 }
 
-static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring,
-				       i915_reg_t reg)
+static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
 {
 	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
 }
 
-static inline void intel_ring_advance(struct intel_ringbuffer *ring)
+static inline void intel_ring_advance(struct intel_ring *ring)
 {
 	ring->tail &= ring->size - 1;
 }
 
 int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
+void intel_ring_update_space(struct intel_ring *ringbuf);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
@@ -491,7 +490,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
 	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
@@ -499,7 +498,7 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
+static inline u32 intel_ring_get_tail(struct intel_ring *ringbuf)
 {
 	return ringbuf->tail;
 }
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 06/18] drm/i915: Rename residual ringbuf parameters
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (4 preceding siblings ...)
  2016-07-20 13:11 ` [PATCH 05/18] drm/i915: Rename struct intel_ringbuffer to struct intel_ring Chris Wilson
@ 2016-07-20 13:11 ` Chris Wilson
  2016-07-21 12:01   ` Joonas Lahtinen
  2016-07-20 13:11 ` [PATCH 07/18] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
                   ` (15 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:11 UTC (permalink / raw)
  To: intel-gfx

Now that we have a clear ring/engine split and a struct intel_ring, we
no longer need the stopgap ringbuf names.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 66 ++++++++++++++++-----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  6 +--
 2 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9aaf81ba66c8..625fae42dc0c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -47,15 +47,15 @@ int __intel_ring_space(int head, int tail, int size)
 	return space - I915_RING_FREE_SPACE;
 }
 
-void intel_ring_update_space(struct intel_ring *ringbuf)
+void intel_ring_update_space(struct intel_ring *ring)
 {
-	if (ringbuf->last_retired_head != -1) {
-		ringbuf->head = ringbuf->last_retired_head;
-		ringbuf->last_retired_head = -1;
+	if (ring->last_retired_head != -1) {
+		ring->head = ring->last_retired_head;
+		ring->last_retired_head = -1;
 	}
 
-	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
-					    ringbuf->tail, ringbuf->size);
+	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
+					 ring->tail, ring->size);
 }
 
 static void __intel_engine_submit(struct intel_engine_cs *engine)
@@ -1978,25 +1978,25 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
-void intel_unpin_ring(struct intel_ring *ringbuf)
+void intel_unpin_ring(struct intel_ring *ring)
 {
-	GEM_BUG_ON(!ringbuf->vma);
-	GEM_BUG_ON(!ringbuf->vaddr);
+	GEM_BUG_ON(!ring->vma);
+	GEM_BUG_ON(!ring->vaddr);
 
-	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
-		i915_gem_object_unpin_map(ringbuf->obj);
+	if (HAS_LLC(ring->obj->base.dev) && !ring->obj->stolen)
+		i915_gem_object_unpin_map(ring->obj);
 	else
-		i915_vma_unpin_iomap(ringbuf->vma);
-	ringbuf->vaddr = NULL;
+		i915_vma_unpin_iomap(ring->vma);
+	ring->vaddr = NULL;
 
-	i915_gem_object_ggtt_unpin(ringbuf->obj);
-	ringbuf->vma = NULL;
+	i915_gem_object_ggtt_unpin(ring->obj);
+	ring->vma = NULL;
 }
 
 int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
-			   struct intel_ring *ringbuf)
+			   struct intel_ring *ring)
 {
-	struct drm_i915_gem_object *obj = ringbuf->obj;
+	struct drm_i915_gem_object *obj = ring->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
 	unsigned flags = PIN_OFFSET_BIAS | 4096;
 	void *addr;
@@ -2037,8 +2037,8 @@ int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
 		}
 	}
 
-	ringbuf->vaddr = addr;
-	ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+	ring->vaddr = addr;
+	ring->vma = i915_gem_obj_to_ggtt(obj);
 	return 0;
 
 err_unpin:
@@ -2046,29 +2046,29 @@ err_unpin:
 	return ret;
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ring *ring)
 {
-	i915_gem_object_put(ringbuf->obj);
-	ringbuf->obj = NULL;
+	i915_gem_object_put(ring->obj);
+	ring->obj = NULL;
 }
 
 static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ring *ringbuf)
+				      struct intel_ring *ring)
 {
 	struct drm_i915_gem_object *obj;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
+		obj = i915_gem_object_create_stolen(dev, ring->size);
 	if (obj == NULL)
-		obj = i915_gem_object_create(dev, ringbuf->size);
+		obj = i915_gem_object_create(dev, ring->size);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
 
-	ringbuf->obj = obj;
+	ring->obj = obj;
 
 	return 0;
 }
@@ -2175,7 +2175,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
 static int intel_init_engine(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ring *ringbuf;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(engine->buffer);
@@ -2200,12 +2200,12 @@ static int intel_init_engine(struct intel_engine_cs *engine)
 	if (ret)
 		goto error;
 
-	ringbuf = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
-	if (IS_ERR(ringbuf)) {
-		ret = PTR_ERR(ringbuf);
+	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
 		goto error;
 	}
-	engine->buffer = ringbuf;
+	engine->buffer = ring;
 
 	if (I915_NEED_GFX_HWS(dev_priv)) {
 		ret = init_status_page(engine);
@@ -2218,11 +2218,11 @@ static int intel_init_engine(struct intel_engine_cs *engine)
 			goto error;
 	}
 
-	ret = intel_pin_and_map_ring(dev_priv, ringbuf);
+	ret = intel_pin_and_map_ring(dev_priv, ring);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
 				engine->name, ret);
-		intel_destroy_ringbuffer_obj(ringbuf);
+		intel_destroy_ringbuffer_obj(ring);
 		goto error;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 91d0aea695b2..76fc9bd70873 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -471,7 +471,7 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 }
 
 int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ring *ringbuf);
+void intel_ring_update_space(struct intel_ring *ring);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
@@ -498,9 +498,9 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ring *ringbuf)
+static inline u32 intel_ring_get_tail(struct intel_ring *ring)
 {
-	return ringbuf->tail;
+	return ring->tail;
 }
 
 /*
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 07/18] drm/i915: Rename intel_pin_and_map_ring()
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (5 preceding siblings ...)
  2016-07-20 13:11 ` [PATCH 06/18] drm/i915: Rename residual ringbuf parameters Chris Wilson
@ 2016-07-20 13:11 ` Chris Wilson
  2016-07-21 12:02   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 08/18] drm/i915: Remove obsolete engine->gpu_caches_dirty Chris Wilson
                   ` (14 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:11 UTC (permalink / raw)
  To: intel-gfx

For more consistent object-oriented naming, functions that operate on a
struct intel_ring should be called intel_ring_<verb>(), so pick
intel_ring_pin() and intel_ring_unpin().
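
A rough usage sketch of the renamed pair (the error-handling context here
is illustrative; the signatures are taken from the hunks below):

        /* pin and map the ring before writing commands into it */
        ret = intel_ring_pin(ce->ring);
        if (ret)
                goto unpin_map;

        /* ... use the ring ... */

        /* drop the mapping and the GGTT pin again */
        intel_ring_unpin(ce->ring);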

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_lrc.c        |  4 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c | 38 ++++++++++++++++-----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  5 ++---
 3 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 5b9f98f6ed87..33d5916a6b0d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -973,7 +973,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-	ret = intel_pin_and_map_ring(dev_priv, ce->ring);
+	ret = intel_ring_pin(ce->ring);
 	if (ret)
 		goto unpin_map;
 
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	if (--ce->pin_count)
 		return;
 
-	intel_unpin_ring(ce->ring);
+	intel_ring_unpin(ce->ring);
 
 	i915_gem_object_unpin_map(ce->state);
 	i915_gem_object_ggtt_unpin(ce->state);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 625fae42dc0c..e7050b408ab7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1978,24 +1978,9 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
-void intel_unpin_ring(struct intel_ring *ring)
-{
-	GEM_BUG_ON(!ring->vma);
-	GEM_BUG_ON(!ring->vaddr);
-
-	if (HAS_LLC(ring->obj->base.dev) && !ring->obj->stolen)
-		i915_gem_object_unpin_map(ring->obj);
-	else
-		i915_vma_unpin_iomap(ring->vma);
-	ring->vaddr = NULL;
-
-	i915_gem_object_ggtt_unpin(ring->obj);
-	ring->vma = NULL;
-}
-
-int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
-			   struct intel_ring *ring)
+int intel_ring_pin(struct intel_ring *ring)
 {
+	struct drm_i915_private *dev_priv = ring->engine->i915;
 	struct drm_i915_gem_object *obj = ring->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
 	unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2046,6 +2031,21 @@ err_unpin:
 	return ret;
 }
 
+void intel_ring_unpin(struct intel_ring *ring)
+{
+	GEM_BUG_ON(!ring->vma);
+	GEM_BUG_ON(!ring->vaddr);
+
+	if (HAS_LLC(ring->engine->i915) && !ring->obj->stolen)
+		i915_gem_object_unpin_map(ring->obj);
+	else
+		i915_vma_unpin_iomap(ring->vma);
+	ring->vaddr = NULL;
+
+	i915_gem_object_ggtt_unpin(ring->obj);
+	ring->vma = NULL;
+}
+
 static void intel_destroy_ringbuffer_obj(struct intel_ring *ring)
 {
 	i915_gem_object_put(ring->obj);
@@ -2218,7 +2218,7 @@ static int intel_init_engine(struct intel_engine_cs *engine)
 			goto error;
 	}
 
-	ret = intel_pin_and_map_ring(dev_priv, ring);
+	ret = intel_ring_pin(ring);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
 				engine->name, ret);
@@ -2246,7 +2246,7 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 		intel_engine_stop(engine);
 		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-		intel_unpin_ring(engine->buffer);
+		intel_ring_unpin(engine->buffer);
 		intel_ring_free(engine->buffer);
 		engine->buffer = NULL;
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 76fc9bd70873..836931a6012b 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -441,9 +441,8 @@ intel_write_status_page(struct intel_engine_cs *engine,
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
-			   struct intel_ring *ring);
-void intel_unpin_ring(struct intel_ring *ring);
+int intel_ring_pin(struct intel_ring *ring);
+void intel_ring_unpin(struct intel_ring *ring);
 void intel_ring_free(struct intel_ring *ring);
 
 void intel_engine_stop(struct intel_engine_cs *engine);
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 08/18] drm/i915: Remove obsolete engine->gpu_caches_dirty
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (6 preceding siblings ...)
  2016-07-20 13:11 ` [PATCH 07/18] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-20 13:12 ` [PATCH 09/18] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
                   ` (13 subsequent siblings)
  21 siblings, 0 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

Space for flushing the GPU cache prior to completing the request is
preallocated, so emitting that flush can no longer fail and there is no
need to track dirty GPU caches on the engine.
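
Callers now go through the engine's ->emit_flush() vfunc directly; a
condensed sketch of the two patterns used below (error handling elided):

        /* invalidate GPU caches and TLBs before sampling new state */
        ret = req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);

        /* flush GPU writes before emitting the request's breadcrumb */
        ret = req->engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);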

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  9 +---
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 18 ++++----
 drivers/gpu/drm/i915/i915_gem_request.c    |  7 ++-
 drivers/gpu/drm/i915/intel_lrc.c           | 47 +++----------------
 drivers/gpu/drm/i915/intel_lrc.h           |  2 -
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 72 +++++++-----------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  7 ---
 8 files changed, 39 insertions(+), 125 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d9b861b856dc..e1eed0f449c6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -567,7 +567,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(dev_priv)) {
-		ret = req->engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 12adfec2d6a9..2a4841256f8e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -998,10 +998,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
 
-	/* Unconditionally invalidate gpu caches and ensure that we do flush
-	 * any residual writes from the previous batch.
-	 */
-	return intel_engine_invalidate_all_caches(req);
+	/* Unconditionally invalidate gpu caches and TLBs. */
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 static bool
@@ -1163,9 +1161,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
-	/* Unconditionally force add_request to emit a full flush. */
-	params->engine->gpu_caches_dirty = true;
-
 	/* Add a breadcrumb for the completion of the batch buffer */
 	__i915_add_request(params->request, params->batch_obj, true);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 01b825169164..da1061f4d70f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1664,9 +1664,9 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = req->engine->flush(req,
-				 I915_GEM_GPU_DOMAINS,
-				 I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1692,9 +1692,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = req->engine->flush(req,
-				 I915_GEM_GPU_DOMAINS,
-				 I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1712,9 +1712,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (req->engine->id != RCS) {
-		ret = req->engine->flush(req,
-					 I915_GEM_GPU_DOMAINS,
-					 I915_GEM_GPU_DOMAINS);
+		ret = req->engine->emit_flush(req,
+					      I915_GEM_GPU_DOMAINS,
+					      I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 54b27369225a..3a566abf5219 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -453,10 +453,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * what.
 	 */
 	if (flush_caches) {
-		if (i915.enable_execlists)
-			ret = logical_ring_flush_all_caches(request);
-		else
-			ret = intel_engine_flush_all_caches(request);
+		ret = request->engine->emit_flush(request,
+						  0, I915_GEM_GPU_DOMAINS);
+
 		/* Not allowed to fail! */
 		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 33d5916a6b0d..3158a1a38644 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -642,24 +642,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	spin_unlock_bh(&engine->execlist_lock);
 }
 
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
@@ -690,7 +672,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(req);
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -930,22 +912,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
@@ -1026,15 +992,15 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1051,8 +1017,9 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 9d58cd332bc3..212ee7c43438 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -72,8 +72,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
 
 int intel_engines_init(struct drm_device *dev);
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-
 /* Logical Ring Contexts */
 
 /* One extra page is added before LRC for GuC as shared data */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e7050b408ab7..6aa1657bbc9d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -688,8 +688,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -706,8 +707,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -2845,21 +2847,21 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
 		engine->add_request = gen8_render_add_request;
-		engine->flush = gen8_render_ring_flush;
+		engine->emit_flush = gen8_render_ring_flush;
 		if (i915.semaphores)
 			engine->semaphore.signal = gen8_rcs_signal;
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->flush = gen7_render_ring_flush;
+		engine->emit_flush = gen7_render_ring_flush;
 		if (IS_GEN6(dev_priv))
-			engine->flush = gen6_render_ring_flush;
+			engine->emit_flush = gen6_render_ring_flush;
 	} else if (IS_GEN5(dev_priv)) {
-		engine->flush = gen4_render_ring_flush;
+		engine->emit_flush = gen4_render_ring_flush;
 	} else {
 		if (INTEL_GEN(dev_priv) < 4)
-			engine->flush = gen2_render_ring_flush;
+			engine->emit_flush = gen2_render_ring_flush;
 		else
-			engine->flush = gen4_render_ring_flush;
+			engine->emit_flush = gen4_render_ring_flush;
 		engine->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 
@@ -2896,12 +2898,12 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev_priv))
 			engine->write_tail = gen6_bsd_ring_write_tail;
-		engine->flush = gen6_bsd_ring_flush;
+		engine->emit_flush = gen6_bsd_ring_flush;
 		if (INTEL_GEN(dev_priv) < 8)
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 	} else {
 		engine->mmio_base = BSD_RING_BASE;
-		engine->flush = bsd_ring_flush;
+		engine->emit_flush = bsd_ring_flush;
 		if (IS_GEN5(dev_priv))
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
 		else
@@ -2920,7 +2922,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_bsd_ring_flush;
+	engine->emit_flush = gen6_bsd_ring_flush;
 
 	return intel_init_engine(engine);
 }
@@ -2931,7 +2933,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 	if (INTEL_GEN(dev_priv) < 8)
 		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
@@ -2944,7 +2946,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 
 	if (INTEL_GEN(dev_priv) < 8) {
 		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
@@ -2955,46 +2957,6 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 	return intel_init_engine(engine);
 }
 
-int
-intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
-int
-intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 void intel_engine_stop(struct intel_engine_cs *engine)
 {
 	int ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 836931a6012b..49500cead7a5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -206,9 +206,6 @@ struct intel_engine_cs {
 
 	void		(*write_tail)(struct intel_engine_cs *ring,
 				      u32 value);
-	int __must_check (*flush)(struct drm_i915_gem_request *req,
-				  u32	invalidate_domains,
-				  u32	flush_domains);
 	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
@@ -325,8 +322,6 @@ struct intel_engine_cs {
 	 */
 	u32 last_submitted_seqno;
 
-	bool gpu_caches_dirty;
-
 	struct i915_gem_context *last_context;
 
 	struct intel_engine_hangcheck hangcheck;
@@ -474,8 +469,6 @@ void intel_ring_update_space(struct intel_ring *ring);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 09/18] drm/i915: Simplify request_alloc by returning the allocated request
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (7 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 08/18] drm/i915: Remove obsolete engine->gpu_caches_dirty Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-21 13:07   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 10/18] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
                   ` (12 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

It is simpler and leads to more readable code through the callstack if
the allocation returns the allocated struct through the return value.

The importance of this is that it no longer looks like we accidentally
allocate requests as a side effect of calling certain functions.
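
A minimal sketch of the new calling convention, as used by the call sites
converted below:

        req = i915_gem_request_alloc(engine, ctx);
        if (IS_ERR(req))
                return PTR_ERR(req);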

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |  3 +-
 drivers/gpu/drm/i915/i915_gem.c            | 75 ++++++++----------------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 12 ++---
 drivers/gpu/drm/i915/i915_gem_request.c    | 58 ++++++++---------------
 drivers/gpu/drm/i915/i915_trace.h          | 13 +++---
 drivers/gpu/drm/i915/intel_display.c       | 36 ++++++--------
 drivers/gpu/drm/i915/intel_lrc.c           |  2 +-
 drivers/gpu/drm/i915/intel_overlay.c       | 20 ++++----
 8 files changed, 79 insertions(+), 140 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f32ec6db5bfa..3f67431577e3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3168,8 +3168,7 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_engine_cs *to,
-			 struct drm_i915_gem_request **to_req);
+			 struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 95dbcfd94a80..77d7c0b012f4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2842,51 +2842,35 @@ out:
 
 static int
 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		       struct intel_engine_cs *to,
-		       struct drm_i915_gem_request *from_req,
-		       struct drm_i915_gem_request **to_req)
+		       struct drm_i915_gem_request *to,
+		       struct drm_i915_gem_request *from)
 {
-	struct intel_engine_cs *from;
 	int ret;
 
-	from = i915_gem_request_get_engine(from_req);
-	if (to == from)
+	if (to->engine == from->engine)
 		return 0;
 
-	if (i915_gem_request_completed(from_req))
+	if (i915_gem_request_completed(from))
 		return 0;
 
 	if (!i915.semaphores) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-		ret = __i915_wait_request(from_req,
-					  i915->mm.interruptible,
+		ret = __i915_wait_request(from,
+					  from->i915->mm.interruptible,
 					  NULL,
 					  NO_WAITBOOST);
 		if (ret)
 			return ret;
 
-		i915_gem_object_retire_request(obj, from_req);
+		i915_gem_object_retire_request(obj, from);
 	} else {
-		int idx = intel_engine_sync_index(from, to);
-		u32 seqno = i915_gem_request_get_seqno(from_req);
+		int idx = intel_engine_sync_index(from->engine, to->engine);
+		u32 seqno = i915_gem_request_get_seqno(from);
 
-		WARN_ON(!to_req);
-
-		if (seqno <= from->semaphore.sync_seqno[idx])
+		if (seqno <= from->engine->semaphore.sync_seqno[idx])
 			return 0;
 
-		if (*to_req == NULL) {
-			struct drm_i915_gem_request *req;
-
-			req = i915_gem_request_alloc(to, NULL);
-			if (IS_ERR(req))
-				return PTR_ERR(req);
-
-			*to_req = req;
-		}
-
-		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
-		ret = to->semaphore.sync_to(*to_req, from, seqno);
+		trace_i915_gem_ring_sync_to(to, from);
+		ret = to->engine->semaphore.sync_to(to, from->engine, seqno);
 		if (ret)
 			return ret;
 
@@ -2894,8 +2878,8 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		 * might have just caused seqno wrap under
 		 * the radar.
 		 */
-		from->semaphore.sync_seqno[idx] =
-			i915_gem_request_get_seqno(obj->last_read_req[from->id]);
+		from->engine->semaphore.sync_seqno[idx] =
+			i915_gem_request_get_seqno(obj->last_read_req[from->engine->id]);
 	}
 
 	return 0;
@@ -2905,17 +2889,12 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  * i915_gem_object_sync - sync an object to a ring.
  *
  * @obj: object which may be in use on another ring.
- * @to: ring we wish to use the object on. May be NULL.
- * @to_req: request we wish to use the object for. See below.
- *          This will be allocated and returned if a request is
- *          required but not passed in.
+ * @to: request we are wishing to use
  *
  * This code is meant to abstract object synchronization with the GPU.
- * Calling with NULL implies synchronizing the object with the CPU
- * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow one engine to write
- * into a buffer at any time, but multiple readers. To ensure each has
- * a coherent view of memory, we must:
+ * Conceptually we serialise writes between engines inside the GPU.
+ * We only allow one engine to write into a buffer at any time, but
+ * multiple readers. To ensure each has a coherent view of memory, we must:
  *
  * - If there is an outstanding write request to the object, the new
  *   request must wait for it to complete (either CPU or in hw, requests
@@ -2924,22 +2903,11 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  * - If we are a write request (pending_write_domain is set), the new
  *   request must wait for outstanding read requests to complete.
  *
- * For CPU synchronisation (NULL to) no request is required. For syncing with
- * rings to_req must be non-NULL. However, a request does not have to be
- * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
- * request will be allocated automatically and returned through *to_req. Note
- * that it is not guaranteed that commands will be emitted (because the system
- * might already be idle). Hence there is no need to create a request that
- * might never have any work submitted. Note further that if a request is
- * returned in *to_req, it is the responsibility of the caller to submit
- * that request (after potentially adding more work to it).
- *
  * Returns 0 if successful, else propagates up the lower layer error.
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		     struct intel_engine_cs *to,
-		     struct drm_i915_gem_request **to_req)
+		     struct drm_i915_gem_request *to)
 {
 	const bool readonly = obj->base.pending_write_domain == 0;
 	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
@@ -2948,9 +2916,6 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (!obj->active)
 		return 0;
 
-	if (to == NULL)
-		return i915_gem_object_wait_rendering(obj, readonly);
-
 	n = 0;
 	if (readonly) {
 		if (obj->last_write_req)
@@ -2961,7 +2926,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 				req[n++] = obj->last_read_req[i];
 	}
 	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
+		ret = __i915_gem_object_sync(obj, to, req[i]);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2a4841256f8e..5cea95c6f98b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -981,7 +981,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->engine, &req);
+			ret = i915_gem_object_sync(obj, req);
 			if (ret)
 				return ret;
 		}
@@ -1426,7 +1426,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1614,13 +1613,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	req = i915_gem_request_alloc(engine, ctx);
-	if (IS_ERR(req)) {
-		ret = PTR_ERR(req);
+	params->request = i915_gem_request_alloc(engine, ctx);
+	if (IS_ERR(params->request)) {
+		ret = PTR_ERR(params->request);
 		goto err_batch_unpin;
 	}
 
-	ret = i915_gem_request_add_to_client(req, file);
+	ret = i915_gem_request_add_to_client(params->request, file);
 	if (ret)
 		goto err_request;
 
@@ -1636,7 +1635,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->dispatch_flags          = dispatch_flags;
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
-	params->request                 = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 err_request:
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 3a566abf5219..2153b4fe4a1f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -292,10 +292,21 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 	return 0;
 }
 
-static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *engine,
-			 struct i915_gem_context *ctx,
-			 struct drm_i915_gem_request **req_out)
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct i915_gem_context *ctx)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
@@ -303,18 +314,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	u32 seqno;
 	int ret;
 
-	if (!req_out)
-		return -EINVAL;
-
-	*req_out = NULL;
-
 	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
 	 * and restart.
 	 */
 	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
 	/* Move the oldest request to the slab-cache (if not in use!) */
 	if (!list_empty(&engine->request_list)) {
@@ -326,7 +332,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 
 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
 	if (!req)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	ret = i915_gem_get_seqno(dev_priv, &seqno);
 	if (ret)
@@ -359,39 +365,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (ret)
 		goto err_ctx;
 
-	*req_out = req;
-	return 0;
+	return req;
 
 err_ctx:
 	i915_gem_context_put(ctx);
 err:
 	kmem_cache_free(dev_priv->requests, req);
-	return ret;
-}
-
-/**
- * i915_gem_request_alloc - allocate a request structure
- *
- * @engine: engine that we wish to issue the request on.
- * @ctx: context that the request will be associated with.
- *       This can be NULL if the request is not directly related to
- *       any specific user context, in which case this function will
- *       choose an appropriate context to use.
- *
- * Returns a pointer to the allocated request if successful,
- * or an error code if not.
- */
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-		       struct i915_gem_context *ctx)
-{
-	struct drm_i915_gem_request *req;
-	int err;
-
-	if (!ctx)
-		ctx = engine->i915->kernel_context;
-	err = __i915_gem_request_alloc(engine, ctx, &req);
-	return err ? ERR_PTR(err) : req;
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 007112d1e049..9e43c0aa6e3b 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -449,10 +449,9 @@ TRACE_EVENT(i915_gem_evict_vm,
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-	    TP_PROTO(struct drm_i915_gem_request *to_req,
-		     struct intel_engine_cs *from,
-		     struct drm_i915_gem_request *req),
-	    TP_ARGS(to_req, from, req),
+	    TP_PROTO(struct drm_i915_gem_request *to,
+		     struct drm_i915_gem_request *from),
+	    TP_ARGS(to, from),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -463,9 +462,9 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 
 	    TP_fast_assign(
 			   __entry->dev = from->i915->drm.primary->index;
-			   __entry->sync_from = from->id;
-			   __entry->sync_to = to_req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->sync_from = from->engine->id;
+			   __entry->sync_to = to->engine->id;
+			   __entry->seqno = from->fence.seqno;
 			   ),
 
 	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bff172c45ff7..5d4420b67642 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11583,7 +11583,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_flip_work *work;
 	struct intel_engine_cs *engine;
 	bool mmio_flip;
-	struct drm_i915_gem_request *request = NULL;
+	struct drm_i915_gem_request *request;
 	int ret;
 
 	/*
@@ -11690,22 +11690,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	mmio_flip = use_mmio_flip(engine, obj);
 
-	/* When using CS flips, we want to emit semaphores between rings.
-	 * However, when using mmio flips we will create a task to do the
-	 * synchronisation, so all we want here is to pin the framebuffer
-	 * into the display plane and skip any waits.
-	 */
-	if (!mmio_flip) {
-		ret = i915_gem_object_sync(obj, engine, &request);
-		if (!ret && !request) {
-			request = i915_gem_request_alloc(engine, NULL);
-			ret = PTR_ERR_OR_ZERO(request);
-		}
-
-		if (ret)
-			goto cleanup_pending;
-	}
-
 	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
 	if (ret)
 		goto cleanup_pending;
@@ -11723,14 +11707,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 		schedule_work(&work->mmio_work);
 	} else {
-		i915_gem_request_assign(&work->flip_queued_req, request);
+		request = i915_gem_request_alloc(engine, engine->last_context);
+		if (IS_ERR(request)) {
+			ret = PTR_ERR(request);
+			goto cleanup_unpin;
+		}
+
+		ret = i915_gem_object_sync(obj, request);
+		if (ret)
+			goto cleanup_request;
+
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 						   page_flip_flags);
 		if (ret)
-			goto cleanup_unpin;
+			goto cleanup_request;
 
 		intel_mark_page_flip_active(intel_crtc, work);
 
+		work->flip_queued_req = i915_gem_request_get(request);
 		i915_add_request_no_flush(request);
 	}
 
@@ -11745,11 +11739,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	return 0;
 
+cleanup_request:
+	i915_add_request_no_flush(request);
 cleanup_unpin:
 	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
 cleanup_pending:
-	if (!IS_ERR_OR_NULL(request))
-		i915_add_request_no_flush(request);
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
 cleanup:
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3158a1a38644..6cd0e24ed50c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -655,7 +655,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->engine, &req);
+			ret = i915_gem_object_sync(obj, req);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a5071e281088..356a1f6f95aa 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -229,11 +229,18 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	return 0;
 }
 
+static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
+{
+	struct drm_i915_private *dev_priv = overlay->i915;
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+
+	return i915_gem_request_alloc(engine, dev_priv->kernel_context);
+}
+
 /* overlay needs to be disable in OCMD reg */
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
 	struct intel_ring *ring;
 	int ret;
@@ -241,7 +248,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	req = i915_gem_request_alloc(engine, NULL);
+	req = alloc_request(overlay);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -268,7 +275,6 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 				  bool load_polyphase_filter)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
 	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
@@ -285,7 +291,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	req = i915_gem_request_alloc(engine, NULL);
+	req = alloc_request(overlay);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -338,7 +344,6 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
 	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
@@ -352,7 +357,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	req = i915_gem_request_alloc(engine, NULL);
+	req = alloc_request(overlay);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -412,7 +417,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -428,7 +432,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 		struct drm_i915_gem_request *req;
 		struct intel_ring *ring;
 
-		req = i915_gem_request_alloc(engine, NULL);
+		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 10/18] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (8 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 09/18] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-21 13:39   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 11/18] drm/i915: Convert engine->write_tail to operate on a request Chris Wilson
                   ` (11 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

Both the ->dispatch_execbuffer and ->emit_bb_start callbacks do exactly
the same thing: they add MI_BATCHBUFFER_START to the request's
ringbuffer. We need only one vfunc.
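
The surviving vfunc takes the batch offset, its length and the dispatch
flags; a sketch of a converted call site (variable names illustrative):

        ret = engine->emit_bb_start(req, exec_start, exec_len,
                                    dispatch_flags);
        if (ret)
                return ret;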

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  6 ++--
 drivers/gpu/drm/i915/i915_gem_render_state.c | 16 +++++-----
 drivers/gpu/drm/i915/intel_lrc.c             | 15 ++++++---
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 48 ++++++++++++++--------------
 drivers/gpu/drm/i915/intel_ringbuffer.h      | 12 +++----
 5 files changed, 50 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5cea95c6f98b..2d9f1f4bc058 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1326,9 +1326,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (exec_len == 0)
 		exec_len = params->batch_obj->base.size;
 
-	ret = params->engine->dispatch_execbuffer(params->request,
-						  exec_start, exec_len,
-						  params->dispatch_flags);
+	ret = params->engine->emit_bb_start(params->request,
+					    exec_start, exec_len,
+					    params->dispatch_flags);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index b2be4676a5cf..2ba759f3ab6f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -234,18 +234,18 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
-					     so.rodata->batch_items * 4,
-					     I915_DISPATCH_SECURE);
+	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
+					 so.rodata->batch_items * 4,
+					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	if (so.aux_batch_size > 8) {
-		ret = req->engine->dispatch_execbuffer(req,
-						     (so.ggtt_offset +
-						      so.aux_batch_offset),
-						     so.aux_batch_size,
-						     I915_DISPATCH_SECURE);
+		ret = req->engine->emit_bb_start(req,
+						 (so.ggtt_offset +
+						  so.aux_batch_offset),
+						 so.aux_batch_size,
+						 I915_DISPATCH_SECURE);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 6cd0e24ed50c..d17a193e8eaf 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -859,7 +859,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	exec_start = params->batch_obj_vm_offset +
 		     args->batch_start_offset;
 
-	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+	ret = engine->emit_bb_start(params->request,
+				    exec_start, args->batch_len,
+				    params->dispatch_flags);
 	if (ret)
 		return ret;
 
@@ -1535,7 +1537,8 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 }
 
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
-			      u64 offset, unsigned dispatch_flags)
+			      u64 offset, u32 len,
+			      unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
@@ -1811,13 +1814,15 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 		return 0;
 
 	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
-				       I915_DISPATCH_SECURE);
+					 so.rodata->batch_items * 4,
+					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	ret = req->engine->emit_bb_start(req,
-				       (so.ggtt_offset + so.aux_batch_offset),
-				       I915_DISPATCH_SECURE);
+					 (so.ggtt_offset + so.aux_batch_offset),
+					 so.aux_batch_size,
+					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6aa1657bbc9d..4488db485fa4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1779,9 +1779,9 @@ gen8_irq_disable(struct intel_engine_cs *engine)
 }
 
 static int
-i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 length,
-			 unsigned dispatch_flags)
+i965_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 length,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1806,9 +1806,9 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 len,
-			 unsigned dispatch_flags)
+i830_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	u32 cs_offset = req->engine->scratch.gtt_offset;
@@ -1868,9 +1868,9 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 static int
-i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 len,
-			 unsigned dispatch_flags)
+i915_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2563,9 +2563,9 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			      u64 offset, u32 len,
-			      unsigned dispatch_flags)
+gen8_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
@@ -2589,9 +2589,9 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			     u64 offset, u32 len,
-			     unsigned dispatch_flags)
+hsw_emit_bb_start(struct drm_i915_gem_request *req,
+		  u64 offset, u32 len,
+		  unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2614,9 +2614,9 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			      u64 offset, u32 len,
-			      unsigned dispatch_flags)
+gen6_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2820,15 +2820,15 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 		engine->add_request = gen6_add_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
-		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		engine->emit_bb_start = gen8_emit_bb_start;
 	else if (INTEL_GEN(dev_priv) >= 6)
-		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		engine->emit_bb_start = gen6_emit_bb_start;
 	else if (INTEL_GEN(dev_priv) >= 4)
-		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+		engine->emit_bb_start = i965_emit_bb_start;
 	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
-		engine->dispatch_execbuffer = i830_dispatch_execbuffer;
+		engine->emit_bb_start = i830_emit_bb_start;
 	else
-		engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+		engine->emit_bb_start = i915_emit_bb_start;
 
 	intel_ring_init_irq(dev_priv, engine);
 	intel_ring_init_semaphores(dev_priv, engine);
@@ -2866,7 +2866,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 	}
 
 	if (IS_HASWELL(dev_priv))
-		engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+		engine->emit_bb_start = hsw_emit_bb_start;
 
 	engine->init_hw = init_render_ring;
 	engine->cleanup = render_ring_cleanup;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 49500cead7a5..85d6a70554b9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -214,12 +214,6 @@ struct intel_engine_cs {
 	 * monotonic, even if not coherent.
 	 */
 	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
-	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
-					       u64 offset, u32 length,
-					       unsigned dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
 	void		(*cleanup)(struct intel_engine_cs *ring);
 
 	/* GEN8 signal/wait table - never trust comments!
@@ -295,7 +289,11 @@ struct intel_engine_cs {
 				      u32 invalidate_domains,
 				      u32 flush_domains);
 	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, unsigned dispatch_flags);
+					 u64 offset, u32 length,
+					 unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
 
 	/**
 	 * List of objects currently involved in rendering from the
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 11/18] drm/i915: Convert engine->write_tail to operate on a request
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (9 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 10/18] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-21 13:52   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 12/18] drm/i915: Unify request submission Chris Wilson
                   ` (10 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

If we rewrite the I915_WRITE_TAIL specialisation for the legacy
ringbuffer as submitting the request to the hardware, we can unify the
vfunc with both execlists and the GuC in the next patch.
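
The legacy add_request implementations then record the ring tail in the
request and hand it to the new ->submit_request() hook, which performs
the actual register write; a condensed sketch assembled from the hunks
below:

        /* at the end of the i9xx/gen6 add_request implementations */
        req->tail = intel_ring_get_tail(ring);
        req->engine->submit_request(req);

        /* the default hook simply writes the recorded tail */
        static void i9xx_submit_request(struct drm_i915_gem_request *request)
        {
                struct drm_i915_private *dev_priv = request->i915;

                I915_WRITE_TAIL(request->engine, request->tail);
        }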

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_request.c |  7 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 52 ++++++++++++++++-----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  3 +-
 3 files changed, 29 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 2153b4fe4a1f..408f390a4c98 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -469,13 +469,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->postfix = intel_ring_get_tail(ring);
 
-	if (i915.enable_execlists) {
+	if (i915.enable_execlists)
 		ret = engine->emit_request(request);
-	} else {
+	else
 		ret = engine->add_request(request);
-
-		request->tail = intel_ring_get_tail(ring);
-	}
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
 	/* Sanity check that the reserved size was large enough. */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4488db485fa4..43dfa4be1cfd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -58,14 +58,6 @@ void intel_ring_update_space(struct intel_ring *ring)
 					 ring->tail, ring->size);
 }
 
-static void __intel_engine_submit(struct intel_engine_cs *engine)
-{
-	struct intel_ring *ring = engine->buffer;
-
-	ring->tail &= ring->size - 1;
-	engine->write_tail(engine, ring->tail);
-}
-
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
@@ -421,13 +413,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 	return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-static void ring_write_tail(struct intel_engine_cs *engine,
-			    u32 value)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	I915_WRITE_TAIL(engine, value);
-}
-
 u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -541,7 +526,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 
 	I915_WRITE_CTL(engine, 0);
 	I915_WRITE_HEAD(engine, 0);
-	engine->write_tail(engine, 0);
+	I915_WRITE_TAIL(engine, 0);
 
 	if (!IS_GEN2(dev_priv)) {
 		(void)I915_READ_CTL(engine);
@@ -1467,7 +1452,11 @@ gen6_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, req->fence.seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_engine_submit(req->engine);
+	intel_ring_advance(ring);
+
+	req->tail = intel_ring_get_tail(ring);
+
+	req->engine->submit_request(req);
 
 	return 0;
 }
@@ -1497,7 +1486,8 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
-	__intel_engine_submit(engine);
+
+	req->engine->submit_request(req);
 
 	return 0;
 }
@@ -1716,11 +1706,22 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, req->fence.seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_engine_submit(req->engine);
+	intel_ring_advance(ring);
+
+	req->tail = intel_ring_get_tail(ring);
+
+	req->engine->submit_request(req);
 
 	return 0;
 }
 
+static void i9xx_submit_request(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_private *dev_priv = request->i915;
+
+	I915_WRITE_TAIL(request->engine, request->tail);
+}
+
 static void
 gen6_irq_enable(struct intel_engine_cs *engine)
 {
@@ -2479,10 +2480,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 	rcu_read_unlock();
 }
 
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
-				     u32 value)
+static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *dev_priv = request->i915;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -2506,8 +2506,8 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
 	/* Now that the ring is fully powered up, update the tail */
-	I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
-	POSTING_READ_FW(RING_TAIL(engine->mmio_base));
+	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
+	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
 
 	/* Let the ring send IDLE messages to the GT again,
 	 * and so let it sleep to conserve power when idle.
@@ -2813,7 +2813,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 				      struct intel_engine_cs *engine)
 {
 	engine->init_hw = init_ring_common;
-	engine->write_tail = ring_write_tail;
+	engine->submit_request = i9xx_submit_request;
 
 	engine->add_request = i9xx_add_request;
 	if (INTEL_GEN(dev_priv) >= 6)
@@ -2897,7 +2897,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) >= 6) {
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev_priv))
-			engine->write_tail = gen6_bsd_ring_write_tail;
+			engine->submit_request = gen6_bsd_submit_request;
 		engine->emit_flush = gen6_bsd_ring_flush;
 		if (INTEL_GEN(dev_priv) < 8)
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 85d6a70554b9..1a38c383327e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -204,8 +204,6 @@ struct intel_engine_cs {
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	void		(*write_tail)(struct intel_engine_cs *ring,
-				      u32 value);
 	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
@@ -294,6 +292,7 @@ struct intel_engine_cs {
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
 #define I915_DISPATCH_RS     0x4
+	void		(*submit_request)(struct drm_i915_gem_request *req);
 
 	/**
 	 * List of objects currently involved in rendering from the
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 12/18] drm/i915: Unify request submission
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (10 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 11/18] drm/i915: Convert engine->write_tail to operate on a request Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-22  8:03   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 13/18] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal() Chris Wilson
                   ` (9 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

Move request submission out of emit_request into its own common vfunc,
invoked from i915_add_request().
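
To make the new split easier to follow than the scattered hunks, a
rough sketch of the flow after this patch (simplified, error handling
elided; add_request_sketch is an illustrative name, the real code is
__i915_add_request() in the diff below):

	static void add_request_sketch(struct drm_i915_gem_request *request)
	{
		/* Build the breadcrumb into the ringbuffer and record
		 * request->tail; nothing has been sent to the hardware yet.
		 */
		request->engine->emit_request(request);

		/* Single hand-off point to the backend: legacy rings write
		 * the TAIL register, execlists queue to the ELSP, and the
		 * GuC client simply overrides this vfunc when submission
		 * is enabled.
		 */
		request->engine->submit_request(request);
	}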

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_request.c    |  7 +++----
 drivers/gpu/drm/i915/i915_guc_submission.c |  9 ++++++---
 drivers/gpu/drm/i915/intel_guc.h           |  1 -
 drivers/gpu/drm/i915/intel_lrc.c           | 10 +++-------
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 26 ++++++++++----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    | 23 +++++++++++------------
 6 files changed, 33 insertions(+), 43 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 408f390a4c98..3e633b47213c 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -469,12 +469,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->postfix = intel_ring_get_tail(ring);
 
-	if (i915.enable_execlists)
-		ret = engine->emit_request(request);
-	else
-		ret = engine->add_request(request);
 	/* Not allowed to fail! */
+	ret = engine->emit_request(request);
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
+
 	/* Sanity check that the reserved size was large enough. */
 	ret = intel_ring_get_tail(ring) - request_start;
 	if (ret < 0)
@@ -485,6 +483,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		  reserved_tail, ret);
 
 	i915_gem_mark_busy(engine);
+	engine->submit_request(request);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index eccd34832fe6..32d0e1890950 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -585,7 +585,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouln't happen.
  */
-int i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
 {
 	unsigned int engine_id = rq->engine->id;
 	struct intel_guc *guc = &rq->i915->guc;
@@ -602,8 +602,6 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
 
 	guc->submissions[engine_id] += 1;
 	guc->last_seqno[engine_id] = rq->fence.seqno;
-
-	return b_ret;
 }
 
 /*
@@ -992,6 +990,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_guc_client *client;
+	struct intel_engine_cs *engine;
 
 	/* client for execbuf submission */
 	client = guc_client_alloc(dev_priv,
@@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	host2guc_sample_forcewake(guc, client);
 	guc_init_doorbell_hw(guc);
 
+	/* Take over from manual control of ELSP (execlists) */
+	for_each_engine(engine, dev_priv)
+		engine->submit_request = i915_guc_submit;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 3e3e743740c0..623cf26cd784 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -160,7 +160,6 @@ extern int intel_guc_resume(struct drm_device *dev);
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
 int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
-int i915_guc_submit(struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d17a193e8eaf..52edbcc9bca0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -773,12 +773,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	 */
 	request->previous_context = engine->last_context;
 	engine->last_context = request->ctx;
-
-	if (i915.enable_guc_submission)
-		i915_guc_submit(request);
-	else
-		execlists_context_queue(request);
-
 	return 0;
 }
 
@@ -1904,8 +1898,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overriden by each engine. */
 	engine->init_hw = gen8_init_common_ring;
-	engine->emit_request = gen8_emit_request;
 	engine->emit_flush = gen8_emit_flush;
+	engine->emit_request = gen8_emit_request;
+	engine->submit_request = execlists_context_queue;
+
 	engine->irq_enable = gen8_logical_ring_enable_irq;
 	engine->irq_disable = gen8_logical_ring_disable_irq;
 	engine->emit_bb_start = gen8_emit_bb_start;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 43dfa4be1cfd..907d933d62aa 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1427,15 +1427,14 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 }
 
 /**
- * gen6_add_request - Update the semaphore mailbox registers
+ * gen6_emit_request - Update the semaphore mailbox registers
  *
  * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static int
-gen6_add_request(struct drm_i915_gem_request *req)
+static int gen6_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1456,13 +1455,10 @@ gen6_add_request(struct drm_i915_gem_request *req)
 
 	req->tail = intel_ring_get_tail(ring);
 
-	req->engine->submit_request(req);
-
 	return 0;
 }
 
-static int
-gen8_render_add_request(struct drm_i915_gem_request *req)
+static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
@@ -1486,8 +1482,9 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
-	req->engine->submit_request(req);
+	req->tail = intel_ring_get_tail(ring);
 
 	return 0;
 }
@@ -1692,8 +1689,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	return 0;
 }
 
-static int
-i9xx_add_request(struct drm_i915_gem_request *req)
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1710,8 +1706,6 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 
 	req->tail = intel_ring_get_tail(ring);
 
-	req->engine->submit_request(req);
-
 	return 0;
 }
 
@@ -2813,11 +2807,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 				      struct intel_engine_cs *engine)
 {
 	engine->init_hw = init_ring_common;
-	engine->submit_request = i9xx_submit_request;
 
-	engine->add_request = i9xx_add_request;
+	engine->emit_request = i9xx_emit_request;
 	if (INTEL_GEN(dev_priv) >= 6)
-		engine->add_request = gen6_add_request;
+		engine->emit_request = gen6_emit_request;
+	engine->submit_request = i9xx_submit_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		engine->emit_bb_start = gen8_emit_bb_start;
@@ -2846,7 +2840,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->add_request = gen8_render_add_request;
+		engine->emit_request = gen8_render_emit_request;
 		engine->emit_flush = gen8_render_ring_flush;
 		if (i915.semaphores)
 			engine->semaphore.signal = gen8_rcs_signal;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1a38c383327e..856b732ddbbd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -204,7 +204,17 @@ struct intel_engine_cs {
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	int		(*add_request)(struct drm_i915_gem_request *req);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
+				      u32 invalidate_domains,
+				      u32 flush_domains);
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
+					 u64 offset, u32 length,
+					 unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
+	int		(*emit_request)(struct drm_i915_gem_request *req);
+	void		(*submit_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@@ -282,17 +292,6 @@ struct intel_engine_cs {
 	unsigned int idle_lite_restore_wa;
 	bool disable_lite_restore_wa;
 	u32 ctx_desc_template;
-	int		(*emit_request)(struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct drm_i915_gem_request *request,
-				      u32 invalidate_domains,
-				      u32 flush_domains);
-	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, u32 length,
-					 unsigned int dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
-	void		(*submit_request)(struct drm_i915_gem_request *req);
 
 	/**
 	 * List of objects currently involved in rendering from the
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 13/18] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal()
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (11 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 12/18] drm/i915: Unify request submission Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-22  8:15   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 14/18] drm/i915: Reuse legacy breadcrumbs + tail emission Chris Wilson
                   ` (8 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

Rather than pass in the num_dwords that the caller wishes to use after
the signal command packet, split the breadcrumb emission into two phases
and have both the signal and breadcrumb individually acquire space on
the ring. This makes the interface simpler for the reader, and will
simplify later patches.
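
As a sketch of the two-phase pattern (emit_request_sketch is an
illustrative name; the per-gen dword counts and mailbox writes are in
the hunks below):

	static int emit_request_sketch(struct drm_i915_gem_request *req)
	{
		int ret;

		/* Phase 1: the signaller reserves exactly the ring space
		 * its own mailbox updates need.
		 */
		if (req->engine->semaphore.signal) {
			ret = req->engine->semaphore.signal(req);
			if (ret)
				return ret;
		}

		/* Phase 2: the breadcrumb reserves its own dwords, without
		 * knowing how much the signaller consumed.
		 */
		ret = intel_ring_begin(req, 4);
		if (ret)
			return ret;

		/* ... breadcrumb + MI_USER_INTERRUPT as before ... */
		return 0;
	}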

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 51 ++++++++++++++-------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  4 +--
 2 files changed, 23 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 907d933d62aa..9c66745fc8d7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1308,10 +1308,8 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 	intel_fini_pipe_control(engine);
 }
 
-static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
-			   unsigned int num_dwords)
+static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req)
 {
-#define MBOX_UPDATE_DWORDS 8
 	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
@@ -1319,10 +1317,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
-	ret = intel_ring_begin(signaller_req, num_dwords);
+	ret = intel_ring_begin(signaller_req, (num_rings-1) * 8);
 	if (ret)
 		return ret;
 
@@ -1346,14 +1341,13 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
+	intel_ring_advance(signaller);
 
 	return 0;
 }
 
-static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
-			   unsigned int num_dwords)
+static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req)
 {
-#define MBOX_UPDATE_DWORDS 6
 	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
@@ -1361,10 +1355,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
-	ret = intel_ring_begin(signaller_req, num_dwords);
+	ret = intel_ring_begin(signaller_req, (num_rings-1) * 6);
 	if (ret)
 		return ret;
 
@@ -1386,12 +1377,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
+	intel_ring_advance(signaller);
 
 	return 0;
 }
 
-static int gen6_signal(struct drm_i915_gem_request *signaller_req,
-		       unsigned int num_dwords)
+static int gen6_signal(struct drm_i915_gem_request *signaller_req)
 {
 	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
@@ -1399,12 +1390,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 	enum intel_engine_id id;
 	int ret, num_rings;
 
-#define MBOX_UPDATE_DWORDS 3
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
-#undef MBOX_UPDATE_DWORDS
-
-	ret = intel_ring_begin(signaller_req, num_dwords);
+	ret = intel_ring_begin(signaller_req, round_up((num_rings-1) * 3, 2));
 	if (ret)
 		return ret;
 
@@ -1422,6 +1409,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 	/* If num_dwords was rounded, make sure the tail pointer is correct */
 	if (num_rings % 2 == 0)
 		intel_ring_emit(signaller, MI_NOOP);
+	intel_ring_advance(signaller);
 
 	return 0;
 }
@@ -1439,11 +1427,13 @@ static int gen6_emit_request(struct drm_i915_gem_request *req)
 	struct intel_ring *ring = req->ring;
 	int ret;
 
-	if (req->engine->semaphore.signal)
-		ret = req->engine->semaphore.signal(req, 4);
-	else
-		ret = intel_ring_begin(req, 4);
+	if (req->engine->semaphore.signal) {
+		ret = req->engine->semaphore.signal(req);
+		if (ret)
+			return ret;
+	}
 
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -1464,10 +1454,13 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 	struct intel_ring *ring = req->ring;
 	int ret;
 
-	if (engine->semaphore.signal)
-		ret = engine->semaphore.signal(req, 8);
-	else
-		ret = intel_ring_begin(req, 8);
+	if (engine->semaphore.signal) {
+		ret = engine->semaphore.signal(req);
+		if (ret)
+			return ret;
+	}
+
+	ret = intel_ring_begin(req, 8);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 856b732ddbbd..08e86204a3d5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -278,9 +278,7 @@ struct intel_engine_cs {
 		int	(*sync_to)(struct drm_i915_gem_request *to_req,
 				   struct intel_engine_cs *from,
 				   u32 seqno);
-		int	(*signal)(struct drm_i915_gem_request *signaller_req,
-				  /* num_dwords needed by caller */
-				  unsigned int num_dwords);
+		int	(*signal)(struct drm_i915_gem_request *signaller_req);
 	} semaphore;
 
 	/* Execlists */
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 14/18] drm/i915: Reuse legacy breadcrumbs + tail emission
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (12 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 13/18] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal() Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-22  8:34   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 15/18] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores Chris Wilson
                   ` (7 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

As GEN6+ is now a simple variant on the basic breadcrumbs + tail write,
reuse the common code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 71 ++++++++++++++-------------------
 1 file changed, 29 insertions(+), 42 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9c66745fc8d7..a74b42fc8f48 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1414,25 +1414,18 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req)
 	return 0;
 }
 
-/**
- * gen6_emit_request - Update the semaphore mailbox registers
- *
- * @request - request to write to the ring
- *
- * Update the mailbox registers in the *other* rings with the current seqno.
- * This acts like a signal in the canonical semaphore.
- */
-static int gen6_emit_request(struct drm_i915_gem_request *req)
+static void i9xx_submit_request(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_private *dev_priv = request->i915;
+
+	I915_WRITE_TAIL(request->engine, request->tail);
+}
+
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
 
-	if (req->engine->semaphore.signal) {
-		ret = req->engine->semaphore.signal(req);
-		if (ret)
-			return ret;
-	}
-
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
@@ -1448,6 +1441,27 @@ static int gen6_emit_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
+/**
+ * gen6_emit_request - Update the semaphore mailbox registers
+ *
+ * @request - request to write to the ring
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+static int gen6_emit_request(struct drm_i915_gem_request *req)
+{
+	if (req->engine->semaphore.signal) {
+		int ret;
+
+		ret = req->engine->semaphore.signal(req);
+		if (ret)
+			return ret;
+	}
+
+	return i9xx_emit_request(req);
+}
+
 static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
@@ -1682,33 +1696,6 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	return 0;
 }
 
-static int i9xx_emit_request(struct drm_i915_gem_request *req)
-{
-	struct intel_ring *ring = req->ring;
-	int ret;
-
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, req->fence.seqno);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
-
-	req->tail = intel_ring_get_tail(ring);
-
-	return 0;
-}
-
-static void i9xx_submit_request(struct drm_i915_gem_request *request)
-{
-	struct drm_i915_private *dev_priv = request->i915;
-
-	I915_WRITE_TAIL(request->engine, request->tail);
-}
-
 static void
 gen6_irq_enable(struct intel_engine_cs *engine)
 {
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 15/18] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (13 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 14/18] drm/i915: Reuse legacy breadcrumbs + tail emission Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-21 13:55   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 16/18] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
                   ` (6 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

As gen6_emit_request() only differs from i9xx_emit_request() when
semaphores are enabled, only use the specialised vfunc in that scenario.
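
Roughly, the vfunc selection becomes (sketch only; setup_sketch is an
illustrative stand-in for the default-vfuncs and semaphore-init paths
touched below):

	static void setup_sketch(struct intel_engine_cs *engine)
	{
		/* Default for every engine: the plain breadcrumb +
		 * tail write.
		 */
		engine->emit_request = i9xx_emit_request;

		/* Only when semaphores are enabled do we pay for the
		 * mailbox writes on top of the common breadcrumb.
		 */
		if (i915.semaphores)
			engine->emit_request = gen6_sema_emit_request;
	}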

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 18 ++++++++----------
 1 file changed, 8 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a74b42fc8f48..8ae25bcc876e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1442,22 +1442,20 @@ static int i9xx_emit_request(struct drm_i915_gem_request *req)
 }
 
 /**
- * gen6_emit_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_request - Update the semaphore mailbox registers
  *
  * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static int gen6_emit_request(struct drm_i915_gem_request *req)
+static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
 {
-	if (req->engine->semaphore.signal) {
-		int ret;
+	int ret;
 
-		ret = req->engine->semaphore.signal(req);
-		if (ret)
-			return ret;
-	}
+	ret = req->engine->semaphore.signal(req);
+	if (ret)
+		return ret;
 
 	return i9xx_emit_request(req);
 }
@@ -2687,6 +2685,8 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 	if (!i915.semaphores)
 		return;
 
+	engine->emit_request = gen6_sema_emit_request;
+
 	if (INTEL_GEN(dev_priv) >= 8) {
 		u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
 
@@ -2789,8 +2789,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 	engine->init_hw = init_ring_common;
 
 	engine->emit_request = i9xx_emit_request;
-	if (INTEL_GEN(dev_priv) >= 6)
-		engine->emit_request = gen6_emit_request;
 	engine->submit_request = i9xx_submit_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 16/18] drm/i915: Remove duplicate golden render state init from execlists
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (14 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 15/18] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-21 14:18   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 17/18] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
                   ` (5 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

Now that we use the same vfuncs for emitting the batch buffer in both
execlists and legacy, the golden render state initialisation is
identical between both.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_render_state.c | 23 +++++++++++++------
 drivers/gpu/drm/i915/i915_gem_render_state.h | 18 ---------------
 drivers/gpu/drm/i915/intel_lrc.c             | 34 +---------------------------
 drivers/gpu/drm/i915/intel_renderstate.h     | 16 +++++++++----
 4 files changed, 28 insertions(+), 63 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 2ba759f3ab6f..dd84793d75d9 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,6 +28,15 @@
 #include "i915_drv.h"
 #include "intel_renderstate.h"
 
+struct render_state {
+	const struct intel_renderstate_rodata *rodata;
+	struct drm_i915_gem_object *obj;
+	u64 ggtt_offset;
+	int gen;
+	u32 aux_batch_size;
+	u32 aux_batch_offset;
+};
+
 static const struct intel_renderstate_rodata *
 render_state_get_rodata(const int gen)
 {
@@ -51,6 +60,7 @@ static int render_state_init(struct render_state *so,
 	int ret;
 
 	so->gen = INTEL_GEN(dev_priv);
+	so->ggtt_offset = 0;
 	so->rodata = render_state_get_rodata(so->gen);
 	if (so->rodata == NULL)
 		return 0;
@@ -192,14 +202,14 @@ err_out:
 
 #undef OUT_BATCH
 
-void i915_gem_render_state_fini(struct render_state *so)
+static void render_state_fini(struct render_state *so)
 {
 	i915_gem_object_ggtt_unpin(so->obj);
 	i915_gem_object_put(so->obj);
 }
 
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
-				  struct render_state *so)
+static int render_state_prepare(struct intel_engine_cs *engine,
+				struct render_state *so)
 {
 	int ret;
 
@@ -215,7 +225,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 
 	ret = render_state_setup(so);
 	if (ret) {
-		i915_gem_render_state_fini(so);
+		render_state_fini(so);
 		return ret;
 	}
 
@@ -227,7 +237,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(req->engine, &so);
+	ret = render_state_prepare(req->engine, &so);
 	if (ret)
 		return ret;
 
@@ -251,8 +261,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	}
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-
 out:
-	i915_gem_render_state_fini(&so);
+	render_state_fini(&so);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 6aaa3a10a630..c44fca8599bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -26,24 +26,6 @@
 
 #include <linux/types.h>
 
-struct intel_renderstate_rodata {
-	const u32 *reloc;
-	const u32 *batch;
-	const u32 batch_items;
-};
-
-struct render_state {
-	const struct intel_renderstate_rodata *rodata;
-	struct drm_i915_gem_object *obj;
-	u64 ggtt_offset;
-	int gen;
-	u32 aux_batch_size;
-	u32 aux_batch_offset;
-};
-
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
-void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
-				  struct render_state *so);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 52edbcc9bca0..bce37d0d431f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1795,38 +1795,6 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	return intel_logical_ring_advance_and_submit(request);
 }
 
-static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
-{
-	struct render_state so;
-	int ret;
-
-	ret = i915_gem_render_state_prepare(req->engine, &so);
-	if (ret)
-		return ret;
-
-	if (so.rodata == NULL)
-		return 0;
-
-	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
-					 so.rodata->batch_items * 4,
-					 I915_DISPATCH_SECURE);
-	if (ret)
-		goto out;
-
-	ret = req->engine->emit_bb_start(req,
-					 (so.ggtt_offset + so.aux_batch_offset),
-					 so.aux_batch_size,
-					 I915_DISPATCH_SECURE);
-	if (ret)
-		goto out;
-
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-
-out:
-	i915_gem_render_state_fini(&so);
-	return ret;
-}
-
 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 {
 	int ret;
@@ -1843,7 +1811,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 	if (ret)
 		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
-	return intel_lr_context_render_state_init(req);
+	return i915_gem_render_state_init(req);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index 5bd69852752c..08f6fea05a2c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -24,12 +24,13 @@
 #ifndef _INTEL_RENDERSTATE_H
 #define _INTEL_RENDERSTATE_H
 
-#include "i915_drv.h"
+#include <linux/types.h>
 
-extern const struct intel_renderstate_rodata gen6_null_state;
-extern const struct intel_renderstate_rodata gen7_null_state;
-extern const struct intel_renderstate_rodata gen8_null_state;
-extern const struct intel_renderstate_rodata gen9_null_state;
+struct intel_renderstate_rodata {
+	const u32 *reloc;
+	const u32 *batch;
+	const u32 batch_items;
+};
 
 #define RO_RENDERSTATE(_g)						\
 	const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
@@ -38,4 +39,9 @@ extern const struct intel_renderstate_rodata gen9_null_state;
 		.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
 	}
 
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
+
 #endif /* INTEL_RENDERSTATE_H */
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 17/18] drm/i915: Unify legacy/execlists submit_execbuf callbacks
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (15 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 16/18] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-22  8:45   ` Joonas Lahtinen
  2016-07-20 13:12 ` [PATCH 18/18] drm/i915: Simplify calling engine->sync_to Chris Wilson
                   ` (4 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

Now that emitting requests is identical between legacy and execlists, we
can use the same function to build up the ring for submitting to either
engine. (With the exception of i915_switch_context(), but in time that
will also be handled gracefully.)
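
That one remaining special case is handled inside the callee rather
than at each call site; roughly (switch_context_sketch is an
illustrative name for the i915_switch_context() hunk below):

	static int switch_context_sketch(struct drm_i915_gem_request *req)
	{
		/* Execlists carry the context in the request itself, so
		 * the legacy MI_SET_CONTEXT path is skipped here instead
		 * of being open-coded at every caller.
		 */
		if (i915.enable_execlists)
			return 0;

		/* ... legacy MI_SET_CONTEXT switch as before ... */
		return 0;
	}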

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |  20 -----
 drivers/gpu/drm/i915/i915_gem.c            |   2 -
 drivers/gpu/drm/i915/i915_gem_context.c    |   7 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  24 ++++--
 drivers/gpu/drm/i915/intel_lrc.c           | 123 -----------------------------
 drivers/gpu/drm/i915/intel_lrc.h           |   4 -
 6 files changed, 21 insertions(+), 159 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3f67431577e3..f188c9a9b746 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1705,18 +1705,6 @@ struct i915_virtual_gpu {
 	bool active;
 };
 
-struct i915_execbuffer_params {
-	struct drm_device               *dev;
-	struct drm_file                 *file;
-	uint32_t                        dispatch_flags;
-	uint32_t                        args_batch_start_offset;
-	uint64_t                        batch_obj_vm_offset;
-	struct intel_engine_cs *engine;
-	struct drm_i915_gem_object      *batch_obj;
-	struct i915_gem_context            *ctx;
-	struct drm_i915_gem_request     *request;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
 	unsigned int num_pipes_active;
@@ -2016,9 +2004,6 @@ struct drm_i915_private {
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		int (*execbuf_submit)(struct i915_execbuffer_params *params,
-				      struct drm_i915_gem_execbuffer2 *args,
-				      struct list_head *vmas);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 		void (*stop_engine)(struct intel_engine_cs *engine);
 
@@ -2990,11 +2975,6 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
-void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-					struct drm_i915_gem_request *req);
-int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
-				   struct drm_i915_gem_execbuffer2 *args,
-				   struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 77d7c0b012f4..9fdecef34fa8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4531,11 +4531,9 @@ int i915_gem_init(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 
 	if (!i915.enable_execlists) {
-		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
 		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
 		dev_priv->gt.stop_engine = intel_engine_stop;
 	} else {
-		dev_priv->gt.execbuf_submit = intel_execlists_submission;
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
 		dev_priv->gt.stop_engine = intel_logical_ring_stop;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e1eed0f449c6..72b21c7b7547 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -893,8 +893,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 
-	WARN_ON(i915.enable_execlists);
 	lockdep_assert_held(&req->i915->drm.struct_mutex);
+	if (i915.enable_execlists)
+		return 0;
 
 	if (!req->ctx->engine[engine->id].state) {
 		struct i915_gem_context *to = req->ctx;
@@ -942,9 +943,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-		ret = 0;
-		if (!i915.enable_execlists)
-			ret = i915_switch_context(req);
+		ret = i915_switch_context(req);
 		i915_add_request_no_flush(req);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2d9f1f4bc058..e302477418d8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -42,6 +42,18 @@
 
 #define BATCH_OFFSET_BIAS (256*1024)
 
+struct i915_execbuffer_params {
+	struct drm_device               *dev;
+	struct drm_file                 *file;
+	u32				 dispatch_flags;
+	u32				 args_batch_start_offset;
+	u32				 batch_obj_vm_offset;
+	struct intel_engine_cs          *engine;
+	struct drm_i915_gem_object      *batch_obj;
+	struct i915_gem_context         *ctx;
+	struct drm_i915_gem_request     *request;
+};
+
 struct eb_vmas {
 	struct list_head vmas;
 	int and;
@@ -1117,7 +1129,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	return ctx;
 }
 
-void
+static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
@@ -1244,10 +1256,10 @@ err:
 		return ERR_PTR(ret);
 }
 
-int
-i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
-			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas)
+static int
+execbuf_submit(struct i915_execbuffer_params *params,
+	       struct drm_i915_gem_execbuffer2 *args,
+	       struct list_head *vmas)
 {
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
@@ -1636,7 +1648,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
 
-	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+	ret = execbuf_submit(params, args, &eb->vmas);
 err_request:
 	i915_gem_execbuffer_retire_commands(params);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bce37d0d431f..8d1589f0ea7e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -642,39 +642,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	spin_unlock_bh(&engine->execlist_lock);
 }
 
-static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
-				 struct list_head *vmas)
-{
-	const unsigned other_rings = ~intel_engine_flag(req->engine);
-	struct i915_vma *vma;
-	uint32_t flush_domains = 0;
-	bool flush_chipset = false;
-	int ret;
-
-	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req);
-			if (ret)
-				return ret;
-		}
-
-		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-			flush_chipset |= i915_gem_clflush_object(obj, false);
-
-		flush_domains |= obj->base.write_domain;
-	}
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
-
-	/* Unconditionally invalidate gpu caches and ensure that we do flush
-	 * any residual writes from the previous batch.
-	 */
-	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
-}
-
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
@@ -776,96 +743,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	return 0;
 }
 
-/**
- * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
- * @params: execbuffer call parameters.
- * @args: execbuffer call arguments.
- * @vmas: list of vmas.
- *
- * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
- * away the submission details of the execbuffer ioctl call.
- *
- * Return: non-zero if the submission fails.
- */
-int intel_execlists_submission(struct i915_execbuffer_params *params,
-			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas)
-{
-	struct drm_device       *dev = params->dev;
-	struct intel_engine_cs *engine = params->engine;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ring *ring = params->request->ring;
-	u64 exec_start;
-	int instp_mode;
-	u32 instp_mask;
-	int ret;
-
-	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-	instp_mask = I915_EXEC_CONSTANTS_MASK;
-	switch (instp_mode) {
-	case I915_EXEC_CONSTANTS_REL_GENERAL:
-	case I915_EXEC_CONSTANTS_ABSOLUTE:
-	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine->id != RCS) {
-			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
-			return -EINVAL;
-		}
-
-		if (instp_mode != dev_priv->relative_constants_mode) {
-			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
-				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
-				return -EINVAL;
-			}
-
-			/* The HW changed the meaning on this bit on gen6 */
-			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
-		}
-		break;
-	default:
-		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
-		return -EINVAL;
-	}
-
-	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		DRM_DEBUG("sol reset is gen7 only\n");
-		return -EINVAL;
-	}
-
-	ret = execlists_move_to_gpu(params->request, vmas);
-	if (ret)
-		return ret;
-
-	if (engine->id == RCS &&
-	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
-
-		dev_priv->relative_constants_mode = instp_mode;
-	}
-
-	exec_start = params->batch_obj_vm_offset +
-		     args->batch_start_offset;
-
-	ret = engine->emit_bb_start(params->request,
-				    exec_start, args->batch_len,
-				    params->dispatch_flags);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
-
-	i915_gem_execbuffer_move_to_active(vmas, params->request);
-
-	return 0;
-}
-
 void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req, *tmp;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 212ee7c43438..0f9c9925985c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -95,10 +95,6 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
-struct i915_execbuffer_params;
-int intel_execlists_submission(struct i915_execbuffer_params *params,
-			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas);
 
 void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* [PATCH 18/18] drm/i915: Simplify calling engine->sync_to
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (16 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 17/18] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
@ 2016-07-20 13:12 ` Chris Wilson
  2016-07-22  8:59   ` Joonas Lahtinen
  2016-07-20 13:54 ` ✓ Ro.CI.BAT: success for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Patchwork
                   ` (3 subsequent siblings)
  21 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 13:12 UTC (permalink / raw)
  To: intel-gfx

Since requests can no longer be generated as a side-effect of
intel_ring_begin(), we know that the seqno will be unchanged during
ring-emission. This predictability then means we do not have to check
for the seqno wrapping around whilst emitting the semaphore for
engine->sync_to().
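
For the gen6 path the emitted wait then looks roughly like this
(sketch; the full register setup is in the gen6_ring_sync() hunk
below):

	/* Wait until the signaller's mailbox reaches the signal request's
	 * seqno.  The MI_SEMAPHORE_MBOX comparison is strictly
	 * greater-than, hence the -1; and because intel_ring_begin() can
	 * no longer allocate a new request behind our back, the seqno
	 * cannot change (or wrap) while we emit, so the old no-op
	 * fallback goes away.
	 */
	intel_ring_emit(waiter, dw1 | wait_mbox);
	intel_ring_emit(waiter, signal->fence.seqno - 1);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);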

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h         |  2 +-
 drivers/gpu/drm/i915/i915_gem.c         | 13 ++-----
 drivers/gpu/drm/i915/i915_gem_request.c |  9 +----
 drivers/gpu/drm/i915/intel_ringbuffer.c | 64 ++++++++++++---------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  5 ++-
 5 files changed, 30 insertions(+), 63 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f188c9a9b746..c374b8687d87 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1757,7 +1757,7 @@ struct drm_i915_private {
 	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs engine[I915_NUM_ENGINES];
 	struct drm_i915_gem_object *semaphore_obj;
-	uint32_t last_seqno, next_seqno;
+	u32 next_seqno;
 
 	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9fdecef34fa8..0b7a0e6f9dd1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2864,22 +2864,15 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		i915_gem_object_retire_request(obj, from);
 	} else {
 		int idx = intel_engine_sync_index(from->engine, to->engine);
-		u32 seqno = i915_gem_request_get_seqno(from);
-
-		if (seqno <= from->engine->semaphore.sync_seqno[idx])
+		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
 			return 0;
 
 		trace_i915_gem_ring_sync_to(to, from);
-		ret = to->engine->semaphore.sync_to(to, from->engine, seqno);
+		ret = to->engine->semaphore.sync_to(to, from);
 		if (ret)
 			return ret;
 
-		/* We use last_read_req because sync_to()
-		 * might have just caused seqno wrap under
-		 * the radar.
-		 */
-		from->engine->semaphore.sync_seqno[idx] =
-			i915_gem_request_get_seqno(obj->last_read_req[from->engine->id]);
+		from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 3e633b47213c..dfdb86c8a433 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -264,14 +264,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 	if (ret)
 		return ret;
 
-	/* Carefully set the last_seqno value so that wrap
-	 * detection still works
-	 */
 	dev_priv->next_seqno = seqno;
-	dev_priv->last_seqno = seqno - 1;
-	if (dev_priv->last_seqno == 0)
-		dev_priv->last_seqno--;
-
 	return 0;
 }
 
@@ -288,7 +281,7 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 		dev_priv->next_seqno = 1;
 	}
 
-	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+	*seqno = dev_priv->next_seqno++;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 8ae25bcc876e..bfeb16025327 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1494,12 +1494,6 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
-					      u32 seqno)
-{
-	return dev_priv->last_seqno < seqno;
-}
-
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
@@ -1509,24 +1503,23 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
  */
 
 static int
-gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen8_ring_sync(struct drm_i915_gem_request *wait,
+	       struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = waiter_req->ring;
-	struct drm_i915_private *dev_priv = waiter_req->i915;
-	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
+	struct intel_ring *waiter = wait->ring;
+	struct drm_i915_private *dev_priv = wait->i915;
+	u64 offset = GEN8_WAIT_OFFSET(wait->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
-	ret = intel_ring_begin(waiter_req, 4);
+	ret = intel_ring_begin(wait, 4);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
 				MI_SEMAPHORE_GLOBAL_GTT |
 				MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter, signal->fence.seqno);
 	intel_ring_emit(waiter, lower_32_bits(offset));
 	intel_ring_emit(waiter, upper_32_bits(offset));
 	intel_ring_advance(waiter);
@@ -1536,48 +1529,37 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	 * We do this on the i915_switch_context() following the wait and
 	 * before the dispatch.
 	 */
-	ppgtt = waiter_req->ctx->ppgtt;
-	if (ppgtt && waiter_req->engine->id != RCS)
-		ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
+	ppgtt = wait->ctx->ppgtt;
+	if (ppgtt && wait->engine->id != RCS)
+		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
 	return 0;
 }
 
 static int
-gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen6_ring_sync(struct drm_i915_gem_request *wait,
+	       struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = waiter_req->ring;
+	struct intel_ring *waiter = wait->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
+	u32 wait_mbox = signal->engine->semaphore.mbox.wait[wait->engine->id];
 	int ret;
 
-	/* Throughout all of the GEM code, seqno passed implies our current
-	 * seqno is >= the last seqno executed. However for hardware the
-	 * comparison is strictly greater than.
-	 */
-	seqno -= 1;
-
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(waiter_req, 4);
+	ret = intel_ring_begin(wait, 4);
 	if (ret)
 		return ret;
 
-	/* If seqno wrap happened, omit the wait with no-ops */
-	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
-		intel_ring_emit(waiter, dw1 | wait_mbox);
-		intel_ring_emit(waiter, seqno);
-		intel_ring_emit(waiter, 0);
-		intel_ring_emit(waiter, MI_NOOP);
-	} else {
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-	}
+	intel_ring_emit(waiter, dw1 | wait_mbox);
+	/* Throughout all of the GEM code, seqno passed implies our current
+	 * seqno is >= the last seqno executed. However for hardware the
+	 * comparison is strictly greater than.
+	 */
+	intel_ring_emit(waiter, signal->fence.seqno - 1);
+	intel_ring_emit(waiter, 0);
+	intel_ring_emit(waiter, MI_NOOP);
 	intel_ring_advance(waiter);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 08e86204a3d5..65cb6adf26ca 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -275,9 +275,8 @@ struct intel_engine_cs {
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct drm_i915_gem_request *to_req,
-				   struct intel_engine_cs *from,
-				   u32 seqno);
+		int	(*sync_to)(struct drm_i915_gem_request *to,
+				   struct drm_i915_gem_request *from);
 		int	(*signal)(struct drm_i915_gem_request *signaller_req);
 	} semaphore;
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* ✓ Ro.CI.BAT: success for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (17 preceding siblings ...)
  2016-07-20 13:12 ` [PATCH 18/18] drm/i915: Simplify calling engine->sync_to Chris Wilson
@ 2016-07-20 13:54 ` Patchwork
  2016-07-20 15:10 ` ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev2) Patchwork
                   ` (2 subsequent siblings)
  21 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2016-07-20 13:54 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
URL   : https://patchwork.freedesktop.org/series/10090/
State : success

== Summary ==

Series 10090v1 Series without cover letter
http://patchwork.freedesktop.org/api/1.0/series/10090/revisions/1/mbox

Test gem_sync:
        Subgroup basic-store-each:
                dmesg-fail -> PASS       (ro-bdw-i7-5600u)
Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-a:
                incomplete -> PASS       (fi-hsw-i7-4770k)

fi-hsw-i7-4770k  total:244  pass:216  dwarn:0   dfail:0   fail:8   skip:20 
fi-kbl-qkkr      total:244  pass:180  dwarn:27  dfail:1   fail:8   skip:28 
fi-skl-i5-6260u  total:244  pass:224  dwarn:0   dfail:0   fail:8   skip:12 
fi-skl-i7-6700k  total:244  pass:210  dwarn:0   dfail:0   fail:8   skip:26 
fi-snb-i7-2600   total:244  pass:196  dwarn:0   dfail:0   fail:8   skip:40 
ro-bdw-i5-5250u  total:244  pass:219  dwarn:4   dfail:0   fail:8   skip:13 
ro-bdw-i7-5600u  total:244  pass:204  dwarn:0   dfail:0   fail:8   skip:32 
ro-bsw-n3050     total:218  pass:173  dwarn:0   dfail:0   fail:2   skip:42 
ro-byt-n2820     total:244  pass:197  dwarn:0   dfail:0   fail:9   skip:38 
ro-hsw-i3-4010u  total:244  pass:212  dwarn:0   dfail:0   fail:8   skip:24 
ro-hsw-i7-4770r  total:244  pass:212  dwarn:0   dfail:0   fail:8   skip:24 
ro-ilk-i7-620lm  total:244  pass:172  dwarn:0   dfail:0   fail:9   skip:63 
ro-ilk1-i5-650   total:239  pass:172  dwarn:0   dfail:0   fail:9   skip:58 
ro-ivb-i7-3770   total:244  pass:203  dwarn:0   dfail:0   fail:8   skip:33 
ro-skl3-i5-6260u total:244  pass:224  dwarn:0   dfail:0   fail:8   skip:12 
ro-snb-i7-2620M  total:244  pass:193  dwarn:0   dfail:0   fail:9   skip:42 
ro-bdw-i7-5557U failed to connect after reboot

Results at /archive/results/CI_IGT_test/RO_Patchwork_1544/

30920eb drm-intel-nightly: 2016y-07m-20d-13h-08m-56s UTC integration manifest
755a3c7 drm/i915: Simplify calling engine->sync_to
5993ece drm/i915: Unify legacy/execlists submit_execbuf callbacks
1d1b866 drm/i915: Remove duplicate golden render state init from execlists
5450fac drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores
fb13170 drm/i915: Reuse legacy breadcrumbs + tail emission
805d6ef drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal()
3a0cc9c drm/i915: Unify request submission
412f0be drm/i915: Convert engine->write_tail to operate on a request
789c2bc drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START
60a9841 drm/i915: Simplify request_alloc by returning the allocated request
3f35c7f4 drm/i915: Remove obsolete engine->gpu_caches_dirty
67b32da drm/i915: Rename intel_pin_and_map_ring()
ae49414 drm/i915: Rename residual ringbuf parameters
768ae5b drm/i915: Rename struct intel_ringbuffer to struct intel_ring
808f502 drm/i915: Rename intel_context[engine].ringbuf
57211f0 drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs
b1e530a drm/i915: Rename request->ringbuf to request->ring
0af9f94 drm/i915: Unify intel_logical_ring_emit and intel_ring_emit

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring
  2016-07-20 13:11 ` [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
@ 2016-07-20 14:12   ` Dave Gordon
  2016-07-20 14:51     ` Dave Gordon
  2016-07-20 15:00     ` [PATCH] drm/i915: Convert stray struct intel_engine_cs *ring Chris Wilson
  2016-07-21 11:28   ` [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring Joonas Lahtinen
  1 sibling, 2 replies; 72+ messages in thread
From: Dave Gordon @ 2016-07-20 14:12 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 20/07/16 14:11, Chris Wilson wrote:
> Now that we have disambiguated ring and engine, we can use the clearer
> and more consistent name for the intel_ringbuffer pointer in the
> request.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

You missed a few instances of 'ring' meaning engine:

i915_gem_execbuffer.c:	       struct intel_engine_cs **ring)
intel_mocs.h:int intel_mocs_init_engine(struct intel_engine_cs *ring);
intel_ringbuffer.c:gen5_seqno_barrier(struct intel_engine_cs *ring)
intel_ringbuffer.h:	void		(*irq_enable)(struct intel_engine_cs *ring);
intel_ringbuffer.h:	void		(*irq_disable)(struct intel_engine_cs *ring);
intel_ringbuffer.h:	int		(*init_hw)(struct intel_engine_cs *ring);
intel_ringbuffer.h:	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
intel_ringbuffer.h:	void		(*cleanup)(struct intel_engine_cs *ring);

I think we have to purge every last trace of this usage before using 
'ring' as shorthand for 'ringbuf[fer]'.

.Dave.

> ---
>   drivers/gpu/drm/i915/i915_gem_context.c    |  4 +-
>   drivers/gpu/drm/i915/i915_gem_execbuffer.c |  4 +-
>   drivers/gpu/drm/i915/i915_gem_gtt.c        |  6 +-
>   drivers/gpu/drm/i915/i915_gem_request.c    | 16 +++---
>   drivers/gpu/drm/i915/i915_gem_request.h    |  2 +-
>   drivers/gpu/drm/i915/i915_gpu_error.c      | 20 +++----
>   drivers/gpu/drm/i915/intel_display.c       | 10 ++--
>   drivers/gpu/drm/i915/intel_lrc.c           | 57 +++++++++---------
>   drivers/gpu/drm/i915/intel_mocs.c          | 36 ++++++------
>   drivers/gpu/drm/i915/intel_overlay.c       |  8 +--
>   drivers/gpu/drm/i915/intel_ringbuffer.c    | 92 +++++++++++++++---------------
>   11 files changed, 126 insertions(+), 129 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index b6d10bd763a0..16138c4ff7db 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -552,7 +552,7 @@ static inline int
>   mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>   {
>   	struct drm_i915_private *dev_priv = req->i915;
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	u32 flags = hw_flags | MI_MM_SPACE_GTT;
>   	const int num_rings =
>   		/* Use an extended w/a on ivb+ if signalling from other rings */
> @@ -654,7 +654,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>   static int remap_l3(struct drm_i915_gem_request *req, int slice)
>   {
>   	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int i, ret;
>
>   	if (!remap_info)
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index e2c4d99a1e7f..501a1751d432 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1173,7 +1173,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
>   static int
>   i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret, i;
>
>   	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
> @@ -1303,7 +1303,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>
>   	if (params->engine->id == RCS &&
>   	    instp_mode != dev_priv->relative_constants_mode) {
> -		struct intel_ringbuffer *ring = params->request->ringbuf;
> +		struct intel_ringbuffer *ring = params->request->ring;
>
>   		ret = intel_ring_begin(params->request, 4);
>   		if (ret)
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index abc439be2049..a48329baf432 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
>   			  unsigned entry,
>   			  dma_addr_t addr)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	BUG_ON(entry >= 4);
> @@ -1660,7 +1660,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
>   static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>   			 struct drm_i915_gem_request *req)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	/* NB: TLBs must be flushed and invalidated before a switch */
> @@ -1688,7 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>   static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
>   			  struct drm_i915_gem_request *req)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	/* NB: TLBs must be flushed and invalidated before a switch */
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 60a3a343b3a8..0f415606a383 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -170,7 +170,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
>   	 * Note this requires that we are always called in request
>   	 * completion order.
>   	 */
> -	request->ringbuf->last_retired_head = request->postfix;
> +	request->ring->last_retired_head = request->postfix;
>
>   	i915_gem_request_remove_from_client(request);
>
> @@ -425,7 +425,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>   			bool flush_caches)
>   {
>   	struct intel_engine_cs *engine;
> -	struct intel_ringbuffer *ringbuf;
> +	struct intel_ringbuffer *ring;
>   	u32 request_start;
>   	u32 reserved_tail;
>   	int ret;
> @@ -434,14 +434,14 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>   		return;
>
>   	engine = request->engine;
> -	ringbuf = request->ringbuf;
> +	ring = request->ring;
>
>   	/*
>   	 * To ensure that this call will not fail, space for its emissions
>   	 * should already have been reserved in the ring buffer. Let the ring
>   	 * know that it is time to use that space up.
>   	 */
> -	request_start = intel_ring_get_tail(ringbuf);
> +	request_start = intel_ring_get_tail(ring);
>   	reserved_tail = request->reserved_space;
>   	request->reserved_space = 0;
>
> @@ -488,21 +488,21 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>   	 * GPU processing the request, we never over-estimate the
>   	 * position of the head.
>   	 */
> -	request->postfix = intel_ring_get_tail(ringbuf);
> +	request->postfix = intel_ring_get_tail(ring);
>
>   	if (i915.enable_execlists) {
>   		ret = engine->emit_request(request);
>   	} else {
>   		ret = engine->add_request(request);
>
> -		request->tail = intel_ring_get_tail(ringbuf);
> +		request->tail = intel_ring_get_tail(ring);
>   	}
>   	/* Not allowed to fail! */
>   	WARN(ret, "emit|add_request failed: %d!\n", ret);
>   	/* Sanity check that the reserved size was large enough. */
> -	ret = intel_ring_get_tail(ringbuf) - request_start;
> +	ret = intel_ring_get_tail(ring) - request_start;
>   	if (ret < 0)
> -		ret += ringbuf->size;
> +		ret += ring->size;
>   	WARN_ONCE(ret > reserved_tail,
>   		  "Not enough space reserved (%d bytes) "
>   		  "for adding the request (%d bytes)\n",
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
> index e06e81f459df..68868d825d9d 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.h
> +++ b/drivers/gpu/drm/i915/i915_gem_request.h
> @@ -61,7 +61,7 @@ struct drm_i915_gem_request {
>   	 */
>   	struct i915_gem_context *ctx;
>   	struct intel_engine_cs *engine;
> -	struct intel_ringbuffer *ringbuf;
> +	struct intel_ringbuffer *ring;
>   	struct intel_signal_node signaling;
>
>   	/** GEM sequence number associated with the previous request,
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 4d39c7284605..09997c6adcd2 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -1091,7 +1091,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
>   		request = i915_gem_find_active_request(engine);
>   		if (request) {
>   			struct i915_address_space *vm;
> -			struct intel_ringbuffer *rb;
> +			struct intel_ringbuffer *ring;
>
>   			vm = request->ctx->ppgtt ?
>   				&request->ctx->ppgtt->base : &ggtt->base;
> @@ -1108,7 +1108,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
>   			if (HAS_BROKEN_CS_TLB(dev_priv))
>   				error->ring[i].wa_batchbuffer =
>   					i915_error_ggtt_object_create(dev_priv,
> -							     engine->scratch.obj);
> +								      engine->scratch.obj);
>
>   			if (request->pid) {
>   				struct task_struct *task;
> @@ -1125,23 +1125,21 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
>   			error->simulated |=
>   				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
>
> -			rb = request->ringbuf;
> -			error->ring[i].cpu_ring_head = rb->head;
> -			error->ring[i].cpu_ring_tail = rb->tail;
> +			ring = request->ring;
> +			error->ring[i].cpu_ring_head = ring->head;
> +			error->ring[i].cpu_ring_tail = ring->tail;
>   			error->ring[i].ringbuffer =
>   				i915_error_ggtt_object_create(dev_priv,
> -							      rb->obj);
> +							      ring->obj);
>   		}
>
>   		error->ring[i].hws_page =
>   			i915_error_ggtt_object_create(dev_priv,
>   						      engine->status_page.obj);
>
> -		if (engine->wa_ctx.obj) {
> -			error->ring[i].wa_ctx =
> -				i915_error_ggtt_object_create(dev_priv,
> -							      engine->wa_ctx.obj);
> -		}
> +		error->ring[i].wa_ctx =
> +			i915_error_ggtt_object_create(dev_priv,
> +						      engine->wa_ctx.obj);
>
>   		i915_gem_record_active_context(engine, error, &error->ring[i]);
>
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index d18ed32e6a31..d1932840a268 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -11123,7 +11123,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
>   				 struct drm_i915_gem_request *req,
>   				 uint32_t flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>   	u32 flip_mask;
>   	int ret;
> @@ -11157,7 +11157,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
>   				 struct drm_i915_gem_request *req,
>   				 uint32_t flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>   	u32 flip_mask;
>   	int ret;
> @@ -11188,7 +11188,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
>   				 struct drm_i915_gem_request *req,
>   				 uint32_t flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	struct drm_i915_private *dev_priv = to_i915(dev);
>   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>   	uint32_t pf, pipesrc;
> @@ -11226,7 +11226,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
>   				 struct drm_i915_gem_request *req,
>   				 uint32_t flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	struct drm_i915_private *dev_priv = to_i915(dev);
>   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>   	uint32_t pf, pipesrc;
> @@ -11261,7 +11261,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>   				 struct drm_i915_gem_request *req,
>   				 uint32_t flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>   	uint32_t plane_bit = 0;
>   	int len, ret;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 8bf2ea5a2de3..c3542eb338ca 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -714,7 +714,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
>   			return ret;
>   	}
>
> -	request->ringbuf = ce->ringbuf;
> +	request->ring = ce->ringbuf;
>
>   	if (i915.enable_guc_submission) {
>   		/*
> @@ -770,11 +770,11 @@ err_unpin:
>   static int
>   intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>   {
> -	struct intel_ringbuffer *ringbuf = request->ringbuf;
> +	struct intel_ringbuffer *ring = request->ring;
>   	struct intel_engine_cs *engine = request->engine;
>
> -	intel_ring_advance(ringbuf);
> -	request->tail = ringbuf->tail;
> +	intel_ring_advance(ring);
> +	request->tail = ring->tail;
>
>   	/*
>   	 * Here we add two extra NOOPs as padding to avoid
> @@ -782,9 +782,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>   	 *
>   	 * Caller must reserve WA_TAIL_DWORDS for us!
>   	 */
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>
>   	/* We keep the previous context alive until we retire the following
>   	 * request. This ensures that any the context object is still pinned
> @@ -821,7 +821,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>   	struct drm_device       *dev = params->dev;
>   	struct intel_engine_cs *engine = params->engine;
>   	struct drm_i915_private *dev_priv = to_i915(dev);
> -	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
> +	struct intel_ringbuffer *ring = params->request->ring;
>   	u64 exec_start;
>   	int instp_mode;
>   	u32 instp_mask;
> @@ -833,7 +833,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>   	case I915_EXEC_CONSTANTS_REL_GENERAL:
>   	case I915_EXEC_CONSTANTS_ABSOLUTE:
>   	case I915_EXEC_CONSTANTS_REL_SURFACE:
> -		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
> +		if (instp_mode != 0 && engine->id != RCS) {
>   			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
>   			return -EINVAL;
>   		}
> @@ -862,17 +862,17 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>   	if (ret)
>   		return ret;
>
> -	if (engine == &dev_priv->engine[RCS] &&
> +	if (engine->id == RCS &&
>   	    instp_mode != dev_priv->relative_constants_mode) {
>   		ret = intel_ring_begin(params->request, 4);
>   		if (ret)
>   			return ret;
>
> -		intel_ring_emit(ringbuf, MI_NOOP);
> -		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
> -		intel_ring_emit_reg(ringbuf, INSTPM);
> -		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
> -		intel_ring_advance(ringbuf);
> +		intel_ring_emit(ring, MI_NOOP);
> +		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> +		intel_ring_emit_reg(ring, INSTPM);
> +		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
> +		intel_ring_advance(ring);
>
>   		dev_priv->relative_constants_mode = instp_mode;
>   	}
> @@ -1030,7 +1030,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>   {
>   	int ret, i;
>   	struct intel_engine_cs *engine = req->engine;
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	struct i915_workarounds *w = &req->i915->workarounds;
>
>   	if (w->count == 0)
> @@ -1045,14 +1045,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>   	if (ret)
>   		return ret;
>
> -	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
> +	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
>   	for (i = 0; i < w->count; i++) {
> -		intel_ring_emit_reg(ringbuf, w->reg[i].addr);
> -		intel_ring_emit(ringbuf, w->reg[i].value);
> +		intel_ring_emit_reg(ring, w->reg[i].addr);
> +		intel_ring_emit(ring, w->reg[i].value);
>   	}
> -	intel_ring_emit(ringbuf, MI_NOOP);
> +	intel_ring_emit(ring, MI_NOOP);
>
> -	intel_ring_advance(ringbuf);
> +	intel_ring_advance(ring);
>
>   	engine->gpu_caches_dirty = true;
>   	ret = logical_ring_flush_all_caches(req);
> @@ -1546,7 +1546,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
>   static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>   {
>   	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
>   	int i, ret;
>
> @@ -1573,7 +1573,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>   static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
>   			      u64 offset, unsigned dispatch_flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
>   	int ret;
>
> @@ -1630,8 +1630,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
>   			   u32 invalidate_domains,
>   			   u32 unused)
>   {
> -	struct intel_ringbuffer *ring = request->ringbuf;
> -	struct intel_engine_cs *engine = ring->engine;
> +	struct intel_ringbuffer *ring = request->ring;
>   	uint32_t cmd;
>   	int ret;
>
> @@ -1650,7 +1649,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
>
>   	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
>   		cmd |= MI_INVALIDATE_TLB;
> -		if (engine->id == VCS)
> +		if (request->engine->id == VCS)
>   			cmd |= MI_INVALIDATE_BSD;
>   	}
>
> @@ -1669,7 +1668,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
>   				  u32 invalidate_domains,
>   				  u32 flush_domains)
>   {
> -	struct intel_ringbuffer *ring = request->ringbuf;
> +	struct intel_ringbuffer *ring = request->ring;
>   	struct intel_engine_cs *engine = request->engine;
>   	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>   	bool vf_flush_wa = false, dc_flush_wa = false;
> @@ -1783,7 +1782,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
>
>   static int gen8_emit_request(struct drm_i915_gem_request *request)
>   {
> -	struct intel_ringbuffer *ring = request->ringbuf;
> +	struct intel_ringbuffer *ring = request->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
> @@ -1806,7 +1805,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
>
>   static int gen8_emit_request_render(struct drm_i915_gem_request *request)
>   {
> -	struct intel_ringbuffer *ring = request->ringbuf;
> +	struct intel_ringbuffer *ring = request->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
> diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
> index 2743424f2746..fe63c7e79fb1 100644
> --- a/drivers/gpu/drm/i915/intel_mocs.c
> +++ b/drivers/gpu/drm/i915/intel_mocs.c
> @@ -276,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
>   static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>   				   const struct drm_i915_mocs_table *table)
>   {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	enum intel_engine_id engine = req->engine->id;
>   	unsigned int index;
>   	int ret;
> @@ -288,11 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>   	if (ret)
>   		return ret;
>
> -	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
> +	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
>
>   	for (index = 0; index < table->size; index++) {
> -		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
> -		intel_ring_emit(ringbuf, table->table[index].control_value);
> +		intel_ring_emit_reg(ring, mocs_register(engine, index));
> +		intel_ring_emit(ring, table->table[index].control_value);
>   	}
>
>   	/*
> @@ -304,12 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>   	 * that value to all the used entries.
>   	 */
>   	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
> -		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
> -		intel_ring_emit(ringbuf, table->table[0].control_value);
> +		intel_ring_emit_reg(ring, mocs_register(engine, index));
> +		intel_ring_emit(ring, table->table[0].control_value);
>   	}
>
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>
>   	return 0;
>   }
> @@ -336,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
>   static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>   				const struct drm_i915_mocs_table *table)
>   {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	unsigned int i;
>   	int ret;
>
> @@ -347,18 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>   	if (ret)
>   		return ret;
>
> -	intel_ring_emit(ringbuf,
> +	intel_ring_emit(ring,
>   			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
>
>   	for (i = 0; i < table->size/2; i++) {
> -		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
> -		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1));
> +		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
> +		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
>   	}
>
>   	if (table->size & 0x01) {
>   		/* Odd table size - 1 left over */
> -		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
> -		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
> +		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
> +		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
>   		i++;
>   	}
>
> @@ -368,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>   	 * they are reserved by the hardware.
>   	 */
>   	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
> -		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
> -		intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
> +		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
> +		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
>   	}
>
> -	intel_ring_emit(ringbuf, MI_NOOP);
> -	intel_ring_advance(ringbuf);
> +	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>
>   	return 0;
>   }
> diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> index 92722e614955..84b8f74bd13c 100644
> --- a/drivers/gpu/drm/i915/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/intel_overlay.c
> @@ -253,7 +253,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
>
>   	overlay->active = true;
>
> -	ring = req->ringbuf;
> +	ring = req->ring;
>   	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
>   	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
>   	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
> @@ -295,7 +295,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
>   		return ret;
>   	}
>
> -	ring = req->ringbuf;
> +	ring = req->ring;
>   	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
>   	intel_ring_emit(ring, flip_addr);
>   	intel_ring_advance(ring);
> @@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
>   		return ret;
>   	}
>
> -	ring = req->ringbuf;
> +	ring = req->ring;
>   	/* wait for overlay to go idle */
>   	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
>   	intel_ring_emit(ring, flip_addr);
> @@ -438,7 +438,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
>   			return ret;
>   		}
>
> -		ring = req->ringbuf;
> +		ring = req->ring;
>   		intel_ring_emit(ring,
>   				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
>   		intel_ring_emit(ring, MI_NOOP);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index da8134d43b26..ac51e4885046 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -70,7 +70,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
>   		       u32	invalidate_domains,
>   		       u32	flush_domains)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	u32 cmd;
>   	int ret;
>
> @@ -97,7 +97,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
>   		       u32	invalidate_domains,
>   		       u32	flush_domains)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	u32 cmd;
>   	int ret;
>
> @@ -187,7 +187,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
>   static int
>   intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	u32 scratch_addr =
>   		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>   	int ret;
> @@ -224,7 +224,7 @@ static int
>   gen6_render_ring_flush(struct drm_i915_gem_request *req,
>   		       u32 invalidate_domains, u32 flush_domains)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	u32 scratch_addr =
>   		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>   	u32 flags = 0;
> @@ -277,7 +277,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
>   static int
>   gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(req, 4);
> @@ -299,7 +299,7 @@ static int
>   gen7_render_ring_flush(struct drm_i915_gem_request *req,
>   		       u32 invalidate_domains, u32 flush_domains)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	u32 scratch_addr =
>   		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>   	u32 flags = 0;
> @@ -364,7 +364,7 @@ static int
>   gen8_emit_pipe_control(struct drm_i915_gem_request *req,
>   		       u32 flags, u32 scratch_addr)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(req, 6);
> @@ -680,7 +680,7 @@ err:
>
>   static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	struct i915_workarounds *w = &req->i915->workarounds;
>   	int ret, i;
>
> @@ -1324,7 +1324,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
>   			   unsigned int num_dwords)
>   {
>   #define MBOX_UPDATE_DWORDS 8
> -	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> +	struct intel_ringbuffer *signaller = signaller_req->ring;
>   	struct drm_i915_private *dev_priv = signaller_req->i915;
>   	struct intel_engine_cs *waiter;
>   	enum intel_engine_id id;
> @@ -1366,7 +1366,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>   			   unsigned int num_dwords)
>   {
>   #define MBOX_UPDATE_DWORDS 6
> -	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> +	struct intel_ringbuffer *signaller = signaller_req->ring;
>   	struct drm_i915_private *dev_priv = signaller_req->i915;
>   	struct intel_engine_cs *waiter;
>   	enum intel_engine_id id;
> @@ -1405,7 +1405,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>   static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>   		       unsigned int num_dwords)
>   {
> -	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
> +	struct intel_ringbuffer *signaller = signaller_req->ring;
>   	struct drm_i915_private *dev_priv = signaller_req->i915;
>   	struct intel_engine_cs *useless;
>   	enum intel_engine_id id;
> @@ -1449,7 +1449,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>   static int
>   gen6_add_request(struct drm_i915_gem_request *req)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	if (req->engine->semaphore.signal)
> @@ -1473,7 +1473,7 @@ static int
>   gen8_render_add_request(struct drm_i915_gem_request *req)
>   {
>   	struct intel_engine_cs *engine = req->engine;
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	if (engine->semaphore.signal)
> @@ -1518,7 +1518,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
>   	       struct intel_engine_cs *signaller,
>   	       u32 seqno)
>   {
> -	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
> +	struct intel_ringbuffer *waiter = waiter_req->ring;
>   	struct drm_i915_private *dev_priv = waiter_req->i915;
>   	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
>   	struct i915_hw_ppgtt *ppgtt;
> @@ -1552,7 +1552,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
>   	       struct intel_engine_cs *signaller,
>   	       u32 seqno)
>   {
> -	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
> +	struct intel_ringbuffer *waiter = waiter_req->ring;
>   	u32 dw1 = MI_SEMAPHORE_MBOX |
>   		  MI_SEMAPHORE_COMPARE |
>   		  MI_SEMAPHORE_REGISTER;
> @@ -1686,7 +1686,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>   	       u32     invalidate_domains,
>   	       u32     flush_domains)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(req, 2);
> @@ -1702,7 +1702,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>   static int
>   i9xx_add_request(struct drm_i915_gem_request *req)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(req, 4);
> @@ -1780,7 +1780,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
>   			 u64 offset, u32 length,
>   			 unsigned dispatch_flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(req, 2);
> @@ -1807,7 +1807,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
>   			 u64 offset, u32 len,
>   			 unsigned dispatch_flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	u32 cs_offset = req->engine->scratch.gtt_offset;
>   	int ret;
>
> @@ -1869,7 +1869,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
>   			 u64 offset, u32 len,
>   			 unsigned dispatch_flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(req, 2);
> @@ -2297,7 +2297,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
>   	 */
>   	request->reserved_space += LEGACY_REQUEST_SIZE;
>
> -	request->ringbuf = request->engine->buffer;
> +	request->ring = request->engine->buffer;
>
>   	ret = intel_ring_begin(request, 0);
>   	if (ret)
> @@ -2309,12 +2309,12 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
>
>   static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>   {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	struct intel_engine_cs *engine = req->engine;
>   	struct drm_i915_gem_request *target;
>
> -	intel_ring_update_space(ringbuf);
> -	if (ringbuf->space >= bytes)
> +	intel_ring_update_space(ring);
> +	if (ring->space >= bytes)
>   		return 0;
>
>   	/*
> @@ -2336,12 +2336,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>   		 * from multiple ringbuffers. Here, we must ignore any that
>   		 * aren't from the ringbuffer we're considering.
>   		 */
> -		if (target->ringbuf != ringbuf)
> +		if (target->ring != ring)
>   			continue;
>
>   		/* Would completion of this request free enough space? */
> -		space = __intel_ring_space(target->postfix, ringbuf->tail,
> -					   ringbuf->size);
> +		space = __intel_ring_space(target->postfix, ring->tail,
> +					   ring->size);
>   		if (space >= bytes)
>   			break;
>   	}
> @@ -2354,9 +2354,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>
>   int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>   {
> -	struct intel_ringbuffer *ringbuf = req->ringbuf;
> -	int remain_actual = ringbuf->size - ringbuf->tail;
> -	int remain_usable = ringbuf->effective_size - ringbuf->tail;
> +	struct intel_ringbuffer *ring = req->ring;
> +	int remain_actual = ring->size - ring->tail;
> +	int remain_usable = ring->effective_size - ring->tail;
>   	int bytes = num_dwords * sizeof(u32);
>   	int total_bytes, wait_bytes;
>   	bool need_wrap = false;
> @@ -2383,35 +2383,35 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>   		wait_bytes = total_bytes;
>   	}
>
> -	if (wait_bytes > ringbuf->space) {
> +	if (wait_bytes > ring->space) {
>   		int ret = wait_for_space(req, wait_bytes);
>   		if (unlikely(ret))
>   			return ret;
>
> -		intel_ring_update_space(ringbuf);
> -		if (unlikely(ringbuf->space < wait_bytes))
> +		intel_ring_update_space(ring);
> +		if (unlikely(ring->space < wait_bytes))
>   			return -EAGAIN;
>   	}
>
>   	if (unlikely(need_wrap)) {
> -		GEM_BUG_ON(remain_actual > ringbuf->space);
> -		GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
> +		GEM_BUG_ON(remain_actual > ring->space);
> +		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
>
>   		/* Fill the tail with MI_NOOP */
> -		memset(ringbuf->vaddr + ringbuf->tail, 0, remain_actual);
> -		ringbuf->tail = 0;
> -		ringbuf->space -= remain_actual;
> +		memset(ring->vaddr + ring->tail, 0, remain_actual);
> +		ring->tail = 0;
> +		ring->space -= remain_actual;
>   	}
>
> -	ringbuf->space -= bytes;
> -	GEM_BUG_ON(ringbuf->space < 0);
> +	ring->space -= bytes;
> +	GEM_BUG_ON(ring->space < 0);
>   	return 0;
>   }
>
>   /* Align the ring tail to a cacheline boundary */
>   int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int num_dwords =
>   		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
>   	int ret;
> @@ -2518,7 +2518,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
>   static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
>   			       u32 invalidate, u32 flush)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	uint32_t cmd;
>   	int ret;
>
> @@ -2564,7 +2564,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>   			      u64 offset, u32 len,
>   			      unsigned dispatch_flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	bool ppgtt = USES_PPGTT(req->i915) &&
>   			!(dispatch_flags & I915_DISPATCH_SECURE);
>   	int ret;
> @@ -2590,7 +2590,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>   			     u64 offset, u32 len,
>   			     unsigned dispatch_flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(req, 2);
> @@ -2615,7 +2615,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>   			      u64 offset, u32 len,
>   			      unsigned dispatch_flags)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	int ret;
>
>   	ret = intel_ring_begin(req, 2);
> @@ -2638,7 +2638,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>   static int gen6_ring_flush(struct drm_i915_gem_request *req,
>   			   u32 invalidate, u32 flush)
>   {
> -	struct intel_ringbuffer *ring = req->ringbuf;
> +	struct intel_ringbuffer *ring = req->ring;
>   	uint32_t cmd;
>   	int ret;
>
>


* Re: [PATCH 03/18] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs
  2016-07-20 13:11 ` [PATCH 03/18] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs Chris Wilson
@ 2016-07-20 14:23   ` Dave Gordon
  2016-07-21 11:32   ` Joonas Lahtinen
  1 sibling, 0 replies; 72+ messages in thread
From: Dave Gordon @ 2016-07-20 14:23 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 20/07/16 14:11, Chris Wilson wrote:
> Having ringbuf->ring point to an engine is confusing, so rename it once
> again to ring->engine.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++++++------
>   1 file changed, 6 insertions(+), 6 deletions(-)

Doesn't do what it says in the commit message (which sounded like a good 
idea). This patch actually just renames the function 
intel_init_ring_buffer() to intel_init_engine(). However /most/ of the 
code in that function is to do with initialising a ringbuffer!
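
If the rename is to mean anything, the ringbuffer-specific part
probably wants splitting out, something like this (hypothetical helper
name, just to sketch the split, not code from the series):

static int intel_init_ring(struct intel_engine_cs *engine,
			   struct intel_ringbuffer *ring)
{
	/* allocate, pin and map the buffer, reset head/tail/space, ... */
	return 0;
}

static int intel_init_engine(struct intel_engine_cs *engine)
{
	/* engine-wide setup: status page, breadcrumbs, hangcheck, ... */
	return intel_init_ring(engine, engine->buffer);
}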

.Dave.

> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index ac51e4885046..3cfbfe40f6e8 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2171,7 +2171,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
>   	i915_gem_context_put(ctx);
>   }
>
> -static int intel_init_ring_buffer(struct intel_engine_cs *engine)
> +static int intel_init_engine(struct intel_engine_cs *engine)
>   {
>   	struct drm_i915_private *dev_priv = engine->i915;
>   	struct intel_ringbuffer *ringbuf;
> @@ -2868,7 +2868,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
>   	engine->init_hw = init_render_ring;
>   	engine->cleanup = render_ring_cleanup;
>
> -	ret = intel_init_ring_buffer(engine);
> +	ret = intel_init_engine(engine);
>   	if (ret)
>   		return ret;
>
> @@ -2907,7 +2907,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
>   			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
>   	}
>
> -	return intel_init_ring_buffer(engine);
> +	return intel_init_engine(engine);
>   }
>
>   /**
> @@ -2921,7 +2921,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
>
>   	engine->flush = gen6_bsd_ring_flush;
>
> -	return intel_init_ring_buffer(engine);
> +	return intel_init_engine(engine);
>   }
>
>   int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
> @@ -2934,7 +2934,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
>   	if (INTEL_GEN(dev_priv) < 8)
>   		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
>
> -	return intel_init_ring_buffer(engine);
> +	return intel_init_engine(engine);
>   }
>
>   int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
> @@ -2951,7 +2951,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
>   		engine->irq_disable = hsw_vebox_irq_disable;
>   	}
>
> -	return intel_init_ring_buffer(engine);
> +	return intel_init_engine(engine);
>   }
>
>   int
>


* Re: [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring
  2016-07-20 14:12   ` Dave Gordon
@ 2016-07-20 14:51     ` Dave Gordon
  2016-07-20 15:00     ` [PATCH] drm/i915: Convert stray struct intel_engine_cs *ring Chris Wilson
  1 sibling, 0 replies; 72+ messages in thread
From: Dave Gordon @ 2016-07-20 14:51 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 20/07/16 15:12, Dave Gordon wrote:
> On 20/07/16 14:11, Chris Wilson wrote:
>> Now that we have disambiguated ring and engine, we can use the clearer
>> and more consistent name for the intel_ringbuffer pointer in the
>> request.
>>
>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>
> You missed a few instances of 'ring' meaning engine:
>
> i915_gem_execbuffer.c:           struct intel_engine_cs **ring)
> intel_mocs.h:int intel_mocs_init_engine(struct intel_engine_cs *ring);
> intel_ringbuffer.c:gen5_seqno_barrier(struct intel_engine_cs *ring)
> intel_ringbuffer.h:    void        (*irq_enable)(struct intel_engine_cs
> *ring);
> intel_ringbuffer.h:    void        (*irq_disable)(struct intel_engine_cs
> *ring);
> intel_ringbuffer.h:    int        (*init_hw)(struct intel_engine_cs *ring);
> intel_ringbuffer.h:    void        (*irq_seqno_barrier)(struct
> intel_engine_cs *ring);
> intel_ringbuffer.h:    void        (*cleanup)(struct intel_engine_cs
> *ring);
>
> I think we have to purge every last trace of this usage before using
> 'ring' as shorthand for 'ringbuf[fer]'.
>
> .Dave.

Oh yes, also there are lots of other things called 'ring' which aren't 
ringbuffers, such as an engine:

#define RING_ELSP(ring) _MMIO((ring)->mmio_base + 0x230)

or an engine id:

static i915_reg_t mocs_register(enum intel_engine_id ring, int index)
i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
int ring = req->engine->id;

or a different structure entirely:

struct drm_i915_error_ring *ring = &error->ring[ring_idx];

I could probably write some Cocci to find-and-rename all the things 
called 'ring' that weren't ringbuffers, but it would be easier not to 
overload the identifier with a host of different meanings in the first 
place. So I think adding any more instances of things called 'ring' 
should wait until the name has no other meanings, if ringbuffers are the 
thing you want it to unambiguously identify.
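
For example (suggested renames only, shown for illustration; not
necessarily the names the series will settle on):

#define RING_ELSP(engine)	_MMIO((engine)->mmio_base + 0x230)	/* takes an engine */
static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index);
struct drm_i915_error_engine;	/* rather than drm_i915_error_ring */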

.Dave.


* [PATCH] drm/i915: Convert stray struct intel_engine_cs *ring
  2016-07-20 14:12   ` Dave Gordon
  2016-07-20 14:51     ` Dave Gordon
@ 2016-07-20 15:00     ` Chris Wilson
  2016-07-27 13:15       ` Dave Gordon
  1 sibling, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-20 15:00 UTC (permalink / raw)
  To: intel-gfx

We still have a few uses of the identifier "ring" when referring to
the struct intel_engine_cs (a remnant from when there was only one
dual-purpose engine/ringbuffer). Rename all of these to use the
familiar "engine" so that the separation between the hardware engine
and the ringbuffer containing the commands is clear.

This patch was formed by searching for instances of '\<ring\>' and
changing those found to be referring to an engine. There are quite a
few instances remaining in comments where it is less clear what is
appropriate for the context. The registers still refer to "ring"; there
we need to check bspec for any counter-recommendations, but quite a few
registers, like PDP, should be engine based, whereas RING_HEAD probably
wants to remain ring based. The biggest compromise was in error
capture, where we already have a local engine variable and so finding a
good name was trickier.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dave Gordon <david.s.gordon@intel.com>
---
 drivers/gpu/drm/i915/i915_cmd_parser.c     |   4 +-
 drivers/gpu/drm/i915/i915_drv.h            |  15 +-
 drivers/gpu/drm/i915/i915_gem.c            |  26 +--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  23 +--
 drivers/gpu/drm/i915/i915_gpu_error.c      | 255 +++++++++++++++--------------
 drivers/gpu/drm/i915/i915_irq.c            |   6 +-
 drivers/gpu/drm/i915/intel_mocs.h          |   2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  24 +--
 9 files changed, 183 insertions(+), 174 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index b0fd6a7b0603..b5b520176c59 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -636,7 +636,7 @@ static bool validate_cmds_sorted(struct intel_engine_cs *engine,
 	return ret;
 }
 
-static bool check_sorted(int ring_id,
+static bool check_sorted(int engine_id,
 			 const struct drm_i915_reg_descriptor *reg_table,
 			 int reg_count)
 {
@@ -649,7 +649,7 @@ static bool check_sorted(int ring_id,
 
 		if (curr < previous) {
 			DRM_ERROR("CMD: table not sorted ring=%d entry=%d reg=0x%08X prev=0x%08X\n",
-				  ring_id, i, curr, previous);
+				  engine_id, i, curr, previous);
 			ret = false;
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0f408ada1c65..808333a1d42d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -402,7 +402,7 @@ struct drm_i915_file_private {
 		unsigned boosts;
 	} rps;
 
-	unsigned int bsd_ring;
+	unsigned int bsd_engine;
 };
 
 /* Used by dp and fdi links */
@@ -512,7 +512,7 @@ struct drm_i915_error_state {
 	struct intel_display_error_state *display;
 	struct drm_i915_error_object *semaphore_obj;
 
-	struct drm_i915_error_ring {
+	struct drm_i915_error_engine {
 		bool valid;
 		/* Software tracked state */
 		bool waiting;
@@ -578,7 +578,7 @@ struct drm_i915_error_state {
 
 		pid_t pid;
 		char comm[TASK_COMM_LEN];
-	} ring[I915_NUM_ENGINES];
+	} engine[I915_NUM_ENGINES];
 
 	struct drm_i915_error_buffer {
 		u32 size;
@@ -593,7 +593,7 @@ struct drm_i915_error_state {
 		u32 dirty:1;
 		u32 purgeable:1;
 		u32 userptr:1;
-		s32 ring:4;
+		s32 engine:4;
 		u32 cache_level:3;
 	} **active_bo, **pinned_bo;
 
@@ -1331,7 +1331,7 @@ struct i915_gem_mm {
 	bool interruptible;
 
 	/* the indicator for dispatch video commands on two BSD rings */
-	unsigned int bsd_ring_dispatch_index;
+	unsigned int bsd_engine_dispatch_index;
 
 	/** Bit 6 swizzling required for X tiling */
 	uint32_t bit_6_swizzle_x;
@@ -2500,8 +2500,9 @@ struct drm_i915_cmd_descriptor {
 /*
  * A table of commands requiring special handling by the command parser.
  *
- * Each ring has an array of tables. Each table consists of an array of command
- * descriptors, which must be sorted with command opcodes in ascending order.
+ * Each engine has an array of tables. Each table consists of an array of
+ * command descriptors, which must be sorted with command opcodes in
+ * ascending order.
  */
 struct drm_i915_cmd_table {
 	const struct drm_i915_cmd_descriptor *table;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 40047eb48826..d004664c0847 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -46,7 +46,7 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
 static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int engine);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
@@ -1385,10 +1385,10 @@ static void
 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 			       struct drm_i915_gem_request *req)
 {
-	int ring = req->engine->id;
+	int engine = req->engine->id;
 
-	if (obj->last_read_req[ring] == req)
-		i915_gem_object_retire__read(obj, ring);
+	if (obj->last_read_req[engine] == req)
+		i915_gem_object_retire__read(obj, engine);
 	else if (obj->last_write_req == req)
 		i915_gem_object_retire__write(obj);
 
@@ -2381,20 +2381,20 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int engine)
 {
 	struct i915_vma *vma;
 
-	GEM_BUG_ON(obj->last_read_req[ring] == NULL);
-	GEM_BUG_ON(!(obj->active & (1 << ring)));
+	GEM_BUG_ON(obj->last_read_req[engine] == NULL);
+	GEM_BUG_ON(!(obj->active & (1 << engine)));
 
-	list_del_init(&obj->engine_list[ring]);
-	i915_gem_request_assign(&obj->last_read_req[ring], NULL);
+	list_del_init(&obj->engine_list[engine]);
+	i915_gem_request_assign(&obj->last_read_req[engine], NULL);
 
-	if (obj->last_write_req && obj->last_write_req->engine->id == ring)
+	if (obj->last_write_req && obj->last_write_req->engine->id == engine)
 		i915_gem_object_retire__write(obj);
 
-	obj->active &= ~(1 << ring);
+	obj->active &= ~(1 << engine);
 	if (obj->active)
 		return;
 
@@ -4596,7 +4596,7 @@ int i915_gem_init(struct drm_device *dev)
 
 	ret = i915_gem_init_hw(dev);
 	if (ret == -EIO) {
-		/* Allow ring initialisation to fail by marking the GPU as
+		/* Allow engine initialisation to fail by marking the GPU as
 		 * wedged. But we only want to do this where the GPU is angry,
 		 * for all other failure, such as an allocation failure, bail.
 		 */
@@ -4785,7 +4785,7 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
 	spin_lock_init(&file_priv->mm.lock);
 	INIT_LIST_HEAD(&file_priv->mm.request_list);
 
-	file_priv->bsd_ring = -1;
+	file_priv->bsd_engine = -1;
 
 	ret = i915_gem_context_open(dev, file);
 	if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 6482ec24ff3b..5d4b5df41387 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1351,23 +1351,24 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 /**
  * Find one BSD ring to dispatch the corresponding BSD command.
- * The ring index is returned.
+ * The engine index is returned.
  */
 static unsigned int
-gen8_dispatch_bsd_ring(struct drm_i915_private *dev_priv, struct drm_file *file)
+gen8_dispatch_bsd_engine(struct drm_i915_private *dev_priv,
+			 struct drm_file *file)
 {
 	struct drm_i915_file_private *file_priv = file->driver_priv;
 
 	/* Check whether the file_priv has already selected one ring. */
-	if ((int)file_priv->bsd_ring < 0) {
+	if ((int)file_priv->bsd_engine < 0) {
 		/* If not, use the ping-pong mechanism to select one. */
 		mutex_lock(&dev_priv->drm.struct_mutex);
-		file_priv->bsd_ring = dev_priv->mm.bsd_ring_dispatch_index;
-		dev_priv->mm.bsd_ring_dispatch_index ^= 1;
+		file_priv->bsd_engine = dev_priv->mm.bsd_engine_dispatch_index;
+		dev_priv->mm.bsd_engine_dispatch_index ^= 1;
 		mutex_unlock(&dev_priv->drm.struct_mutex);
 	}
 
-	return file_priv->bsd_ring;
+	return file_priv->bsd_engine;
 }
 
 #define I915_USER_RINGS (4)
@@ -1384,7 +1385,7 @@ static int
 eb_select_ring(struct drm_i915_private *dev_priv,
 	       struct drm_file *file,
 	       struct drm_i915_gem_execbuffer2 *args,
-	       struct intel_engine_cs **ring)
+	       struct intel_engine_cs **engine)
 {
 	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
 
@@ -1404,7 +1405,7 @@ eb_select_ring(struct drm_i915_private *dev_priv,
 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
 
 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
-			bsd_idx = gen8_dispatch_bsd_ring(dev_priv, file);
+			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
 			   bsd_idx <= I915_EXEC_BSD_RING2) {
 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
@@ -1415,12 +1416,12 @@ eb_select_ring(struct drm_i915_private *dev_priv,
 			return -EINVAL;
 		}
 
-		*ring = &dev_priv->engine[_VCS(bsd_idx)];
+		*engine = &dev_priv->engine[_VCS(bsd_idx)];
 	} else {
-		*ring = &dev_priv->engine[user_ring_map[user_ring_id]];
+		*engine = &dev_priv->engine[user_ring_map[user_ring_id]];
 	}
 
-	if (!intel_engine_initialized(*ring)) {
+	if (!intel_engine_initialized(*engine)) {
 		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
 		return -EINVAL;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 4d39c7284605..d2ace2986b92 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -30,9 +30,9 @@
 #include <generated/utsrelease.h>
 #include "i915_drv.h"
 
-static const char *ring_str(int ring)
+static const char *engine_str(int engine)
 {
-	switch (ring) {
+	switch (engine) {
 	case RCS: return "render";
 	case VCS: return "bsd";
 	case BCS: return "blt";
@@ -207,8 +207,8 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 		err_puts(m, dirty_flag(err->dirty));
 		err_puts(m, purgeable_flag(err->purgeable));
 		err_puts(m, err->userptr ? " userptr" : "");
-		err_puts(m, err->ring != -1 ? " " : "");
-		err_puts(m, ring_str(err->ring));
+		err_puts(m, err->engine != -1 ? " " : "");
+		err_puts(m, engine_str(err->engine));
 		err_puts(m, i915_cache_level_str(m->i915, err->cache_level));
 
 		if (err->name)
@@ -240,69 +240,71 @@ static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
 }
 
 static void i915_ring_error_state(struct drm_i915_error_state_buf *m,
-				  struct drm_device *dev,
-				  struct drm_i915_error_state *error,
-				  int ring_idx)
+				    struct drm_device *dev,
+				    struct drm_i915_error_state *error,
+				    int engine_idx)
 {
-	struct drm_i915_error_ring *ring = &error->ring[ring_idx];
+	struct drm_i915_error_engine *ering = &error->engine[engine_idx];
 
-	if (!ring->valid)
+	if (!ering->valid)
 		return;
 
-	err_printf(m, "%s command stream:\n", ring_str(ring_idx));
-	err_printf(m, "  START: 0x%08x\n", ring->start);
-	err_printf(m, "  HEAD:  0x%08x\n", ring->head);
-	err_printf(m, "  TAIL:  0x%08x\n", ring->tail);
-	err_printf(m, "  CTL:   0x%08x\n", ring->ctl);
-	err_printf(m, "  HWS:   0x%08x\n", ring->hws);
-	err_printf(m, "  ACTHD: 0x%08x %08x\n", (u32)(ring->acthd>>32), (u32)ring->acthd);
-	err_printf(m, "  IPEIR: 0x%08x\n", ring->ipeir);
-	err_printf(m, "  IPEHR: 0x%08x\n", ring->ipehr);
-	err_printf(m, "  INSTDONE: 0x%08x\n", ring->instdone);
+	err_printf(m, "%s command stream:\n", engine_str(engine_idx));
+	err_printf(m, "  START: 0x%08x\n", ering->start);
+	err_printf(m, "  HEAD:  0x%08x\n", ering->head);
+	err_printf(m, "  TAIL:  0x%08x\n", ering->tail);
+	err_printf(m, "  CTL:   0x%08x\n", ering->ctl);
+	err_printf(m, "  HWS:   0x%08x\n", ering->hws);
+	err_printf(m, "  ACTHD: 0x%08x %08x\n",
+		   (u32)(ering->acthd>>32), (u32)ering->acthd);
+	err_printf(m, "  IPEIR: 0x%08x\n", ering->ipeir);
+	err_printf(m, "  IPEHR: 0x%08x\n", ering->ipehr);
+	err_printf(m, "  INSTDONE: 0x%08x\n", ering->instdone);
 	if (INTEL_INFO(dev)->gen >= 4) {
-		err_printf(m, "  BBADDR: 0x%08x %08x\n", (u32)(ring->bbaddr>>32), (u32)ring->bbaddr);
-		err_printf(m, "  BB_STATE: 0x%08x\n", ring->bbstate);
-		err_printf(m, "  INSTPS: 0x%08x\n", ring->instps);
+		err_printf(m, "  BBADDR: 0x%08x %08x\n",
+			   (u32)(ering->bbaddr>>32), (u32)ering->bbaddr);
+		err_printf(m, "  BB_STATE: 0x%08x\n", ering->bbstate);
+		err_printf(m, "  INSTPS: 0x%08x\n", ering->instps);
 	}
-	err_printf(m, "  INSTPM: 0x%08x\n", ring->instpm);
-	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ring->faddr),
-		   lower_32_bits(ring->faddr));
+	err_printf(m, "  INSTPM: 0x%08x\n", ering->instpm);
+	err_printf(m, "  FADDR: 0x%08x %08x\n", upper_32_bits(ering->faddr),
+		   lower_32_bits(ering->faddr));
 	if (INTEL_INFO(dev)->gen >= 6) {
-		err_printf(m, "  RC PSMI: 0x%08x\n", ring->rc_psmi);
-		err_printf(m, "  FAULT_REG: 0x%08x\n", ring->fault_reg);
+		err_printf(m, "  RC PSMI: 0x%08x\n", ering->rc_psmi);
+		err_printf(m, "  FAULT_REG: 0x%08x\n", ering->fault_reg);
 		err_printf(m, "  SYNC_0: 0x%08x [last synced 0x%08x]\n",
-			   ring->semaphore_mboxes[0],
-			   ring->semaphore_seqno[0]);
+			   ering->semaphore_mboxes[0],
+			   ering->semaphore_seqno[0]);
 		err_printf(m, "  SYNC_1: 0x%08x [last synced 0x%08x]\n",
-			   ring->semaphore_mboxes[1],
-			   ring->semaphore_seqno[1]);
+			   ering->semaphore_mboxes[1],
+			   ering->semaphore_seqno[1]);
 		if (HAS_VEBOX(dev)) {
 			err_printf(m, "  SYNC_2: 0x%08x [last synced 0x%08x]\n",
-				   ring->semaphore_mboxes[2],
-				   ring->semaphore_seqno[2]);
+				   ering->semaphore_mboxes[2],
+				   ering->semaphore_seqno[2]);
 		}
 	}
 	if (USES_PPGTT(dev)) {
-		err_printf(m, "  GFX_MODE: 0x%08x\n", ring->vm_info.gfx_mode);
+		err_printf(m, "  GFX_MODE: 0x%08x\n", ering->vm_info.gfx_mode);
 
 		if (INTEL_INFO(dev)->gen >= 8) {
 			int i;
 			for (i = 0; i < 4; i++)
 				err_printf(m, "  PDP%d: 0x%016llx\n",
-					   i, ring->vm_info.pdp[i]);
+					   i, ering->vm_info.pdp[i]);
 		} else {
 			err_printf(m, "  PP_DIR_BASE: 0x%08x\n",
-				   ring->vm_info.pp_dir_base);
+				   ering->vm_info.pp_dir_base);
 		}
 	}
-	err_printf(m, "  seqno: 0x%08x\n", ring->seqno);
-	err_printf(m, "  last_seqno: 0x%08x\n", ring->last_seqno);
-	err_printf(m, "  waiting: %s\n", yesno(ring->waiting));
-	err_printf(m, "  ring->head: 0x%08x\n", ring->cpu_ring_head);
-	err_printf(m, "  ring->tail: 0x%08x\n", ring->cpu_ring_tail);
+	err_printf(m, "  seqno: 0x%08x\n", ering->seqno);
+	err_printf(m, "  last_seqno: 0x%08x\n", ering->last_seqno);
+	err_printf(m, "  waiting: %s\n", yesno(ering->waiting));
+	err_printf(m, "  ring->head: 0x%08x\n", ering->cpu_ring_head);
+	err_printf(m, "  ring->tail: 0x%08x\n", ering->cpu_ring_tail);
 	err_printf(m, "  hangcheck: %s [%d]\n",
-		   hangcheck_action_to_str(ring->hangcheck_action),
-		   ring->hangcheck_score);
+		   hangcheck_action_to_str(ering->hangcheck_action),
+		   ering->hangcheck_score);
 }
 
 void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
@@ -348,17 +350,17 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 		   error->time.tv_usec);
 	err_printf(m, "Kernel: " UTS_RELEASE "\n");
 	max_hangcheck_score = 0;
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		if (error->ring[i].hangcheck_score > max_hangcheck_score)
-			max_hangcheck_score = error->ring[i].hangcheck_score;
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		if (error->engine[i].hangcheck_score > max_hangcheck_score)
+			max_hangcheck_score = error->engine[i].hangcheck_score;
 	}
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		if (error->ring[i].hangcheck_score == max_hangcheck_score &&
-		    error->ring[i].pid != -1) {
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		if (error->engine[i].hangcheck_score == max_hangcheck_score &&
+		    error->engine[i].pid != -1) {
 			err_printf(m, "Active process (on ring %s): %s [%d]\n",
-				   ring_str(i),
-				   error->ring[i].comm,
-				   error->ring[i].pid);
+				   engine_str(i),
+				   error->engine[i].comm,
+				   error->engine[i].pid);
 		}
 	}
 	err_printf(m, "Reset count: %u\n", error->reset_count);
@@ -414,7 +416,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 	if (IS_GEN7(dev))
 		err_printf(m, "ERR_INT: 0x%08x\n", error->err_int);
 
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++)
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++)
 		i915_ring_error_state(m, dev, error, i);
 
 	for (i = 0; i < error->vm_count; i++) {
@@ -429,21 +431,23 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 				    error->pinned_bo_count[i]);
 	}
 
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		obj = error->ring[i].batchbuffer;
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		struct drm_i915_error_engine *ering = &error->engine[i];
+
+		obj = ering->batchbuffer;
 		if (obj) {
 			err_puts(m, dev_priv->engine[i].name);
-			if (error->ring[i].pid != -1)
+			if (ering->pid != -1)
 				err_printf(m, " (submitted by %s [%d])",
-					   error->ring[i].comm,
-					   error->ring[i].pid);
+					   ering->comm,
+					   ering->pid);
 			err_printf(m, " --- gtt_offset = 0x%08x %08x\n",
 				   upper_32_bits(obj->gtt_offset),
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
 
-		obj = error->ring[i].wa_batchbuffer;
+		obj = ering->wa_batchbuffer;
 		if (obj) {
 			err_printf(m, "%s (w/a) --- gtt_offset = 0x%08x\n",
 				   dev_priv->engine[i].name,
@@ -451,38 +455,38 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			print_error_obj(m, obj);
 		}
 
-		if (error->ring[i].num_requests) {
+		if (ering->num_requests) {
 			err_printf(m, "%s --- %d requests\n",
 				   dev_priv->engine[i].name,
-				   error->ring[i].num_requests);
-			for (j = 0; j < error->ring[i].num_requests; j++) {
+				   ering->num_requests);
+			for (j = 0; j < ering->num_requests; j++) {
 				err_printf(m, "  seqno 0x%08x, emitted %ld, tail 0x%08x\n",
-					   error->ring[i].requests[j].seqno,
-					   error->ring[i].requests[j].jiffies,
-					   error->ring[i].requests[j].tail);
+					   ering->requests[j].seqno,
+					   ering->requests[j].jiffies,
+					   ering->requests[j].tail);
 			}
 		}
 
-		if (error->ring[i].num_waiters) {
+		if (ering->num_waiters) {
 			err_printf(m, "%s --- %d waiters\n",
 				   dev_priv->engine[i].name,
-				   error->ring[i].num_waiters);
-			for (j = 0; j < error->ring[i].num_waiters; j++) {
+				   ering->num_waiters);
+			for (j = 0; j < ering->num_waiters; j++) {
 				err_printf(m, " seqno 0x%08x for %s [%d]\n",
-					   error->ring[i].waiters[j].seqno,
-					   error->ring[i].waiters[j].comm,
-					   error->ring[i].waiters[j].pid);
+					   ering->waiters[j].seqno,
+					   ering->waiters[j].comm,
+					   ering->waiters[j].pid);
 			}
 		}
 
-		if ((obj = error->ring[i].ringbuffer)) {
+		if ((obj = ering->ringbuffer)) {
 			err_printf(m, "%s --- ringbuffer = 0x%08x\n",
 				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
 			print_error_obj(m, obj);
 		}
 
-		if ((obj = error->ring[i].hws_page)) {
+		if ((obj = ering->hws_page)) {
 			u64 hws_offset = obj->gtt_offset;
 			u32 *hws_page = &obj->pages[0][0];
 
@@ -504,7 +508,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			}
 		}
 
-		obj = error->ring[i].wa_ctx;
+		obj = ering->wa_ctx;
 		if (obj) {
 			u64 wa_ctx_offset = obj->gtt_offset;
 			u32 *wa_ctx_page = &obj->pages[0][0];
@@ -526,7 +530,7 @@ int i915_error_state_to_str(struct drm_i915_error_state_buf *m,
 			}
 		}
 
-		if ((obj = error->ring[i].ctx)) {
+		if ((obj = ering->ctx)) {
 			err_printf(m, "%s --- HW Context = 0x%08x\n",
 				   dev_priv->engine[i].name,
 				   lower_32_bits(obj->gtt_offset));
@@ -611,15 +615,18 @@ static void i915_error_state_free(struct kref *error_ref)
 							  typeof(*error), ref);
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
-		i915_error_object_free(error->ring[i].batchbuffer);
-		i915_error_object_free(error->ring[i].wa_batchbuffer);
-		i915_error_object_free(error->ring[i].ringbuffer);
-		i915_error_object_free(error->ring[i].hws_page);
-		i915_error_object_free(error->ring[i].ctx);
-		i915_error_object_free(error->ring[i].wa_ctx);
-		kfree(error->ring[i].requests);
-		kfree(error->ring[i].waiters);
+	for (i = 0; i < ARRAY_SIZE(error->engine); i++) {
+		struct drm_i915_error_engine *ering = &error->engine[i];
+
+		i915_error_object_free(ering->batchbuffer);
+		i915_error_object_free(ering->wa_batchbuffer);
+		i915_error_object_free(ering->ringbuffer);
+		i915_error_object_free(ering->hws_page);
+		i915_error_object_free(ering->ctx);
+		i915_error_object_free(ering->wa_ctx);
+
+		kfree(ering->requests);
+		kfree(ering->waiters);
 	}
 
 	i915_error_object_free(error->semaphore_obj);
@@ -762,8 +769,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
-	err->ring = obj->last_write_req ?
-			i915_gem_request_get_engine(obj->last_write_req)->id : -1;
+	err->engine = obj->last_write_req ?
+		i915_gem_request_get_engine(obj->last_write_req)->id : -1;
 	err->cache_level = obj->cache_level;
 }
 
@@ -815,7 +822,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
  */
 static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 					 struct drm_i915_error_state *error,
-					 int *ring_id)
+					 int *engine_id)
 {
 	uint32_t error_code = 0;
 	int i;
@@ -826,11 +833,11 @@ static uint32_t i915_error_generate_code(struct drm_i915_private *dev_priv,
 	 * strictly a client bug. Use instdone to differentiate those some.
 	 */
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (error->ring[i].hangcheck_action == HANGCHECK_HUNG) {
-			if (ring_id)
-				*ring_id = i;
+		if (error->engine[i].hangcheck_action == HANGCHECK_HUNG) {
+			if (engine_id)
+				*engine_id = i;
 
-			return error->ring[i].ipehr ^ error->ring[i].instdone;
+			return error->engine[i].ipehr ^ error->engine[i].instdone;
 		}
 	}
 
@@ -858,7 +865,7 @@ static void i915_gem_record_fences(struct drm_i915_private *dev_priv,
 static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 					struct drm_i915_error_state *error,
 					struct intel_engine_cs *engine,
-					struct drm_i915_error_ring *ering)
+					struct drm_i915_error_engine *ering)
 {
 	struct intel_engine_cs *to;
 	enum intel_engine_id id;
@@ -891,7 +898,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
 
 static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
 					struct intel_engine_cs *engine,
-					struct drm_i915_error_ring *ering)
+					struct drm_i915_error_engine *ering)
 {
 	ering->semaphore_mboxes[0] = I915_READ(RING_SYNC_0(engine->mmio_base));
 	ering->semaphore_mboxes[1] = I915_READ(RING_SYNC_1(engine->mmio_base));
@@ -906,7 +913,7 @@ static void gen6_record_semaphore_state(struct drm_i915_private *dev_priv,
 }
 
 static void engine_record_waiters(struct intel_engine_cs *engine,
-				  struct drm_i915_error_ring *ering)
+				  struct drm_i915_error_engine *ering)
 {
 	struct intel_breadcrumbs *b = &engine->breadcrumbs;
 	struct drm_i915_error_waiter *waiter;
@@ -950,7 +957,7 @@ static void engine_record_waiters(struct intel_engine_cs *engine,
 static void i915_record_ring_state(struct drm_i915_private *dev_priv,
 				   struct drm_i915_error_state *error,
 				   struct intel_engine_cs *engine,
-				   struct drm_i915_error_ring *ering)
+				   struct drm_i915_error_engine *ering)
 {
 	if (INTEL_GEN(dev_priv) >= 6) {
 		ering->rc_psmi = I915_READ(RING_PSMI_CTL(engine->mmio_base));
@@ -1048,7 +1055,7 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
 
 static void i915_gem_record_active_context(struct intel_engine_cs *engine,
 					   struct drm_i915_error_state *error,
-					   struct drm_i915_error_ring *ering)
+					   struct drm_i915_error_engine *ering)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	struct drm_i915_gem_object *obj;
@@ -1077,16 +1084,17 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct intel_engine_cs *engine = &dev_priv->engine[i];
+		struct drm_i915_error_engine *ering = &error->engine[i];
 
-		error->ring[i].pid = -1;
+		ering->pid = -1;
 
 		if (!intel_engine_initialized(engine))
 			continue;
 
-		error->ring[i].valid = true;
+		ering->valid = true;
 
-		i915_record_ring_state(dev_priv, error, engine, &error->ring[i]);
-		engine_record_waiters(engine, &error->ring[i]);
+		i915_record_ring_state(dev_priv, error, engine, ering);
+		engine_record_waiters(engine, ering);
 
 		request = i915_gem_find_active_request(engine);
 		if (request) {
@@ -1100,13 +1108,13 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			 * as the simplest method to avoid being overwritten
 			 * by userspace.
 			 */
-			error->ring[i].batchbuffer =
+			ering->batchbuffer =
 				i915_error_object_create(dev_priv,
 							 request->batch_obj,
 							 vm);
 
 			if (HAS_BROKEN_CS_TLB(dev_priv))
-				error->ring[i].wa_batchbuffer =
+				ering->wa_batchbuffer =
 					i915_error_ggtt_object_create(dev_priv,
 							     engine->scratch.obj);
 
@@ -1116,8 +1124,8 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				rcu_read_lock();
 				task = pid_task(request->pid, PIDTYPE_PID);
 				if (task) {
-					strcpy(error->ring[i].comm, task->comm);
-					error->ring[i].pid = task->pid;
+					strcpy(ering->comm, task->comm);
+					ering->pid = task->pid;
 				}
 				rcu_read_unlock();
 			}
@@ -1126,35 +1134,34 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
 
 			rb = request->ringbuf;
-			error->ring[i].cpu_ring_head = rb->head;
-			error->ring[i].cpu_ring_tail = rb->tail;
-			error->ring[i].ringbuffer =
+			ering->cpu_ring_head = rb->head;
+			ering->cpu_ring_tail = rb->tail;
+			ering->ringbuffer =
 				i915_error_ggtt_object_create(dev_priv,
 							      rb->obj);
 		}
 
-		error->ring[i].hws_page =
+		ering->hws_page =
 			i915_error_ggtt_object_create(dev_priv,
 						      engine->status_page.obj);
 
 		if (engine->wa_ctx.obj) {
-			error->ring[i].wa_ctx =
+			ering->wa_ctx =
 				i915_error_ggtt_object_create(dev_priv,
 							      engine->wa_ctx.obj);
 		}
 
-		i915_gem_record_active_context(engine, error, &error->ring[i]);
+		i915_gem_record_active_context(engine, error, ering);
 
 		count = 0;
 		list_for_each_entry(request, &engine->request_list, list)
 			count++;
 
-		error->ring[i].num_requests = count;
-		error->ring[i].requests =
-			kcalloc(count, sizeof(*error->ring[i].requests),
-				GFP_ATOMIC);
-		if (error->ring[i].requests == NULL) {
-			error->ring[i].num_requests = 0;
+		ering->num_requests = count;
+		ering->requests =
+			kcalloc(count, sizeof(*ering->requests), GFP_ATOMIC);
+		if (ering->requests == NULL) {
+			ering->num_requests = 0;
 			continue;
 		}
 
@@ -1162,7 +1169,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		list_for_each_entry(request, &engine->request_list, list) {
 			struct drm_i915_error_request *erq;
 
-			if (count >= error->ring[i].num_requests) {
+			if (count >= ering->num_requests) {
 				/*
 				 * If the ring request list was changed in
 				 * between the point where the error request
@@ -1181,7 +1188,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 				break;
 			}
 
-			erq = &error->ring[i].requests[count++];
+			erq = &ering->requests[count++];
 			erq->seqno = request->fence.seqno;
 			erq->jiffies = request->emitted_jiffies;
 			erq->tail = request->postfix;
@@ -1352,20 +1359,20 @@ static void i915_error_capture_msg(struct drm_i915_private *dev_priv,
 				   const char *error_msg)
 {
 	u32 ecode;
-	int ring_id = -1, len;
+	int engine_id = -1, len;
 
-	ecode = i915_error_generate_code(dev_priv, error, &ring_id);
+	ecode = i915_error_generate_code(dev_priv, error, &engine_id);
 
 	len = scnprintf(error->error_msg, sizeof(error->error_msg),
 			"GPU HANG: ecode %d:%d:0x%08x",
-			INTEL_GEN(dev_priv), ring_id, ecode);
+			INTEL_GEN(dev_priv), engine_id, ecode);
 
-	if (ring_id != -1 && error->ring[ring_id].pid != -1)
+	if (engine_id != -1 && error->engine[engine_id].pid != -1)
 		len += scnprintf(error->error_msg + len,
 				 sizeof(error->error_msg) - len,
 				 ", in %s [%d]",
-				 error->ring[ring_id].comm,
-				 error->ring[ring_id].pid);
+				 error->engine[engine_id].comm,
+				 error->engine[engine_id].pid);
 
 	scnprintf(error->error_msg + len, sizeof(error->error_msg) - len,
 		  ", reason: %s, action: %s",
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 7104dc1463eb..f5bf4f913a91 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -3140,13 +3140,13 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 				}
 			} else {
 				/* We always increment the hangcheck score
-				 * if the ring is busy and still processing
+				 * if the engine is busy and still processing
 				 * the same request, so that no single request
 				 * can run indefinitely (such as a chain of
 				 * batches). The only time we do not increment
 				 * the hangcheck score on this ring, if this
-				 * ring is in a legitimate wait for another
-				 * ring. In that case the waiting ring is a
+				 * engine is in a legitimate wait for another
+				 * engine. In that case the waiting engine is a
 				 * victim and we want to be sure we catch the
 				 * right culprit. Then every time we do kick
 				 * the ring, add a small increment to the
diff --git a/drivers/gpu/drm/i915/intel_mocs.h b/drivers/gpu/drm/i915/intel_mocs.h
index 4640299e04ec..a8bd9f7bfece 100644
--- a/drivers/gpu/drm/i915/intel_mocs.h
+++ b/drivers/gpu/drm/i915/intel_mocs.h
@@ -54,6 +54,6 @@
 
 int intel_rcs_context_init_mocs(struct drm_i915_gem_request *req);
 void intel_mocs_init_l3cc_table(struct drm_device *dev);
-int intel_mocs_init_engine(struct intel_engine_cs *ring);
+int intel_mocs_init_engine(struct intel_engine_cs *engine);
 
 #endif
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b844e6984ae7..a6f7db2857c1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1581,7 +1581,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 }
 
 static void
-gen5_seqno_barrier(struct intel_engine_cs *ring)
+gen5_seqno_barrier(struct intel_engine_cs *engine)
 {
 	/* MI_STORE are internally buffered by the GPU and not flushed
 	 * either by MI_FLUSH or SyncFlush or any other combination of
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 05bab8bda63d..2888fda6a5e0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -197,14 +197,14 @@ struct intel_engine_cs {
 
 	u32             irq_keep_mask; /* always keep these interrupts */
 	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
-	void		(*irq_enable)(struct intel_engine_cs *ring);
-	void		(*irq_disable)(struct intel_engine_cs *ring);
+	void		(*irq_enable)(struct intel_engine_cs *engine);
+	void		(*irq_disable)(struct intel_engine_cs *engine);
 
-	int		(*init_hw)(struct intel_engine_cs *ring);
+	int		(*init_hw)(struct intel_engine_cs *engine);
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	void		(*write_tail)(struct intel_engine_cs *ring,
+	void		(*write_tail)(struct intel_engine_cs *engine,
 				      u32 value);
 	int __must_check (*flush)(struct drm_i915_gem_request *req,
 				  u32	invalidate_domains,
@@ -216,14 +216,14 @@ struct intel_engine_cs {
 	 * seen value is good enough. Note that the seqno will always be
 	 * monotonic, even if not coherent.
 	 */
-	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
+	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
 	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
 					       u64 offset, u32 length,
 					       unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
 #define I915_DISPATCH_RS     0x4
-	void		(*cleanup)(struct intel_engine_cs *ring);
+	void		(*cleanup)(struct intel_engine_cs *engine);
 
 	/* GEN8 signal/wait table - never trust comments!
 	 *	  signal to	signal to    signal to   signal to      signal to
@@ -269,7 +269,7 @@ struct intel_engine_cs {
 			struct {
 				/* our mbox written by others */
 				u32		wait[I915_NUM_ENGINES];
-				/* mboxes this ring signals to */
+				/* mboxes this engine signals to */
 				i915_reg_t	signal[I915_NUM_ENGINES];
 			} mbox;
 			u64		signal_ggtt[I915_NUM_ENGINES];
@@ -340,7 +340,7 @@ struct intel_engine_cs {
 
 	/*
 	 * Table of commands the command parser needs to know about
-	 * for this ring.
+	 * for this engine.
 	 */
 	DECLARE_HASHTABLE(cmd_hash, I915_CMD_HASH_ORDER);
 
@@ -354,11 +354,11 @@ struct intel_engine_cs {
 	 * Returns the bitmask for the length field of the specified command.
 	 * Return 0 for an unrecognized/invalid command.
 	 *
-	 * If the command parser finds an entry for a command in the ring's
+	 * If the command parser finds an entry for a command in the engine's
 	 * cmd_tables, it gets the command's length based on the table entry.
-	 * If not, it calls this function to determine the per-ring length field
-	 * encoding for the command (i.e. certain opcode ranges use certain bits
-	 * to encode the command length in the header).
+	 * If not, it calls this function to determine the per-engine length
+	 * field encoding for the command (i.e. different opcode ranges use
+	 * certain bits to encode the command length in the header).
 	 */
 	u32 (*get_cmd_length_mask)(u32 cmd_header);
 };
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev2)
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (18 preceding siblings ...)
  2016-07-20 13:54 ` ✓ Ro.CI.BAT: success for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Patchwork
@ 2016-07-20 15:10 ` Patchwork
  2016-07-22  9:58 ` ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev4) Patchwork
  2016-07-22 10:22 ` ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev5) Patchwork
  21 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2016-07-20 15:10 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev2)
URL   : https://patchwork.freedesktop.org/series/10090/
State : failure

== Summary ==

Applying: drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
Applying: drm/i915: Convert stray struct intel_engine_cs *ring
Applying: drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs
Applying: drm/i915: Rename intel_context[engine].ringbuf
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/i915_drv.h
M	drivers/gpu/drm/i915/i915_gem_context.c
M	drivers/gpu/drm/i915/intel_lrc.c
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/intel_lrc.c
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/intel_lrc.c
Auto-merging drivers/gpu/drm/i915/i915_gem_context.c
Auto-merging drivers/gpu/drm/i915/i915_drv.h
error: Failed to merge in the changes.
Patch failed at 0004 drm/i915: Rename intel_context[engine].ringbuf
The copy of the patch that failed is found in: .git/rebase-apply/patch
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
  2016-07-20 13:11 ` [PATCH 01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
@ 2016-07-21 11:26   ` Joonas Lahtinen
  2016-07-21 12:09     ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 11:26 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> Both perform the same actions with more or less indirection, so just
> unify the code.
> 

Don't really like removing the engine = req->engine aliases, but it seems
req->engine is used plenty already. Assuming this was a mechanical change
with no hidden functional changes, then,

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring
  2016-07-20 13:11 ` [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
  2016-07-20 14:12   ` Dave Gordon
@ 2016-07-21 11:28   ` Joonas Lahtinen
  1 sibling, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 11:28 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> Now that we have disambiguated ring and engine, we can use the clearer
> and more consistent name for the intel_ringbuffer pointer in the
> request.
> 

The cocci data or sed expression would be useful, and would make me more
confident this is purely mechanical work.
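
For illustration, the kind of rule I have in mind could look something like
the Coccinelle sketch below; this is hypothetical and not necessarily what
was actually used to generate the patch:

// hypothetical sketch: rewrite req->ringbuf to req->ring wherever the
// matched expression has the request type; the struct field declaration
// itself and any local 'ringbuf' variables would still need separate
// rules or a manual pass
@@
struct drm_i915_gem_request *req;
@@
- req->ringbuf
+ req->ring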

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas

-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 03/18] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs
  2016-07-20 13:11 ` [PATCH 03/18] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs Chris Wilson
  2016-07-20 14:23   ` Dave Gordon
@ 2016-07-21 11:32   ` Joonas Lahtinen
  2016-07-21 11:42     ` Chris Wilson
  1 sibling, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 11:32 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> Having ringbuf->ring point to an engine is confusing, so rename it once
> again to ring->engine.
> 

Commit message and content do not seem to match.

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 12 ++++++------
>  1 file changed, 6 insertions(+), 6 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index ac51e4885046..3cfbfe40f6e8 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2171,7 +2171,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
>  	i915_gem_context_put(ctx);
>  }
>  
> -static int intel_init_ring_buffer(struct intel_engine_cs *engine)
> +static int intel_init_engine(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
>  	struct intel_ringbuffer *ringbuf;
> @@ -2868,7 +2868,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
>  	engine->init_hw = init_render_ring;
>  	engine->cleanup = render_ring_cleanup;
>  
> -	ret = intel_init_ring_buffer(engine);
> +	ret = intel_init_engine(engine);
>  	if (ret)
>  		return ret;
>  
> @@ -2907,7 +2907,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
>  			engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
>  	}
>  
> -	return intel_init_ring_buffer(engine);
> +	return intel_init_engine(engine);
>  }
>  
>  /**
> @@ -2921,7 +2921,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
>  
>  	engine->flush = gen6_bsd_ring_flush;
>  
> -	return intel_init_ring_buffer(engine);
> +	return intel_init_engine(engine);
>  }
>  
>  int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
> @@ -2934,7 +2934,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
>  	if (INTEL_GEN(dev_priv) < 8)
>  		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
>  
> -	return intel_init_ring_buffer(engine);
> +	return intel_init_engine(engine);
>  }
>  
>  int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
> @@ -2951,7 +2951,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
>  		engine->irq_disable = hsw_vebox_irq_disable;
>  	}
>  
> -	return intel_init_ring_buffer(engine);
> +	return intel_init_engine(engine);
>  }
>  
>  int
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 03/18] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs
  2016-07-21 11:32   ` Joonas Lahtinen
@ 2016-07-21 11:42     ` Chris Wilson
  0 siblings, 0 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-21 11:42 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Thu, Jul 21, 2016 at 02:32:52PM +0300, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> > Having ringbuf->ring point to an engine is confusing, so rename it once
> > again to ring->engine.
> > 
> 
> Commit message and content do not seem to match.

It does, but it is subtle...

intel_init_engine() should be intel_init_legacy_engine() now.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 04/18] drm/i915: Rename intel_context[engine].ringbuf
  2016-07-20 13:11 ` [PATCH 04/18] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
@ 2016-07-21 11:43   ` Joonas Lahtinen
  0 siblings, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 11:43 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> Perform s/ringbuf/ring/ on the context struct for consistency with the
> ring/engine split.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c        |  8 ++++----
>  drivers/gpu/drm/i915/i915_drv.h            |  2 +-
>  drivers/gpu/drm/i915/i915_gem_context.c    |  4 ++--
>  drivers/gpu/drm/i915/i915_guc_submission.c |  2 +-
>  drivers/gpu/drm/i915/intel_lrc.c           | 33 ++++++++++++++----------------
>  5 files changed, 23 insertions(+), 26 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 9aa62c5b5f65..bde68741809b 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -425,8 +425,8 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
>  	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
>  		if (ctx->engine[n].state)
>  			per_file_stats(0, ctx->engine[n].state, data);
> -		if (ctx->engine[n].ringbuf)
> -			per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
> +		if (ctx->engine[n].ring)
> +			per_file_stats(0, ctx->engine[n].ring->obj, data);
>  	}
>  
>  	return 0;
> @@ -2066,8 +2066,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
>  			seq_putc(m, ce->initialised ? 'I' : 'i');
>  			if (ce->state)
>  				describe_obj(m, ce->state);
> -			if (ce->ringbuf)
> -				describe_ctx_ringbuf(m, ce->ringbuf);
> +			if (ce->ring)
> +				describe_ctx_ringbuf(m, ce->ring);
>  			seq_putc(m, '\n');
>  		}
>  
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 0f408ada1c65..87e06a6a797a 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -894,7 +894,7 @@ struct i915_gem_context {
>  
>  	struct intel_context {
>  		struct drm_i915_gem_object *state;
> -		struct intel_ringbuffer *ringbuf;
> +		struct intel_ringbuffer *ring;
>  		struct i915_vma *lrc_vma;
>  		uint32_t *lrc_reg_state;
>  		u64 lrc_desc;
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index 16138c4ff7db..c8bf7b8e959f 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -173,8 +173,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
>  			continue;
>  
>  		WARN_ON(ce->pin_count);
> -		if (ce->ringbuf)
> -			intel_ringbuffer_free(ce->ringbuf);
> +		if (ce->ring)
> +			intel_ringbuffer_free(ce->ring);
>  
>  		i915_gem_object_put(ce->state);
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index 01c1c1671811..eccd34832fe6 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -363,7 +363,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
>  		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
>  				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
>  
> -		obj = ce->ringbuf->obj;
> +		obj = ce->ring->obj;
>  		gfx_addr = i915_gem_obj_ggtt_offset(obj);
>  
>  		lrc->ring_begin = gfx_addr;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index c3542eb338ca..7bc1d0c92799 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -482,11 +482,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
>  		 * resubmit the request. See gen8_emit_request() for where we
>  		 * prepare the padding after the end of the request.
>  		 */
> -		struct intel_ringbuffer *ringbuf;
> -
> -		ringbuf = req0->ctx->engine[engine->id].ringbuf;
>  		req0->tail += 8;
> -		req0->tail &= ringbuf->size - 1;
> +		req0->tail &= req0->ring->size - 1;
>  	}
>  
>  	execlists_submit_requests(req0, req1);
> @@ -714,7 +711,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
>  			return ret;
>  	}
>  
> -	request->ring = ce->ringbuf;
> +	request->ring = ce->ring;
>  
>  	if (i915.enable_guc_submission) {
>  		/*
> @@ -976,14 +973,14 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>  
>  	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
>  
> -	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
> +	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ring);
>  	if (ret)
>  		goto unpin_map;
>  
>  	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
>  	intel_lr_context_descriptor_update(ctx, engine);
>  
> -	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
> +	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
>  	ce->lrc_reg_state = lrc_reg_state;
>  	ce->state->dirty = true;
>  
> @@ -1014,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
>  	if (--ce->pin_count)
>  		return;
>  
> -	intel_unpin_ringbuffer_obj(ce->ringbuf);
> +	intel_unpin_ringbuffer_obj(ce->ring);
>  
>  	i915_gem_object_unpin_map(ce->state);
>  	i915_gem_object_ggtt_unpin(ce->state);
> @@ -2338,7 +2335,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
>  	struct drm_i915_gem_object *ctx_obj;
>  	struct intel_context *ce = &ctx->engine[engine->id];
>  	uint32_t context_size;
> -	struct intel_ringbuffer *ringbuf;
> +	struct intel_ringbuffer *ring;
>  	int ret;
>  
>  	WARN_ON(ce->state);
> @@ -2354,29 +2351,29 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
>  		return PTR_ERR(ctx_obj);
>  	}
>  
> -	ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
> -	if (IS_ERR(ringbuf)) {
> -		ret = PTR_ERR(ringbuf);
> +	ring = intel_engine_create_ringbuffer(engine, ctx->ring_size);
> +	if (IS_ERR(ring)) {
> +		ret = PTR_ERR(ring);
>  		goto error_deref_obj;
>  	}
>  
> -	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
> +	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
>  	if (ret) {
>  		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
>  		goto error_ringbuf;
>  	}
>  
> -	ce->ringbuf = ringbuf;
> +	ce->ring = ring;
>  	ce->state = ctx_obj;
>  	ce->initialised = engine->init_context == NULL;
>  
>  	return 0;
>  
>  error_ringbuf:

Might as well change the label too, then;

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas

> -	intel_ringbuffer_free(ringbuf);
> +	intel_ringbuffer_free(ring);
>  error_deref_obj:
>  	i915_gem_object_put(ctx_obj);
> -	ce->ringbuf = NULL;
> +	ce->ring = NULL;
>  	ce->state = NULL;
>  	return ret;
>  }
> @@ -2407,7 +2404,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
>  
>  		i915_gem_object_unpin_map(ctx_obj);
>  
> -		ce->ringbuf->head = 0;
> -		ce->ringbuf->tail = 0;
> +		ce->ring->head = 0;
> +		ce->ring->tail = 0;
>  	}
>  }
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 05/18] drm/i915: Rename struct intel_ringbuffer to struct intel_ring
  2016-07-20 13:11 ` [PATCH 05/18] drm/i915: Rename struct intel_ringbuffer to struct intel_ring Chris Wilson
@ 2016-07-21 11:59   ` Joonas Lahtinen
  2016-07-21 16:02     ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 11:59 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> The state stored in this struct is not only the information about the
> buffer object, but the ring used to communicate with the hardware. Using
> buffer here is overly specific and, for me at least, conflates with the
> notion of buffer objects themselves.
> 

You should list all the renames here so the poor rebasers get to live.
With those listed;

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Comment below.

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_debugfs.c        |  11 ++-
>  drivers/gpu/drm/i915/i915_drv.h            |   4 +-
>  drivers/gpu/drm/i915/i915_gem.c            |  16 ++--
>  drivers/gpu/drm/i915/i915_gem_context.c    |   6 +-
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   6 +-
>  drivers/gpu/drm/i915/i915_gem_gtt.c        |   6 +-
>  drivers/gpu/drm/i915/i915_gem_request.c    |   6 +-
>  drivers/gpu/drm/i915/i915_gem_request.h    |   2 +-
>  drivers/gpu/drm/i915/i915_gpu_error.c      |   8 +-
>  drivers/gpu/drm/i915/i915_irq.c            |  14 ++--
>  drivers/gpu/drm/i915/intel_display.c       |  10 +--
>  drivers/gpu/drm/i915/intel_engine_cs.c     |   2 +-
>  drivers/gpu/drm/i915/intel_lrc.c           |  34 ++++----
>  drivers/gpu/drm/i915/intel_mocs.c          |   4 +-
>  drivers/gpu/drm/i915/intel_overlay.c       |   8 +-
>  drivers/gpu/drm/i915/intel_ringbuffer.c    | 128 ++++++++++++++---------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h    |  51 ++++++------
>  17 files changed, 157 insertions(+), 159 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index bde68741809b..dccc72d63dd0 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -1419,7 +1419,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
>  	intel_runtime_pm_get(dev_priv);
>  
>  	for_each_engine_id(engine, dev_priv, id) {
> -		acthd[id] = intel_ring_get_active_head(engine);
> +		acthd[id] = intel_engine_get_active_head(engine);
>  		seqno[id] = intel_engine_get_seqno(engine);
>  	}
>  
> @@ -2017,12 +2017,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
>  	return 0;
>  }
>  
> -static void describe_ctx_ringbuf(struct seq_file *m,
> -				 struct intel_ringbuffer *ringbuf)
> +static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
>  {
>  	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
> -		   ringbuf->space, ringbuf->head, ringbuf->tail,
> -		   ringbuf->last_retired_head);
> +		   ring->space, ring->head, ring->tail,
> +		   ring->last_retired_head);
>  }
>  
>  static int i915_context_status(struct seq_file *m, void *unused)
> @@ -2067,7 +2066,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
>  			if (ce->state)
>  				describe_obj(m, ce->state);
>  			if (ce->ring)
> -				describe_ctx_ringbuf(m, ce->ring);
> +				describe_ctx_ring(m, ce->ring);
>  			seq_putc(m, '\n');
>  		}
>  
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 87e06a6a797a..f32ec6db5bfa 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -518,7 +518,7 @@ struct drm_i915_error_state {
>  		bool waiting;
>  		int num_waiters;
>  		int hangcheck_score;
> -		enum intel_ring_hangcheck_action hangcheck_action;
> +		enum intel_engine_hangcheck_action hangcheck_action;
>  		int num_requests;
>  
>  		/* our own tracking of ring head and tail */
> @@ -894,7 +894,7 @@ struct i915_gem_context {
>  
>  	struct intel_context {
>  		struct drm_i915_gem_object *state;
> -		struct intel_ringbuffer *ring;
> +		struct intel_ring *ring;
>  		struct i915_vma *lrc_vma;
>  		uint32_t *lrc_reg_state;
>  		u64 lrc_desc;
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 40047eb48826..95dbcfd94a80 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2486,7 +2486,7 @@ static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
>  
>  static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
>  {
> -	struct intel_ringbuffer *buffer;
> +	struct intel_ring *ring;
>  
>  	while (!list_empty(&engine->active_list)) {
>  		struct drm_i915_gem_object *obj;
> @@ -2502,7 +2502,7 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
>  	 * (lockless) lookup doesn't try and wait upon the request as we
>  	 * reset it.
>  	 */
> -	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
> +	intel_engine_init_seqno(engine, engine->last_submitted_seqno);
>  
>  	/*
>  	 * Clear the execlists queue up before freeing the requests, as those
> @@ -2541,9 +2541,9 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
>  	 * upon reset is less than when we start. Do one more pass over
>  	 * all the ringbuffers to reset last_retired_head.
>  	 */
> -	list_for_each_entry(buffer, &engine->buffers, link) {
> -		buffer->last_retired_head = buffer->tail;
> -		intel_ring_update_space(buffer);
> +	list_for_each_entry(ring, &engine->buffers, link) {
> +		ring->last_retired_head = ring->tail;
> +		intel_ring_update_space(ring);
>  	}
>  
>  	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
> @@ -2867,7 +2867,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  
>  		i915_gem_object_retire_request(obj, from_req);
>  	} else {
> -		int idx = intel_ring_sync_index(from, to);
> +		int idx = intel_engine_sync_index(from, to);
>  		u32 seqno = i915_gem_request_get_seqno(from_req);
>  
>  		WARN_ON(!to_req);
> @@ -4567,8 +4567,8 @@ int i915_gem_init(struct drm_device *dev)
>  
>  	if (!i915.enable_execlists) {
>  		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
> -		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
> -		dev_priv->gt.stop_engine = intel_stop_engine;
> +		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
> +		dev_priv->gt.stop_engine = intel_engine_stop;

I guess you added renaming gt.*_engine to your TODO already.

Regards, Joonas

>  	} else {
>  		dev_priv->gt.execbuf_submit = intel_execlists_submission;
>  		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index c8bf7b8e959f..d9b861b856dc 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -174,7 +174,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
>  
>  		WARN_ON(ce->pin_count);
>  		if (ce->ring)
> -			intel_ringbuffer_free(ce->ring);
> +			intel_ring_free(ce->ring);
>  
>  		i915_gem_object_put(ce->state);
>  	}
> @@ -552,7 +552,7 @@ static inline int
>  mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  {
>  	struct drm_i915_private *dev_priv = req->i915;
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	u32 flags = hw_flags | MI_MM_SPACE_GTT;
>  	const int num_rings =
>  		/* Use an extended w/a on ivb+ if signalling from other rings */
> @@ -654,7 +654,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  static int remap_l3(struct drm_i915_gem_request *req, int slice)
>  {
>  	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int i, ret;
>  
>  	if (!remap_info)
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 501a1751d432..12adfec2d6a9 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1001,7 +1001,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
>  	/* Unconditionally invalidate gpu caches and ensure that we do flush
>  	 * any residual writes from the previous batch.
>  	 */
> -	return intel_ring_invalidate_all_caches(req);
> +	return intel_engine_invalidate_all_caches(req);
>  }
>  
>  static bool
> @@ -1173,7 +1173,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
>  static int
>  i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret, i;
>  
>  	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
> @@ -1303,7 +1303,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>  
>  	if (params->engine->id == RCS &&
>  	    instp_mode != dev_priv->relative_constants_mode) {
> -		struct intel_ringbuffer *ring = params->request->ring;
> +		struct intel_ring *ring = params->request->ring;
>  
>  		ret = intel_ring_begin(params->request, 4);
>  		if (ret)
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index a48329baf432..01b825169164 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
>  			  unsigned entry,
>  			  dma_addr_t addr)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	BUG_ON(entry >= 4);
> @@ -1660,7 +1660,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
>  static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  			 struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	/* NB: TLBs must be flushed and invalidated before a switch */
> @@ -1688,7 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
>  			  struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	/* NB: TLBs must be flushed and invalidated before a switch */
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 0f415606a383..54b27369225a 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -244,7 +244,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
>  
>  	/* Finally reset hw state */
>  	for_each_engine(engine, dev_priv)
> -		intel_ring_init_seqno(engine, seqno);
> +		intel_engine_init_seqno(engine, seqno);
>  
>  	return 0;
>  }
> @@ -425,7 +425,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  			bool flush_caches)
>  {
>  	struct intel_engine_cs *engine;
> -	struct intel_ringbuffer *ring;
> +	struct intel_ring *ring;
>  	u32 request_start;
>  	u32 reserved_tail;
>  	int ret;
> @@ -456,7 +456,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  		if (i915.enable_execlists)
>  			ret = logical_ring_flush_all_caches(request);
>  		else
> -			ret = intel_ring_flush_all_caches(request);
> +			ret = intel_engine_flush_all_caches(request);
>  		/* Not allowed to fail! */
>  		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
> index 68868d825d9d..382ca5a163eb 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.h
> +++ b/drivers/gpu/drm/i915/i915_gem_request.h
> @@ -61,7 +61,7 @@ struct drm_i915_gem_request {
>  	 */
>  	struct i915_gem_context *ctx;
>  	struct intel_engine_cs *engine;
> -	struct intel_ringbuffer *ring;
> +	struct intel_ring *ring;
>  	struct intel_signal_node signaling;
>  
>  	/** GEM sequence number associated with the previous request,
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 09997c6adcd2..2fbe81d51af1 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -221,7 +221,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
>  	}
>  }
>  
> -static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
> +static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
>  {
>  	switch (a) {
>  	case HANGCHECK_IDLE:
> @@ -882,7 +882,7 @@ static void gen8_record_semaphore_state(struct drm_i915_private *dev_priv,
>  		signal_offset = (GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1))
>  				/ 4;
>  		tmp = error->semaphore_obj->pages[0];
> -		idx = intel_ring_sync_index(engine, to);
> +		idx = intel_engine_sync_index(engine, to);
>  
>  		ering->semaphore_mboxes[idx] = tmp[signal_offset];
>  		ering->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
> @@ -983,7 +983,7 @@ static void i915_record_ring_state(struct drm_i915_private *dev_priv,
>  
>  	ering->waiting = intel_engine_has_waiter(engine);
>  	ering->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
> -	ering->acthd = intel_ring_get_active_head(engine);
> +	ering->acthd = intel_engine_get_active_head(engine);
>  	ering->seqno = intel_engine_get_seqno(engine);
>  	ering->last_seqno = engine->last_submitted_seqno;
>  	ering->start = I915_READ_START(engine);
> @@ -1091,7 +1091,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
>  		request = i915_gem_find_active_request(engine);
>  		if (request) {
>  			struct i915_address_space *vm;
> -			struct intel_ringbuffer *ring;
> +			struct intel_ring *ring;
>  
>  			vm = request->ctx->ppgtt ?
>  				&request->ctx->ppgtt->base : &ggtt->base;
> diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
> index 7104dc1463eb..5903111db718 100644
> --- a/drivers/gpu/drm/i915/i915_irq.c
> +++ b/drivers/gpu/drm/i915/i915_irq.c
> @@ -2993,7 +2993,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
>  	return stuck;
>  }
>  
> -static enum intel_ring_hangcheck_action
> +static enum intel_engine_hangcheck_action
>  head_stuck(struct intel_engine_cs *engine, u64 acthd)
>  {
>  	if (acthd != engine->hangcheck.acthd) {
> @@ -3011,11 +3011,11 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
>  	return HANGCHECK_HUNG;
>  }
>  
> -static enum intel_ring_hangcheck_action
> -ring_stuck(struct intel_engine_cs *engine, u64 acthd)
> +static enum intel_engine_hangcheck_action
> +engine_stuck(struct intel_engine_cs *engine, u64 acthd)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
> -	enum intel_ring_hangcheck_action ha;
> +	enum intel_engine_hangcheck_action ha;
>  	u32 tmp;
>  
>  	ha = head_stuck(engine, acthd);
> @@ -3124,7 +3124,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
>  		if (engine->irq_seqno_barrier)
>  			engine->irq_seqno_barrier(engine);
>  
> -		acthd = intel_ring_get_active_head(engine);
> +		acthd = intel_engine_get_active_head(engine);
>  		seqno = intel_engine_get_seqno(engine);
>  
>  		/* Reset stuck interrupts between batch advances */
> @@ -3154,8 +3154,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
>  				 * being repeatedly kicked and so responsible
>  				 * for stalling the machine.
>  				 */
> -				engine->hangcheck.action = ring_stuck(engine,
> -								      acthd);
> +				engine->hangcheck.action =
> +					engine_stuck(engine, acthd);
>  
>  				switch (engine->hangcheck.action) {
>  				case HANGCHECK_IDLE:
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index d1932840a268..bff172c45ff7 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -11123,7 +11123,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	u32 flip_mask;
>  	int ret;
> @@ -11157,7 +11157,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	u32 flip_mask;
>  	int ret;
> @@ -11188,7 +11188,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	struct drm_i915_private *dev_priv = to_i915(dev);
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	uint32_t pf, pipesrc;
> @@ -11226,7 +11226,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	struct drm_i915_private *dev_priv = to_i915(dev);
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	uint32_t pf, pipesrc;
> @@ -11261,7 +11261,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
>  				 struct drm_i915_gem_request *req,
>  				 uint32_t flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
>  	uint32_t plane_bit = 0;
>  	int len, ret;
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index f4a35ec78481..f00bd55fe582 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -154,7 +154,7 @@ cleanup:
>  		if (i915.enable_execlists)
>  			intel_logical_ring_cleanup(&dev_priv->engine[i]);
>  		else
> -			intel_cleanup_engine(&dev_priv->engine[i]);
> +			intel_engine_cleanup(&dev_priv->engine[i]);
>  	}
>  
>  	return ret;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 7bc1d0c92799..5b9f98f6ed87 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -767,7 +767,7 @@ err_unpin:
>  static int
>  intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>  {
> -	struct intel_ringbuffer *ring = request->ring;
> +	struct intel_ring *ring = request->ring;
>  	struct intel_engine_cs *engine = request->engine;
>  
>  	intel_ring_advance(ring);
> @@ -818,7 +818,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>  	struct drm_device       *dev = params->dev;
>  	struct intel_engine_cs *engine = params->engine;
>  	struct drm_i915_private *dev_priv = to_i915(dev);
> -	struct intel_ringbuffer *ring = params->request->ring;
> +	struct intel_ring *ring = params->request->ring;
>  	u64 exec_start;
>  	int instp_mode;
>  	u32 instp_mask;
> @@ -973,7 +973,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>  
>  	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
>  
> -	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ring);
> +	ret = intel_pin_and_map_ring(dev_priv, ce->ring);
>  	if (ret)
>  		goto unpin_map;
>  
> @@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
>  	if (--ce->pin_count)
>  		return;
>  
> -	intel_unpin_ringbuffer_obj(ce->ring);
> +	intel_unpin_ring(ce->ring);
>  
>  	i915_gem_object_unpin_map(ce->state);
>  	i915_gem_object_ggtt_unpin(ce->state);
> @@ -1027,7 +1027,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  {
>  	int ret, i;
>  	struct intel_engine_cs *engine = req->engine;
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	struct i915_workarounds *w = &req->i915->workarounds;
>  
>  	if (w->count == 0)
> @@ -1543,7 +1543,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
>  static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>  {
>  	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
>  	int i, ret;
>  
> @@ -1570,7 +1570,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>  static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
>  			      u64 offset, unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
>  	int ret;
>  
> @@ -1627,8 +1627,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
>  			   u32 invalidate_domains,
>  			   u32 unused)
>  {
> -	struct intel_ringbuffer *ring = request->ring;
> -	uint32_t cmd;
> +	struct intel_ring *ring = request->ring;
> +	u32 cmd;
>  	int ret;
>  
>  	ret = intel_ring_begin(request, 4);
> @@ -1665,7 +1665,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
>  				  u32 invalidate_domains,
>  				  u32 flush_domains)
>  {
> -	struct intel_ringbuffer *ring = request->ring;
> +	struct intel_ring *ring = request->ring;
>  	struct intel_engine_cs *engine = request->engine;
>  	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	bool vf_flush_wa = false, dc_flush_wa = false;
> @@ -1779,7 +1779,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
>  
>  static int gen8_emit_request(struct drm_i915_gem_request *request)
>  {
> -	struct intel_ringbuffer *ring = request->ring;
> +	struct intel_ring *ring = request->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
> @@ -1802,7 +1802,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
>  
>  static int gen8_emit_request_render(struct drm_i915_gem_request *request)
>  {
> -	struct intel_ringbuffer *ring = request->ring;
> +	struct intel_ring *ring = request->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
> @@ -2154,7 +2154,7 @@ static int
>  populate_lr_context(struct i915_gem_context *ctx,
>  		    struct drm_i915_gem_object *ctx_obj,
>  		    struct intel_engine_cs *engine,
> -		    struct intel_ringbuffer *ringbuf)
> +		    struct intel_ring *ring)
>  {
>  	struct drm_i915_private *dev_priv = ctx->i915;
>  	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
> @@ -2207,7 +2207,7 @@ populate_lr_context(struct i915_gem_context *ctx,
>  		       RING_START(engine->mmio_base), 0);
>  	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
>  		       RING_CTL(engine->mmio_base),
> -		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
> +		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
>  	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
>  		       RING_BBADDR_UDW(engine->mmio_base), 0);
>  	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
> @@ -2335,7 +2335,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
>  	struct drm_i915_gem_object *ctx_obj;
>  	struct intel_context *ce = &ctx->engine[engine->id];
>  	uint32_t context_size;
> -	struct intel_ringbuffer *ring;
> +	struct intel_ring *ring;
>  	int ret;
>  
>  	WARN_ON(ce->state);
> @@ -2351,7 +2351,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
>  		return PTR_ERR(ctx_obj);
>  	}
>  
> -	ring = intel_engine_create_ringbuffer(engine, ctx->ring_size);
> +	ring = intel_engine_create_ring(engine, ctx->ring_size);
>  	if (IS_ERR(ring)) {
>  		ret = PTR_ERR(ring);
>  		goto error_deref_obj;
> @@ -2370,7 +2370,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
>  	return 0;
>  
>  error_ringbuf:
> -	intel_ringbuffer_free(ring);
> +	intel_ring_free(ring);
>  error_deref_obj:
>  	i915_gem_object_put(ctx_obj);
>  	ce->ring = NULL;
> diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
> index fe63c7e79fb1..58db0e330ee6 100644
> --- a/drivers/gpu/drm/i915/intel_mocs.c
> +++ b/drivers/gpu/drm/i915/intel_mocs.c
> @@ -276,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
>  static int emit_mocs_control_table(struct drm_i915_gem_request *req,
>  				   const struct drm_i915_mocs_table *table)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	enum intel_engine_id engine = req->engine->id;
>  	unsigned int index;
>  	int ret;
> @@ -336,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
>  static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
>  				const struct drm_i915_mocs_table *table)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	unsigned int i;
>  	int ret;
>  
> diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> index 84b8f74bd13c..a5071e281088 100644
> --- a/drivers/gpu/drm/i915/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/intel_overlay.c
> @@ -235,7 +235,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
>  	struct drm_i915_private *dev_priv = overlay->i915;
>  	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
>  	struct drm_i915_gem_request *req;
> -	struct intel_ringbuffer *ring;
> +	struct intel_ring *ring;
>  	int ret;
>  
>  	WARN_ON(overlay->active);
> @@ -270,7 +270,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
>  	struct drm_i915_private *dev_priv = overlay->i915;
>  	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
>  	struct drm_i915_gem_request *req;
> -	struct intel_ringbuffer *ring;
> +	struct intel_ring *ring;
>  	u32 flip_addr = overlay->flip_addr;
>  	u32 tmp;
>  	int ret;
> @@ -340,7 +340,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
>  	struct drm_i915_private *dev_priv = overlay->i915;
>  	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
>  	struct drm_i915_gem_request *req;
> -	struct intel_ringbuffer *ring;
> +	struct intel_ring *ring;
>  	u32 flip_addr = overlay->flip_addr;
>  	int ret;
>  
> @@ -426,7 +426,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
>  	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
>  		/* synchronous slowpath */
>  		struct drm_i915_gem_request *req;
> -		struct intel_ringbuffer *ring;
> +		struct intel_ring *ring;
>  
>  		req = i915_gem_request_alloc(engine, NULL);
>  		if (IS_ERR(req))
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 3cfbfe40f6e8..9aaf81ba66c8 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -47,7 +47,7 @@ int __intel_ring_space(int head, int tail, int size)
>  	return space - I915_RING_FREE_SPACE;
>  }
>  
> -void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
> +void intel_ring_update_space(struct intel_ring *ringbuf)
>  {
>  	if (ringbuf->last_retired_head != -1) {
>  		ringbuf->head = ringbuf->last_retired_head;
> @@ -60,9 +60,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
>  
>  static void __intel_engine_submit(struct intel_engine_cs *engine)
>  {
> -	struct intel_ringbuffer *ringbuf = engine->buffer;
> -	ringbuf->tail &= ringbuf->size - 1;
> -	engine->write_tail(engine, ringbuf->tail);
> +	struct intel_ring *ring = engine->buffer;
> +
> +	ring->tail &= ring->size - 1;
> +	engine->write_tail(engine, ring->tail);
>  }
>  
>  static int
> @@ -70,7 +71,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32	invalidate_domains,
>  		       u32	flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	u32 cmd;
>  	int ret;
>  
> @@ -97,7 +98,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32	invalidate_domains,
>  		       u32	flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	u32 cmd;
>  	int ret;
>  
> @@ -187,7 +188,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
>  static int
>  intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	u32 scratch_addr =
>  		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	int ret;
> @@ -224,7 +225,7 @@ static int
>  gen6_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32 invalidate_domains, u32 flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	u32 scratch_addr =
>  		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	u32 flags = 0;
> @@ -277,7 +278,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
>  static int
>  gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 4);
> @@ -299,7 +300,7 @@ static int
>  gen7_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32 invalidate_domains, u32 flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	u32 scratch_addr =
>  		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
>  	u32 flags = 0;
> @@ -364,7 +365,7 @@ static int
>  gen8_emit_pipe_control(struct drm_i915_gem_request *req,
>  		       u32 flags, u32 scratch_addr)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 6);
> @@ -427,7 +428,7 @@ static void ring_write_tail(struct intel_engine_cs *engine,
>  	I915_WRITE_TAIL(engine, value);
>  }
>  
> -u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
> +u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
>  	u64 acthd;
> @@ -553,8 +554,8 @@ static bool stop_ring(struct intel_engine_cs *engine)
>  static int init_ring_common(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
> -	struct intel_ringbuffer *ringbuf = engine->buffer;
> -	struct drm_i915_gem_object *obj = ringbuf->obj;
> +	struct intel_ring *ring = engine->buffer;
> +	struct drm_i915_gem_object *obj = ring->obj;
>  	int ret = 0;
>  
>  	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
> @@ -604,7 +605,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
>  	(void)I915_READ_HEAD(engine);
>  
>  	I915_WRITE_CTL(engine,
> -			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
> +			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
>  			| RING_VALID);
>  
>  	/* If the head is still not zero, the ring is dead */
> @@ -623,10 +624,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
>  		goto out;
>  	}
>  
> -	ringbuf->last_retired_head = -1;
> -	ringbuf->head = I915_READ_HEAD(engine);
> -	ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
> -	intel_ring_update_space(ringbuf);
> +	ring->last_retired_head = -1;
> +	ring->head = I915_READ_HEAD(engine);
> +	ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
> +	intel_ring_update_space(ring);
>  
>  	intel_engine_init_hangcheck(engine);
>  
> @@ -680,7 +681,7 @@ err:
>  
>  static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	struct i915_workarounds *w = &req->i915->workarounds;
>  	int ret, i;
>  
> @@ -688,7 +689,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  		return 0;
>  
>  	req->engine->gpu_caches_dirty = true;
> -	ret = intel_ring_flush_all_caches(req);
> +	ret = intel_engine_flush_all_caches(req);
>  	if (ret)
>  		return ret;
>  
> @@ -706,7 +707,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
>  	intel_ring_advance(ring);
>  
>  	req->engine->gpu_caches_dirty = true;
> -	ret = intel_ring_flush_all_caches(req);
> +	ret = intel_engine_flush_all_caches(req);
>  	if (ret)
>  		return ret;
>  
> @@ -1324,7 +1325,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
>  			   unsigned int num_dwords)
>  {
>  #define MBOX_UPDATE_DWORDS 8
> -	struct intel_ringbuffer *signaller = signaller_req->ring;
> +	struct intel_ring *signaller = signaller_req->ring;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *waiter;
>  	enum intel_engine_id id;
> @@ -1366,7 +1367,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  			   unsigned int num_dwords)
>  {
>  #define MBOX_UPDATE_DWORDS 6
> -	struct intel_ringbuffer *signaller = signaller_req->ring;
> +	struct intel_ring *signaller = signaller_req->ring;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *waiter;
>  	enum intel_engine_id id;
> @@ -1405,7 +1406,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  		       unsigned int num_dwords)
>  {
> -	struct intel_ringbuffer *signaller = signaller_req->ring;
> +	struct intel_ring *signaller = signaller_req->ring;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *useless;
>  	enum intel_engine_id id;
> @@ -1449,7 +1450,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  static int
>  gen6_add_request(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	if (req->engine->semaphore.signal)
> @@ -1473,7 +1474,7 @@ static int
>  gen8_render_add_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_engine_cs *engine = req->engine;
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	if (engine->semaphore.signal)
> @@ -1518,7 +1519,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
>  	       struct intel_engine_cs *signaller,
>  	       u32 seqno)
>  {
> -	struct intel_ringbuffer *waiter = waiter_req->ring;
> +	struct intel_ring *waiter = waiter_req->ring;
>  	struct drm_i915_private *dev_priv = waiter_req->i915;
>  	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
>  	struct i915_hw_ppgtt *ppgtt;
> @@ -1552,7 +1553,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
>  	       struct intel_engine_cs *signaller,
>  	       u32 seqno)
>  {
> -	struct intel_ringbuffer *waiter = waiter_req->ring;
> +	struct intel_ring *waiter = waiter_req->ring;
>  	u32 dw1 = MI_SEMAPHORE_MBOX |
>  		  MI_SEMAPHORE_COMPARE |
>  		  MI_SEMAPHORE_REGISTER;
> @@ -1686,7 +1687,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>  	       u32     invalidate_domains,
>  	       u32     flush_domains)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -1702,7 +1703,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>  static int
>  i9xx_add_request(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 4);
> @@ -1780,7 +1781,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 u64 offset, u32 length,
>  			 unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -1807,7 +1808,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 u64 offset, u32 len,
>  			 unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	u32 cs_offset = req->engine->scratch.gtt_offset;
>  	int ret;
>  
> @@ -1869,7 +1870,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			 u64 offset, u32 len,
>  			 unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -1977,7 +1978,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
>  	return 0;
>  }
>  
> -void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
> +void intel_unpin_ring(struct intel_ring *ringbuf)
>  {
>  	GEM_BUG_ON(!ringbuf->vma);
>  	GEM_BUG_ON(!ringbuf->vaddr);
> @@ -1992,8 +1993,8 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
>  	ringbuf->vma = NULL;
>  }
>  
> -int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
> -				     struct intel_ringbuffer *ringbuf)
> +int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
> +			   struct intel_ring *ringbuf)
>  {
>  	struct drm_i915_gem_object *obj = ringbuf->obj;
>  	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
> @@ -2045,14 +2046,14 @@ err_unpin:
>  	return ret;
>  }
>  
> -static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
> +static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
>  {
>  	i915_gem_object_put(ringbuf->obj);
>  	ringbuf->obj = NULL;
>  }
>  
>  static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
> -				      struct intel_ringbuffer *ringbuf)
> +				      struct intel_ring *ringbuf)
>  {
>  	struct drm_i915_gem_object *obj;
>  
> @@ -2072,10 +2073,10 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
>  	return 0;
>  }
>  
> -struct intel_ringbuffer *
> -intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
> +struct intel_ring *
> +intel_engine_create_ring(struct intel_engine_cs *engine, int size)
>  {
> -	struct intel_ringbuffer *ring;
> +	struct intel_ring *ring;
>  	int ret;
>  
>  	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
> @@ -2113,7 +2114,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
>  }
>  
>  void
> -intel_ringbuffer_free(struct intel_ringbuffer *ring)
> +intel_ring_free(struct intel_ring *ring)
>  {
>  	intel_destroy_ringbuffer_obj(ring);
>  	list_del(&ring->link);
> @@ -2174,7 +2175,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
>  static int intel_init_engine(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
> -	struct intel_ringbuffer *ringbuf;
> +	struct intel_ring *ringbuf;
>  	int ret;
>  
>  	WARN_ON(engine->buffer);
> @@ -2199,7 +2200,7 @@ static int intel_init_engine(struct intel_engine_cs *engine)
>  	if (ret)
>  		goto error;
>  
> -	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
> +	ringbuf = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
>  	if (IS_ERR(ringbuf)) {
>  		ret = PTR_ERR(ringbuf);
>  		goto error;
> @@ -2217,7 +2218,7 @@ static int intel_init_engine(struct intel_engine_cs *engine)
>  			goto error;
>  	}
>  
> -	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
> +	ret = intel_pin_and_map_ring(dev_priv, ringbuf);
>  	if (ret) {
>  		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
>  				engine->name, ret);
> @@ -2228,11 +2229,11 @@ static int intel_init_engine(struct intel_engine_cs *engine)
>  	return 0;
>  
>  error:
> -	intel_cleanup_engine(engine);
> +	intel_engine_cleanup(engine);
>  	return ret;
>  }
>  
> -void intel_cleanup_engine(struct intel_engine_cs *engine)
> +void intel_engine_cleanup(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv;
>  
> @@ -2242,11 +2243,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
>  	dev_priv = engine->i915;
>  
>  	if (engine->buffer) {
> -		intel_stop_engine(engine);
> +		intel_engine_stop(engine);
>  		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
>  
> -		intel_unpin_ringbuffer_obj(engine->buffer);
> -		intel_ringbuffer_free(engine->buffer);
> +		intel_unpin_ring(engine->buffer);
> +		intel_ring_free(engine->buffer);
>  		engine->buffer = NULL;
>  	}
>  
> @@ -2309,7 +2310,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
>  
>  static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	struct intel_engine_cs *engine = req->engine;
>  	struct drm_i915_gem_request *target;
>  
> @@ -2354,7 +2355,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
>  
>  int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int remain_actual = ring->size - ring->tail;
>  	int remain_usable = ring->effective_size - ring->tail;
>  	int bytes = num_dwords * sizeof(u32);
> @@ -2411,7 +2412,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>  /* Align the ring tail to a cacheline boundary */
>  int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int num_dwords =
>  		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
>  	int ret;
> @@ -2432,7 +2433,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
>  	return 0;
>  }
>  
> -void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
> +void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
>  
> @@ -2518,7 +2519,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
>  static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
>  			       u32 invalidate, u32 flush)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	uint32_t cmd;
>  	int ret;
>  
> @@ -2564,7 +2565,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			      u64 offset, u32 len,
>  			      unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	bool ppgtt = USES_PPGTT(req->i915) &&
>  			!(dispatch_flags & I915_DISPATCH_SECURE);
>  	int ret;
> @@ -2590,7 +2591,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			     u64 offset, u32 len,
>  			     unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -2615,7 +2616,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  			      u64 offset, u32 len,
>  			      unsigned dispatch_flags)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	int ret;
>  
>  	ret = intel_ring_begin(req, 2);
> @@ -2638,7 +2639,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  static int gen6_ring_flush(struct drm_i915_gem_request *req,
>  			   u32 invalidate, u32 flush)
>  {
> -	struct intel_ringbuffer *ring = req->ring;
> +	struct intel_ring *ring = req->ring;
>  	uint32_t cmd;
>  	int ret;
>  
> @@ -2955,7 +2956,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
>  }
>  
>  int
> -intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
> +intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
>  {
>  	struct intel_engine_cs *engine = req->engine;
>  	int ret;
> @@ -2974,7 +2975,7 @@ intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
>  }
>  
>  int
> -intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
> +intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
>  {
>  	struct intel_engine_cs *engine = req->engine;
>  	uint32_t flush_domains;
> @@ -2994,8 +2995,7 @@ intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
>  	return 0;
>  }
>  
> -void
> -intel_stop_engine(struct intel_engine_cs *engine)
> +void intel_engine_stop(struct intel_engine_cs *engine)
>  {
>  	int ret;
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 427fb19a7a2e..91d0aea695b2 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -62,7 +62,7 @@ struct  intel_hw_status_page {
>  	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
>  	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
>  
> -enum intel_ring_hangcheck_action {
> +enum intel_engine_hangcheck_action {
>  	HANGCHECK_IDLE = 0,
>  	HANGCHECK_WAIT,
>  	HANGCHECK_ACTIVE,
> @@ -72,17 +72,17 @@ enum intel_ring_hangcheck_action {
>  
>  #define HANGCHECK_SCORE_RING_HUNG 31
>  
> -struct intel_ring_hangcheck {
> +struct intel_engine_hangcheck {
>  	u64 acthd;
>  	unsigned long user_interrupts;
>  	u32 seqno;
>  	int score;
> -	enum intel_ring_hangcheck_action action;
> +	enum intel_engine_hangcheck_action action;
>  	int deadlock;
>  	u32 instdone[I915_NUM_INSTDONE_REG];
>  };
>  
> -struct intel_ringbuffer {
> +struct intel_ring {
>  	struct drm_i915_gem_object *obj;
>  	void *vaddr;
>  	struct i915_vma *vma;
> @@ -149,7 +149,7 @@ struct intel_engine_cs {
>  	u64 fence_context;
>  	u32		mmio_base;
>  	unsigned int irq_shift;
> -	struct intel_ringbuffer *buffer;
> +	struct intel_ring *buffer;
>  	struct list_head buffers;
>  
>  	/* Rather than have every client wait upon all user interrupts,
> @@ -329,7 +329,7 @@ struct intel_engine_cs {
>  
>  	struct i915_gem_context *last_context;
>  
> -	struct intel_ring_hangcheck hangcheck;
> +	struct intel_engine_hangcheck hangcheck;
>  
>  	struct {
>  		struct drm_i915_gem_object *obj;
> @@ -376,8 +376,8 @@ intel_engine_flag(const struct intel_engine_cs *engine)
>  }
>  
>  static inline u32
> -intel_ring_sync_index(struct intel_engine_cs *engine,
> -		      struct intel_engine_cs *other)
> +intel_engine_sync_index(struct intel_engine_cs *engine,
> +			struct intel_engine_cs *other)
>  {
>  	int idx;
>  
> @@ -439,45 +439,44 @@ intel_write_status_page(struct intel_engine_cs *engine,
>  #define I915_GEM_HWS_SCRATCH_INDEX	0x40
>  #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
>  
> -struct intel_ringbuffer *
> -intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
> -int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
> -				     struct intel_ringbuffer *ringbuf);
> -void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
> -void intel_ringbuffer_free(struct intel_ringbuffer *ring);
> +struct intel_ring *
> +intel_engine_create_ring(struct intel_engine_cs *engine, int size);
> +int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
> +			   struct intel_ring *ring);
> +void intel_unpin_ring(struct intel_ring *ring);
> +void intel_ring_free(struct intel_ring *ring);
>  
> -void intel_stop_engine(struct intel_engine_cs *engine);
> -void intel_cleanup_engine(struct intel_engine_cs *engine);
> +void intel_engine_stop(struct intel_engine_cs *engine);
> +void intel_engine_cleanup(struct intel_engine_cs *engine);
>  
>  int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
>  
>  int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
>  int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
>  
> -static inline void intel_ring_emit(struct intel_ringbuffer *ring, u32 data)
> +static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
>  {
>  	*(uint32_t *)(ring->vaddr + ring->tail) = data;
>  	ring->tail += 4;
>  }
>  
> -static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring,
> -				       i915_reg_t reg)
> +static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
>  {
>  	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
>  }
>  
> -static inline void intel_ring_advance(struct intel_ringbuffer *ring)
> +static inline void intel_ring_advance(struct intel_ring *ring)
>  {
>  	ring->tail &= ring->size - 1;
>  }
>  
>  int __intel_ring_space(int head, int tail, int size);
> -void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
> +void intel_ring_update_space(struct intel_ring *ringbuf);
>  
>  int __must_check intel_engine_idle(struct intel_engine_cs *engine);
> -void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
> -int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
> -int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
> +void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
> +int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
> +int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
>  
>  int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
>  void intel_fini_pipe_control(struct intel_engine_cs *engine);
> @@ -491,7 +490,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
>  int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
>  int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
>  
> -u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
> +u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
>  static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
>  {
>  	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
> @@ -499,7 +498,7 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
>  
>  int init_workarounds_ring(struct intel_engine_cs *engine);
>  
> -static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
> +static inline u32 intel_ring_get_tail(struct intel_ring *ringbuf)
>  {
>  	return ringbuf->tail;
>  }
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 06/18] drm/i915: Rename residual ringbuf parameters
  2016-07-20 13:11 ` [PATCH 06/18] drm/i915: Rename residual ringbuf parameters Chris Wilson
@ 2016-07-21 12:01   ` Joonas Lahtinen
  2016-07-21 12:20     ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 12:01 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> Now that we have a clear ring/engine split and a struct intel_ring, we
> no longer need the stopgap ringbuf names.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Why is this a separate patch? Please also list the renames here in the
commit message; with that,

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas

> ---
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 66 ++++++++++++++++-----------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  6 +--
>  2 files changed, 36 insertions(+), 36 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 9aaf81ba66c8..625fae42dc0c 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -47,15 +47,15 @@ int __intel_ring_space(int head, int tail, int size)
>  	return space - I915_RING_FREE_SPACE;
>  }
>  
> -void intel_ring_update_space(struct intel_ring *ringbuf)
> +void intel_ring_update_space(struct intel_ring *ring)
>  {
> -	if (ringbuf->last_retired_head != -1) {
> -		ringbuf->head = ringbuf->last_retired_head;
> -		ringbuf->last_retired_head = -1;
> +	if (ring->last_retired_head != -1) {
> +		ring->head = ring->last_retired_head;
> +		ring->last_retired_head = -1;
>  	}
>  
> -	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
> -					    ringbuf->tail, ringbuf->size);
> +	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
> +					 ring->tail, ring->size);
>  }
>  
>  static void __intel_engine_submit(struct intel_engine_cs *engine)
> @@ -1978,25 +1978,25 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
>  	return 0;
>  }
>  
> -void intel_unpin_ring(struct intel_ring *ringbuf)
> +void intel_unpin_ring(struct intel_ring *ring)
>  {
> -	GEM_BUG_ON(!ringbuf->vma);
> -	GEM_BUG_ON(!ringbuf->vaddr);
> +	GEM_BUG_ON(!ring->vma);
> +	GEM_BUG_ON(!ring->vaddr);
>  
> -	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
> -		i915_gem_object_unpin_map(ringbuf->obj);
> +	if (HAS_LLC(ring->obj->base.dev) && !ring->obj->stolen)
> +		i915_gem_object_unpin_map(ring->obj);
>  	else
> -		i915_vma_unpin_iomap(ringbuf->vma);
> -	ringbuf->vaddr = NULL;
> +		i915_vma_unpin_iomap(ring->vma);
> +	ring->vaddr = NULL;
>  
> -	i915_gem_object_ggtt_unpin(ringbuf->obj);
> -	ringbuf->vma = NULL;
> +	i915_gem_object_ggtt_unpin(ring->obj);
> +	ring->vma = NULL;
>  }
>  
>  int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
> -			   struct intel_ring *ringbuf)
> +			   struct intel_ring *ring)
>  {
> -	struct drm_i915_gem_object *obj = ringbuf->obj;
> +	struct drm_i915_gem_object *obj = ring->obj;
>  	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
>  	unsigned flags = PIN_OFFSET_BIAS | 4096;
>  	void *addr;
> @@ -2037,8 +2037,8 @@ int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
>  		}
>  	}
>  
> -	ringbuf->vaddr = addr;
> -	ringbuf->vma = i915_gem_obj_to_ggtt(obj);
> +	ring->vaddr = addr;
> +	ring->vma = i915_gem_obj_to_ggtt(obj);
>  	return 0;
>  
>  err_unpin:
> @@ -2046,29 +2046,29 @@ err_unpin:
>  	return ret;
>  }
>  
> -static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
> +static void intel_destroy_ringbuffer_obj(struct intel_ring *ring)
>  {
> -	i915_gem_object_put(ringbuf->obj);
> -	ringbuf->obj = NULL;
> +	i915_gem_object_put(ring->obj);
> +	ring->obj = NULL;
>  }
>  
>  static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
> -				      struct intel_ring *ringbuf)
> +				      struct intel_ring *ring)
>  {
>  	struct drm_i915_gem_object *obj;
>  
>  	obj = NULL;
>  	if (!HAS_LLC(dev))
> -		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
> +		obj = i915_gem_object_create_stolen(dev, ring->size);
>  	if (obj == NULL)
> -		obj = i915_gem_object_create(dev, ringbuf->size);
> +		obj = i915_gem_object_create(dev, ring->size);
>  	if (IS_ERR(obj))
>  		return PTR_ERR(obj);
>  
>  	/* mark ring buffers as read-only from GPU side by default */
>  	obj->gt_ro = 1;
>  
> -	ringbuf->obj = obj;
> +	ring->obj = obj;
>  
>  	return 0;
>  }
> @@ -2175,7 +2175,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
>  static int intel_init_engine(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
> -	struct intel_ring *ringbuf;
> +	struct intel_ring *ring;
>  	int ret;
>  
>  	WARN_ON(engine->buffer);
> @@ -2200,12 +2200,12 @@ static int intel_init_engine(struct intel_engine_cs *engine)
>  	if (ret)
>  		goto error;
>  
> -	ringbuf = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
> -	if (IS_ERR(ringbuf)) {
> -		ret = PTR_ERR(ringbuf);
> +	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
> +	if (IS_ERR(ring)) {
> +		ret = PTR_ERR(ring);
>  		goto error;
>  	}
> -	engine->buffer = ringbuf;
> +	engine->buffer = ring;
>  
>  	if (I915_NEED_GFX_HWS(dev_priv)) {
>  		ret = init_status_page(engine);
> @@ -2218,11 +2218,11 @@ static int intel_init_engine(struct intel_engine_cs *engine)
>  			goto error;
>  	}
>  
> -	ret = intel_pin_and_map_ring(dev_priv, ringbuf);
> +	ret = intel_pin_and_map_ring(dev_priv, ring);
>  	if (ret) {
>  		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
>  				engine->name, ret);
> -		intel_destroy_ringbuffer_obj(ringbuf);
> +		intel_destroy_ringbuffer_obj(ring);
>  		goto error;
>  	}
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 91d0aea695b2..76fc9bd70873 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -471,7 +471,7 @@ static inline void intel_ring_advance(struct intel_ring *ring)
>  }
>  
>  int __intel_ring_space(int head, int tail, int size);
> -void intel_ring_update_space(struct intel_ring *ringbuf);
> +void intel_ring_update_space(struct intel_ring *ring);
>  
>  int __must_check intel_engine_idle(struct intel_engine_cs *engine);
>  void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
> @@ -498,9 +498,9 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
>  
>  int init_workarounds_ring(struct intel_engine_cs *engine);
>  
> -static inline u32 intel_ring_get_tail(struct intel_ring *ringbuf)
> +static inline u32 intel_ring_get_tail(struct intel_ring *ring)
>  {
> -	return ringbuf->tail;
> +	return ring->tail;
>  }
>  
>  /*
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 07/18] drm/i915: Rename intel_pin_and_map_ring()
  2016-07-20 13:11 ` [PATCH 07/18] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
@ 2016-07-21 12:02   ` Joonas Lahtinen
  0 siblings, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 12:02 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> For more consistent oop-naming, we would use intel_ring_verb, so pick
> intel_ring_pin() and intel_ring_unpin().
> 

The renames done are listed clearly here, then;

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/intel_lrc.c        |  4 ++--
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 38 ++++++++++++++++-----------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  5 ++---
>  3 files changed, 23 insertions(+), 24 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 5b9f98f6ed87..33d5916a6b0d 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -973,7 +973,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
>  
>  	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
>  
> -	ret = intel_pin_and_map_ring(dev_priv, ce->ring);
> +	ret = intel_ring_pin(ce->ring);
>  	if (ret)
>  		goto unpin_map;
>  
> @@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
>  	if (--ce->pin_count)
>  		return;
>  
> -	intel_unpin_ring(ce->ring);
> +	intel_ring_unpin(ce->ring);
>  
>  	i915_gem_object_unpin_map(ce->state);
>  	i915_gem_object_ggtt_unpin(ce->state);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 625fae42dc0c..e7050b408ab7 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1978,24 +1978,9 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
>  	return 0;
>  }
>  
> -void intel_unpin_ring(struct intel_ring *ring)
> -{
> -	GEM_BUG_ON(!ring->vma);
> -	GEM_BUG_ON(!ring->vaddr);
> -
> -	if (HAS_LLC(ring->obj->base.dev) && !ring->obj->stolen)
> -		i915_gem_object_unpin_map(ring->obj);
> -	else
> -		i915_vma_unpin_iomap(ring->vma);
> -	ring->vaddr = NULL;
> -
> -	i915_gem_object_ggtt_unpin(ring->obj);
> -	ring->vma = NULL;
> -}
> -
> -int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
> -			   struct intel_ring *ring)
> +int intel_ring_pin(struct intel_ring *ring)
>  {
> +	struct drm_i915_private *dev_priv = ring->engine->i915;
>  	struct drm_i915_gem_object *obj = ring->obj;
>  	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
>  	unsigned flags = PIN_OFFSET_BIAS | 4096;
> @@ -2046,6 +2031,21 @@ err_unpin:
>  	return ret;
>  }
>  
> +void intel_ring_unpin(struct intel_ring *ring)
> +{
> +	GEM_BUG_ON(!ring->vma);
> +	GEM_BUG_ON(!ring->vaddr);
> +
> +	if (HAS_LLC(ring->engine->i915) && !ring->obj->stolen)
> +		i915_gem_object_unpin_map(ring->obj);
> +	else
> +		i915_vma_unpin_iomap(ring->vma);
> +	ring->vaddr = NULL;
> +
> +	i915_gem_object_ggtt_unpin(ring->obj);
> +	ring->vma = NULL;
> +}
> +
>  static void intel_destroy_ringbuffer_obj(struct intel_ring *ring)
>  {
>  	i915_gem_object_put(ring->obj);
> @@ -2218,7 +2218,7 @@ static int intel_init_engine(struct intel_engine_cs *engine)
>  			goto error;
>  	}
>  
> -	ret = intel_pin_and_map_ring(dev_priv, ring);
> +	ret = intel_ring_pin(ring);
>  	if (ret) {
>  		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
>  				engine->name, ret);
> @@ -2246,7 +2246,7 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
>  		intel_engine_stop(engine);
>  		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
>  
> -		intel_unpin_ring(engine->buffer);
> +		intel_ring_unpin(engine->buffer);
>  		intel_ring_free(engine->buffer);
>  		engine->buffer = NULL;
>  	}
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 76fc9bd70873..836931a6012b 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -441,9 +441,8 @@ intel_write_status_page(struct intel_engine_cs *engine,
>  
>  struct intel_ring *
>  intel_engine_create_ring(struct intel_engine_cs *engine, int size);
> -int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
> -			   struct intel_ring *ring);
> -void intel_unpin_ring(struct intel_ring *ring);
> +int intel_ring_pin(struct intel_ring *ring);
> +void intel_ring_unpin(struct intel_ring *ring);
>  void intel_ring_free(struct intel_ring *ring);
>  
>  void intel_engine_stop(struct intel_engine_cs *engine);
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
  2016-07-21 11:26   ` Joonas Lahtinen
@ 2016-07-21 12:09     ` Chris Wilson
  0 siblings, 0 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-21 12:09 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Thu, Jul 21, 2016 at 02:26:19PM +0300, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> > Both perform the same actions with more or less indirection, so just
> > unify the code.
> > 
> 
> Don't really like removing the engine = req->engine aliases, but it seems
> like req->engine is used plenty already. And assuming this was a
> mechanical change with no hidden functional changes, then,

I've added some engine = req->engine where there was repetition within
the function.
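
Just to be concrete, a minimal sketch of the pattern (illustrative only,
written against the post-rename names from later in the series; the helper
itself is made up and not a hunk from any patch):

static int emit_two_nops(struct drm_i915_gem_request *req)
{
	/* req->engine is referenced more than once, so keep the local alias */
	struct intel_engine_cs *engine = req->engine;
	struct intel_ring *ring = req->ring;
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	DRM_DEBUG("%s: emitted 2 nops on engine %d\n",
		  engine->name, engine->id);
	return 0;
}

Where req->engine is dereferenced only once in a function, I just use
req->engine directly instead of adding the alias.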
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 06/18] drm/i915: Rename residual ringbuf parameters
  2016-07-21 12:01   ` Joonas Lahtinen
@ 2016-07-21 12:20     ` Chris Wilson
  0 siblings, 0 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-21 12:20 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Thu, Jul 21, 2016 at 03:01:07PM +0300, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:11 +0100, Chris Wilson wrote:
> > Now that we have a clear ring/engine split and a struct intel_ring, we
> > no longer need the stopgap ringbuf names.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> 
> Why is this a separate patch? List the renames here too, with those;

It renames a bunch of s/ringbuf/ring/ parameters that do not fit in with
the earlier patches.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 09/18] drm/i915: Simplify request_alloc by returning the allocated request
  2016-07-20 13:12 ` [PATCH 09/18] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
@ 2016-07-21 13:07   ` Joonas Lahtinen
  2016-07-21 13:18     ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 13:07 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> It is simpler and leads to more readable code through the callstack if
> the allocation returns the allocated struct through the return value.
> 
> The importance of this is that it no longer looks like we accidentally
> allocate requests as a side effect of calling certain functions.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_drv.h            |  3 +-
>  drivers/gpu/drm/i915/i915_gem.c            | 75 ++++++++----------------------
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c | 12 ++---
>  drivers/gpu/drm/i915/i915_gem_request.c    | 58 ++++++++---------------
>  drivers/gpu/drm/i915/i915_trace.h          | 13 +++---
>  drivers/gpu/drm/i915/intel_display.c       | 36 ++++++--------
>  drivers/gpu/drm/i915/intel_lrc.c           |  2 +-
>  drivers/gpu/drm/i915/intel_overlay.c       | 20 ++++----
>  8 files changed, 79 insertions(+), 140 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index f32ec6db5bfa..3f67431577e3 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3168,8 +3168,7 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
>  
>  int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
>  int i915_gem_object_sync(struct drm_i915_gem_object *obj,
> -			 struct intel_engine_cs *to,
> -			 struct drm_i915_gem_request **to_req);
> +			 struct drm_i915_gem_request *to);
>  void i915_vma_move_to_active(struct i915_vma *vma,
>  			     struct drm_i915_gem_request *req);
>  int i915_gem_dumb_create(struct drm_file *file_priv,
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 95dbcfd94a80..77d7c0b012f4 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2842,51 +2842,35 @@ out:
>  
>  static int
>  __i915_gem_object_sync(struct drm_i915_gem_object *obj,
> -		       struct intel_engine_cs *to,
> -		       struct drm_i915_gem_request *from_req,
> -		       struct drm_i915_gem_request **to_req)
> +		       struct drm_i915_gem_request *to,
> +		       struct drm_i915_gem_request *from)
>  {
> -	struct intel_engine_cs *from;
>  	int ret;
>  
> -	from = i915_gem_request_get_engine(from_req);
> -	if (to == from)
> +	if (to->engine == from->engine)
>  		return 0;
>  
> -	if (i915_gem_request_completed(from_req))
> +	if (i915_gem_request_completed(from))
>  		return 0;
>  
>  	if (!i915.semaphores) {
> -		struct drm_i915_private *i915 = to_i915(obj->base.dev);
> -		ret = __i915_wait_request(from_req,
> -					  i915->mm.interruptible,
> +		ret = __i915_wait_request(from,
> +					  from->i915->mm.interruptible,
>  					  NULL,
>  					  NO_WAITBOOST);
>  		if (ret)
>  			return ret;
>  
> -		i915_gem_object_retire_request(obj, from_req);
> +		i915_gem_object_retire_request(obj, from);
>  	} else {
> -		int idx = intel_engine_sync_index(from, to);
> -		u32 seqno = i915_gem_request_get_seqno(from_req);
> +		int idx = intel_engine_sync_index(from->engine, to->engine);
> +		u32 seqno = i915_gem_request_get_seqno(from);
>  
> -		WARN_ON(!to_req);
> -
> -		if (seqno <= from->semaphore.sync_seqno[idx])
> +		if (seqno <= from->engine->semaphore.sync_seqno[idx])
>  			return 0;
>  
> -		if (*to_req == NULL) {
> -			struct drm_i915_gem_request *req;
> -
> -			req = i915_gem_request_alloc(to, NULL);
> -			if (IS_ERR(req))
> -				return PTR_ERR(req);
> -
> -			*to_req = req;
> -		}
> -
> -		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
> -		ret = to->semaphore.sync_to(*to_req, from, seqno);
> +		trace_i915_gem_ring_sync_to(to, from);

Will somebody go nuts over changing the tracepoint just like this? I remember
somebody treating it as somewhat of an ABI.

> +		ret = to->engine->semaphore.sync_to(to, from->engine, seqno);
>  		if (ret)
>  			return ret;
>  
> @@ -2894,8 +2878,8 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  		 * might have just caused seqno wrap under
>  		 * the radar.
>  		 */
> -		from->semaphore.sync_seqno[idx] =
> -			i915_gem_request_get_seqno(obj->last_read_req[from->id]);
> +		from->engine->semaphore.sync_seqno[idx] =
> +			i915_gem_request_get_seqno(obj->last_read_req[from->engine->id]);
>  	}
>  
>  	return 0;
> @@ -2905,17 +2889,12 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>   * i915_gem_object_sync - sync an object to a ring.
>   *
>   * @obj: object which may be in use on another ring.
> - * @to: ring we wish to use the object on. May be NULL.
> - * @to_req: request we wish to use the object for. See below.
> - *          This will be allocated and returned if a request is
> - *          required but not passed in.
> + * @to: request we are wishing to use
>   *
>   * This code is meant to abstract object synchronization with the GPU.
> - * Calling with NULL implies synchronizing the object with the CPU
> - * rather than a particular GPU ring. Conceptually we serialise writes
> - * between engines inside the GPU. We only allow one engine to write
> - * into a buffer at any time, but multiple readers. To ensure each has
> - * a coherent view of memory, we must:
> + * Conceptually we serialise writes between engines inside the GPU.
> + * We only allow one engine to write into a buffer at any time, but
> + * multiple readers. To ensure each has a coherent view of memory, we must:
>   *
>   * - If there is an outstanding write request to the object, the new
>   *   request must wait for it to complete (either CPU or in hw, requests
> @@ -2924,22 +2903,11 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>   * - If we are a write request (pending_write_domain is set), the new
>   *   request must wait for outstanding read requests to complete.
>   *
> - * For CPU synchronisation (NULL to) no request is required. For syncing with
> - * rings to_req must be non-NULL. However, a request does not have to be
> - * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
> - * request will be allocated automatically and returned through *to_req. Note
> - * that it is not guaranteed that commands will be emitted (because the system
> - * might already be idle). Hence there is no need to create a request that
> - * might never have any work submitted. Note further that if a request is
> - * returned in *to_req, it is the responsibility of the caller to submit
> - * that request (after potentially adding more work to it).
> - *
>   * Returns 0 if successful, else propagates up the lower layer error.
>   */
>  int
>  i915_gem_object_sync(struct drm_i915_gem_object *obj,
> -		     struct intel_engine_cs *to,
> -		     struct drm_i915_gem_request **to_req)
> +		     struct drm_i915_gem_request *to)
>  {
>  	const bool readonly = obj->base.pending_write_domain == 0;
>  	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
> @@ -2948,9 +2916,6 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  	if (!obj->active)
>  		return 0;
>  
> -	if (to == NULL)
> -		return i915_gem_object_wait_rendering(obj, readonly);
> -
>  	n = 0;
>  	if (readonly) {
>  		if (obj->last_write_req)
> @@ -2961,7 +2926,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  				req[n++] = obj->last_read_req[i];
>  	}
>  	for (i = 0; i < n; i++) {
> -		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
> +		ret = __i915_gem_object_sync(obj, to, req[i]);
>  		if (ret)
>  			return ret;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 2a4841256f8e..5cea95c6f98b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -981,7 +981,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
>  		struct drm_i915_gem_object *obj = vma->obj;
>  
>  		if (obj->active & other_rings) {
> -			ret = i915_gem_object_sync(obj, req->engine, &req);
> +			ret = i915_gem_object_sync(obj, req);
>  			if (ret)
>  				return ret;
>  		}
> @@ -1426,7 +1426,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  {
>  	struct drm_i915_private *dev_priv = to_i915(dev);
>  	struct i915_ggtt *ggtt = &dev_priv->ggtt;
> -	struct drm_i915_gem_request *req = NULL;
>  	struct eb_vmas *eb;
>  	struct drm_i915_gem_object *batch_obj;
>  	struct drm_i915_gem_exec_object2 shadow_exec_entry;
> @@ -1614,13 +1613,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
>  
>  	/* Allocate a request for this batch buffer nice and early. */
> -	req = i915_gem_request_alloc(engine, ctx);
> -	if (IS_ERR(req)) {
> -		ret = PTR_ERR(req);
> +	params->request = i915_gem_request_alloc(engine, ctx);
> +	if (IS_ERR(params->request)) {
> +		ret = PTR_ERR(params->request);
>  		goto err_batch_unpin;
>  	}
>  
> -	ret = i915_gem_request_add_to_client(req, file);
> +	ret = i915_gem_request_add_to_client(params->request, file);
>  	if (ret)
>  		goto err_request;
>  
> @@ -1636,7 +1635,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  	params->dispatch_flags          = dispatch_flags;
>  	params->batch_obj               = batch_obj;
>  	params->ctx                     = ctx;
> -	params->request                 = req;

Not sure why you specifically want to always use the params->request form?

>  
>  	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
>  err_request:
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 3a566abf5219..2153b4fe4a1f 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -292,10 +292,21 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
>  	return 0;
>  }
>  
> -static inline int
> -__i915_gem_request_alloc(struct intel_engine_cs *engine,
> -			 struct i915_gem_context *ctx,
> -			 struct drm_i915_gem_request **req_out)
> +/**
> + * i915_gem_request_alloc - allocate a request structure
> + *
> + * @engine: engine that we wish to issue the request on.
> + * @ctx: context that the request will be associated with.
> + *       This can be NULL if the request is not directly related to
> + *       any specific user context, in which case this function will
> + *       choose an appropriate context to use.
> + *
> + * Returns a pointer to the allocated request if successful,
> + * or an error code if not.
> + */
> +struct drm_i915_gem_request *
> +i915_gem_request_alloc(struct intel_engine_cs *engine,
> +		       struct i915_gem_context *ctx)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
>  	unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
> @@ -303,18 +314,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
>  	u32 seqno;
>  	int ret;
>  
> -	if (!req_out)
> -		return -EINVAL;
> -
> -	*req_out = NULL;
> -
>  	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
>  	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
>  	 * and restart.
>  	 */
>  	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
>  	if (ret)
> -		return ret;
> +		return ERR_PTR(ret);
>  
>  	/* Move the oldest request to the slab-cache (if not in use!) */
>  	if (!list_empty(&engine->request_list)) {
> @@ -326,7 +332,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
>  
>  	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
>  	if (!req)
> -		return -ENOMEM;
> +		return ERR_PTR(-ENOMEM);
>  
>  	ret = i915_gem_get_seqno(dev_priv, &seqno);
>  	if (ret)
> @@ -359,39 +365,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
>  	if (ret)
>  		goto err_ctx;
>  
> -	*req_out = req;
> -	return 0;
> +	return req;
>  
>  err_ctx:
>  	i915_gem_context_put(ctx);
>  err:
>  	kmem_cache_free(dev_priv->requests, req);
> -	return ret;
> -}
> -
> -/**
> - * i915_gem_request_alloc - allocate a request structure
> - *
> - * @engine: engine that we wish to issue the request on.
> - * @ctx: context that the request will be associated with.
> - *       This can be NULL if the request is not directly related to
> - *       any specific user context, in which case this function will
> - *       choose an appropriate context to use.
> - *
> - * Returns a pointer to the allocated request if successful,
> - * or an error code if not.
> - */
> -struct drm_i915_gem_request *
> -i915_gem_request_alloc(struct intel_engine_cs *engine,
> -		       struct i915_gem_context *ctx)
> -{
> -	struct drm_i915_gem_request *req;
> -	int err;
> -
> -	if (!ctx)
> -		ctx = engine->i915->kernel_context;
> -	err = __i915_gem_request_alloc(engine, ctx, &req);
> -	return err ? ERR_PTR(err) : req;
> +	return ERR_PTR(ret);
>  }
>  
>  static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index 007112d1e049..9e43c0aa6e3b 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -449,10 +449,9 @@ TRACE_EVENT(i915_gem_evict_vm,
>  );
>  
>  TRACE_EVENT(i915_gem_ring_sync_to,
> -	    TP_PROTO(struct drm_i915_gem_request *to_req,
> -		     struct intel_engine_cs *from,
> -		     struct drm_i915_gem_request *req),
> -	    TP_ARGS(to_req, from, req),
> +	    TP_PROTO(struct drm_i915_gem_request *to,
> +		     struct drm_i915_gem_request *from),
> +	    TP_ARGS(to, from),
>  
>  	    TP_STRUCT__entry(
>  			     __field(u32, dev)
> @@ -463,9 +462,9 @@ TRACE_EVENT(i915_gem_ring_sync_to,
>  
>  	    TP_fast_assign(
>  			   __entry->dev = from->i915->drm.primary->index;
> -			   __entry->sync_from = from->id;
> -			   __entry->sync_to = to_req->engine->id;
> -			   __entry->seqno = req->fence.seqno;
> +			   __entry->sync_from = from->engine->id;
> +			   __entry->sync_to = to->engine->id;
> +			   __entry->seqno = from->fence.seqno;
>  			   ),
>  
>  	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index bff172c45ff7..5d4420b67642 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -11583,7 +11583,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>  	struct intel_flip_work *work;
>  	struct intel_engine_cs *engine;
>  	bool mmio_flip;
> -	struct drm_i915_gem_request *request = NULL;
> +	struct drm_i915_gem_request *request;
>  	int ret;
>  
>  	/*
> @@ -11690,22 +11690,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>  
>  	mmio_flip = use_mmio_flip(engine, obj);
>  
> -	/* When using CS flips, we want to emit semaphores between rings.
> -	 * However, when using mmio flips we will create a task to do the
> -	 * synchronisation, so all we want here is to pin the framebuffer
> -	 * into the display plane and skip any waits.
> -	 */
> -	if (!mmio_flip) {
> -		ret = i915_gem_object_sync(obj, engine, &request);
> -		if (!ret && !request) {
> -			request = i915_gem_request_alloc(engine, NULL);
> -			ret = PTR_ERR_OR_ZERO(request);
> -		}
> -
> -		if (ret)
> -			goto cleanup_pending;
> -	}
> -
>  	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
>  	if (ret)
>  		goto cleanup_pending;
> @@ -11723,14 +11707,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>  
>  		schedule_work(&work->mmio_work);
>  	} else {
> -		i915_gem_request_assign(&work->flip_queued_req, request);
> +		request = i915_gem_request_alloc(engine, engine->last_context);
> +		if (IS_ERR(request)) {
> +			ret = PTR_ERR(request);
> +			goto cleanup_unpin;
> +		}
> +
> +		ret = i915_gem_object_sync(obj, request);
> +		if (ret)
> +			goto cleanup_request;
> +
>  		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
>  						   page_flip_flags);
>  		if (ret)
> -			goto cleanup_unpin;
> +			goto cleanup_request;
>  
>  		intel_mark_page_flip_active(intel_crtc, work);
>  
> +		work->flip_queued_req = i915_gem_request_get(request);

If I understood it correctly, the result should be equivalent, no
functional changes.

Regards, Joonas

>  		i915_add_request_no_flush(request);
>  	}
>  
> @@ -11745,11 +11739,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
>  
>  	return 0;
>  
> +cleanup_request:
> +	i915_add_request_no_flush(request);
>  cleanup_unpin:
>  	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
>  cleanup_pending:
> -	if (!IS_ERR_OR_NULL(request))
> -		i915_add_request_no_flush(request);
>  	atomic_dec(&intel_crtc->unpin_work_count);
>  	mutex_unlock(&dev->struct_mutex);
>  cleanup:
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 3158a1a38644..6cd0e24ed50c 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -655,7 +655,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
>  		struct drm_i915_gem_object *obj = vma->obj;
>  
>  		if (obj->active & other_rings) {
> -			ret = i915_gem_object_sync(obj, req->engine, &req);
> +			ret = i915_gem_object_sync(obj, req);
>  			if (ret)
>  				return ret;
>  		}
> diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
> index a5071e281088..356a1f6f95aa 100644
> --- a/drivers/gpu/drm/i915/intel_overlay.c
> +++ b/drivers/gpu/drm/i915/intel_overlay.c
> @@ -229,11 +229,18 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
>  	return 0;
>  }
>  
> +static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
> +{
> +	struct drm_i915_private *dev_priv = overlay->i915;
> +	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
> +
> +	return i915_gem_request_alloc(engine, dev_priv->kernel_context);
> +}
> +
>  /* overlay needs to be disable in OCMD reg */
>  static int intel_overlay_on(struct intel_overlay *overlay)
>  {
>  	struct drm_i915_private *dev_priv = overlay->i915;
> -	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
>  	struct drm_i915_gem_request *req;
>  	struct intel_ring *ring;
>  	int ret;
> @@ -241,7 +248,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
>  	WARN_ON(overlay->active);
>  	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
>  
> -	req = i915_gem_request_alloc(engine, NULL);
> +	req = alloc_request(overlay);
>  	if (IS_ERR(req))
>  		return PTR_ERR(req);
>  
> @@ -268,7 +275,6 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
>  				  bool load_polyphase_filter)
>  {
>  	struct drm_i915_private *dev_priv = overlay->i915;
> -	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
>  	struct drm_i915_gem_request *req;
>  	struct intel_ring *ring;
>  	u32 flip_addr = overlay->flip_addr;
> @@ -285,7 +291,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
>  	if (tmp & (1 << 17))
>  		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
>  
> -	req = i915_gem_request_alloc(engine, NULL);
> +	req = alloc_request(overlay);
>  	if (IS_ERR(req))
>  		return PTR_ERR(req);
>  
> @@ -338,7 +344,6 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
>  static int intel_overlay_off(struct intel_overlay *overlay)
>  {
>  	struct drm_i915_private *dev_priv = overlay->i915;
> -	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
>  	struct drm_i915_gem_request *req;
>  	struct intel_ring *ring;
>  	u32 flip_addr = overlay->flip_addr;
> @@ -352,7 +357,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
>  	 * of the hw. Do it in both cases */
>  	flip_addr |= OFC_UPDATE;
>  
> -	req = i915_gem_request_alloc(engine, NULL);
> +	req = alloc_request(overlay);
>  	if (IS_ERR(req))
>  		return PTR_ERR(req);
>  
> @@ -412,7 +417,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
>  static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
>  {
>  	struct drm_i915_private *dev_priv = overlay->i915;
> -	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
>  	int ret;
>  
>  	lockdep_assert_held(&dev_priv->drm.struct_mutex);
> @@ -428,7 +432,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
>  		struct drm_i915_gem_request *req;
>  		struct intel_ring *ring;
>  
> -		req = i915_gem_request_alloc(engine, NULL);
> +		req = alloc_request(overlay);
>  		if (IS_ERR(req))
>  			return PTR_ERR(req);
>  
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 09/18] drm/i915: Simplify request_alloc by returning the allocated request
  2016-07-21 13:07   ` Joonas Lahtinen
@ 2016-07-21 13:18     ` Chris Wilson
  0 siblings, 0 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-21 13:18 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Thu, Jul 21, 2016 at 04:07:59PM +0300, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> > -		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
> > -		ret = to->semaphore.sync_to(*to_req, from, seqno);
> > +		trace_i915_gem_ring_sync_to(to, from);
> 
> Will somebody go nuts over changing the tracing just like so? I remember
> somebody treating it as somewhat of an ABI.

They do, but they shouldn't! The format is discoverable through the
tracing interface.

It's a moot point here as the trace output is unchanged, just the trace
point call simplified to match the parent.

> > @@ -1636,7 +1635,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
> >  	params->dispatch_flags          = dispatch_flags;
> >  	params->batch_obj               = batch_obj;
> >  	params->ctx                     = ctx;
> > -	params->request                 = req;
> 
> Not sure why you especially want to always use params->request form?

Stack deduplication. It'll be clearer eventually.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 10/18] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START
  2016-07-20 13:12 ` [PATCH 10/18] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
@ 2016-07-21 13:39   ` Joonas Lahtinen
  2016-07-21 14:14     ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 13:39 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> Both the ->dispatch_execbuffer and ->emit_bb_start callbacks do exactly
> the same thing, add MI_BATCHBUFFER_START to the request's ringbuffer -
> we need only one vfunc.
> 

Some ranting below,

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  6 ++--
>  drivers/gpu/drm/i915/i915_gem_render_state.c | 16 +++++-----
>  drivers/gpu/drm/i915/intel_lrc.c             | 15 ++++++---
>  drivers/gpu/drm/i915/intel_ringbuffer.c      | 48 ++++++++++++++--------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h      | 12 +++----
>  5 files changed, 50 insertions(+), 47 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 5cea95c6f98b..2d9f1f4bc058 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -1326,9 +1326,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
>  	if (exec_len == 0)
>  		exec_len = params->batch_obj->base.size;
>  
> -	ret = params->engine->dispatch_execbuffer(params->request,
> -						  exec_start, exec_len,
> -						  params->dispatch_flags);
> +	ret = params->engine->emit_bb_start(params->request,
> +					    exec_start, exec_len,
> +					    params->dispatch_flags);
>  	if (ret)
>  		return ret;
>  
> diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
> index b2be4676a5cf..2ba759f3ab6f 100644
> --- a/drivers/gpu/drm/i915/i915_gem_render_state.c
> +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
> @@ -234,18 +234,18 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
>  	if (so.rodata == NULL)
>  		return 0;
>  
> -	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
> -					     so.rodata->batch_items * 4,
> -					     I915_DISPATCH_SECURE);
> +	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
> +					 so.rodata->batch_items * 4,
> +					 I915_DISPATCH_SECURE);
>  	if (ret)
>  		goto out;
>  
>  	if (so.aux_batch_size > 8) {
> -		ret = req->engine->dispatch_execbuffer(req,
> -						     (so.ggtt_offset +
> -						      so.aux_batch_offset),
> -						     so.aux_batch_size,
> -						     I915_DISPATCH_SECURE);
> +		ret = req->engine->emit_bb_start(req,
> +						 (so.ggtt_offset +
> +						  so.aux_batch_offset),
> +						 so.aux_batch_size,
> +						 I915_DISPATCH_SECURE);
>  		if (ret)
>  			goto out;
>  	}

The code above this line is exact reason why I don't like the a->b->c
(especially when there is repetition). But it's not new to this patch
so guess it'll do. Some future work to shorten down a little bit might
not hurt.

> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 6cd0e24ed50c..d17a193e8eaf 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -859,7 +859,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
>  	exec_start = params->batch_obj_vm_offset +
>  		     args->batch_start_offset;
>  
> -	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
> +	ret = engine->emit_bb_start(params->request,
> +				    exec_start, args->batch_len,
> +				    params->dispatch_flags);
>  	if (ret)
>  		return ret;
>  
> @@ -1535,7 +1537,8 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
>  }
>  
>  static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
> -			      u64 offset, unsigned dispatch_flags)
> +			      u64 offset, u32 len,
> +			      unsigned int dispatch_flags)
>  {
>  	struct intel_ring *ring = req->ring;
>  	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
> @@ -1811,13 +1814,15 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
>  		return 0;
>  
>  	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
> -				       I915_DISPATCH_SECURE);
> +					 so.rodata->batch_items * 4,
> +					 I915_DISPATCH_SECURE);
>  	if (ret)
>  		goto out;
>  
>  	ret = req->engine->emit_bb_start(req,
> -				       (so.ggtt_offset + so.aux_batch_offset),
> -				       I915_DISPATCH_SECURE);
> +					 (so.ggtt_offset + so.aux_batch_offset),
> +					 so.aux_batch_size,
> +					 I915_DISPATCH_SECURE);
>  	if (ret)
>  		goto out;
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 6aa1657bbc9d..4488db485fa4 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1779,9 +1779,9 @@ gen8_irq_disable(struct intel_engine_cs *engine)
>  }
>  
>  static int
> -i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
> -			 u64 offset, u32 length,
> -			 unsigned dispatch_flags)
> +i965_emit_bb_start(struct drm_i915_gem_request *req,
> +		   u64 offset, u32 length,
> +		   unsigned int dispatch_flags)
>  {
>  	struct intel_ring *ring = req->ring;
>  	int ret;
> @@ -1806,9 +1806,9 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  #define I830_TLB_ENTRIES (2)
>  #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
>  static int
> -i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
> -			 u64 offset, u32 len,
> -			 unsigned dispatch_flags)
> +i830_emit_bb_start(struct drm_i915_gem_request *req,
> +		   u64 offset, u32 len,
> +		   unsigned int dispatch_flags)
>  {
>  	struct intel_ring *ring = req->ring;
>  	u32 cs_offset = req->engine->scratch.gtt_offset;
> @@ -1868,9 +1868,9 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  }
>  
>  static int
> -i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
> -			 u64 offset, u32 len,
> -			 unsigned dispatch_flags)
> +i915_emit_bb_start(struct drm_i915_gem_request *req,
> +		   u64 offset, u32 len,
> +		   unsigned int dispatch_flags)
>  {
>  	struct intel_ring *ring = req->ring;
>  	int ret;
> @@ -2563,9 +2563,9 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
>  }
>  
>  static int
> -gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> -			      u64 offset, u32 len,
> -			      unsigned dispatch_flags)
> +gen8_emit_bb_start(struct drm_i915_gem_request *req,
> +		   u64 offset, u32 len,
> +		   unsigned int dispatch_flags)
>  {
>  	struct intel_ring *ring = req->ring;
>  	bool ppgtt = USES_PPGTT(req->i915) &&
> @@ -2589,9 +2589,9 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  }
>  
>  static int
> -hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> -			     u64 offset, u32 len,
> -			     unsigned dispatch_flags)
> +hsw_emit_bb_start(struct drm_i915_gem_request *req,
> +		  u64 offset, u32 len,
> +		  unsigned int dispatch_flags)
>  {
>  	struct intel_ring *ring = req->ring;
>  	int ret;
> @@ -2614,9 +2614,9 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
>  }
>  
>  static int
> -gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
> -			      u64 offset, u32 len,
> -			      unsigned dispatch_flags)
> +gen6_emit_bb_start(struct drm_i915_gem_request *req,
> +		   u64 offset, u32 len,
> +		   unsigned int dispatch_flags)
>  {
>  	struct intel_ring *ring = req->ring;
>  	int ret;
> @@ -2820,15 +2820,15 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
>  		engine->add_request = gen6_add_request;
>  
>  	if (INTEL_GEN(dev_priv) >= 8)
> -		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
> +		engine->emit_bb_start = gen8_emit_bb_start;
>  	else if (INTEL_GEN(dev_priv) >= 6)
> -		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
> +		engine->emit_bb_start = gen6_emit_bb_start;
>  	else if (INTEL_GEN(dev_priv) >= 4)
> -		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
> +		engine->emit_bb_start = i965_emit_bb_start;
>  	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
> -		engine->dispatch_execbuffer = i830_dispatch_execbuffer;
> +		engine->emit_bb_start = i830_emit_bb_start;
>  	else
> -		engine->dispatch_execbuffer = i915_dispatch_execbuffer;
> +		engine->emit_bb_start = i915_emit_bb_start;
>  
>  	intel_ring_init_irq(dev_priv, engine);
>  	intel_ring_init_semaphores(dev_priv, engine);
> @@ -2866,7 +2866,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
>  	}
>  
>  	if (IS_HASWELL(dev_priv))
> -		engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
> +		engine->emit_bb_start = hsw_emit_bb_start;
>  
>  	engine->init_hw = init_render_ring;
>  	engine->cleanup = render_ring_cleanup;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 49500cead7a5..85d6a70554b9 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -214,12 +214,6 @@ struct intel_engine_cs {
>  	 * monotonic, even if not coherent.
>  	 */
>  	void		(*irq_seqno_barrier)(struct intel_engine_cs *ring);
> -	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
> -					       u64 offset, u32 length,
> -					       unsigned dispatch_flags);
> -#define I915_DISPATCH_SECURE 0x1
> -#define I915_DISPATCH_PINNED 0x2
> -#define I915_DISPATCH_RS     0x4
>  	void		(*cleanup)(struct intel_engine_cs *ring);
>  
>  	/* GEN8 signal/wait table - never trust comments!
> @@ -295,7 +289,11 @@ struct intel_engine_cs {
>  				      u32 invalidate_domains,
>  				      u32 flush_domains);
>  	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
> -					 u64 offset, unsigned dispatch_flags);
> +					 u64 offset, u32 length,
> +					 unsigned int dispatch_flags);
> +#define I915_DISPATCH_SECURE 0x1
> +#define I915_DISPATCH_PINNED 0x2
> +#define I915_DISPATCH_RS     0x4

BIT(0) BIT(1) etc. while touching it?
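
I.e. same values, just spelled with the kernel's BIT() macro (sketch):

#define I915_DISPATCH_SECURE BIT(0)
#define I915_DISPATCH_PINNED BIT(1)
#define I915_DISPATCH_RS     BIT(2)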

Regards, Joonas

>  
>  	/**
>  	 * List of objects currently involved in rendering from the
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 11/18] drm/i915: Convert engine->write_tail to operate on a request
  2016-07-20 13:12 ` [PATCH 11/18] drm/i915: Convert engine->write_tail to operate on a request Chris Wilson
@ 2016-07-21 13:52   ` Joonas Lahtinen
  0 siblings, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 13:52 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> If we rewrite the I915_WRITE_TAIL specialisation for the legacy
> ringbuffer as submitting the request onto the ringbuffer, we can unify
> the vfunc with both execlists and GuC in the next patch.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_gem_request.c |  7 ++---
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 52 ++++++++++++++++-----------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  3 +-
>  3 files changed, 29 insertions(+), 33 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 2153b4fe4a1f..408f390a4c98 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -469,13 +469,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  	 */
>  	request->postfix = intel_ring_get_tail(ring);
>  
> -	if (i915.enable_execlists) {
> +	if (i915.enable_execlists)
>  		ret = engine->emit_request(request);
> -	} else {
> +	else
>  		ret = engine->add_request(request);
> -
> -		request->tail = intel_ring_get_tail(ring);
> -	}
>  	/* Not allowed to fail! */
>  	WARN(ret, "emit|add_request failed: %d!\n", ret);
>  	/* Sanity check that the reserved size was large enough. */
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 4488db485fa4..43dfa4be1cfd 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -58,14 +58,6 @@ void intel_ring_update_space(struct intel_ring *ring)
>  					 ring->tail, ring->size);
>  }
>  
> -static void __intel_engine_submit(struct intel_engine_cs *engine)
> -{
> -	struct intel_ring *ring = engine->buffer;
> -
> -	ring->tail &= ring->size - 1;
> -	engine->write_tail(engine, ring->tail);
> -}
> -
>  static int
>  gen2_render_ring_flush(struct drm_i915_gem_request *req,
>  		       u32	invalidate_domains,
> @@ -421,13 +413,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
>  	return gen8_emit_pipe_control(req, flags, scratch_addr);
>  }
>  
> -static void ring_write_tail(struct intel_engine_cs *engine,
> -			    u32 value)
> -{
> -	struct drm_i915_private *dev_priv = engine->i915;
> -	I915_WRITE_TAIL(engine, value);
> -}
> -
>  u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_private *dev_priv = engine->i915;
> @@ -541,7 +526,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
>  
>  	I915_WRITE_CTL(engine, 0);
>  	I915_WRITE_HEAD(engine, 0);
> -	engine->write_tail(engine, 0);
> +	I915_WRITE_TAIL(engine, 0);
>  
>  	if (!IS_GEN2(dev_priv)) {
>  		(void)I915_READ_CTL(engine);
> @@ -1467,7 +1452,11 @@ gen6_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
>  	intel_ring_emit(ring, req->fence.seqno);
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
> -	__intel_engine_submit(req->engine);
> +	intel_ring_advance(ring);
> +
> +	req->tail = intel_ring_get_tail(ring);

I'd just do req->tail = ring->tail and drop the intel_ring_get_tail()
function completely at the most convenient spot, which might be around
now before you add more uses; currently we only have 5.
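
I.e. something like this at each of the current call sites (sketch only):

-	req->tail = intel_ring_get_tail(ring);
+	req->tail = ring->tail;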

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas

> +
> +	req->engine->submit_request(req);
>  
>  	return 0;
>  }
> @@ -1497,7 +1486,8 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, 0);
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
>  	intel_ring_emit(ring, MI_NOOP);
> -	__intel_engine_submit(engine);
> +
> +	req->engine->submit_request(req);
>  
>  	return 0;
>  }
> @@ -1716,11 +1706,22 @@ i9xx_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
>  	intel_ring_emit(ring, req->fence.seqno);
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
> -	__intel_engine_submit(req->engine);
> +	intel_ring_advance(ring);
> +
> +	req->tail = intel_ring_get_tail(ring);
> +
> +	req->engine->submit_request(req);
>  
>  	return 0;
>  }
>  
> +static void i9xx_submit_request(struct drm_i915_gem_request *request)
> +{
> +	struct drm_i915_private *dev_priv = request->i915;
> +
> +	I915_WRITE_TAIL(request->engine, request->tail);
> +}
> +
>  static void
>  gen6_irq_enable(struct intel_engine_cs *engine)
>  {
> @@ -2479,10 +2480,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
>  	rcu_read_unlock();
>  }
>  
> -static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
> -				     u32 value)
> +static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
>  {
> -	struct drm_i915_private *dev_priv = engine->i915;
> +	struct drm_i915_private *dev_priv = request->i915;
>  
>  	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
>  
> @@ -2506,8 +2506,8 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
>  		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
>  
>  	/* Now that the ring is fully powered up, update the tail */
> -	I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
> -	POSTING_READ_FW(RING_TAIL(engine->mmio_base));
> +	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
> +	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
>  
>  	/* Let the ring send IDLE messages to the GT again,
>  	 * and so let it sleep to conserve power when idle.
> @@ -2813,7 +2813,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
>  				      struct intel_engine_cs *engine)
>  {
>  	engine->init_hw = init_ring_common;
> -	engine->write_tail = ring_write_tail;
> +	engine->submit_request = i9xx_submit_request;
>  
>  	engine->add_request = i9xx_add_request;
>  	if (INTEL_GEN(dev_priv) >= 6)
> @@ -2897,7 +2897,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
>  	if (INTEL_GEN(dev_priv) >= 6) {
>  		/* gen6 bsd needs a special wa for tail updates */
>  		if (IS_GEN6(dev_priv))
> -			engine->write_tail = gen6_bsd_ring_write_tail;
> +			engine->submit_request = gen6_bsd_submit_request;
>  		engine->emit_flush = gen6_bsd_ring_flush;
>  		if (INTEL_GEN(dev_priv) < 8)
>  			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 85d6a70554b9..1a38c383327e 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -204,8 +204,6 @@ struct intel_engine_cs {
>  
>  	int		(*init_context)(struct drm_i915_gem_request *req);
>  
> -	void		(*write_tail)(struct intel_engine_cs *ring,
> -				      u32 value);
>  	int		(*add_request)(struct drm_i915_gem_request *req);
>  	/* Some chipsets are not quite as coherent as advertised and need
>  	 * an expensive kick to force a true read of the up-to-date seqno.
> @@ -294,6 +292,7 @@ struct intel_engine_cs {
>  #define I915_DISPATCH_SECURE 0x1
>  #define I915_DISPATCH_PINNED 0x2
>  #define I915_DISPATCH_RS     0x4
> +	void		(*submit_request)(struct drm_i915_gem_request *req);
>  
>  	/**
>  	 * List of objects currently involved in rendering from the
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 15/18] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores
  2016-07-20 13:12 ` [PATCH 15/18] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores Chris Wilson
@ 2016-07-21 13:55   ` Joonas Lahtinen
  2016-07-21 14:10     ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 13:55 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> As gen6_emit_request() only differs from i9xx_emit_request() when
> semaphores are enabled, only use the specialised vfunc in that scenario.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 18 ++++++++----------
>  1 file changed, 8 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index a74b42fc8f48..8ae25bcc876e 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1442,22 +1442,20 @@ static int i9xx_emit_request(struct drm_i915_gem_request *req)
>  }
>  
>  /**
> - * gen6_emit_request - Update the semaphore mailbox registers
> + * gen6_sema_emit_request - Update the semaphore mailbox registers
>   *
>   * @request - request to write to the ring
>   *
>   * Update the mailbox registers in the *other* rings with the current seqno.
>   * This acts like a signal in the canonical semaphore.
>   */
> -static int gen6_emit_request(struct drm_i915_gem_request *req)
> +static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
>  {
> -	if (req->engine->semaphore.signal) {
> -		int ret;
> +	int ret;
>  
> -		ret = req->engine->semaphore.signal(req);
> -		if (ret)
> -			return ret;
> -	}
> +	ret = req->engine->semaphore.signal(req);
> +	if (ret)
> +		return ret;
>  
>  	return i9xx_emit_request(req);
>  }
> @@ -2687,6 +2685,8 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
>  	if (!i915.semaphores)
>  		return;
>  
> +	engine->emit_request = gen6_sema_emit_request;
> +
>  	if (INTEL_GEN(dev_priv) >= 8) {
>  		u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
>  
> @@ -2789,8 +2789,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
>  	engine->init_hw = init_ring_common;
>  
>  	engine->emit_request = i9xx_emit_request;
> -	if (INTEL_GEN(dev_priv) >= 6)
> -		engine->emit_request = gen6_emit_request;

Not sure if I would still prefer the assignment here. If overrides
happen from all around the codebase, it'll be harder to work out
what the possible values for a vfunc are, right?

Regards, Joonas

>  	engine->submit_request = i9xx_submit_request;
>  
>  	if (INTEL_GEN(dev_priv) >= 8)
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 15/18] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores
  2016-07-21 13:55   ` Joonas Lahtinen
@ 2016-07-21 14:10     ` Chris Wilson
  2016-07-22  9:42       ` Joonas Lahtinen
  0 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-21 14:10 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Thu, Jul 21, 2016 at 04:55:00PM +0300, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> > As gen6_emit_request() only differs from i9xx_emit_request() when
> > semaphores are enabled, only use the specialised vfunc in that scenario.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >  drivers/gpu/drm/i915/intel_ringbuffer.c | 18 ++++++++----------
> >  1 file changed, 8 insertions(+), 10 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > index a74b42fc8f48..8ae25bcc876e 100644
> > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > @@ -1442,22 +1442,20 @@ static int i9xx_emit_request(struct drm_i915_gem_request *req)
> >  }
> >  
> >  /**
> > - * gen6_emit_request - Update the semaphore mailbox registers
> > + * gen6_sema_emit_request - Update the semaphore mailbox registers
> >   *
> >   * @request - request to write to the ring
> >   *
> >   * Update the mailbox registers in the *other* rings with the current seqno.
> >   * This acts like a signal in the canonical semaphore.
> >   */
> > -static int gen6_emit_request(struct drm_i915_gem_request *req)
> > +static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
> >  {
> > -	if (req->engine->semaphore.signal) {
> > -		int ret;
> > +	int ret;
> >  
> > -		ret = req->engine->semaphore.signal(req);
> > -		if (ret)
> > -			return ret;
> > -	}
> > +	ret = req->engine->semaphore.signal(req);
> > +	if (ret)
> > +		return ret;
> >  
> >  	return i9xx_emit_request(req);
> >  }
> > @@ -2687,6 +2685,8 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
> >  	if (!i915.semaphores)
> >  		return;
> >  
> > +	engine->emit_request = gen6_sema_emit_request;
> > +
> >  	if (INTEL_GEN(dev_priv) >= 8) {
> >  		u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
> >  
> > @@ -2789,8 +2789,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
> >  	engine->init_hw = init_ring_common;
> >  
> >  	engine->emit_request = i9xx_emit_request;
> > -	if (INTEL_GEN(dev_priv) >= 6)
> > -		engine->emit_request = gen6_emit_request;
> 
> Not sure if I would still prefer the assignment here. If overrides
> happen from all around the codebase, it'll be harder to work out
> what the possible values for a vfunc are, right?

It's definitely not a default function any more though. It needs to be
conditional on the semaphore setup. We could move the init_semaphores
earlier, and then do

engine->emit_request = i9xx_emit_request;
if (i915.semaphores)
	engine->emit_request = gen6_sema_emit_request;

here?
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 10/18] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START
  2016-07-21 13:39   ` Joonas Lahtinen
@ 2016-07-21 14:14     ` Chris Wilson
  2016-07-27 15:04       ` Dave Gordon
  0 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-21 14:14 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Thu, Jul 21, 2016 at 04:39:58PM +0300, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> >  	if (so.aux_batch_size > 8) {
> > -		ret = req->engine->dispatch_execbuffer(req,
> > -						     (so.ggtt_offset +
> > -						      so.aux_batch_offset),
> > -						     so.aux_batch_size,
> > -						     I915_DISPATCH_SECURE);
> > +		ret = req->engine->emit_bb_start(req,
> > +						 (so.ggtt_offset +
> > +						  so.aux_batch_offset),
> > +						 so.aux_batch_size,
> > +						 I915_DISPATCH_SECURE);
> >  		if (ret)
> >  			goto out;
> >  	}
> 
> The code above this line is the exact reason why I don't like the a->b->c
> chains (especially when there is repetition). But it's not new to this patch,
> so I guess it'll do. Some future work to shorten it down a little might
> not hurt.

I presume you mean req->engine->x here, not so.y. Is it just the depth
and saving 5 columns? Or something else?
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 16/18] drm/i915: Remove duplicate golden render state init from execlists
  2016-07-20 13:12 ` [PATCH 16/18] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
@ 2016-07-21 14:18   ` Joonas Lahtinen
  2016-07-21 16:27     ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-21 14:18 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
>  static const struct intel_renderstate_rodata *
>  render_state_get_rodata(const int gen)
>  {
> @@ -51,6 +60,7 @@ static int render_state_init(struct render_state *so,
>  	int ret;
>  
>  	so->gen = INTEL_GEN(dev_priv);
> +	so->ggtt_offset = 0;

Previously not done; does it address a bug? It's going to get
overwritten, or the render_state has failed to initialize and is
forgotten, no? If it fixes a bug, I think the site fondling the
uninitialized object should be fixed instead.

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 05/18] drm/i915: Rename struct intel_ringbuffer to struct intel_ring
  2016-07-21 11:59   ` Joonas Lahtinen
@ 2016-07-21 16:02     ` Chris Wilson
  0 siblings, 0 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-21 16:02 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Thu, Jul 21, 2016 at 02:59:23PM +0300, Joonas Lahtinen wrote:
> > @@ -4567,8 +4567,8 @@ int i915_gem_init(struct drm_device *dev)
> >  
> >  	if (!i915.enable_execlists) {
> >  		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
> > -		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
> > -		dev_priv->gt.stop_engine = intel_stop_engine;
> > +		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
> > +		dev_priv->gt.stop_engine = intel_engine_stop;
> 
> I guess you added renaming gt.*_engine to your TODO already.

Rename? No, we're eliminating them.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 16/18] drm/i915: Remove duplicate golden render state init from execlists
  2016-07-21 14:18   ` Joonas Lahtinen
@ 2016-07-21 16:27     ` Chris Wilson
  2016-07-21 16:37       ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-21 16:27 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Thu, Jul 21, 2016 at 05:18:17PM +0300, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> >  static const struct intel_renderstate_rodata *
> >  render_state_get_rodata(const int gen)
> >  {
> > @@ -51,6 +60,7 @@ static int render_state_init(struct render_state *so,
> >  	int ret;
> >  
> >  	so->gen = INTEL_GEN(dev_priv);
> > +	so->ggtt_offset = 0;
> 
> Previously not done; does it address a bug? It's going to get
> overwritten, or the render_state has failed to initialize and is
> forgotten, no? If it fixes a bug, I think the site fondling the
> uninitialized object should be fixed instead.

No, nothing is using it indeed. I can't remember why I added it.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 16/18] drm/i915: Remove duplicate golden render state init from execlists
  2016-07-21 16:27     ` Chris Wilson
@ 2016-07-21 16:37       ` Chris Wilson
  2016-07-22  9:53         ` Joonas Lahtinen
  0 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-21 16:37 UTC (permalink / raw)
  To: Joonas Lahtinen, intel-gfx

On Thu, Jul 21, 2016 at 05:27:06PM +0100, Chris Wilson wrote:
> On Thu, Jul 21, 2016 at 05:18:17PM +0300, Joonas Lahtinen wrote:
> > On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> > >  static const struct intel_renderstate_rodata *
> > >  render_state_get_rodata(const int gen)
> > >  {
> > > @@ -51,6 +60,7 @@ static int render_state_init(struct render_state *so,
> > >  	int ret;
> > >  
> > >  	so->gen = INTEL_GEN(dev_priv);
> > > +	so->ggtt_offset = 0;
> > 
> > Previously not done; does it address a bug? It's going to get
> > overwritten, or the render_state has failed to initialize and is
> > forgotten, no? If it fixes a bug, I think the site fondling the
> > uninitialized object should be fixed instead.
> 
> No, nothing is using it indeed. I can't remember why I added it.

Oh, because of gcc.

drivers/gpu/drm/i915/i915_gem_render_state.c: In function ‘i915_gem_render_state_init’:
drivers/gpu/drm/i915/i915_gem_render_state.c:246:6: error: ‘so.ggtt_offset’ may be used uninitialized in this function [-Werror=maybe-uninitialized]
  ret = req->engine->emit_bb_start(req, so.ggtt_offset,
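
An alternative (untested sketch, assuming the local there is declared as
plain "struct render_state so;") would be to zero-initialise it at the
declaration in i915_gem_render_state_init():

	struct render_state so = {};

which should equally silence -Wmaybe-uninitialized.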

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 12/18] drm/i915: Unify request submission
  2016-07-20 13:12 ` [PATCH 12/18] drm/i915: Unify request submission Chris Wilson
@ 2016-07-22  8:03   ` Joonas Lahtinen
  2016-07-22  8:24     ` Chris Wilson
  2016-07-27 17:51     ` Dave Gordon
  0 siblings, 2 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  8:03 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> Move request submission from emit_request into its own common vfunc
> from i915_add_request().
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_gem_request.c    |  7 +++----
>  drivers/gpu/drm/i915/i915_guc_submission.c |  9 ++++++---
>  drivers/gpu/drm/i915/intel_guc.h           |  1 -
>  drivers/gpu/drm/i915/intel_lrc.c           | 10 +++-------
>  drivers/gpu/drm/i915/intel_ringbuffer.c    | 26 ++++++++++----------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h    | 23 +++++++++++------------
>  6 files changed, 33 insertions(+), 43 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 408f390a4c98..3e633b47213c 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -469,12 +469,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  	 */
>  	request->postfix = intel_ring_get_tail(ring);
>  
> -	if (i915.enable_execlists)
> -		ret = engine->emit_request(request);
> -	else
> -		ret = engine->add_request(request);
>  	/* Not allowed to fail! */
> +	ret = engine->emit_request(request);
>  	WARN(ret, "emit|add_request failed: %d!\n", ret);

You should fix the message too; s/|add//

> +
>  	/* Sanity check that the reserved size was large enough. */
>  	ret = intel_ring_get_tail(ring) - request_start;
>  	if (ret < 0)
> @@ -485,6 +483,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  		  reserved_tail, ret);
>  
>  	i915_gem_mark_busy(engine);
> +	engine->submit_request(request);
>  }
>  
>  static unsigned long local_clock_us(unsigned int *cpu)
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index eccd34832fe6..32d0e1890950 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -585,7 +585,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   * The only error here arises if the doorbell hardware isn't functioning
>   * as expected, which really shouln't happen.
>   */
> -int i915_guc_submit(struct drm_i915_gem_request *rq)
> +static void i915_guc_submit(struct drm_i915_gem_request *rq)
>  {
>  	unsigned int engine_id = rq->engine->id;
>  	struct intel_guc *guc = &rq->i915->guc;
> @@ -602,8 +602,6 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
>  
>  	guc->submissions[engine_id] += 1;
>  	guc->last_seqno[engine_id] = rq->fence.seqno;
> -
> -	return b_ret;

Maybe we should have WARN(b_ret, "something")? Although I see the return
value was not handled previously. CC'ing Dave to comment on this too.
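
I.e. something like (sketch):

	b_ret = guc_ring_doorbell(gc);
	/* the doorbell write should not fail unless the hw is broken */
	WARN(b_ret, "guc_ring_doorbell failed: %d\n", b_ret);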

>  }
>  
>  /*
> @@ -992,6 +990,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
>  {
>  	struct intel_guc *guc = &dev_priv->guc;
>  	struct i915_guc_client *client;
> +	struct intel_engine_cs *engine;
>  
>  	/* client for execbuf submission */
>  	client = guc_client_alloc(dev_priv,
> @@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
>  	host2guc_sample_forcewake(guc, client);
>  	guc_init_doorbell_hw(guc);
>  
> +	/* Take over from manual control of ELSP (execlists) */
> +	for_each_engine(engine, dev_priv)
> +		engine->submit_request = i915_guc_submit;
> +
>  	return 0;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> index 3e3e743740c0..623cf26cd784 100644
> --- a/drivers/gpu/drm/i915/intel_guc.h
> +++ b/drivers/gpu/drm/i915/intel_guc.h
> @@ -160,7 +160,6 @@ extern int intel_guc_resume(struct drm_device *dev);
>  int i915_guc_submission_init(struct drm_i915_private *dev_priv);
>  int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
>  int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
> -int i915_guc_submit(struct drm_i915_gem_request *rq);
>  void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
>  void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
>  
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index d17a193e8eaf..52edbcc9bca0 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -773,12 +773,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>  	 */
>  	request->previous_context = engine->last_context;
>  	engine->last_context = request->ctx;
> -
> -	if (i915.enable_guc_submission)
> -		i915_guc_submit(request);
> -	else
> -		execlists_context_queue(request);
> -

The function name is still advance_and_submit, yet now the call to submit
is moved to add_request; I'm confused.

>  	return 0;
>  }
>  
> @@ -1904,8 +1898,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>  {
>  	/* Default vfuncs which can be overriden by each engine. */
>  	engine->init_hw = gen8_init_common_ring;
> -	engine->emit_request = gen8_emit_request;
>  	engine->emit_flush = gen8_emit_flush;
> +	engine->emit_request = gen8_emit_request;
> +	engine->submit_request = execlists_context_queue;

The execlists_context_queue name could be changed too; it's just one
definition and one call site.

> +
>  	engine->irq_enable = gen8_logical_ring_enable_irq;
>  	engine->irq_disable = gen8_logical_ring_disable_irq;
>  	engine->emit_bb_start = gen8_emit_bb_start;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 43dfa4be1cfd..907d933d62aa 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1427,15 +1427,14 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  }
>  
>  /**
> - * gen6_add_request - Update the semaphore mailbox registers
> + * gen6_emit_request - Update the semaphore mailbox registers
>   *
>   * @request - request to write to the ring
>   *
>   * Update the mailbox registers in the *other* rings with the current seqno.
>   * This acts like a signal in the canonical semaphore.
>   */
> -static int
> -gen6_add_request(struct drm_i915_gem_request *req)
> +static int gen6_emit_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_ring *ring = req->ring;
>  	int ret;
> @@ -1456,13 +1455,10 @@ gen6_add_request(struct drm_i915_gem_request *req)
>  
>  	req->tail = intel_ring_get_tail(ring);
>  
> -	req->engine->submit_request(req);
> -
>  	return 0;
>  }
>  
> -static int
> -gen8_render_add_request(struct drm_i915_gem_request *req)
> +static int gen8_render_emit_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_engine_cs *engine = req->engine;
>  	struct intel_ring *ring = req->ring;
> @@ -1486,8 +1482,9 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, 0);
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
>  	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>  
> -	req->engine->submit_request(req);
> +	req->tail = intel_ring_get_tail(ring);

Ditto req->tail = ring->tail;

>  
>  	return 0;
>  }
> @@ -1692,8 +1689,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>  	return 0;
>  }
>  
> -static int
> -i9xx_add_request(struct drm_i915_gem_request *req)
> +static int i9xx_emit_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_ring *ring = req->ring;
>  	int ret;
> @@ -1710,8 +1706,6 @@ i9xx_add_request(struct drm_i915_gem_request *req)
>  
>  	req->tail = intel_ring_get_tail(ring);
>  
> -	req->engine->submit_request(req);
> -
>  	return 0;
>  }
>  
> @@ -2813,11 +2807,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
>  				      struct intel_engine_cs *engine)
>  {
>  	engine->init_hw = init_ring_common;
> -	engine->submit_request = i9xx_submit_request;
>  
> -	engine->add_request = i9xx_add_request;
> +	engine->emit_request = i9xx_emit_request;
>  	if (INTEL_GEN(dev_priv) >= 6)
> -		engine->add_request = gen6_add_request;
> +		engine->emit_request = gen6_emit_request;
> +	engine->submit_request = i9xx_submit_request;
>  
>  	if (INTEL_GEN(dev_priv) >= 8)
>  		engine->emit_bb_start = gen8_emit_bb_start;
> @@ -2846,7 +2840,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
>  
>  	if (INTEL_GEN(dev_priv) >= 8) {
>  		engine->init_context = intel_rcs_ctx_init;
> -		engine->add_request = gen8_render_add_request;
> +		engine->emit_request = gen8_render_emit_request;
>  		engine->emit_flush = gen8_render_ring_flush;
>  		if (i915.semaphores)
>  			engine->semaphore.signal = gen8_rcs_signal;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 1a38c383327e..856b732ddbbd 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -204,7 +204,17 @@ struct intel_engine_cs {
>  
>  	int		(*init_context)(struct drm_i915_gem_request *req);
>  
> -	int		(*add_request)(struct drm_i915_gem_request *req);
> +	int		(*emit_flush)(struct drm_i915_gem_request *request,
> +				      u32 invalidate_domains,
> +				      u32 flush_domains);
> +	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
> +					 u64 offset, u32 length,
> +					 unsigned int dispatch_flags);
> +#define I915_DISPATCH_SECURE 0x1
> +#define I915_DISPATCH_PINNED 0x2
> +#define I915_DISPATCH_RS     0x4

Same here, maybe BIT(0) etc?

Really like how the code looks more consistent now!

Regards, Joonas

> +	int		(*emit_request)(struct drm_i915_gem_request *req);
> +	void		(*submit_request)(struct drm_i915_gem_request *req);
>  	/* Some chipsets are not quite as coherent as advertised and need
>  	 * an expensive kick to force a true read of the up-to-date seqno.
>  	 * However, the up-to-date seqno is not always required and the last
> @@ -282,17 +292,6 @@ struct intel_engine_cs {
>  	unsigned int idle_lite_restore_wa;
>  	bool disable_lite_restore_wa;
>  	u32 ctx_desc_template;
> -	int		(*emit_request)(struct drm_i915_gem_request *request);
> -	int		(*emit_flush)(struct drm_i915_gem_request *request,
> -				      u32 invalidate_domains,
> -				      u32 flush_domains);
> -	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
> -					 u64 offset, u32 length,
> -					 unsigned int dispatch_flags);
> -#define I915_DISPATCH_SECURE 0x1
> -#define I915_DISPATCH_PINNED 0x2
> -#define I915_DISPATCH_RS     0x4
> -	void		(*submit_request)(struct drm_i915_gem_request *req);
>  
>  	/**
>  	 * List of objects currently involved in rendering from the
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 13/18] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal()
  2016-07-20 13:12 ` [PATCH 13/18] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal() Chris Wilson
@ 2016-07-22  8:15   ` Joonas Lahtinen
  2016-07-22  8:30     ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  8:15 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> Rather than pass in the num_dwords that the caller wishes to use after
> the signal command packet, split the breadcrumb emission into two phases
> and have both the signal and breadcrumb individiually acquire space on
> the ring. This makes the interface simpler for the reader, and will
> simplify for patches.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 51 ++++++++++++++-------------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  4 +--
>  2 files changed, 23 insertions(+), 32 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 907d933d62aa..9c66745fc8d7 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1308,10 +1308,8 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
>  	intel_fini_pipe_control(engine);
>  }
>  
> -static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
> -			   unsigned int num_dwords)
> +static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req)
>  {
> -#define MBOX_UPDATE_DWORDS 8
>  	struct intel_ring *signaller = signaller_req->ring;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *waiter;
> @@ -1319,10 +1317,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
>  	int ret, num_rings;
>  
>  	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
> -	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
> -#undef MBOX_UPDATE_DWORDS
> -
> -	ret = intel_ring_begin(signaller_req, num_dwords);
> +	ret = intel_ring_begin(signaller_req, (num_rings-1) * 8);

Magic number. Just make the defines GEN?_?CS_MBOX_UPDATE_DWORDS? 
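
Something like this (name only a suggestion):

#define GEN8_RCS_MBOX_UPDATE_DWORDS 8

	ret = intel_ring_begin(signaller_req,
			       (num_rings - 1) * GEN8_RCS_MBOX_UPDATE_DWORDS);
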
>  	if (ret)
>  		return ret;
>  
> @@ -1346,14 +1341,13 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
>  				MI_SEMAPHORE_TARGET(waiter->hw_id));
>  		intel_ring_emit(signaller, 0);
>  	}
> +	intel_ring_advance(signaller);
>  
>  	return 0;
>  }
>  
> -static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
> -			   unsigned int num_dwords)
> +static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req)
>  {
> -#define MBOX_UPDATE_DWORDS 6
>  	struct intel_ring *signaller = signaller_req->ring;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
>  	struct intel_engine_cs *waiter;
> @@ -1361,10 +1355,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  	int ret, num_rings;
>  
>  	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
> -	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
> -#undef MBOX_UPDATE_DWORDS
> -
> -	ret = intel_ring_begin(signaller_req, num_dwords);
> +	ret = intel_ring_begin(signaller_req, (num_rings-1) * 6);

Magic number, see above.

>  	if (ret)
>  		return ret;
>  
> @@ -1386,12 +1377,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
>  				MI_SEMAPHORE_TARGET(waiter->hw_id));
>  		intel_ring_emit(signaller, 0);
>  	}
> +	intel_ring_advance(signaller);
>  
>  	return 0;
>  }
>  
> -static int gen6_signal(struct drm_i915_gem_request *signaller_req,
> -		       unsigned int num_dwords)
> +static int gen6_signal(struct drm_i915_gem_request *signaller_req)
>  {
>  	struct intel_ring *signaller = signaller_req->ring;
>  	struct drm_i915_private *dev_priv = signaller_req->i915;
> @@ -1399,12 +1390,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  	enum intel_engine_id id;
>  	int ret, num_rings;
>  
> -#define MBOX_UPDATE_DWORDS 3
>  	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
> -	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
> -#undef MBOX_UPDATE_DWORDS
> -
> -	ret = intel_ring_begin(signaller_req, num_dwords);
> +	ret = intel_ring_begin(signaller_req, round_up((num_rings-1) * 3, 2));

Magic.

>  	if (ret)
>  		return ret;
>  
> @@ -1422,6 +1409,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  	/* If num_dwords was rounded, make sure the tail pointer is correct */
>  	if (num_rings % 2 == 0)
>  		intel_ring_emit(signaller, MI_NOOP);
> +	intel_ring_advance(signaller);
>  
>  	return 0;
>  }
> @@ -1439,11 +1427,13 @@ static int gen6_emit_request(struct drm_i915_gem_request *req)
>  	struct intel_ring *ring = req->ring;
>  	int ret;
>  
> -	if (req->engine->semaphore.signal)
> -		ret = req->engine->semaphore.signal(req, 4);
> -	else
> -		ret = intel_ring_begin(req, 4);
> +	if (req->engine->semaphore.signal) {
> +		ret = req->engine->semaphore.signal(req);
> +		if (ret)
> +			return ret;
> +	}
>  

An alias "engine" would be cool while touching, it is there for ring,
too.
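
I.e. (sketch):

	struct intel_engine_cs *engine = req->engine;

	if (engine->semaphore.signal) {
		ret = engine->semaphore.signal(req);
		if (ret)
			return ret;
	}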

> +	ret = intel_ring_begin(req, 4);

Magic.

>  	if (ret)
>  		return ret;
>  
> @@ -1464,10 +1454,13 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
>  	struct intel_ring *ring = req->ring;
>  	int ret;
>  
> -	if (engine->semaphore.signal)
> -		ret = engine->semaphore.signal(req, 8);
> -	else
> -		ret = intel_ring_begin(req, 8);
> +	if (engine->semaphore.signal) {
> +		ret = engine->semaphore.signal(req);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	ret = intel_ring_begin(req, 8);

Pre-existing magic, no excuse not to convert.

Regards, Joonas

>  	if (ret)
>  		return ret;
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 856b732ddbbd..08e86204a3d5 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -278,9 +278,7 @@ struct intel_engine_cs {
>  		int	(*sync_to)(struct drm_i915_gem_request *to_req,
>  				   struct intel_engine_cs *from,
>  				   u32 seqno);
> -		int	(*signal)(struct drm_i915_gem_request *signaller_req,
> -				  /* num_dwords needed by caller */
> -				  unsigned int num_dwords);
> +		int	(*signal)(struct drm_i915_gem_request *signaller_req);
>  	} semaphore;
>  
>  	/* Execlists */
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 12/18] drm/i915: Unify request submission
  2016-07-22  8:03   ` Joonas Lahtinen
@ 2016-07-22  8:24     ` Chris Wilson
  2016-07-27 17:51     ` Dave Gordon
  1 sibling, 0 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-22  8:24 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Fri, Jul 22, 2016 at 11:03:19AM +0300, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> > @@ -1904,8 +1898,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
> >  {
> >  	/* Default vfuncs which can be overriden by each engine. */
> >  	engine->init_hw = gen8_init_common_ring;
> > -	engine->emit_request = gen8_emit_request;
> >  	engine->emit_flush = gen8_emit_flush;
> > +	engine->emit_request = gen8_emit_request;
> > +	engine->submit_request = execlists_context_queue;
> 
> The execlists_context_queue name could be changed too; it is just the
> definition and one call site.

Could, but at the moment execlists_context_queue is paired with
execlists_context_unqueue, and we already have
execlists_submit_requests().

Not that we are queueing contexts either.

Perhaps.

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index a9ca31c113c3..a1908b2caf72 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -384,8 +384,8 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
                execlists_update_context_pdps(ppgtt, reg_state);
 }
 
-static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
-                                     struct drm_i915_gem_request *rq1)
+static void execlists_submit_elsp(struct drm_i915_gem_request *rq0,
+                                 struct drm_i915_gem_request *rq1)

This is weak. execlists_elsp_submit_contexts?

 {
        struct drm_i915_private *dev_priv = rq0->i915;
        unsigned int fw_domains = rq0->engine->fw_domains;
@@ -418,7 +418,7 @@ static inline void execlists_context_status_change(
        atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
 }
 
-static void execlists_context_unqueue(struct intel_engine_cs *engine)
+static void execlists_unqueue(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
        struct drm_i915_gem_request *cursor, *tmp;
@@ -486,7 +486,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
                req0->tail &= req0->ring->size - 1;
        }
 
-       execlists_submit_requests(req0, req1);
+       execlists_submit_elsp(req0, req1);
 }
 
 static unsigned int
@@ -597,7 +597,7 @@ static void intel_lrc_irq_handler(unsigned long data)
        if (submit_contexts) {
                if (!engine->disable_lite_restore_wa ||
                    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
-                       execlists_context_unqueue(engine);
+                       execlists_unqueue(engine);
        }
 
        spin_unlock(&engine->execlist_lock);
@@ -606,7 +606,7 @@ static void intel_lrc_irq_handler(unsigned long data)
                DRM_ERROR("More than two context complete events?\n");
 }
 
-static void execlists_context_queue(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
        struct intel_engine_cs *engine = request->engine;
        struct drm_i915_gem_request *cursor;
@@ -637,7 +637,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
        list_add_tail(&request->execlist_link, &engine->execlist_queue);
        request->ctx_hw_id = request->ctx->hw_id;
        if (num_elements == 0)
-               execlists_context_unqueue(engine);
+               execlists_unqueue(engine);
 
        spin_unlock_bh(&engine->execlist_lock);
 }
@@ -1908,7 +1908,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
        engine->init_hw = gen8_init_common_ring;
        engine->emit_flush = gen8_emit_flush;
        engine->emit_request = gen8_emit_request;
-       engine->submit_request = execlists_context_queue;
+       engine->submit_request = execlists_submit_request;
 
        engine->irq_enable = gen8_logical_ring_enable_irq;
        engine->irq_disable = gen8_logical_ring_disable_irq;



-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* Re: [PATCH 13/18] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal()
  2016-07-22  8:15   ` Joonas Lahtinen
@ 2016-07-22  8:30     ` Chris Wilson
  2016-07-22  9:06       ` Joonas Lahtinen
  0 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-22  8:30 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Fri, Jul 22, 2016 at 11:15:59AM +0300, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> > Rather than pass in the num_dwords that the caller wishes to use after
> > the signal command packet, split the breadcrumb emission into two phases
> > and have both the signal and breadcrumb individually acquire space on
> > the ring. This makes the interface simpler for the reader, and will
> > simplify subsequent patches.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >  drivers/gpu/drm/i915/intel_ringbuffer.c | 51 ++++++++++++++-------------------
> >  drivers/gpu/drm/i915/intel_ringbuffer.h |  4 +--
> >  2 files changed, 23 insertions(+), 32 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > index 907d933d62aa..9c66745fc8d7 100644
> > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > @@ -1308,10 +1308,8 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
> >  	intel_fini_pipe_control(engine);
> >  }
> >  
> > -static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
> > -			   unsigned int num_dwords)
> > +static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req)
> >  {
> > -#define MBOX_UPDATE_DWORDS 8
> >  	struct intel_ring *signaller = signaller_req->ring;
> >  	struct drm_i915_private *dev_priv = signaller_req->i915;
> >  	struct intel_engine_cs *waiter;
> > @@ -1319,10 +1317,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
> >  	int ret, num_rings;
> >  
> >  	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
> > -	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
> > -#undef MBOX_UPDATE_DWORDS
> > -
> > -	ret = intel_ring_begin(signaller_req, num_dwords);
> > +	ret = intel_ring_begin(signaller_req, (num_rings-1) * 8);
> 
> Magic number. Just make the defines GEN?_?CS_MBOX_UPDATE_DWORDS? 

No. It is important that these are very clear as the reviewer is
required to check the number of dwords emitted.
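
As an illustration (trimmed from the i9xx breadcrumb elsewhere in this
series, not new code): with a literal, the count handed to
intel_ring_begin() can be checked against the emits that follow at a
glance, whereas a define only moves the number the reviewer still has
to verify.

	ret = intel_ring_begin(req, 4);		/* four dwords requested... */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, req->fence.seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);	/* ...and four emitted */
	intel_ring_advance(ring);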
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 14/18] drm/i915: Reuse legacy breadcrumbs + tail emission
  2016-07-20 13:12 ` [PATCH 14/18] drm/i915: Reuse legacy breadcrumbs + tail emission Chris Wilson
@ 2016-07-22  8:34   ` Joonas Lahtinen
  0 siblings, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  8:34 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> As GEN6+ is now a simple variant on the basic breadcrumbs + tail write,
> reuse the common code.
> 

Code motion could always be a separate patch, for easier review of which
functions were changed.

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 71 ++++++++++++++-------------------
>  1 file changed, 29 insertions(+), 42 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 9c66745fc8d7..a74b42fc8f48 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1414,25 +1414,18 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req)
>  	return 0;
>  }
>  
> -/**
> - * gen6_emit_request - Update the semaphore mailbox registers
> - *
> - * @request - request to write to the ring
> - *
> - * Update the mailbox registers in the *other* rings with the current seqno.
> - * This acts like a signal in the canonical semaphore.
> - */
> -static int gen6_emit_request(struct drm_i915_gem_request *req)
> +static void i9xx_submit_request(struct drm_i915_gem_request *request)
> +{
> +	struct drm_i915_private *dev_priv = request->i915;
> +
> +	I915_WRITE_TAIL(request->engine, request->tail);
> +}
> +
> +static int i9xx_emit_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_ring *ring = req->ring;
>  	int ret;
>  
> -	if (req->engine->semaphore.signal) {
> -		ret = req->engine->semaphore.signal(req);
> -		if (ret)
> -			return ret;
> -	}
> -
>  	ret = intel_ring_begin(req, 4);
>  	if (ret)
>  		return ret;
> @@ -1448,6 +1441,27 @@ static int gen6_emit_request(struct drm_i915_gem_request *req)
>  	return 0;
>  }
>  
> +/**
> + * gen6_emit_request - Update the semaphore mailbox registers
> + *
> + * @request - request to write to the ring
> + *
> + * Update the mailbox registers in the *other* rings with the current seqno.
> + * This acts like a signal in the canonical semaphore.
> + */
> +static int gen6_emit_request(struct drm_i915_gem_request *req)
> +{
> +	if (req->engine->semaphore.signal) {
> +		int ret;
> +
> +		ret = req->engine->semaphore.signal(req);
> +		if (ret)
> +			return ret;
> +	}
> +
> +	return i9xx_emit_request(req);
> +}
> +
>  static int gen8_render_emit_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_engine_cs *engine = req->engine;
> @@ -1682,33 +1696,6 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>  	return 0;
>  }
>  
> -static int i9xx_emit_request(struct drm_i915_gem_request *req)
> -{
> -	struct intel_ring *ring = req->ring;
> -	int ret;
> -
> -	ret = intel_ring_begin(req, 4);
> -	if (ret)
> -		return ret;
> -
> -	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
> -	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
> -	intel_ring_emit(ring, req->fence.seqno);
> -	intel_ring_emit(ring, MI_USER_INTERRUPT);
> -	intel_ring_advance(ring);
> -
> -	req->tail = intel_ring_get_tail(ring);
> -
> -	return 0;
> -}
> -
> -static void i9xx_submit_request(struct drm_i915_gem_request *request)
> -{
> -	struct drm_i915_private *dev_priv = request->i915;
> -
> -	I915_WRITE_TAIL(request->engine, request->tail);
> -}
> -
>  static void
>  gen6_irq_enable(struct intel_engine_cs *engine)
>  {
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 17/18] drm/i915: Unify legacy/execlists submit_execbuf callbacks
  2016-07-20 13:12 ` [PATCH 17/18] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
@ 2016-07-22  8:45   ` Joonas Lahtinen
  0 siblings, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  8:45 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> Now that emitting requests is identical between legacy and execlists, we
> can use the same function to build up the ring for submitting to either
> engine. (With the exception of i915_switch_context(), but in time that
> will also be handled gracefully.)
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

This series craves some T-b's.

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/i915_drv.h            |  20 -----
>  drivers/gpu/drm/i915/i915_gem.c            |   2 -
>  drivers/gpu/drm/i915/i915_gem_context.c    |   7 +-
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  24 ++++--
>  drivers/gpu/drm/i915/intel_lrc.c           | 123 -----------------------------
>  drivers/gpu/drm/i915/intel_lrc.h           |   4 -
>  6 files changed, 21 insertions(+), 159 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 3f67431577e3..f188c9a9b746 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1705,18 +1705,6 @@ struct i915_virtual_gpu {
>  	bool active;
>  };
>  
> -struct i915_execbuffer_params {
> -	struct drm_device               *dev;
> -	struct drm_file                 *file;
> -	uint32_t                        dispatch_flags;
> -	uint32_t                        args_batch_start_offset;
> -	uint64_t                        batch_obj_vm_offset;
> -	struct intel_engine_cs *engine;
> -	struct drm_i915_gem_object      *batch_obj;
> -	struct i915_gem_context            *ctx;
> -	struct drm_i915_gem_request     *request;
> -};
> -
>  /* used in computing the new watermarks state */
>  struct intel_wm_config {
>  	unsigned int num_pipes_active;
> @@ -2016,9 +2004,6 @@ struct drm_i915_private {
>  
>  	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
>  	struct {
> -		int (*execbuf_submit)(struct i915_execbuffer_params *params,
> -				      struct drm_i915_gem_execbuffer2 *args,
> -				      struct list_head *vmas);
>  		void (*cleanup_engine)(struct intel_engine_cs *engine);
>  		void (*stop_engine)(struct intel_engine_cs *engine);
>  
> @@ -2990,11 +2975,6 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
>  			      struct drm_file *file_priv);
>  int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
>  			     struct drm_file *file_priv);
> -void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
> -					struct drm_i915_gem_request *req);
> -int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
> -				   struct drm_i915_gem_execbuffer2 *args,
> -				   struct list_head *vmas);
>  int i915_gem_execbuffer(struct drm_device *dev, void *data,
>  			struct drm_file *file_priv);
>  int i915_gem_execbuffer2(struct drm_device *dev, void *data,
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 77d7c0b012f4..9fdecef34fa8 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -4531,11 +4531,9 @@ int i915_gem_init(struct drm_device *dev)
>  	mutex_lock(&dev->struct_mutex);
>  
>  	if (!i915.enable_execlists) {
> -		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
>  		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
>  		dev_priv->gt.stop_engine = intel_engine_stop;
>  	} else {
> -		dev_priv->gt.execbuf_submit = intel_execlists_submission;
>  		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
>  		dev_priv->gt.stop_engine = intel_logical_ring_stop;
>  	}
> diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
> index e1eed0f449c6..72b21c7b7547 100644
> --- a/drivers/gpu/drm/i915/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/i915_gem_context.c
> @@ -893,8 +893,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
>  {
>  	struct intel_engine_cs *engine = req->engine;
>  
> -	WARN_ON(i915.enable_execlists);
>  	lockdep_assert_held(&req->i915->drm.struct_mutex);
> +	if (i915.enable_execlists)
> +		return 0;
>  
>  	if (!req->ctx->engine[engine->id].state) {
>  		struct i915_gem_context *to = req->ctx;
> @@ -942,9 +943,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
>  		if (IS_ERR(req))
>  			return PTR_ERR(req);
>  
> -		ret = 0;
> -		if (!i915.enable_execlists)
> -			ret = i915_switch_context(req);
> +		ret = i915_switch_context(req);
>  		i915_add_request_no_flush(req);
>  		if (ret)
>  			return ret;
> diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> index 2d9f1f4bc058..e302477418d8 100644
> --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
> @@ -42,6 +42,18 @@
>  
>  #define BATCH_OFFSET_BIAS (256*1024)
>  
> +struct i915_execbuffer_params {
> +	struct drm_device               *dev;
> +	struct drm_file                 *file;
> +	u32				 dispatch_flags;
> +	u32				 args_batch_start_offset;
> +	u32				 batch_obj_vm_offset;
> +	struct intel_engine_cs          *engine;
> +	struct drm_i915_gem_object      *batch_obj;
> +	struct i915_gem_context         *ctx;
> +	struct drm_i915_gem_request     *request;
> +};
> +
>  struct eb_vmas {
>  	struct list_head vmas;
>  	int and;
> @@ -1117,7 +1129,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
>  	return ctx;
>  }
>  
> -void
> +static void
>  i915_gem_execbuffer_move_to_active(struct list_head *vmas,
>  				   struct drm_i915_gem_request *req)
>  {
> @@ -1244,10 +1256,10 @@ err:
>  		return ERR_PTR(ret);
>  }
>  
> -int
> -i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
> -			       struct drm_i915_gem_execbuffer2 *args,
> -			       struct list_head *vmas)
> +static int
> +execbuf_submit(struct i915_execbuffer_params *params,
> +	       struct drm_i915_gem_execbuffer2 *args,
> +	       struct list_head *vmas)
>  {
>  	struct drm_i915_private *dev_priv = params->request->i915;
>  	u64 exec_start, exec_len;
> @@ -1636,7 +1648,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
>  	params->batch_obj               = batch_obj;
>  	params->ctx                     = ctx;
>  
> -	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
> +	ret = execbuf_submit(params, args, &eb->vmas);
>  err_request:
>  	i915_gem_execbuffer_retire_commands(params);
>  
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index bce37d0d431f..8d1589f0ea7e 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -642,39 +642,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
>  	spin_unlock_bh(&engine->execlist_lock);
>  }
>  
> -static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
> -				 struct list_head *vmas)
> -{
> -	const unsigned other_rings = ~intel_engine_flag(req->engine);
> -	struct i915_vma *vma;
> -	uint32_t flush_domains = 0;
> -	bool flush_chipset = false;
> -	int ret;
> -
> -	list_for_each_entry(vma, vmas, exec_list) {
> -		struct drm_i915_gem_object *obj = vma->obj;
> -
> -		if (obj->active & other_rings) {
> -			ret = i915_gem_object_sync(obj, req);
> -			if (ret)
> -				return ret;
> -		}
> -
> -		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
> -			flush_chipset |= i915_gem_clflush_object(obj, false);
> -
> -		flush_domains |= obj->base.write_domain;
> -	}
> -
> -	if (flush_domains & I915_GEM_DOMAIN_GTT)
> -		wmb();
> -
> -	/* Unconditionally invalidate gpu caches and ensure that we do flush
> -	 * any residual writes from the previous batch.
> -	 */
> -	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
> -}
> -
>  int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
>  {
>  	struct intel_engine_cs *engine = request->engine;
> @@ -776,96 +743,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>  	return 0;
>  }
>  
> -/**
> - * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
> - * @params: execbuffer call parameters.
> - * @args: execbuffer call arguments.
> - * @vmas: list of vmas.
> - *
> - * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
> - * away the submission details of the execbuffer ioctl call.
> - *
> - * Return: non-zero if the submission fails.
> - */
> -int intel_execlists_submission(struct i915_execbuffer_params *params,
> -			       struct drm_i915_gem_execbuffer2 *args,
> -			       struct list_head *vmas)
> -{
> -	struct drm_device       *dev = params->dev;
> -	struct intel_engine_cs *engine = params->engine;
> -	struct drm_i915_private *dev_priv = to_i915(dev);
> -	struct intel_ring *ring = params->request->ring;
> -	u64 exec_start;
> -	int instp_mode;
> -	u32 instp_mask;
> -	int ret;
> -
> -	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
> -	instp_mask = I915_EXEC_CONSTANTS_MASK;
> -	switch (instp_mode) {
> -	case I915_EXEC_CONSTANTS_REL_GENERAL:
> -	case I915_EXEC_CONSTANTS_ABSOLUTE:
> -	case I915_EXEC_CONSTANTS_REL_SURFACE:
> -		if (instp_mode != 0 && engine->id != RCS) {
> -			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
> -			return -EINVAL;
> -		}
> -
> -		if (instp_mode != dev_priv->relative_constants_mode) {
> -			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
> -				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
> -				return -EINVAL;
> -			}
> -
> -			/* The HW changed the meaning on this bit on gen6 */
> -			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
> -		}
> -		break;
> -	default:
> -		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
> -		return -EINVAL;
> -	}
> -
> -	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
> -		DRM_DEBUG("sol reset is gen7 only\n");
> -		return -EINVAL;
> -	}
> -
> -	ret = execlists_move_to_gpu(params->request, vmas);
> -	if (ret)
> -		return ret;
> -
> -	if (engine->id == RCS &&
> -	    instp_mode != dev_priv->relative_constants_mode) {
> -		ret = intel_ring_begin(params->request, 4);
> -		if (ret)
> -			return ret;
> -
> -		intel_ring_emit(ring, MI_NOOP);
> -		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
> -		intel_ring_emit_reg(ring, INSTPM);
> -		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
> -		intel_ring_advance(ring);
> -
> -		dev_priv->relative_constants_mode = instp_mode;
> -	}
> -
> -	exec_start = params->batch_obj_vm_offset +
> -		     args->batch_start_offset;
> -
> -	ret = engine->emit_bb_start(params->request,
> -				    exec_start, args->batch_len,
> -				    params->dispatch_flags);
> -	if (ret)
> -		return ret;
> -
> -	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
> -
> -	i915_gem_execbuffer_move_to_active(vmas, params->request);
> -
> -	return 0;
> -}
> -
>  void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
>  {
>  	struct drm_i915_gem_request *req, *tmp;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
> index 212ee7c43438..0f9c9925985c 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.h
> +++ b/drivers/gpu/drm/i915/intel_lrc.h
> @@ -95,10 +95,6 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
>  /* Execlists */
>  int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
>  				    int enable_execlists);
> -struct i915_execbuffer_params;
> -int intel_execlists_submission(struct i915_execbuffer_params *params,
> -			       struct drm_i915_gem_execbuffer2 *args,
> -			       struct list_head *vmas);
>  
>  void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
>  
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 18/18] drm/i915: Simplify calling engine->sync_to
  2016-07-20 13:12 ` [PATCH 18/18] drm/i915: Simplify calling engine->sync_to Chris Wilson
@ 2016-07-22  8:59   ` Joonas Lahtinen
  2016-07-22  9:14     ` [PATCH] drm/i915: Rename engine->semaphore.sync_to, engine->semaphore.signal locals Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  8:59 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> Since requests can no longer be generated as a side-effect of
> intel_ring_begin(), we know that the seqno will be unchanged during
> ring-emission. This predictability then means we do not have to check
> for the seqno wrapping around whilst emitting the semaphore for
> engine->sync_to().
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_drv.h         |  2 +-
>  drivers/gpu/drm/i915/i915_gem.c         | 13 ++-----
>  drivers/gpu/drm/i915/i915_gem_request.c |  9 +----
>  drivers/gpu/drm/i915/intel_ringbuffer.c | 64 ++++++++++++---------------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h |  5 ++-
>  5 files changed, 30 insertions(+), 63 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index f188c9a9b746..c374b8687d87 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1757,7 +1757,7 @@ struct drm_i915_private {
>  	struct i915_gem_context *kernel_context;
>  	struct intel_engine_cs engine[I915_NUM_ENGINES];
>  	struct drm_i915_gem_object *semaphore_obj;
> -	uint32_t last_seqno, next_seqno;
> +	u32 next_seqno;
>  
>  	struct drm_dma_handle *status_page_dmah;
>  	struct resource mch_res;
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 9fdecef34fa8..0b7a0e6f9dd1 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2864,22 +2864,15 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
>  		i915_gem_object_retire_request(obj, from);
>  	} else {
>  		int idx = intel_engine_sync_index(from->engine, to->engine);
> -		u32 seqno = i915_gem_request_get_seqno(from);
> -
> -		if (seqno <= from->engine->semaphore.sync_seqno[idx])
> +		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
>  			return 0;
>  
>  		trace_i915_gem_ring_sync_to(to, from);
> -		ret = to->engine->semaphore.sync_to(to, from->engine, seqno);
> +		ret = to->engine->semaphore.sync_to(to, from);
>  		if (ret)
>  			return ret;
>  
> -		/* We use last_read_req because sync_to()
> -		 * might have just caused seqno wrap under
> -		 * the radar.
> -		 */
> -		from->engine->semaphore.sync_seqno[idx] =
> -			i915_gem_request_get_seqno(obj->last_read_req[from->engine->id]);
> +		from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
>  	}
>  
>  	return 0;
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 3e633b47213c..dfdb86c8a433 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -264,14 +264,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
>  	if (ret)
>  		return ret;
>  
> -	/* Carefully set the last_seqno value so that wrap
> -	 * detection still works
> -	 */
>  	dev_priv->next_seqno = seqno;
> -	dev_priv->last_seqno = seqno - 1;
> -	if (dev_priv->last_seqno == 0)
> -		dev_priv->last_seqno--;
> -
>  	return 0;
>  }
>  
> @@ -288,7 +281,7 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
>  		dev_priv->next_seqno = 1;
>  	}
>  
> -	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
> +	*seqno = dev_priv->next_seqno++;
>  	return 0;
>  }
>  
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 8ae25bcc876e..bfeb16025327 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1494,12 +1494,6 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
>  	return 0;
>  }
>  
> -static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
> -					      u32 seqno)
> -{
> -	return dev_priv->last_seqno < seqno;
> -}
> -
>  /**
>   * intel_ring_sync - sync the waiter to the signaller on seqno
>   *
> @@ -1509,24 +1503,23 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
>   */
>  
>  static int
> -gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
> -	       struct intel_engine_cs *signaller,
> -	       u32 seqno)
> +gen8_ring_sync(struct drm_i915_gem_request *wait,

Why not use to/from here too, or in the header, now that they're revamped
in the series? That would bring some clarity. Or maybe rather use wait and
signal in the header, too.

> +	       struct drm_i915_gem_request *signal)
>  {
> -	struct intel_ring *waiter = waiter_req->ring;
> -	struct drm_i915_private *dev_priv = waiter_req->i915;
> -	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
> +	struct intel_ring *waiter = wait->ring;

Just call this "ring" to reduce the confusion from renaming the other
variable; then the ring_begin(wait)/ring_emit(ring) convention makes more
sense.

> +	struct drm_i915_private *dev_priv = wait->i915;
> +	u64 offset = GEN8_WAIT_OFFSET(wait->engine, signal->engine->id);
>  	struct i915_hw_ppgtt *ppgtt;
>  	int ret;
>  
> -	ret = intel_ring_begin(waiter_req, 4);
> +	ret = intel_ring_begin(wait, 4);
>  	if (ret)
>  		return ret;
>  
>  	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
>  				MI_SEMAPHORE_GLOBAL_GTT |
>  				MI_SEMAPHORE_SAD_GTE_SDD);
> -	intel_ring_emit(waiter, seqno);
> +	intel_ring_emit(waiter, signal->fence.seqno);
>  	intel_ring_emit(waiter, lower_32_bits(offset));
>  	intel_ring_emit(waiter, upper_32_bits(offset));
>  	intel_ring_advance(waiter);
> @@ -1536,48 +1529,37 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
>  	 * We do this on the i915_switch_context() following the wait and
>  	 * before the dispatch.
>  	 */
> -	ppgtt = waiter_req->ctx->ppgtt;
> -	if (ppgtt && waiter_req->engine->id != RCS)
> -		ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
> +	ppgtt = wait->ctx->ppgtt;

This could be moved to the initialization line, like elsewhere.

> +	if (ppgtt && wait->engine->id != RCS)
> +		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
>  	return 0;
>  }
>  
>  static int
> -gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
> -	       struct intel_engine_cs *signaller,
> -	       u32 seqno)
> +gen6_ring_sync(struct drm_i915_gem_request *wait,
> +	       struct drm_i915_gem_request *signal)
>  {
> -	struct intel_ring *waiter = waiter_req->ring;
> +	struct intel_ring *waiter = wait->ring;
>  	u32 dw1 = MI_SEMAPHORE_MBOX |
>  		  MI_SEMAPHORE_COMPARE |
>  		  MI_SEMAPHORE_REGISTER;
> -	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
> +	u32 wait_mbox = signal->engine->semaphore.mbox.wait[wait->engine->id];
>  	int ret;
>  
> -	/* Throughout all of the GEM code, seqno passed implies our current
> -	 * seqno is >= the last seqno executed. However for hardware the
> -	 * comparison is strictly greater than.
> -	 */
> -	seqno -= 1;
> -

Finally we get rid of this \o/

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

>  	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
>  
> -	ret = intel_ring_begin(waiter_req, 4);
> +	ret = intel_ring_begin(wait, 4);
>  	if (ret)
>  		return ret;
>  
> -	/* If seqno wrap happened, omit the wait with no-ops */
> -	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
> -		intel_ring_emit(waiter, dw1 | wait_mbox);
> -		intel_ring_emit(waiter, seqno);
> -		intel_ring_emit(waiter, 0);
> -		intel_ring_emit(waiter, MI_NOOP);
> -	} else {
> -		intel_ring_emit(waiter, MI_NOOP);
> -		intel_ring_emit(waiter, MI_NOOP);
> -		intel_ring_emit(waiter, MI_NOOP);
> -		intel_ring_emit(waiter, MI_NOOP);
> -	}
> +	intel_ring_emit(waiter, dw1 | wait_mbox);
> +	/* Throughout all of the GEM code, seqno passed implies our current
> +	 * seqno is >= the last seqno executed. However for hardware the
> +	 * comparison is strictly greater than.
> +	 */
> +	intel_ring_emit(waiter, signal->fence.seqno - 1);
> +	intel_ring_emit(waiter, 0);
> +	intel_ring_emit(waiter, MI_NOOP);
>  	intel_ring_advance(waiter);
>  
>  	return 0;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 08e86204a3d5..65cb6adf26ca 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -275,9 +275,8 @@ struct intel_engine_cs {
>  		};
>  
>  		/* AKA wait() */
> -		int	(*sync_to)(struct drm_i915_gem_request *to_req,
> -				   struct intel_engine_cs *from,
> -				   u32 seqno);
> +		int	(*sync_to)(struct drm_i915_gem_request *to,
> +				   struct drm_i915_gem_request *from);
>  		int	(*signal)(struct drm_i915_gem_request *signaller_req);
>  	} semaphore;
>  
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 13/18] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal()
  2016-07-22  8:30     ` Chris Wilson
@ 2016-07-22  9:06       ` Joonas Lahtinen
  0 siblings, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  9:06 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On pe, 2016-07-22 at 09:30 +0100, Chris Wilson wrote:
> On Fri, Jul 22, 2016 at 11:15:59AM +0300, Joonas Lahtinen wrote:
> > 
> > On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> > > 
> > > Rather than pass in the num_dwords that the caller wishes to use after
> > > the signal command packet, split the breadcrumb emission into two phases
> > > and have both the signal and breadcrumb individually acquire space on
> > > the ring. This makes the interface simpler for the reader, and will
> > > simplify subsequent patches.
> > > 
> > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > ---
> > >  drivers/gpu/drm/i915/intel_ringbuffer.c | 51 ++++++++++++++-------------------
> > >  drivers/gpu/drm/i915/intel_ringbuffer.h |  4 +--
> > >  2 files changed, 23 insertions(+), 32 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > index 907d933d62aa..9c66745fc8d7 100644
> > > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > @@ -1308,10 +1308,8 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
> > >  	intel_fini_pipe_control(engine);
> > >  }
> > >  
> > > -static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
> > > -			   unsigned int num_dwords)
> > > +static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req)
> > >  {
> > > -#define MBOX_UPDATE_DWORDS 8
> > >  	struct intel_ring *signaller = signaller_req->ring;
> > >  	struct drm_i915_private *dev_priv = signaller_req->i915;
> > >  	struct intel_engine_cs *waiter;
> > > @@ -1319,10 +1317,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
> > >  	int ret, num_rings;
> > >  
> > >  	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
> > > -	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
> > > -#undef MBOX_UPDATE_DWORDS
> > > -
> > > -	ret = intel_ring_begin(signaller_req, num_dwords);
> > > +	ret = intel_ring_begin(signaller_req, (num_rings-1) * 8);
> > Magic number. Just make the defines GEN?_?CS_MBOX_UPDATE_DWORDS? 
> No. It is important that these are very clear as the reviewer is
> required to check the number of dwords emitted.

Actually, I noticed later in the series that the counts are now all in the
same function as the emitting, so these comments can be dismissed for
those cases.

Regards, Joonas

> -Chris
> 
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* [PATCH] drm/i915: Rename engine->semaphore.sync_to, engine->semaphore.signal locals
  2016-07-22  8:59   ` Joonas Lahtinen
@ 2016-07-22  9:14     ` Chris Wilson
  2016-07-22  9:28       ` Joonas Lahtinen
  0 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-22  9:14 UTC (permalink / raw)
  To: intel-gfx

In order to be more consistent with the rest of the request construction
and ring emission, use the common names for the ring and request.

Rather than using signaller_req, waiter_req, and intel_ring *wait, we use
plain req and ring.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 132 ++++++++++++++++----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |   6 +-
 2 files changed, 68 insertions(+), 70 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4ed06f5244aa..e595d8399a65 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1322,108 +1322,105 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 	intel_fini_pipe_control(engine);
 }
 
-static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req)
+static int gen8_rcs_signal(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *signaller = signaller_req->ring;
-	struct drm_i915_private *dev_priv = signaller_req->i915;
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	ret = intel_ring_begin(signaller_req, (num_rings-1) * 8);
+	ret = intel_ring_begin(req, (num_rings-1) * 8);
 	if (ret)
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset =
-			signaller_req->engine->semaphore.signal_ggtt[id];
+		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring,
 				PIPE_CONTROL_GLOBAL_GTT_IVB |
 				PIPE_CONTROL_QW_WRITE |
 				PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
-		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-		intel_ring_emit(signaller, signaller_req->fence.seqno);
-		intel_ring_emit(signaller, 0);
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring, lower_32_bits(gtt_offset));
+		intel_ring_emit(ring, upper_32_bits(gtt_offset));
+		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring,
 				MI_SEMAPHORE_SIGNAL |
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
-		intel_ring_emit(signaller, 0);
+		intel_ring_emit(ring, 0);
 	}
-	intel_ring_advance(signaller);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req)
+static int gen8_xcs_signal(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *signaller = signaller_req->ring;
-	struct drm_i915_private *dev_priv = signaller_req->i915;
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	ret = intel_ring_begin(signaller_req, (num_rings-1) * 6);
+	ret = intel_ring_begin(req, (num_rings-1) * 6);
 	if (ret)
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset =
-			signaller_req->engine->semaphore.signal_ggtt[id];
+		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring,
 				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring,
 				lower_32_bits(gtt_offset) |
 				MI_FLUSH_DW_USE_GTT);
-		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-		intel_ring_emit(signaller, signaller_req->fence.seqno);
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring, upper_32_bits(gtt_offset));
+		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring,
 				MI_SEMAPHORE_SIGNAL |
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
-		intel_ring_emit(signaller, 0);
+		intel_ring_emit(ring, 0);
 	}
-	intel_ring_advance(signaller);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static int gen6_signal(struct drm_i915_gem_request *signaller_req)
+static int gen6_signal(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *signaller = signaller_req->ring;
-	struct drm_i915_private *dev_priv = signaller_req->i915;
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	ret = intel_ring_begin(signaller_req, round_up((num_rings-1) * 3, 2));
+	ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
 	if (ret)
 		return ret;
 
 	for_each_engine_id(useless, dev_priv, id) {
-		i915_reg_t mbox_reg =
-			signaller_req->engine->semaphore.mbox.signal[id];
+		i915_reg_t mbox_reg = req->engine->semaphore.mbox.signal[id];
 
 		if (i915_mmio_reg_valid(mbox_reg)) {
-			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
-			intel_ring_emit_reg(signaller, mbox_reg);
-			intel_ring_emit(signaller, signaller_req->fence.seqno);
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+			intel_ring_emit_reg(ring, mbox_reg);
+			intel_ring_emit(ring, req->fence.seqno);
 		}
 	}
 
 	/* If num_dwords was rounded, make sure the tail pointer is correct */
 	if (num_rings % 2 == 0)
-		intel_ring_emit(signaller, MI_NOOP);
-	intel_ring_advance(signaller);
+		intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1518,64 +1515,65 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
  */
 
 static int
-gen8_ring_sync(struct drm_i915_gem_request *wait,
-	       struct drm_i915_gem_request *signal)
+gen8_ring_sync_to(struct drm_i915_gem_request *req,
+		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = wait->ring;
-	struct drm_i915_private *dev_priv = wait->i915;
-	u64 offset = GEN8_WAIT_OFFSET(wait->engine, signal->engine->id);
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
+	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
-	ret = intel_ring_begin(wait, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
-				MI_SEMAPHORE_GLOBAL_GTT |
-				MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(waiter, signal->fence.seqno);
-	intel_ring_emit(waiter, lower_32_bits(offset));
-	intel_ring_emit(waiter, upper_32_bits(offset));
-	intel_ring_advance(waiter);
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_WAIT |
+			MI_SEMAPHORE_GLOBAL_GTT |
+			MI_SEMAPHORE_SAD_GTE_SDD);
+	intel_ring_emit(ring, signal->fence.seqno);
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_advance(ring);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
 	 * We do this on the i915_switch_context() following the wait and
 	 * before the dispatch.
 	 */
-	ppgtt = wait->ctx->ppgtt;
-	if (ppgtt && wait->engine->id != RCS)
-		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
+	ppgtt = req->ctx->ppgtt;
+	if (ppgtt && req->engine->id != RCS)
+		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
 	return 0;
 }
 
 static int
-gen6_ring_sync(struct drm_i915_gem_request *wait,
-	       struct drm_i915_gem_request *signal)
+gen6_ring_sync_to(struct drm_i915_gem_request *req,
+		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = wait->ring;
+	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signal->engine->semaphore.mbox.wait[wait->engine->id];
+	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->id];
 	int ret;
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(wait, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter, dw1 | wait_mbox);
+	intel_ring_emit(ring, dw1 | wait_mbox);
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(waiter, signal->fence.seqno - 1);
-	intel_ring_emit(waiter, 0);
-	intel_ring_emit(waiter, MI_NOOP);
-	intel_ring_advance(waiter);
+	intel_ring_emit(ring, signal->fence.seqno - 1);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2686,7 +2684,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) >= 8) {
 		u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
 
-		engine->semaphore.sync_to = gen8_ring_sync;
+		engine->semaphore.sync_to = gen8_ring_sync_to;
 		engine->semaphore.signal = gen8_xcs_signal;
 
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -2700,7 +2698,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 			engine->semaphore.signal_ggtt[i] = ring_offset;
 		}
 	} else if (INTEL_GEN(dev_priv) >= 6) {
-		engine->semaphore.sync_to = gen6_ring_sync;
+		engine->semaphore.sync_to = gen6_ring_sync_to;
 		engine->semaphore.signal = gen6_signal;
 
 		/*
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7a4cf4a14c1a..e59162d89026 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -275,9 +275,9 @@ struct intel_engine_cs {
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct drm_i915_gem_request *to,
-				   struct drm_i915_gem_request *from);
-		int	(*signal)(struct drm_i915_gem_request *signaller_req);
+		int	(*sync_to)(struct drm_i915_gem_request *req,
+				   struct drm_i915_gem_request *signal);
+		int	(*signal)(struct drm_i915_gem_request *req);
 	} semaphore;
 
 	/* Execlists */
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* Re: [PATCH] drm/i915: Rename engine->semaphore.sync_to, engine->semaphore.signal locals
  2016-07-22  9:14     ` [PATCH] drm/i915: Rename engine->semaphore.sync_to, engine->semaphore.signal locals Chris Wilson
@ 2016-07-22  9:28       ` Joonas Lahtinen
  2016-07-22  9:31         ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  9:28 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On pe, 2016-07-22 at 10:14 +0100, Chris Wilson wrote:
>  static int
> -gen8_ring_sync(struct drm_i915_gem_request *wait,
> -	       struct drm_i915_gem_request *signal)
> +gen8_ring_sync_to(struct drm_i915_gem_request *req,
> +		  struct drm_i915_gem_request *signal)
>  {
> -	struct intel_ring *waiter = wait->ring;
> -	struct drm_i915_private *dev_priv = wait->i915;
> -	u64 offset = GEN8_WAIT_OFFSET(wait->engine, signal->engine->id);
> +	struct intel_ring *ring = req->ring;
> +	struct drm_i915_private *dev_priv = req->i915;
> +	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
>  	struct i915_hw_ppgtt *ppgtt;
>  	int ret;
>  
> -	ret = intel_ring_begin(wait, 4);
> +	ret = intel_ring_begin(req, 4);
>  	if (ret)
>  		return ret;
>  
> -	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
> -				MI_SEMAPHORE_GLOBAL_GTT |
> -				MI_SEMAPHORE_SAD_GTE_SDD);
> -	intel_ring_emit(waiter, signal->fence.seqno);
> -	intel_ring_emit(waiter, lower_32_bits(offset));
> -	intel_ring_emit(waiter, upper_32_bits(offset));
> -	intel_ring_advance(waiter);
> +	intel_ring_emit(ring,
> +			MI_SEMAPHORE_WAIT |
> +			MI_SEMAPHORE_GLOBAL_GTT |
> +			MI_SEMAPHORE_SAD_GTE_SDD);
> +	intel_ring_emit(ring, signal->fence.seqno);
> +	intel_ring_emit(ring, lower_32_bits(offset));
> +	intel_ring_emit(ring, upper_32_bits(offset));
> +	intel_ring_advance(ring);
>  
>  	/* When the !RCS engines idle waiting upon a semaphore, they lose their
>  	 * pagetables and we must reload them before executing the batch.
>  	 * We do this on the i915_switch_context() following the wait and
>  	 * before the dispatch.
>  	 */
> -	ppgtt = wait->ctx->ppgtt;
> -	if (ppgtt && wait->engine->id != RCS)
> -		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
> +	ppgtt = req->ctx->ppgtt;

This assignment could still be lifted up.

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

> +	if (ppgtt && req->engine->id != RCS)
> +		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
>  	return 0;
>  }
>  

-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH] drm/i915: Rename engine->semaphore.sync_to, engine->semaphore.signal locals
  2016-07-22  9:28       ` Joonas Lahtinen
@ 2016-07-22  9:31         ` Chris Wilson
  2016-07-22  9:38           ` Joonas Lahtinen
  0 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-22  9:31 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Fri, Jul 22, 2016 at 12:28:11PM +0300, Joonas Lahtinen wrote:
> On pe, 2016-07-22 at 10:14 +0100, Chris Wilson wrote:
> >  	/* When the !RCS engines idle waiting upon a semaphore, they lose their
> >  	 * pagetables and we must reload them before executing the batch.
> >  	 * We do this on the i915_switch_context() following the wait and
> >  	 * before the dispatch.
> >  	 */
> > -	ppgtt = wait->ctx->ppgtt;
> > -	if (ppgtt && wait->engine->id != RCS)
> > -		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
> > +	ppgtt = req->ctx->ppgtt;
> 
> This assignment could still be lifted up.

I'm not a huge believer in gcc's live range analysis :)
-Chris
> 

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH] drm/i915: Rename engine->semaphore.sync_to, engine->semaphore.signal locals
  2016-07-22  9:31         ` Chris Wilson
@ 2016-07-22  9:38           ` Joonas Lahtinen
  0 siblings, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  9:38 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On pe, 2016-07-22 at 10:31 +0100, Chris Wilson wrote:
> On Fri, Jul 22, 2016 at 12:28:11PM +0300, Joonas Lahtinen wrote:
> > 
> > On pe, 2016-07-22 at 10:14 +0100, Chris Wilson wrote:
> > > 
> > >  	/* When the !RCS engines idle waiting upon a semaphore, they lose their
> > >  	 * pagetables and we must reload them before executing the batch.
> > >  	 * We do this on the i915_switch_context() following the wait and
> > >  	 * before the dispatch.
> > >  	 */
> > > -	ppgtt = wait->ctx->ppgtt;
> > > -	if (ppgtt && wait->engine->id != RCS)
> > > -		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
> > > +	ppgtt = req->ctx->ppgtt;
> > This assignment could still be lifted up.
> I'm not a huge believe in gcc's live range analysis :)

It's Open Source :P We can optimize the culprit once there is need.

> -Chris
> > 
> > 
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 15/18] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores
  2016-07-21 14:10     ` Chris Wilson
@ 2016-07-22  9:42       ` Joonas Lahtinen
  0 siblings, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  9:42 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On to, 2016-07-21 at 15:10 +0100, Chris Wilson wrote:
> On Thu, Jul 21, 2016 at 04:55:00PM +0300, Joonas Lahtinen wrote:
> > 
> > On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> > > +	engine->emit_request = gen6_sema_emit_request;
> > > +
> > >  	if (INTEL_GEN(dev_priv) >= 8) {
> > >  		u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
> > >  
> > > @@ -2789,8 +2789,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
> > >  	engine->init_hw = init_ring_common;
> > >  
> > >  	engine->emit_request = i9xx_emit_request;
> > > -	if (INTEL_GEN(dev_priv) >= 6)
> > > -		engine->emit_request = gen6_emit_request;
> > I am still not sure I would prefer the assignment here. If overrides
> > happen from all around the codebase, it'll be harder to work out what
> > the possible values for a vfunc are, right?
> It's definitely not a default function any more though. It needs to be
> conditional on the semaphore setup. We could move the init_semaphores
> earlier, and then do
> 
> engine->emit_request = i9xx_emit_request;
> if (i915.semaphores)
> 	engine->emit_request = gen6_sema_emit_request;
> 
> here?

Yeah, that's pretty much what I was after. 

Regards, Joonas

> -Chris
> 
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 16/18] drm/i915: Remove duplicate golden render state init from execlists
  2016-07-21 16:37       ` Chris Wilson
@ 2016-07-22  9:53         ` Joonas Lahtinen
  2016-07-22 10:16           ` [PATCH] drm/i915: Refactor golden render state emission to unconfuse gcc Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22  9:53 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On to, 2016-07-21 at 17:37 +0100, Chris Wilson wrote:
> On Thu, Jul 21, 2016 at 05:27:06PM +0100, Chris Wilson wrote:
> > 
> > On Thu, Jul 21, 2016 at 05:18:17PM +0300, Joonas Lahtinen wrote:
> > > 
> > > On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> > > > 
> > > >  static const struct intel_renderstate_rodata *
> > > >  render_state_get_rodata(const int gen)
> > > >  {
> > > > @@ -51,6 +60,7 @@ static int render_state_init(struct render_state *so,
> > > >  	int ret;
> > > >  
> > > >  	so->gen = INTEL_GEN(dev_priv);
> > > > +	so->ggtt_offset = 0;
> > > Previously this was not done; does it address a bug? It's going to get
> > > overwritten, or the render_state has failed to initialize and is
> > > forgotten, no? If it fixes a bug, I think the site fondling the
> > > uninitialized object should be fixed.
> > No, nothing is using it indeed. I can't remember why I added it.
> Oh, because of gcc.
> 
> drivers/gpu/drm/i915/i915_gem_render_state.c: In function ‘i915_gem_render_state_init’:
> drivers/gpu/drm/i915/i915_gem_render_state.c:246:6: error: ‘so.ggtt_offset’ may be used uninitialized in this function [-Werror=maybe-uninitialized]
>   ret = req->engine->emit_bb_start(req, so.ggtt_offset,

Right, GCC doesn't notice the matching rodata == NULL tests. So I guess
this is the easiest way to get rid of the warning.

Or we could treat rodata == NULL as -ENOENT to make GCC shut up, and
then check for -ENOENT in i915_gem_render_state? That would simplify the
code too.
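
A rough sketch of that alternative (illustrative only, not the posted patch;
it keeps the old render_state_init() helper and elides the unrelated parts):

static int render_state_init(struct render_state *so,
                             struct drm_i915_private *dev_priv)
{
        so->rodata = render_state_get_rodata(INTEL_GEN(dev_priv));
        if (so->rodata == NULL)
                return -ENOENT; /* no golden state for this gen */
        ...
}

and then in the caller:

        ret = render_state_init(&so, req->i915);
        if (ret == -ENOENT) /* nothing to emit, not a failure */
                return 0;
        if (ret)
                return ret;

The rodata == NULL case then bails out through the normal error path, so the
correlation gcc currently fails to see simply disappears.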

Regards, Joonas

> 
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev4)
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (19 preceding siblings ...)
  2016-07-20 15:10 ` ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev2) Patchwork
@ 2016-07-22  9:58 ` Patchwork
  2016-07-22 10:22 ` ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev5) Patchwork
  21 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2016-07-22  9:58 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev4)
URL   : https://patchwork.freedesktop.org/series/10090/
State : failure

== Summary ==

Applying: drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
Applying: drm/i915: Convert stray struct intel_engine_cs *ring
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/i915_drv.h
M	drivers/gpu/drm/i915/i915_gem_execbuffer.c
M	drivers/gpu/drm/i915/intel_mocs.h
M	drivers/gpu/drm/i915/intel_ringbuffer.c
M	drivers/gpu/drm/i915/intel_ringbuffer.h
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/intel_ringbuffer.h
Auto-merging drivers/gpu/drm/i915/i915_gem_execbuffer.c
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/i915_gem_execbuffer.c
Auto-merging drivers/gpu/drm/i915/i915_drv.h
error: Failed to merge in the changes.
Patch failed at 0002 drm/i915: Convert stray struct intel_engine_cs *ring
The copy of the patch that failed is found in: .git/rebase-apply/patch
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* [PATCH] drm/i915: Refactor golden render state emission to unconfuse gcc
  2016-07-22  9:53         ` Joonas Lahtinen
@ 2016-07-22 10:16           ` Chris Wilson
  2016-07-22 10:33             ` Joonas Lahtinen
  0 siblings, 1 reply; 72+ messages in thread
From: Chris Wilson @ 2016-07-22 10:16 UTC (permalink / raw)
  To: intel-gfx

GCC was inlining the init and setup functions, but was getting itself
confused into thinking that variables could be used uninitialised. If we
do the inline for gcc, it is happy! As a bonus we shrink the code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_render_state.c | 93 ++++++++--------------------
 1 file changed, 26 insertions(+), 67 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index a9b56d18a93b..b50412c205e5 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -32,7 +32,6 @@ struct render_state {
 	const struct intel_renderstate_rodata *rodata;
 	struct drm_i915_gem_object *obj;
 	u64 ggtt_offset;
-	int gen;
 	u32 aux_batch_size;
 	u32 aux_batch_offset;
 };
@@ -54,36 +53,6 @@ render_state_get_rodata(const int gen)
 	return NULL;
 }
 
-static int render_state_init(struct render_state *so,
-			     struct drm_i915_private *dev_priv)
-{
-	int ret;
-
-	so->gen = INTEL_GEN(dev_priv);
-	so->ggtt_offset = 0; /* keep gcc quiet */
-	so->rodata = render_state_get_rodata(so->gen);
-	if (so->rodata == NULL)
-		return 0;
-
-	if (so->rodata->batch_items * 4 > 4096)
-		return -EINVAL;
-
-	so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
-	if (IS_ERR(so->obj))
-		return PTR_ERR(so->obj);
-
-	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
-	if (ret)
-		goto free_gem;
-
-	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-	return 0;
-
-free_gem:
-	i915_gem_object_put(so->obj);
-	return ret;
-}
-
 /*
  * Macro to add commands to auxiliary batch.
  * This macro only checks for page overflow before inserting the commands,
@@ -106,6 +75,7 @@ static int render_state_setup(struct render_state *so)
 {
 	struct drm_device *dev = so->obj->base.dev;
 	const struct intel_renderstate_rodata *rodata = so->rodata;
+	const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
 	unsigned int i = 0, reloc_index = 0;
 	struct page *page;
 	u32 *d;
@@ -124,7 +94,7 @@ static int render_state_setup(struct render_state *so)
 		if (i * 4  == rodata->reloc[reloc_index]) {
 			u64 r = s + so->ggtt_offset;
 			s = lower_32_bits(r);
-			if (so->gen >= 8) {
+			if (has_64bit_reloc) {
 				if (i + 1 >= rodata->batch_items ||
 				    rodata->batch[i + 1] != 0) {
 					ret = -EINVAL;
@@ -202,53 +172,40 @@ err_out:
 
 #undef OUT_BATCH
 
-static void render_state_fini(struct render_state *so)
-{
-	i915_gem_object_ggtt_unpin(so->obj);
-	i915_gem_object_put(so->obj);
-}
-
-static int render_state_prepare(struct intel_engine_cs *engine,
-				struct render_state *so)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 {
+	struct render_state so;
 	int ret;
 
-	if (WARN_ON(engine->id != RCS))
-		return -ENOENT;
-
-	ret = render_state_init(so, engine->i915);
-	if (ret)
-		return ret;
-
-	if (so->rodata == NULL)
+	if (WARN_ON(req->engine->id != RCS))
 		return 0;
 
-	ret = render_state_setup(so);
-	if (ret) {
-		render_state_fini(so);
-		return ret;
-	}
+	so.rodata = render_state_get_rodata(INTEL_GEN(req->i915));
+	if (so.rodata == NULL)
+		return 0;
 
-	return 0;
-}
+	if (so.rodata->batch_items * 4 > 4096)
+		return -EINVAL;
 
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
-{
-	struct render_state so;
-	int ret;
+	so.obj = i915_gem_object_create(&req->i915->drm, 4096);
+	if (IS_ERR(so.obj))
+		return PTR_ERR(so.obj);
 
-	ret = render_state_prepare(req->engine, &so);
+	ret = i915_gem_obj_ggtt_pin(so.obj, 4096, 0);
 	if (ret)
-		return ret;
+		goto err_obj;
 
-	if (so.rodata == NULL)
-		return 0;
+	so.ggtt_offset = i915_gem_obj_ggtt_offset(so.obj);
+
+	ret = render_state_setup(&so);
+	if (ret)
+		goto err_unpin;
 
 	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
 					 so.rodata->batch_items * 4,
 					 I915_DISPATCH_SECURE);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	if (so.aux_batch_size > 8) {
 		ret = req->engine->emit_bb_start(req,
@@ -257,11 +214,13 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 						 so.aux_batch_size,
 						 I915_DISPATCH_SECURE);
 		if (ret)
-			goto out;
+			goto err_unpin;
 	}
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-out:
-	render_state_fini(&so);
+err_unpin:
+	i915_gem_object_ggtt_unpin(so.obj);
+err_obj:
+	i915_gem_object_put(so.obj);
 	return ret;
 }
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 72+ messages in thread

* ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev5)
  2016-07-20 13:11 Unify request construction Chris Wilson
                   ` (20 preceding siblings ...)
  2016-07-22  9:58 ` ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev4) Patchwork
@ 2016-07-22 10:22 ` Patchwork
  21 siblings, 0 replies; 72+ messages in thread
From: Patchwork @ 2016-07-22 10:22 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev5)
URL   : https://patchwork.freedesktop.org/series/10090/
State : failure

== Summary ==

Applying: drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
Applying: drm/i915: Convert stray struct intel_engine_cs *ring
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/i915_drv.h
M	drivers/gpu/drm/i915/i915_gem_execbuffer.c
M	drivers/gpu/drm/i915/intel_mocs.h
M	drivers/gpu/drm/i915/intel_ringbuffer.c
M	drivers/gpu/drm/i915/intel_ringbuffer.h
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/intel_ringbuffer.h
Auto-merging drivers/gpu/drm/i915/i915_gem_execbuffer.c
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/i915_gem_execbuffer.c
Auto-merging drivers/gpu/drm/i915/i915_drv.h
error: Failed to merge in the changes.
Patch failed at 0002 drm/i915: Convert stray struct intel_engine_cs *ring
The copy of the patch that failed is found in: .git/rebase-apply/patch
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH] drm/i915: Refactor golden render state emission to unconfuse gcc
  2016-07-22 10:16           ` [PATCH] drm/i915: Refactor golden render state emission to unconfuse gcc Chris Wilson
@ 2016-07-22 10:33             ` Joonas Lahtinen
  0 siblings, 0 replies; 72+ messages in thread
From: Joonas Lahtinen @ 2016-07-22 10:33 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On pe, 2016-07-22 at 11:16 +0100, Chris Wilson wrote:
> GCC was inlining the init and setup functions, but was getting itself
> confused into thinking that variables could be used uninitialised. If we
> do the inline for gcc, it is happy! As a bonus we shrink the code.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> ---
>  drivers/gpu/drm/i915/i915_gem_render_state.c | 93 ++++++++--------------------
>  1 file changed, 26 insertions(+), 67 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
> index a9b56d18a93b..b50412c205e5 100644
> --- a/drivers/gpu/drm/i915/i915_gem_render_state.c
> +++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
> @@ -32,7 +32,6 @@ struct render_state {
>  	const struct intel_renderstate_rodata *rodata;
>  	struct drm_i915_gem_object *obj;
>  	u64 ggtt_offset;
> -	int gen;
>  	u32 aux_batch_size;
>  	u32 aux_batch_offset;
>  };
> @@ -54,36 +53,6 @@ render_state_get_rodata(const int gen)
>  	return NULL;
>  }
>  
> -static int render_state_init(struct render_state *so,
> -			     struct drm_i915_private *dev_priv)
> -{
> -	int ret;
> -
> -	so->gen = INTEL_GEN(dev_priv);
> -	so->ggtt_offset = 0; /* keep gcc quiet */
> -	so->rodata = render_state_get_rodata(so->gen);
> -	if (so->rodata == NULL)
> -		return 0;
> -
> -	if (so->rodata->batch_items * 4 > 4096)
> -		return -EINVAL;
> -
> -	so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
> -	if (IS_ERR(so->obj))
> -		return PTR_ERR(so->obj);
> -
> -	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
> -	if (ret)
> -		goto free_gem;
> -
> -	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
> -	return 0;
> -
> -free_gem:
> -	i915_gem_object_put(so->obj);
> -	return ret;
> -}
> -
>  /*
>   * Macro to add commands to auxiliary batch.
>   * This macro only checks for page overflow before inserting the commands,
> @@ -106,6 +75,7 @@ static int render_state_setup(struct render_state *so)
>  {
>  	struct drm_device *dev = so->obj->base.dev;
>  	const struct intel_renderstate_rodata *rodata = so->rodata;
> +	const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
>  	unsigned int i = 0, reloc_index = 0;
>  	struct page *page;
>  	u32 *d;
> @@ -124,7 +94,7 @@ static int render_state_setup(struct render_state *so)
>  		if (i * 4  == rodata->reloc[reloc_index]) {
>  			u64 r = s + so->ggtt_offset;
>  			s = lower_32_bits(r);
> -			if (so->gen >= 8) {
> +			if (has_64bit_reloc) {
>  				if (i + 1 >= rodata->batch_items ||
>  				    rodata->batch[i + 1] != 0) {
>  					ret = -EINVAL;
> @@ -202,53 +172,40 @@ err_out:
>  
>  #undef OUT_BATCH
>  
> -static void render_state_fini(struct render_state *so)
> -{
> -	i915_gem_object_ggtt_unpin(so->obj);
> -	i915_gem_object_put(so->obj);
> -}
> -
> -static int render_state_prepare(struct intel_engine_cs *engine,
> -				struct render_state *so)
> +int i915_gem_render_state_init(struct drm_i915_gem_request *req)
>  {
> +	struct render_state so;
>  	int ret;
>  
> -	if (WARN_ON(engine->id != RCS))
> -		return -ENOENT;
> -
> -	ret = render_state_init(so, engine->i915);
> -	if (ret)
> -		return ret;
> -
> -	if (so->rodata == NULL)
> +	if (WARN_ON(req->engine->id != RCS))
>  		return 0;

Why not -ENOENT anymore? It was propagated up previously.

>  
> -	ret = render_state_setup(so);
> -	if (ret) {
> -		render_state_fini(so);
> -		return ret;
> -	}
> +	so.rodata = render_state_get_rodata(INTEL_GEN(req->i915));

While you revamp, maybe let the function take req->i915 directly?

Otherwise looks quite good.
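
Something along these lines, perhaps (sketch only; the per-gen tables are the
existing gen6_null_state..gen9_null_state ones):

static const struct intel_renderstate_rodata *
render_state_get_rodata(const struct drm_i915_private *i915)
{
        switch (INTEL_GEN(i915)) {
        case 6: return &gen6_null_state;
        case 7: return &gen7_null_state;
        case 8: return &gen8_null_state;
        case 9: return &gen9_null_state;
        default: return NULL;
        }
}

with the call site becoming

        so.rodata = render_state_get_rodata(req->i915);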

> +	if (so.rodata == NULL)
> +		return 0;
> 
>  
> -	return 0;
> -}
> +	if (so.rodata->batch_items * 4 > 4096)
> +		return -EINVAL;
>  
> -int i915_gem_render_state_init(struct drm_i915_gem_request *req)
> -{
> -	struct render_state so;
> -	int ret;
> +	so.obj = i915_gem_object_create(&req->i915->drm, 4096);
> +	if (IS_ERR(so.obj))
> +		return PTR_ERR(so.obj);
>  
> -	ret = render_state_prepare(req->engine, &so);
> +	ret = i915_gem_obj_ggtt_pin(so.obj, 4096, 0);
>  	if (ret)
> -		return ret;
> +		goto err_obj;
>  
> -	if (so.rodata == NULL)
> -		return 0;
> +	so.ggtt_offset = i915_gem_obj_ggtt_offset(so.obj);
> +
> +	ret = render_state_setup(&so);
> +	if (ret)
> +		goto err_unpin;
>  
>  	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
>  					 so.rodata->batch_items * 4,
>  					 I915_DISPATCH_SECURE);
>  	if (ret)
> -		goto out;
> +		goto err_unpin;
>  
>  	if (so.aux_batch_size > 8) {
>  		ret = req->engine->emit_bb_start(req,
> @@ -257,11 +214,13 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
>  						 so.aux_batch_size,
>  						 I915_DISPATCH_SECURE);
>  		if (ret)
> -			goto out;
> +			goto err_unpin;
>  	}
>  
>  	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
> -out:
> -	render_state_fini(&so);
> +err_unpin:
> +	i915_gem_object_ggtt_unpin(so.obj);
> +err_obj:
> +	i915_gem_object_put(so.obj);
>  	return ret;
>  }
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH] drm/i915: Convert stray struct intel_engine_cs *ring
  2016-07-20 15:00     ` [PATCH] drm/i915: Convert stray struct intel_engine_cs *ring Chris Wilson
@ 2016-07-27 13:15       ` Dave Gordon
  0 siblings, 0 replies; 72+ messages in thread
From: Dave Gordon @ 2016-07-27 13:15 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 20/07/16 16:00, Chris Wilson wrote:
> We still have a few uses of the identifier "ring" when referring to
> the struct intel_engine_cs (a remnant from when there was only one
> dual-purpose engine/ringbuffer). Rename all of these to use the familiar
> engine so that the separation between the hardware engine and the
> ringbuffer containing the commands is clear.
>
> This patch was formed by searching for instances of '\<ring\>' and
> changing those found to be referring to an engine. There are quite a few
> instances remaining in comments where it is less clear what is
> appropriate for the context; the registers still refer to ring (there we
> need to check against bspec for any counter-recommendations, but quite a
> few registers, like PDP, should be engine based, whereas RING_HEAD
> probably wants to remain ring based); and the biggest compromise was in
> error capture, where we already have a local engine variable and so
> finding a good name was trickier.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Dave Gordon <david.s.gordon@intel.com>
> ---
>  drivers/gpu/drm/i915/i915_cmd_parser.c     |   4 +-
>  drivers/gpu/drm/i915/i915_drv.h            |  15 +-
>  drivers/gpu/drm/i915/i915_gem.c            |  26 +--
>  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  23 +--
>  drivers/gpu/drm/i915/i915_gpu_error.c      | 255 +++++++++++++++--------------
>  drivers/gpu/drm/i915/i915_irq.c            |   6 +-
>  drivers/gpu/drm/i915/intel_mocs.h          |   2 +-
>  drivers/gpu/drm/i915/intel_ringbuffer.c    |   2 +-
>  drivers/gpu/drm/i915/intel_ringbuffer.h    |  24 +--
>  9 files changed, 183 insertions(+), 174 deletions(-)

I ran my generic Cocci-script for renaming things over the before and 
after versions and it didn't find any you'd missed, so:

Reviewed-by: Dave Gordon <david.s.gordon@intel.com>

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 10/18] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START
  2016-07-21 14:14     ` Chris Wilson
@ 2016-07-27 15:04       ` Dave Gordon
  2016-07-27 15:19         ` Chris Wilson
  0 siblings, 1 reply; 72+ messages in thread
From: Dave Gordon @ 2016-07-27 15:04 UTC (permalink / raw)
  To: Chris Wilson, Joonas Lahtinen, intel-gfx

On 21/07/16 15:14, Chris Wilson wrote:
> On Thu, Jul 21, 2016 at 04:39:58PM +0300, Joonas Lahtinen wrote:
>> On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
>>>  	if (so.aux_batch_size > 8) {
>>> -		ret = req->engine->dispatch_execbuffer(req,
>>> -						     (so.ggtt_offset +
>>> -						      so.aux_batch_offset),
>>> -						     so.aux_batch_size,
>>> -						     I915_DISPATCH_SECURE);
>>> +		ret = req->engine->emit_bb_start(req,
>>> +						 (so.ggtt_offset +
>>> +						  so.aux_batch_offset),
>>> +						 so.aux_batch_size,
>>> +						 I915_DISPATCH_SECURE);
>>>  		if (ret)
>>>  			goto out;
>>>  	}
>>
>> The code above this line is the exact reason why I don't like the a->b->c
>> (especially when there is repetition). But it's not new to this patch
>> so I guess it'll do. Some future work to shorten it down a little
>> might not hurt.
>
> I presume you mean req->engine->x here, not so.y. Is it just the depth
> and saving 5 columns? Or something else?
> -Chris

It can also hurt code efficiency. For example in

int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
     ret = i915_gem_render_state_prepare(req->engine, &so);
     ...
     ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
                 so.rodata->batch_items * 4, I915_DISPATCH_SECURE);
     ...
     if (so.aux_batch_size > 8) {
         ret = req->engine->dispatch_execbuffer(req,
                 (so.ggtt_offset + so.aux_batch_offset),
                 so.aux_batch_size, I915_DISPATCH_SECURE);
         if (ret)
                 goto out;
     }
     ...
}

The compiler may not -- and in this case, *is not allowed to* -- assume 
that req->engine is the same at each callsite. We have passed the 
non-const pointer "req" to callees, so it must assume that req->engine 
may have been changed and re-fetch it from memory.

By inspection of the generated code, we find that a local for a value 
that is used only once is a net loss, twice is breakeven, and three or 
more times is a definite win.

And it generally makes the code prettier too :)
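
For comparison, a sketch of the same function with the engine fetched into a
local once (using the emit_bb_start name this series settles on; unrelated
details elided):

int i915_gem_render_state_init(struct drm_i915_gem_request *req)
{
        struct intel_engine_cs *engine = req->engine; /* fetched once */
        struct render_state so;
        int ret;
        ...
        ret = engine->emit_bb_start(req, so.ggtt_offset,
                                    so.rodata->batch_items * 4,
                                    I915_DISPATCH_SECURE);
        ...
        if (so.aux_batch_size > 8) {
                ret = engine->emit_bb_start(req,
                                            so.ggtt_offset + so.aux_batch_offset,
                                            so.aux_batch_size,
                                            I915_DISPATCH_SECURE);
                if (ret)
                        goto out;
        }
        ...
}

The load of req->engine then happens once, whatever the compiler can or cannot
prove about the callees.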

.Dave.
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 10/18] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START
  2016-07-27 15:04       ` Dave Gordon
@ 2016-07-27 15:19         ` Chris Wilson
  0 siblings, 0 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-27 15:19 UTC (permalink / raw)
  To: Dave Gordon; +Cc: intel-gfx

On Wed, Jul 27, 2016 at 04:04:44PM +0100, Dave Gordon wrote:
> On 21/07/16 15:14, Chris Wilson wrote:
> >On Thu, Jul 21, 2016 at 04:39:58PM +0300, Joonas Lahtinen wrote:
> >>On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
> >>> 	if (so.aux_batch_size > 8) {
> >>>-		ret = req->engine->dispatch_execbuffer(req,
> >>>-						     (so.ggtt_offset +
> >>>-						      so.aux_batch_offset),
> >>>-						     so.aux_batch_size,
> >>>-						     I915_DISPATCH_SECURE);
> >>>+		ret = req->engine->emit_bb_start(req,
> >>>+						 (so.ggtt_offset +
> >>>+						  so.aux_batch_offset),
> >>>+						 so.aux_batch_size,
> >>>+						 I915_DISPATCH_SECURE);
> >>> 		if (ret)
> >>> 			goto out;
> >>> 	}
> >>
> >>The code above this line is the exact reason why I don't like the a->b->c
> >>(especially when there is repetition). But it's not new to this patch
> >>so I guess it'll do. Some future work to shorten it down a little
> >>might not hurt.
> >
> >I presume you mean req->engine->x here, not so.y. Is it just the depth
> >and saving 5 columns? Or something else?
> >-Chris
> 
> It can also hurt code efficiency. For example in
> 
> int i915_gem_render_state_init(struct drm_i915_gem_request *req)
> {
>     ret = i915_gem_render_state_prepare(req->engine, &so);
>     ...
>     ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
>                 so.rodata->batch_items * 4, I915_DISPATCH_SECURE);
>     ...
>     if (so.aux_batch_size > 8) {
>         ret = req->engine->dispatch_execbuffer(req,
>                 (so.ggtt_offset + so.aux_batch_offset),
>                 so.aux_batch_size, I915_DISPATCH_SECURE);
>                 if (ret)
>                         goto out;
>         }
>     ...
> }
> 
> The compiler may not -- and in this case, *is not allowed to* --
> assume that req->engine is the same at each callsite. We have passed
> the non-const pointer "req" to callees, so it must assume that
> req->engine may have been changed and re-fetch it from memory.
> 
> By inspection of the generated code, we find that a local for a
> value that is used only once is a net loss, twice is breakeven, and
> three or more times is a definite win.
> 
> And it generally makes the code prettier too :)

Only prettiness really matters here; this emission can be shared for all
contexts, with the cache only released under memory pressure, for when we
start caring about the regressions in the GL microbenchmarks...
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 12/18] drm/i915: Unify request submission
  2016-07-22  8:03   ` Joonas Lahtinen
  2016-07-22  8:24     ` Chris Wilson
@ 2016-07-27 17:51     ` Dave Gordon
  2016-07-27 18:09       ` Chris Wilson
  1 sibling, 1 reply; 72+ messages in thread
From: Dave Gordon @ 2016-07-27 17:51 UTC (permalink / raw)
  To: Joonas Lahtinen, Chris Wilson, intel-gfx

On 22/07/16 09:03, Joonas Lahtinen wrote:
> On ke, 2016-07-20 at 14:12 +0100, Chris Wilson wrote:
>> Move request submission from emit_request into its own common vfunc
>> from i915_add_request().
>>
>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> ---
>>  drivers/gpu/drm/i915/i915_gem_request.c    |  7 +++----
>>  drivers/gpu/drm/i915/i915_guc_submission.c |  9 ++++++---
>>  drivers/gpu/drm/i915/intel_guc.h           |  1 -
>>  drivers/gpu/drm/i915/intel_lrc.c           | 10 +++-------
>>  drivers/gpu/drm/i915/intel_ringbuffer.c    | 26 ++++++++++----------------
>>  drivers/gpu/drm/i915/intel_ringbuffer.h    | 23 +++++++++++------------
>>  6 files changed, 33 insertions(+), 43 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
>> index 408f390a4c98..3e633b47213c 100644
>> --- a/drivers/gpu/drm/i915/i915_gem_request.c
>> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
>> @@ -469,12 +469,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>>  	 */
>>  	request->postfix = intel_ring_get_tail(ring);
>>
>> -	if (i915.enable_execlists)
>> -		ret = engine->emit_request(request);
>> -	else
>> -		ret = engine->add_request(request);
>>  	/* Not allowed to fail! */
>> +	ret = engine->emit_request(request);
>>  	WARN(ret, "emit|add_request failed: %d!\n", ret);
>
> You should fix the message too; s/|add//
>
>> +
>>  	/* Sanity check that the reserved size was large enough. */
>>  	ret = intel_ring_get_tail(ring) - request_start;
>>  	if (ret < 0)
>> @@ -485,6 +483,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>>  		  reserved_tail, ret);
>>
>>  	i915_gem_mark_busy(engine);
>> +	engine->submit_request(request);
>>  }
>>
>>  static unsigned long local_clock_us(unsigned int *cpu)
>> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
>> index eccd34832fe6..32d0e1890950 100644
>> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
>> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
>> @@ -585,7 +585,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>>   * The only error here arises if the doorbell hardware isn't functioning
>>   * as expected, which really shouln't happen.
>>   */
>> -int i915_guc_submit(struct drm_i915_gem_request *rq)
>> +static void i915_guc_submit(struct drm_i915_gem_request *rq)
>>  {
>>  	unsigned int engine_id = rq->engine->id;
>>  	struct intel_guc *guc = &rq->i915->guc;
>> @@ -602,8 +602,6 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
>>
>>  	guc->submissions[engine_id] += 1;
>>  	guc->last_seqno[engine_id] = rq->fence.seqno;
>> -
>> -	return b_ret;
>
> Maybe we should have WARN(b_ret, "sumthing")? Although I see the return
> value was not handled previously. CC'ing Dave to comment too.

If submission were to fail we'll probably(!) get a hang later; or else 
it might recover(?), though I'm not sure how. We're counting the number 
of times this goes wrong, and it's supposed to always be zero. We should 
make sure this is all captured in the error state; then if we start 
seeing that sometimes it *does* fail then I guess we'll want more 
detail. But for now we can just assume it will work.
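
If we do want a breadcrumb in dmesg until then, something as small as this in
i915_guc_submit() would do (sketch only; b_ret is the value guc_ring_doorbell()
returns in the hunk quoted above):

        if (unlikely(b_ret))
                WARN_ONCE(1, "guc: doorbell write for %s failed: %d\n",
                          rq->engine->name, b_ret);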

>>  }
>>
>>  /*
>> @@ -992,6 +990,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
>>  {
>>  	struct intel_guc *guc = &dev_priv->guc;
>>  	struct i915_guc_client *client;
>> +	struct intel_engine_cs *engine;
>>
>>  	/* client for execbuf submission */
>>  	client = guc_client_alloc(dev_priv,
>> @@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
>>  	host2guc_sample_forcewake(guc, client);
>>  	guc_init_doorbell_hw(guc);
>>
>> +	/* Take over from manual control of ELSP (execlists) */
>> +	for_each_engine(engine, dev_priv)
>> +		engine->submit_request = i915_guc_submit;

This doesn't get undone in i915_guc_submission_disable().
That will prevent the runtime fallback from working.

.Dave.

>> +
>>  	return 0;
>>  }
>>
>> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
>> index 3e3e743740c0..623cf26cd784 100644
>> --- a/drivers/gpu/drm/i915/intel_guc.h
>> +++ b/drivers/gpu/drm/i915/intel_guc.h
>> @@ -160,7 +160,6 @@ extern int intel_guc_resume(struct drm_device *dev);
>>  int i915_guc_submission_init(struct drm_i915_private *dev_priv);
>>  int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
>>  int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
>> -int i915_guc_submit(struct drm_i915_gem_request *rq);
>>  void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
>>  void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
>>
>> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
>> index d17a193e8eaf..52edbcc9bca0 100644
>> --- a/drivers/gpu/drm/i915/intel_lrc.c
>> +++ b/drivers/gpu/drm/i915/intel_lrc.c
>> @@ -773,12 +773,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>>  	 */
>>  	request->previous_context = engine->last_context;
>>  	engine->last_context = request->ctx;
>> -
>> -	if (i915.enable_guc_submission)
>> -		i915_guc_submit(request);
>> -	else
>> -		execlists_context_queue(request);
>> -
>
> Function name is still advance_and_submit, and now the call to submit
> is moved to add_request, I'm confused.
>
>>  	return 0;
>>  }
>>
>> @@ -1904,8 +1898,10 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>>  {
>>  	/* Default vfuncs which can be overriden by each engine. */
>>  	engine->init_hw = gen8_init_common_ring;
>> -	engine->emit_request = gen8_emit_request;
>>  	engine->emit_flush = gen8_emit_flush;
>> +	engine->emit_request = gen8_emit_request;
>> +	engine->submit_request = execlists_context_queue;
>
> execlists_context_queue name could be changed too, just defined and one
> calling site.
>
>> +
>>  	engine->irq_enable = gen8_logical_ring_enable_irq;
>>  	engine->irq_disable = gen8_logical_ring_disable_irq;
>>  	engine->emit_bb_start = gen8_emit_bb_start;
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> index 43dfa4be1cfd..907d933d62aa 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
>> @@ -1427,15 +1427,14 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>>  }
>>
>>  /**
>> - * gen6_add_request - Update the semaphore mailbox registers
>> + * gen6_emit_request - Update the semaphore mailbox registers
>>   *
>>   * @request - request to write to the ring
>>   *
>>   * Update the mailbox registers in the *other* rings with the current seqno.
>>   * This acts like a signal in the canonical semaphore.
>>   */
>> -static int
>> -gen6_add_request(struct drm_i915_gem_request *req)
>> +static int gen6_emit_request(struct drm_i915_gem_request *req)
>>  {
>>  	struct intel_ring *ring = req->ring;
>>  	int ret;
>> @@ -1456,13 +1455,10 @@ gen6_add_request(struct drm_i915_gem_request *req)
>>
>>  	req->tail = intel_ring_get_tail(ring);
>>
>> -	req->engine->submit_request(req);
>> -
>>  	return 0;
>>  }
>>
>> -static int
>> -gen8_render_add_request(struct drm_i915_gem_request *req)
>> +static int gen8_render_emit_request(struct drm_i915_gem_request *req)
>>  {
>>  	struct intel_engine_cs *engine = req->engine;
>>  	struct intel_ring *ring = req->ring;
>> @@ -1486,8 +1482,9 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
>>  	intel_ring_emit(ring, 0);
>>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
>>  	intel_ring_emit(ring, MI_NOOP);
>> +	intel_ring_advance(ring);
>>
>> -	req->engine->submit_request(req);
>> +	req->tail = intel_ring_get_tail(ring);
>
> Ditto req->tail = ring->tail;
>
>>
>>  	return 0;
>>  }
>> @@ -1692,8 +1689,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
>>  	return 0;
>>  }
>>
>> -static int
>> -i9xx_add_request(struct drm_i915_gem_request *req)
>> +static int i9xx_emit_request(struct drm_i915_gem_request *req)
>>  {
>>  	struct intel_ring *ring = req->ring;
>>  	int ret;
>> @@ -1710,8 +1706,6 @@ i9xx_add_request(struct drm_i915_gem_request *req)
>>
>>  	req->tail = intel_ring_get_tail(ring);
>>
>> -	req->engine->submit_request(req);
>> -
>>  	return 0;
>>  }
>>
>> @@ -2813,11 +2807,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
>>  				      struct intel_engine_cs *engine)
>>  {
>>  	engine->init_hw = init_ring_common;
>> -	engine->submit_request = i9xx_submit_request;
>>
>> -	engine->add_request = i9xx_add_request;
>> +	engine->emit_request = i9xx_emit_request;
>>  	if (INTEL_GEN(dev_priv) >= 6)
>> -		engine->add_request = gen6_add_request;
>> +		engine->emit_request = gen6_emit_request;
>> +	engine->submit_request = i9xx_submit_request;
>>
>>  	if (INTEL_GEN(dev_priv) >= 8)
>>  		engine->emit_bb_start = gen8_emit_bb_start;
>> @@ -2846,7 +2840,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
>>
>>  	if (INTEL_GEN(dev_priv) >= 8) {
>>  		engine->init_context = intel_rcs_ctx_init;
>> -		engine->add_request = gen8_render_add_request;
>> +		engine->emit_request = gen8_render_emit_request;
>>  		engine->emit_flush = gen8_render_ring_flush;
>>  		if (i915.semaphores)
>>  			engine->semaphore.signal = gen8_rcs_signal;
>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
>> index 1a38c383327e..856b732ddbbd 100644
>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
>> @@ -204,7 +204,17 @@ struct intel_engine_cs {
>>
>>  	int		(*init_context)(struct drm_i915_gem_request *req);
>>
>> -	int		(*add_request)(struct drm_i915_gem_request *req);
>> +	int		(*emit_flush)(struct drm_i915_gem_request *request,
>> +				      u32 invalidate_domains,
>> +				      u32 flush_domains);
>> +	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
>> +					 u64 offset, u32 length,
>> +					 unsigned int dispatch_flags);
>> +#define I915_DISPATCH_SECURE 0x1
>> +#define I915_DISPATCH_PINNED 0x2
>> +#define I915_DISPATCH_RS     0x4
>
> Same here, maybe BIT(0) etc?
>
> Really like how the code looks more consistent now!
>
> Regards, Joonas
>
>> +	int		(*emit_request)(struct drm_i915_gem_request *req);
>> +	void		(*submit_request)(struct drm_i915_gem_request *req);
>>  	/* Some chipsets are not quite as coherent as advertised and need
>>  	 * an expensive kick to force a true read of the up-to-date seqno.
>>  	 * However, the up-to-date seqno is not always required and the last
>> @@ -282,17 +292,6 @@ struct intel_engine_cs {
>>  	unsigned int idle_lite_restore_wa;
>>  	bool disable_lite_restore_wa;
>>  	u32 ctx_desc_template;
>> -	int		(*emit_request)(struct drm_i915_gem_request *request);
>> -	int		(*emit_flush)(struct drm_i915_gem_request *request,
>> -				      u32 invalidate_domains,
>> -				      u32 flush_domains);
>> -	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
>> -					 u64 offset, u32 length,
>> -					 unsigned int dispatch_flags);
>> -#define I915_DISPATCH_SECURE 0x1
>> -#define I915_DISPATCH_PINNED 0x2
>> -#define I915_DISPATCH_RS     0x4
>> -	void		(*submit_request)(struct drm_i915_gem_request *req);
>>
>>  	/**
>>  	 * List of objects currently involved in rendering from the

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 12/18] drm/i915: Unify request submission
  2016-07-27 17:51     ` Dave Gordon
@ 2016-07-27 18:09       ` Chris Wilson
  2016-07-27 18:17         ` Chris Wilson
  2016-07-28 10:25         ` Dave Gordon
  0 siblings, 2 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-27 18:09 UTC (permalink / raw)
  To: Dave Gordon; +Cc: intel-gfx

On Wed, Jul 27, 2016 at 06:51:35PM +0100, Dave Gordon wrote:
> >>@@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
> >> 	host2guc_sample_forcewake(guc, client);
> >> 	guc_init_doorbell_hw(guc);
> >>
> >>+	/* Take over from manual control of ELSP (execlists) */
> >>+	for_each_engine(engine, dev_priv)
> >>+		engine->submit_request = i915_guc_submit;
> 
> This doesn't get undone in i915_guc_submission_disable().
> That will prevent the runtime fallback from working.

I honestly wasn't sure if that was supported. (runtime enabling would be
nice...)

Would calling a hypothetical intel_execlists_submission_enable() be ok?
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 12/18] drm/i915: Unify request submission
  2016-07-27 18:09       ` Chris Wilson
@ 2016-07-27 18:17         ` Chris Wilson
  2016-07-28 10:25         ` Dave Gordon
  1 sibling, 0 replies; 72+ messages in thread
From: Chris Wilson @ 2016-07-27 18:17 UTC (permalink / raw)
  To: Dave Gordon, Joonas Lahtinen, intel-gfx

On Wed, Jul 27, 2016 at 07:09:15PM +0100, Chris Wilson wrote:
> On Wed, Jul 27, 2016 at 06:51:35PM +0100, Dave Gordon wrote:
> > >>@@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
> > >> 	host2guc_sample_forcewake(guc, client);
> > >> 	guc_init_doorbell_hw(guc);
> > >>
> > >>+	/* Take over from manual control of ELSP (execlists) */
> > >>+	for_each_engine(engine, dev_priv)
> > >>+		engine->submit_request = i915_guc_submit;
> > 
> > This doesn't get undone in i915_guc_submission_disable().
> > That will prevent the runtime fallback from working.
> 
> I honestly wasn't sure if that was supported. (runtime enabling would be
> nice...)

I've double checked. The i915_guc_submission_disable() is only called
after failing to enable the guc (in which case we haven't changed
engine->submit_request) or on calling intel_guc_fini() during
module_unload.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 12/18] drm/i915: Unify request submission
  2016-07-27 18:09       ` Chris Wilson
  2016-07-27 18:17         ` Chris Wilson
@ 2016-07-28 10:25         ` Dave Gordon
  2016-07-28 11:49           ` Daniel Vetter
  1 sibling, 1 reply; 72+ messages in thread
From: Dave Gordon @ 2016-07-28 10:25 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On 27/07/16 19:09, Chris Wilson wrote:
> On Wed, Jul 27, 2016 at 06:51:35PM +0100, Dave Gordon wrote:
>>>> @@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
>>>> 	host2guc_sample_forcewake(guc, client);
>>>> 	guc_init_doorbell_hw(guc);
>>>>
>>>> +	/* Take over from manual control of ELSP (execlists) */
>>>> +	for_each_engine(engine, dev_priv)
>>>> +		engine->submit_request = i915_guc_submit;
>>
>> This doesn't get undone in i915_guc_submission_disable().
>> That will prevent the runtime fallback from working.
>
> I honestly wasn't sure if that was supported. (runtime enabling would be
> nice...)

Any time the GuC (re)loading process fails, it will revert (forever) to 
execlists mode. At present there's no way to switch back to GuC mode 
thereafter. Of course, we don't actually *expect* it to fail on a 
reload, but we did observe this in action a while back, before we 
discovered why reload on resume didn't always work. That's fixed now, 
but the fallback is still there just to make sure that any undiscovered 
issues around GuC reload don't leave you with a blank screen and an 
unusable machine.

> Would calling a hypothetical intel_execlists_submission_enable() be ok?
> -Chris

Something like that, I guess, although since GuC submission mode is
treated as execlist mode for almost all purposes, _enable() and its
presumed complement _disable() probably aren't the right terms (we don't
want to suggest that enabling GuC mode involves disabling execlist 
mode). Perhaps _activate() for a function which simply (re)installs the 
vfunc pointers, with no corresponding _deactivate() required, as 
activating a new mode implicitly deactivates the old mode (i.e. makes it 
unreachable). That little loop can then be moved into its own trivial 
guc_submission_activate() function for clarity; and the GuC code would 
call intel_execlists_submission_activate() as part of reverting from GuC 
to execlists (at the moment it relies on just clearing 
enable_guc_submission, but presumably you'd like as few runtime tests of 
that as possible).
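
A sketch of what those two activate helpers might look like (names and
placement hypothetical):

/* (re)install the execlists submission path on every engine */
void intel_execlists_submission_activate(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;

        for_each_engine(engine, dev_priv)
                engine->submit_request = execlists_context_queue;
}

/* GuC counterpart, called from i915_guc_submission_enable() */
static void guc_submission_activate(struct drm_i915_private *dev_priv)
{
        struct intel_engine_cs *engine;

        for_each_engine(engine, dev_priv)
                engine->submit_request = i915_guc_submit;
}

Falling back from GuC to execlists would then call
intel_execlists_submission_activate() instead of relying on a runtime check of
enable_guc_submission.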

.Dave.
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

* Re: [PATCH 12/18] drm/i915: Unify request submission
  2016-07-28 10:25         ` Dave Gordon
@ 2016-07-28 11:49           ` Daniel Vetter
  0 siblings, 0 replies; 72+ messages in thread
From: Daniel Vetter @ 2016-07-28 11:49 UTC (permalink / raw)
  To: Dave Gordon; +Cc: intel-gfx

On Thu, Jul 28, 2016 at 11:25:39AM +0100, Dave Gordon wrote:
> On 27/07/16 19:09, Chris Wilson wrote:
> > On Wed, Jul 27, 2016 at 06:51:35PM +0100, Dave Gordon wrote:
> > > > > @@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
> > > > > 	host2guc_sample_forcewake(guc, client);
> > > > > 	guc_init_doorbell_hw(guc);
> > > > > 
> > > > > +	/* Take over from manual control of ELSP (execlists) */
> > > > > +	for_each_engine(engine, dev_priv)
> > > > > +		engine->submit_request = i915_guc_submit;
> > > 
> > > This doesn't get undone in i915_guc_submission_disable().
> > > That will prevent the runtime fallback from working.
> > 
> > I honestly wasn't sure if that was supported. (runtime enabling would be
> > nice...)
> 
> Any time the GuC (re)loading process fails, it will revert (forever) to
> execlists mode. At present there's no way to switch back to GuC mode
> thereafter. Of course, we don't actually *expect* it to fail on a reload,
> but we did observe this in action a while back, before we discovered why
> reload on resume didn't always work. That's fixed now, but the fallback is
> still there just to make sure that any undiscovered issues around GuC reload
> don't leave you with a blank screen and an unusable machine.

Do we really get a black screen? I kinda hoped dying with -EIO is still
semi-acceptable. Still not sure maintaining all these fallback paths is
worth it. And -EIO should be good enough to grab logs of why the gpu died
and then reboot.
-Daniel
-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 72+ messages in thread

end of thread, other threads:[~2016-07-28 11:49 UTC | newest]

Thread overview: 72+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-07-20 13:11 Unify request construction Chris Wilson
2016-07-20 13:11 ` [PATCH 01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
2016-07-21 11:26   ` Joonas Lahtinen
2016-07-21 12:09     ` Chris Wilson
2016-07-20 13:11 ` [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
2016-07-20 14:12   ` Dave Gordon
2016-07-20 14:51     ` Dave Gordon
2016-07-20 15:00     ` [PATCH] drm/i915: Convert stray struct intel_engine_cs *ring Chris Wilson
2016-07-27 13:15       ` Dave Gordon
2016-07-21 11:28   ` [PATCH 02/18] drm/i915: Rename request->ringbuf to request->ring Joonas Lahtinen
2016-07-20 13:11 ` [PATCH 03/18] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs Chris Wilson
2016-07-20 14:23   ` Dave Gordon
2016-07-21 11:32   ` Joonas Lahtinen
2016-07-21 11:42     ` Chris Wilson
2016-07-20 13:11 ` [PATCH 04/18] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
2016-07-21 11:43   ` Joonas Lahtinen
2016-07-20 13:11 ` [PATCH 05/18] drm/i915: Rename struct intel_ringbuffer to struct intel_ring Chris Wilson
2016-07-21 11:59   ` Joonas Lahtinen
2016-07-21 16:02     ` Chris Wilson
2016-07-20 13:11 ` [PATCH 06/18] drm/i915: Rename residual ringbuf parameters Chris Wilson
2016-07-21 12:01   ` Joonas Lahtinen
2016-07-21 12:20     ` Chris Wilson
2016-07-20 13:11 ` [PATCH 07/18] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
2016-07-21 12:02   ` Joonas Lahtinen
2016-07-20 13:12 ` [PATCH 08/18] drm/i915: Remove obsolete engine->gpu_caches_dirty Chris Wilson
2016-07-20 13:12 ` [PATCH 09/18] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
2016-07-21 13:07   ` Joonas Lahtinen
2016-07-21 13:18     ` Chris Wilson
2016-07-20 13:12 ` [PATCH 10/18] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
2016-07-21 13:39   ` Joonas Lahtinen
2016-07-21 14:14     ` Chris Wilson
2016-07-27 15:04       ` Dave Gordon
2016-07-27 15:19         ` Chris Wilson
2016-07-20 13:12 ` [PATCH 11/18] drm/i915: Convert engine->write_tail to operate on a request Chris Wilson
2016-07-21 13:52   ` Joonas Lahtinen
2016-07-20 13:12 ` [PATCH 12/18] drm/i915: Unify request submission Chris Wilson
2016-07-22  8:03   ` Joonas Lahtinen
2016-07-22  8:24     ` Chris Wilson
2016-07-27 17:51     ` Dave Gordon
2016-07-27 18:09       ` Chris Wilson
2016-07-27 18:17         ` Chris Wilson
2016-07-28 10:25         ` Dave Gordon
2016-07-28 11:49           ` Daniel Vetter
2016-07-20 13:12 ` [PATCH 13/18] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal() Chris Wilson
2016-07-22  8:15   ` Joonas Lahtinen
2016-07-22  8:30     ` Chris Wilson
2016-07-22  9:06       ` Joonas Lahtinen
2016-07-20 13:12 ` [PATCH 14/18] drm/i915: Reuse legacy breadcrumbs + tail emission Chris Wilson
2016-07-22  8:34   ` Joonas Lahtinen
2016-07-20 13:12 ` [PATCH 15/18] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores Chris Wilson
2016-07-21 13:55   ` Joonas Lahtinen
2016-07-21 14:10     ` Chris Wilson
2016-07-22  9:42       ` Joonas Lahtinen
2016-07-20 13:12 ` [PATCH 16/18] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
2016-07-21 14:18   ` Joonas Lahtinen
2016-07-21 16:27     ` Chris Wilson
2016-07-21 16:37       ` Chris Wilson
2016-07-22  9:53         ` Joonas Lahtinen
2016-07-22 10:16           ` [PATCH] drm/i915: Refactor golden render state emission to unconfuse gcc Chris Wilson
2016-07-22 10:33             ` Joonas Lahtinen
2016-07-20 13:12 ` [PATCH 17/18] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
2016-07-22  8:45   ` Joonas Lahtinen
2016-07-20 13:12 ` [PATCH 18/18] drm/i915: Simplify calling engine->sync_to Chris Wilson
2016-07-22  8:59   ` Joonas Lahtinen
2016-07-22  9:14     ` [PATCH] drm/i915: Rename engine->semaphore.sync_to, engine->sempahore.signal locals Chris Wilson
2016-07-22  9:28       ` Joonas Lahtinen
2016-07-22  9:31         ` Chris Wilson
2016-07-22  9:38           ` Joonas Lahtinen
2016-07-20 13:54 ` ✓ Ro.CI.BAT: success for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Patchwork
2016-07-20 15:10 ` ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev2) Patchwork
2016-07-22  9:58 ` ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev4) Patchwork
2016-07-22 10:22 ` ✗ Ro.CI.BAT: failure for series starting with [01/18] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit (rev5) Patchwork
