Subject: [PATCH] drm/i915: Emit to ringbuffer directly
From: Tvrtko Ursulin @ 2017-02-08 13:10 UTC
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

This removes the usage of intel_ring_emit in favour of
directly writing to the ring buffer.

intel_ring_emit was preventing the compiler from optimising
the fetch and increment of the current ring buffer pointer,
and was therefore generating very verbose code for every
write.

It had no useful purpose, since all ringbuffer operations
are bracketed by intel_ring_begin and intel_ring_advance,
with no possibility of bailing out in the middle, so it is
fine to increment the tail in intel_ring_begin and let the
calling code manage the write pointer itself.

Removing these useless instructions saves approximately two
and a half kilobytes of text in my build.

I am not sure whether this has any measurable performance
impact, but executing a ton of useless instructions on fast
paths cannot be good.

The patch is not fully polished, but it compiles and runs
on Gen9 at least.

v2:
 * Change return from intel_ring_begin to error pointer by
   popular demand.
 * Move tail increment to intel_ring_advance to enable some
   error checking.

v3:
 * Move tail advance back into intel_ring_begin.
 * Rebase and tidy.

v4:
 * Complete rebase after a few months since v3.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
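A minimal sketch of the new calling convention, distilled from the
hunks below, for reviewers skimming the conversion. The function
name emit_flush_example is made up for illustration; the body
mirrors the converted bsd_ring_flush(). intel_ring_begin() now
returns either a pointer into the ring or an ERR_PTR() on failure,
and no longer needs a matching tail bump at the call site. The new
intel_ring_advance() definition lives in intel_ringbuffer.h and is
not visible in this excerpt, so the comment on it below is an
assumption:

static int emit_flush_example(struct drm_i915_gem_request *req)
{
	u32 *out;

	/* Reserve two dwords; the tail is advanced here, up front. */
	out = intel_ring_begin(req, 2);
	if (IS_ERR(out))
		return PTR_ERR(out);

	/* Plain stores which the compiler is free to keep in registers. */
	*out++ = MI_FLUSH;
	*out++ = MI_NOOP;

	/* Assumed to only sanity-check 'out' against the reservation. */
	intel_ring_advance(req, out);

	return 0;
}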
 drivers/gpu/drm/i915/i915_gem_context.c    |  76 ++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  40 ++-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  69 ++--
 drivers/gpu/drm/i915/i915_gem_request.c    |   6 +-
 drivers/gpu/drm/i915/intel_display.c       | 132 ++++----
 drivers/gpu/drm/i915/intel_lrc.c           | 156 +++++----
 drivers/gpu/drm/i915/intel_mocs.c          |  51 ++-
 drivers/gpu/drm/i915/intel_overlay.c       |  79 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 487 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  19 +-
 10 files changed, 514 insertions(+), 601 deletions(-)
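
The heart of the change is at the end of intel_ring_begin() (see
the intel_ringbuffer.c hunk near the bottom): once any waiting and
wrapping is done, the reservation is committed immediately and the
caller receives a raw pointer into the ring. Abridged from that
hunk:

	out = (u32 *)(ring->vaddr + ring->tail);
	ring->tail += bytes;	/* bytes == num_dwords * sizeof(u32) */
	ring->space -= bytes;
	GEM_BUG_ON(ring->space < 0);

	return out;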

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 680105421bb9..49070b7aad30 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -584,10 +584,9 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	enum intel_engine_id id;
-	u32 flags = hw_flags | MI_MM_SPACE_GTT;
+	u32 *out, flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915.semaphores ?
@@ -617,99 +616,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		*out++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*out++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring,
-						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring,
-						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*out++ = i915_mmio_reg_offset(
+					   RING_PSMI_CTL(signaller->mmio_base));
+				*out++ = _MASKED_BIT_ENABLE(
+					 GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 		}
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring,
-			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
+	*out++ = MI_NOOP;
+	*out++ = MI_SET_CONTEXT;
+	*out++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*out++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(ring, last_reg);
-				intel_ring_emit(ring,
-						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*out++ = i915_mmio_reg_offset(last_reg);
+				*out++ = _MASKED_BIT_DISABLE(
+					 GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(ring,
-					MI_STORE_REGISTER_MEM |
-					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring,
-					i915_ggtt_offset(engine->scratch));
-			intel_ring_emit(ring, MI_NOOP);
+			*out++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+			*out++ = i915_mmio_reg_offset(last_reg);
+			*out++ = i915_ggtt_offset(engine->scratch);
+			*out++ = MI_NOOP;
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		*out++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
-	return ret;
+	return 0;
 }
 
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
-	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ring *ring = req->ring;
-	int i, ret;
+	u32 *out, *remap_info = req->i915->l3_parity.remap_info[slice];
+	int i;
 
 	if (!remap_info)
 		return 0;
 
-	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/*
 	 * Note: We do not worry about the concurrent register cacheline hang
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		*out++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+		*out++ = remap_info[i];
 	}
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 111b7f8c617a..5d4f0e02c0ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1336,25 +1336,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret, i;
+	u32 *out;
+	int i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(req, 4 * 3);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4 * 3);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
+		*out++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1415,7 +1415,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
-	u32 instp_mask;
+	u32 instp_mask, *out;
 	int ret;
 
 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1461,17 +1461,15 @@ execbuf_submit(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ring *ring = params->request->ring;
-
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		out = intel_ring_begin(params->request, 4);
+		if (IS_ERR(out))
+			return PTR_ERR(out);
+
+		*out++ = MI_NOOP;
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(INSTPM);
+		*out++ = instp_mask << 16 | instp_mode;
+		intel_ring_advance(params->request, out);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 22b3374096e8..a62a233ebc58 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -680,23 +680,22 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
-	int ret;
+	u32 *out;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(ring, upper_32_bits(addr));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(ring, lower_32_bits(addr));
-	intel_ring_advance(ring);
+	*out++ = MI_LOAD_REGISTER_IMM(1);
+	*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
+	*out++ = upper_32_bits(addr);
+	*out++ = MI_LOAD_REGISTER_IMM(1);
+	*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
+	*out++ = lower_32_bits(addr);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1723,8 +1722,8 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *out;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1732,17 +1731,17 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_LOAD_REGISTER_IMM(2);
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*out++ = PP_DIR_DCLV_2G;
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*out++ = get_pd_offset(ppgtt);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1750,8 +1749,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *out;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1759,17 +1758,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
+
+	*out++ = MI_LOAD_REGISTER_IMM(2);
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*out++ = PP_DIR_DCLV_2G;
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*out++ = get_pd_offset(ppgtt);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 72b7f7d9461d..90ffcba0f2f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -824,6 +824,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	struct intel_ring *ring = request->ring;
 	struct intel_timeline *timeline = request->timeline;
 	struct drm_i915_gem_request *prev;
+	u32 *out;
 	int err;
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -862,10 +863,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * GPU processing the request, we never over-estimate the
 	 * position of the ring's HEAD.
 	 */
-	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
-	GEM_BUG_ON(err);
 	request->postfix = ring->tail;
-	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+	out = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+	GEM_BUG_ON(IS_ERR(out));
 
 	/* Seal the request and mark it as pending execution. Note that
 	 * we may inspect this state, without holding any locks, during
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d0d042495dc7..285a356e9e42 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11762,14 +11762,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Can't queue multiple flips, so wait for the previous
 	 * one to finish before executing the next.
@@ -11778,13 +11776,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	*out++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*out++ = MI_NOOP;
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = 0; /* aux display base address, unused */
 
 	return 0;
 }
@@ -11796,26 +11793,24 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	if (intel_crtc->plane)
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*out++ = MI_NOOP;
+	*out++ = MI_DISPLAY_FLIP_I915 |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = MI_NOOP;
 
 	return 0;
 }
@@ -11827,25 +11822,22 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* i965+ uses the linear or tiled offsets from the
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
-			intel_fb_modifier_to_tiling(fb->modifier));
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset |
+		 intel_fb_modifier_to_tiling(fb->modifier);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -11853,7 +11845,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*out++ = pf | pipesrc;
 
 	return 0;
 }
@@ -11865,21 +11857,17 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*out++ = intel_crtc->flip_work->gtt_offset;
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11889,7 +11877,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*out++ = pf | pipesrc;
 
 	return 0;
 }
@@ -11902,9 +11890,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t plane_bit = 0;
+	u32 *out, plane_bit = 0;
 	int len, ret;
 
 	switch (intel_crtc->plane) {
@@ -11948,9 +11935,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Unmask the flip-done completion message. Note that the bspec says that
 	 * we should do this for both the BCS and RCS, and that we must not unmask
@@ -11962,31 +11949,28 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
 	if (req->engine->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					  DERRMR_PIPEB_PRI_FLIP_DONE |
-					  DERRMR_PIPEC_PRI_FLIP_DONE));
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(DERRMR);
+		*out++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+			 DERRMR_PIPEB_PRI_FLIP_DONE |
+			 DERRMR_PIPEC_PRI_FLIP_DONE);
 		if (IS_GEN8(dev_priv))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
-					      MI_SRM_LRM_GLOBAL_GTT);
+			*out++ = MI_STORE_REGISTER_MEM_GEN8 |
+				 MI_SRM_LRM_GLOBAL_GTT;
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
-					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring,
-				i915_ggtt_offset(req->engine->scratch) + 256);
+			*out++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+		*out++ = i915_mmio_reg_offset(DERRMR);
+		*out++ = i915_ggtt_offset(req->engine->scratch) + 256;
 		if (IS_GEN8(dev_priv)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			*out++ = 0;
+			*out++ = MI_NOOP;
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	*out++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+	*out++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = MI_NOOP;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e42990b56fa8..5eea07dad67e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -836,6 +836,7 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
+	u32 *out;
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -860,9 +861,11 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 			goto err;
 	}
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
+	out = intel_ring_begin(request, 0);
+	if (IS_ERR(out)) {
+		ret = PTR_ERR(out);
 		goto err_unreserve;
+	}
 
 	if (!ce->initialised) {
 		ret = engine->init_context(request);
@@ -891,9 +894,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	int ret, i;
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *out;
+	int ret, i;
 
 	if (w->count == 0)
 		return 0;
@@ -902,18 +905,18 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, w->count * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, w->count * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*out++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*out++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*out++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1406,27 +1409,27 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
-	int i, ret;
+	u32 *out;
+	int i;
 
-	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	*out++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
-		intel_ring_emit(ring, upper_32_bits(pd_daddr));
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
-		intel_ring_emit(ring, lower_32_bits(pd_daddr));
+		*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+		*out++ = upper_32_bits(pd_daddr);
+		*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+		*out++ = lower_32_bits(pd_daddr);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1435,8 +1438,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+	u32 *out;
 	int ret;
 
 	/* Don't rely in hw updating PDPs, specially in lite-restore.
@@ -1457,19 +1460,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
-			(ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1490,13 +1492,11 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
-	struct intel_ring *ring = request->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(request, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW + 1;
 
@@ -1513,13 +1513,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR |
-			MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0); /* upper addr */
-	intel_ring_emit(ring, 0); /* value */
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+	*out++ = 0; /* upper addr */
+	*out++ = 0; /* value */
+	intel_ring_advance(request, out);
 
 	return 0;
 }
@@ -1527,13 +1525,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 mode)
 {
-	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
 		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
-	u32 flags = 0;
-	int ret;
+	u32 *out, flags = 0;
 	int len;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -1575,45 +1571,45 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	if (dc_flush_wa)
 		len += 12;
 
-	ret = intel_ring_begin(request, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	if (vf_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = 0;
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = PIPE_CONTROL_CS_STALL;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(request, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 773e36253e7c..a7a28c496890 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
-	int ret;
+	u32 *out;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[index].control_value);
+		*out++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*out++ = table->table[index].control_value;
 	}
 
 	/*
@@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[0].control_value);
+		*out++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*out++ = table->table[0].control_value;
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	unsigned int i;
-	int ret;
+	u32 *out;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 2 * i, 2 * i + 1);
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 2 * i, 0);
 		i++;
 	}
 
@@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 0, 0);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 0608fad7f593..f612a02ddfbe 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	int ret;
+	u32 *out;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret) {
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
 	overlay->active = true;
@@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_I830(dev_priv))
 		i830_overlay_clock_gating(dev_priv, false);
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+	*out++ = overlay->flip_addr | OFC_UPDATE;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
-	u32 tmp;
-	int ret;
+	u32 tmp, *out;
 
 	WARN_ON(!overlay->active);
 
@@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 2);
-	if (ret) {
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*out++ = flip_addr;
+	intel_ring_advance(req, out);
 
 	intel_overlay_flip_prepare(overlay, vma);
 
@@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	u32 flip_addr = overlay->flip_addr;
-	int ret;
+	u32 *out, flip_addr = overlay->flip_addr;
 
 	WARN_ON(!overlay->active);
 
@@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret) {
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
-	ring = req->ring;
-
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*out++ = flip_addr;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
 	/* turn overlay off */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+	*out++ = flip_addr;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	intel_overlay_flip_prepare(overlay, NULL);
 
@@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
+	u32 *out;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ring *ring;
 
 		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-		ret = intel_ring_begin(req, 2);
-		if (ret) {
+		out = intel_ring_begin(req, 2);
+		if (IS_ERR(out)) {
 			i915_add_request_no_flush(req);
-			return ret;
+			return PTR_ERR(out);
 		}
 
-		ring = req->ring;
-		intel_ring_emit(ring,
-				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+		*out++ = MI_NOOP;
+		intel_ring_advance(req, out);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d3d1e64f2498..11d355d314e1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,22 +61,20 @@ void intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
 	cmd = MI_FLUSH;
 
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -84,9 +82,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
 	/*
 	 * read/write caches:
@@ -123,13 +119,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 			cmd |= MI_INVALIDATE_ISP;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -174,35 +170,33 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* low dword */
-	intel_ring_emit(ring, 0); /* high dword */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(5);
+	*out++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0; /* low dword */
+	*out++ = 0; /* high dword */
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(5);
+	*out++ = PIPE_CONTROL_QW_WRITE;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -210,10 +204,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
+	u32 *out, flags = 0;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -247,15 +240,15 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = flags;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -263,20 +256,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring,
-			PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*out++ = 0;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -284,11 +274,9 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
-	int ret;
+	u32 *out, flags = 0;
 
 	/*
 	 * Ensure that any following seqno writes only happen when the render
@@ -332,15 +320,15 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -349,20 +337,19 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -659,8 +646,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *out;
 	int ret, i;
 
 	if (w->count == 0)
@@ -670,18 +657,18 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, (w->count * 2 + 2));
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, (w->count * 2 + 2));
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*out++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*out++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*out++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1427,24 +1414,21 @@ static int
 gen8_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_WAIT |
-			MI_SEMAPHORE_GLOBAL_GTT |
-			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->global_seqno);
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_advance(ring);
+	*out++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+		 MI_SEMAPHORE_SAD_GTE_SDD;
+	*out++ = signal->global_seqno;
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	intel_ring_advance(req, out);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
@@ -1461,28 +1445,27 @@ static int
 gen6_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
 	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
-	int ret;
+	u32 *out;
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, dw1 | wait_mbox);
+	*out++ = dw1 | wait_mbox;
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->global_seqno - 1);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = signal->global_seqno - 1;
+	*out++ = 0;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1583,16 +1566,15 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 static int
 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_FLUSH;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 	return 0;
 }
 
@@ -1658,20 +1640,17 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 length,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			MI_BATCH_GTT |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE_I965);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1685,59 +1664,56 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
-	int ret;
+	u32 *out, cs_offset = i915_ggtt_offset(req->engine->scratch);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(ring, cs_offset);
-	intel_ring_emit(ring, 0xdeadbeef);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+	*out++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+	*out++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+	*out++ = cs_offset;
+	*out++ = 0xdeadbeef;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(req, 6 + 2);
-		if (ret)
-			return ret;
+		out = intel_ring_begin(req, 6 + 2);
+		if (IS_ERR(out))
+			return PTR_ERR(out);
 
 		/* Blit the batch (which has now all relocs applied) to the
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(ring,
-				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
-
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*out++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+		*out++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+		*out++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+		*out++ = cs_offset;
+		*out++ = 4096;
+		*out++ = offset;
+
+		*out++ = MI_FLUSH;
+		*out++ = MI_NOOP;
+		intel_ring_advance(req, out);
 
 		/* ... and execute it. */
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*out++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1747,17 +1723,16 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*out++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2171,7 +2146,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
-	int ret;
+	u32 *out;
 
 	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
 
@@ -2184,9 +2159,9 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!request->engine->buffer);
 	request->ring = request->engine->buffer;
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, 0);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
@@ -2241,7 +2216,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	return 0;
 }
 
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
 	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
@@ -2249,6 +2224,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
+	u32 *out;
 
 	total_bytes = bytes + req->reserved_space;
 
@@ -2275,7 +2251,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
-			return ret;
+			return ERR_PTR(ret);
 	}
 
 	if (unlikely(need_wrap)) {
@@ -2288,32 +2264,33 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		ring->space -= remain_actual;
 	}
 
+	out = (u32 *)(ring->vaddr + ring->tail);
+	ring->tail += bytes;
 	ring->space -= bytes;
 	GEM_BUG_ON(ring->space < 0);
-	GEM_BUG_ONLY(ring->advance = ring->tail + bytes);
-	return 0;
+
+	return out;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	int num_dwords =
-		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
-	int ret;
+		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	u32 *out;
 
 	if (num_dwords == 0)
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(req, num_dwords);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, num_dwords);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	while (num_dwords--)
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2357,13 +2334,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2385,16 +2360,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*out++ = 0; /* upper addr */
+		*out++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = 0;
+		*out++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 	return 0;
 }
 
@@ -2403,23 +2378,22 @@ gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2429,22 +2403,20 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
 		  u64 offset, u32 len,
 		  unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
+	*out++ = MI_BATCH_BUFFER_START |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2454,20 +2426,18 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
+	*out++ = MI_BATCH_BUFFER_START |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2476,13 +2446,11 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 
 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2503,17 +2471,16 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*out++ = 0; /* upper addr */
+		*out++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = 0;
+		*out++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 896838ca502c..3893bb8d2cb4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -144,7 +144,6 @@ struct intel_ring {
 
 	u32 head;
 	u32 tail;
-	GEM_BUG_ONLY_DECLARE(u32 advance);
 
 	int space;
 	int size;
@@ -495,22 +494,14 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
-int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
-{
-	*(uint32_t *)(ring->vaddr + ring->tail) = data;
-	ring->tail += 4;
-}
-
-static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
-{
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
 
-static inline void intel_ring_advance(struct intel_ring *ring)
+static inline void
+intel_ring_advance(struct drm_i915_gem_request *req, u32 *out)
 {
+	struct intel_ring *ring = req->ring;
 	/* Dummy function.
 	 *
 	 * This serves as a placeholder in the code so that the reader
@@ -519,7 +510,7 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 	 * reserved for the command packet (i.e. the value passed to
 	 * intel_ring_begin()).
 	 */
-	GEM_BUG_ONLY_ON(ring->tail != ring->advance);
+	GEM_BUG_ON((ring->vaddr + ring->tail) != out);
 }
 
 static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
-- 
2.7.4


* Re: [PATCH] drm/i915: Emit to ringbuffer directly
  2017-02-08 13:10 [PATCH] drm/i915: Emit to ringbuffer directly Tvrtko Ursulin
@ 2017-02-08 13:36 ` Chris Wilson
  2017-02-08 13:58   ` Chris Wilson
  2017-02-08 14:14   ` [PATCH v5] " Tvrtko Ursulin
  2017-02-08 22:32 ` ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev7) Patchwork
                   ` (6 subsequent siblings)
  7 siblings, 2 replies; 28+ messages in thread
From: Chris Wilson @ 2017-02-08 13:36 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: Intel-gfx

On Wed, Feb 08, 2017 at 01:10:56PM +0000, Tvrtko Ursulin wrote:
> @@ -2288,32 +2264,33 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>  		ring->space -= remain_actual;
>  	}
>  
> +	out = (u32 *)(ring->vaddr + ring->tail);

Checkpatch will complain about unaligned brackets, but I'll whine that
this cast is unnecessary.
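For illustration, the cast-free form would just be:

	out = ring->vaddr + ring->tail;

(a minimal sketch — assuming ring->vaddr remains a void pointer, the
implicit conversion to u32 * needs no cast).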

> -static inline void intel_ring_advance(struct intel_ring *ring)
> +static inline void
> +intel_ring_advance(struct drm_i915_gem_request *req, u32 *out)
>  {
> +	struct intel_ring *ring = req->ring;
>  	/* Dummy function.
>  	 *
>  	 * This serves as a placeholder in the code so that the reader
> @@ -519,7 +510,7 @@ static inline void intel_ring_advance(struct intel_ring *ring)
>  	 * reserved for the command packet (i.e. the value passed to
>  	 * intel_ring_begin()).
>  	 */
> -	GEM_BUG_ONLY_ON(ring->tail != ring->advance);
> +	GEM_BUG_ON((ring->vaddr + ring->tail) != out);

This will generate a warning for an unused var if !GEM_DEBUG; just use
	GEM_BUG_ON(req->ring->vaddr + req->ring->tail != out);
It'll fit, the compiler will do CSE, and it's only for debug...
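
A minimal sketch of that form (illustrative only — assuming GEM_BUG_ON()
compiles to nothing when !GEM_DEBUG, which is what would leave a local
'ring' unused):

	static inline void
	intel_ring_advance(struct drm_i915_gem_request *req, u32 *out)
	{
		/* Placeholder only: check that the emitter filled exactly
		 * the space reserved by intel_ring_begin().
		 */
		GEM_BUG_ON(req->ring->vaddr + req->ring->tail != out);
	}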

The transformation looked good. I'd suggest we do a trybot run with
i915.mmio_flip=0 to force the MI_DISPLAY_FLIP paths to be exercised;
aside from gen2/3, that should cover all of the intel_ring_begin() users.

In anticipation of those extra checks performed,
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>

A couple of future steps: we can eliminate some dwords by inserting the
MI_NOOP to align the tail in add_request, and I'd
s/intel_ring_begin/i915_gem_request_emit/ for the thin veneer of OOP.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* Re: [PATCH] drm/i915: Emit to ringbuffer directly
  2017-02-08 13:36 ` Chris Wilson
@ 2017-02-08 13:58   ` Chris Wilson
  2017-02-08 14:14   ` [PATCH v5] " Tvrtko Ursulin
  1 sibling, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2017-02-08 13:58 UTC (permalink / raw)
  To: Tvrtko Ursulin, Intel-gfx, Tvrtko Ursulin

On Wed, Feb 08, 2017 at 01:36:53PM +0000, Chris Wilson wrote:
> On Wed, Feb 08, 2017 at 01:10:56PM +0000, Tvrtko Ursulin wrote:
> > @@ -2288,32 +2264,33 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
> >  		ring->space -= remain_actual;
> >  	}
> >  
> > +	out = (u32 *)(ring->vaddr + ring->tail);
> 
> Checkpatch will complain about unaligned brackets, but I'll whine that
> this cast is unnecessary.
> 
> > -static inline void intel_ring_advance(struct intel_ring *ring)
> > +static inline void
> > +intel_ring_advance(struct drm_i915_gem_request *req, u32 *out)
> >  {
> > +	struct intel_ring *ring = req->ring;
> >  	/* Dummy function.
> >  	 *
> >  	 * This serves as a placeholder in the code so that the reader
> > @@ -519,7 +510,7 @@ static inline void intel_ring_advance(struct intel_ring *ring)
> >  	 * reserved for the command packet (i.e. the value passed to
> >  	 * intel_ring_begin()).
> >  	 */
> > -	GEM_BUG_ONLY_ON(ring->tail != ring->advance);
> > +	GEM_BUG_ON((ring->vaddr + ring->tail) != out);
> 
> This will generate a warning for an unused var if !GEM_DEBUG; just use
> 	GEM_BUG_ON(req->ring->vaddr + req->ring->tail != out);
> It'll fit, the compiler will do CSE, and it's only for debug...
> 
> The transformation looked good. I'd suggest we do a trybot run with
> i915.mmio_flip=0 to force the MI_DISPLAY_FLIP paths to be exercised;
> aside from gen2/3, that should cover all of the intel_ring_begin() users.

Oh, that suggestion forgets that atomic took over and we can't run the
CS flips anymore.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* [PATCH v5] drm/i915: Emit to ringbuffer directly
  2017-02-08 13:36 ` Chris Wilson
  2017-02-08 13:58   ` Chris Wilson
@ 2017-02-08 14:14   ` Tvrtko Ursulin
  2017-02-08 17:49     ` Chris Wilson
  1 sibling, 1 reply; 28+ messages in thread
From: Tvrtko Ursulin @ 2017-02-08 14:14 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

This removes the usage of intel_ring_emit in favour of
directly writing to the ring buffer.

intel_ring_emit was preventing the compiler from optimising
fetch and increment of the current ring buffer pointer and
therefore generating very verbose code for every write.

It had no useful purpose since all ringbuffer operations
are started and ended with intel_ring_begin and
intel_ring_advance respectively, with no bail out in the
middle possible, so it is fine to increment the tail in
intel_ring_begin and let the code manage the pointer
itself.

Useless instruction removal amounts to approximately
two and a half kilobytes of saved text on my build.

Not sure if this has any measurable performance
implications, but executing a ton of useless instructions
on fast paths cannot be good.

Patch is not fully polished, but it compiles and runs
on Gen9 at least.

v2:
 * Change return from intel_ring_begin to error pointer by
   popular demand.
 * Move tail increment to intel_ring_advance to enable some
   error checking.

v3:
 * Move tail advance back into intel_ring_begin.
 * Rebase and tidy.

v4:
 * Complete rebase after a few months since v3.

v5:
 * Remove unnecessary cast and fix !debug compile. (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  76 ++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  40 ++-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  69 ++--
 drivers/gpu/drm/i915/i915_gem_request.c    |   6 +-
 drivers/gpu/drm/i915/intel_display.c       | 132 ++++----
 drivers/gpu/drm/i915/intel_lrc.c           | 156 +++++----
 drivers/gpu/drm/i915/intel_mocs.c          |  51 ++-
 drivers/gpu/drm/i915/intel_overlay.c       |  79 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 487 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  18 +-
 10 files changed, 513 insertions(+), 601 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 680105421bb9..49070b7aad30 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -584,10 +584,9 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	enum intel_engine_id id;
-	u32 flags = hw_flags | MI_MM_SPACE_GTT;
+	u32 *out, flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915.semaphores ?
@@ -617,99 +616,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		*out++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*out++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring,
-						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring,
-						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*out++ = i915_mmio_reg_offset(
+					   RING_PSMI_CTL(signaller->mmio_base));
+				*out++ = _MASKED_BIT_ENABLE(
+					 GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 		}
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring,
-			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
+	*out++ = MI_NOOP;
+	*out++ = MI_SET_CONTEXT;
+	*out++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*out++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(ring, last_reg);
-				intel_ring_emit(ring,
-						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*out++ = i915_mmio_reg_offset(last_reg);
+				*out++ = _MASKED_BIT_DISABLE(
+					 GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(ring,
-					MI_STORE_REGISTER_MEM |
-					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring,
-					i915_ggtt_offset(engine->scratch));
-			intel_ring_emit(ring, MI_NOOP);
+			*out++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+			*out++ = i915_mmio_reg_offset(last_reg);
+			*out++ = i915_ggtt_offset(engine->scratch);
+			*out++ = MI_NOOP;
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		*out++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return ret;
 }
 
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
-	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ring *ring = req->ring;
-	int i, ret;
+	u32 *out, *remap_info = req->i915->l3_parity.remap_info[slice];
+	int i;
 
 	if (!remap_info)
 		return 0;
 
-	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/*
 	 * Note: We do not worry about the concurrent register cacheline hang
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		*out++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+		*out++ = remap_info[i];
 	}
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 111b7f8c617a..5d4f0e02c0ea 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1336,25 +1336,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret, i;
+	u32 *out;
+	int i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(req, 4 * 3);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4 * 3);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
+		*out++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1415,7 +1415,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
-	u32 instp_mask;
+	u32 instp_mask, *out;
 	int ret;
 
 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1461,17 +1461,15 @@ execbuf_submit(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ring *ring = params->request->ring;
-
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		out = intel_ring_begin(params->request, 4);
+		if (IS_ERR(out))
+			return PTR_ERR(out);
+
+		*out++ = MI_NOOP;
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(INSTPM);
+		*out++ = instp_mask << 16 | instp_mode;
+		intel_ring_advance(params->request, out);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 22b3374096e8..a62a233ebc58 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -680,23 +680,22 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
-	int ret;
+	u32 *out;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(ring, upper_32_bits(addr));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(ring, lower_32_bits(addr));
-	intel_ring_advance(ring);
+	*out++ = MI_LOAD_REGISTER_IMM(1);
+	*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
+	*out++ = upper_32_bits(addr);
+	*out++ = MI_LOAD_REGISTER_IMM(1);
+	*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
+	*out++ = lower_32_bits(addr);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1723,8 +1722,8 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *out;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1732,17 +1731,17 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_LOAD_REGISTER_IMM(2);
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*out++ = PP_DIR_DCLV_2G;
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*out++ = get_pd_offset(ppgtt);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1750,8 +1749,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *out;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1759,17 +1758,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
+
+	*out++ = MI_LOAD_REGISTER_IMM(2);
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*out++ = PP_DIR_DCLV_2G;
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*out++ = get_pd_offset(ppgtt);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 72b7f7d9461d..90ffcba0f2f8 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -824,6 +824,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	struct intel_ring *ring = request->ring;
 	struct intel_timeline *timeline = request->timeline;
 	struct drm_i915_gem_request *prev;
+	u32 *out;
 	int err;
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -862,10 +863,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * GPU processing the request, we never over-estimate the
 	 * position of the ring's HEAD.
 	 */
-	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
-	GEM_BUG_ON(err);
 	request->postfix = ring->tail;
-	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+	out = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+	GEM_BUG_ON(IS_ERR(out));
 
 	/* Seal the request and mark it as pending execution. Note that
 	 * we may inspect this state, without holding any locks, during
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d0d042495dc7..285a356e9e42 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11762,14 +11762,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Can't queue multiple flips, so wait for the previous
 	 * one to finish before executing the next.
@@ -11778,13 +11776,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	*out++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*out++ = MI_NOOP;
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = 0; /* aux display base address, unused */
 
 	return 0;
 }
@@ -11796,26 +11793,24 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	if (intel_crtc->plane)
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*out++ = MI_NOOP;
+	*out++ = MI_DISPLAY_FLIP_I915 |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = MI_NOOP;
 
 	return 0;
 }
@@ -11827,25 +11822,22 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* i965+ uses the linear or tiled offsets from the
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
-			intel_fb_modifier_to_tiling(fb->modifier));
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset |
+		 intel_fb_modifier_to_tiling(fb->modifier);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -11853,7 +11845,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*out++ = pf | pipesrc;
 
 	return 0;
 }
@@ -11865,21 +11857,17 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*out++ = intel_crtc->flip_work->gtt_offset;
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11889,7 +11877,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*out++ = pf | pipesrc;
 
 	return 0;
 }
@@ -11902,9 +11890,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t plane_bit = 0;
+	u32 *out, plane_bit = 0;
 	int len, ret;
 
 	switch (intel_crtc->plane) {
@@ -11948,9 +11935,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Unmask the flip-done completion message. Note that the bspec says that
 	 * we should do this for both the BCS and RCS, and that we must not unmask
@@ -11962,31 +11949,28 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
 	if (req->engine->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					  DERRMR_PIPEB_PRI_FLIP_DONE |
-					  DERRMR_PIPEC_PRI_FLIP_DONE));
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(DERRMR);
+		*out++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+			 DERRMR_PIPEB_PRI_FLIP_DONE |
+			 DERRMR_PIPEC_PRI_FLIP_DONE);
 		if (IS_GEN8(dev_priv))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
-					      MI_SRM_LRM_GLOBAL_GTT);
+			*out++ = MI_STORE_REGISTER_MEM_GEN8 |
+				 MI_SRM_LRM_GLOBAL_GTT;
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
-					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring,
-				i915_ggtt_offset(req->engine->scratch) + 256);
+			*out++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+		*out++ = i915_mmio_reg_offset(DERRMR);
+		*out++ = i915_ggtt_offset(req->engine->scratch) + 256;
 		if (IS_GEN8(dev_priv)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			*out++ = 0;
+			*out++ = MI_NOOP;
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	*out++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+	*out++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = MI_NOOP;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e42990b56fa8..5eea07dad67e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -836,6 +836,7 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
+	u32 *out;
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -860,9 +861,11 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 			goto err;
 	}
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
+	out = intel_ring_begin(request, 0);
+	if (IS_ERR(out)) {
+		ret = PTR_ERR(out);
 		goto err_unreserve;
+	}
 
 	if (!ce->initialised) {
 		ret = engine->init_context(request);
@@ -891,9 +894,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	int ret, i;
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *out;
+	int ret, i;
 
 	if (w->count == 0)
 		return 0;
@@ -902,18 +905,18 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, w->count * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, w->count * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*out++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*out++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*out++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1406,27 +1409,27 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
-	int i, ret;
+	u32 *out;
+	int i;
 
-	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	*out++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
-		intel_ring_emit(ring, upper_32_bits(pd_daddr));
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
-		intel_ring_emit(ring, lower_32_bits(pd_daddr));
+		*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+		*out++ = upper_32_bits(pd_daddr);
+		*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+		*out++ = lower_32_bits(pd_daddr);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1435,8 +1438,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+	u32 *out;
 	int ret;
 
 	/* Don't rely in hw updating PDPs, specially in lite-restore.
@@ -1457,19 +1460,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
-			(ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1490,13 +1492,11 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
-	struct intel_ring *ring = request->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(request, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW + 1;
 
@@ -1513,13 +1513,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR |
-			MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0); /* upper addr */
-	intel_ring_emit(ring, 0); /* value */
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+	*out++ = 0; /* upper addr */
+	*out++ = 0; /* value */
+	intel_ring_advance(request, out);
 
 	return 0;
 }
@@ -1527,13 +1525,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 mode)
 {
-	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
 		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
-	u32 flags = 0;
-	int ret;
+	u32 *out, flags = 0;
 	int len;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -1575,45 +1571,45 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	if (dc_flush_wa)
 		len += 12;
 
-	ret = intel_ring_begin(request, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	if (vf_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = 0;
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = PIPE_CONTROL_CS_STALL;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(request, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 773e36253e7c..a7a28c496890 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
-	int ret;
+	u32 *out;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[index].control_value);
+		*out++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*out++ = table->table[index].control_value;
 	}
 
 	/*
@@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[0].control_value);
+		*out++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*out++ = table->table[0].control_value;
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	unsigned int i;
-	int ret;
+	u32 *out;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 2 * i, 2 * i + 1);
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 2 * i, 0);
 		i++;
 	}
 
@@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 0, 0);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 0608fad7f593..f612a02ddfbe 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	int ret;
+	u32 *out;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret) {
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
 	overlay->active = true;
@@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_I830(dev_priv))
 		i830_overlay_clock_gating(dev_priv, false);
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+	*out++ = overlay->flip_addr | OFC_UPDATE;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
-	u32 tmp;
-	int ret;
+	u32 tmp, *out;
 
 	WARN_ON(!overlay->active);
 
@@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 2);
-	if (ret) {
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*out++ = flip_addr;
+	intel_ring_advance(req, out);
 
 	intel_overlay_flip_prepare(overlay, vma);
 
@@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	u32 flip_addr = overlay->flip_addr;
-	int ret;
+	u32 *out, flip_addr = overlay->flip_addr;
 
 	WARN_ON(!overlay->active);
 
@@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret) {
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
-	ring = req->ring;
-
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*out++ = flip_addr;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
 	/* turn overlay off */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+	*out++ = flip_addr;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	intel_overlay_flip_prepare(overlay, NULL);
 
@@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
+	u32 *out;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ring *ring;
 
 		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-		ret = intel_ring_begin(req, 2);
-		if (ret) {
+		out = intel_ring_begin(req, 2);
+		if (IS_ERR(out)) {
 			i915_add_request_no_flush(req);
-			return ret;
+			return PTR_ERR(out);
 		}
 
-		ring = req->ring;
-		intel_ring_emit(ring,
-				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+		*out++ = MI_NOOP;
+		intel_ring_advance(req, out);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d3d1e64f2498..bd330edabea7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,22 +61,20 @@ void intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
 	cmd = MI_FLUSH;
 
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -84,9 +82,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
 	/*
 	 * read/write caches:
@@ -123,13 +119,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 			cmd |= MI_INVALIDATE_ISP;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -174,35 +170,33 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* low dword */
-	intel_ring_emit(ring, 0); /* high dword */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(5);
+	*out++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0; /* low dword */
+	*out++ = 0; /* high dword */
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(5);
+	*out++ = PIPE_CONTROL_QW_WRITE;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -210,10 +204,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
+	u32 *out, flags = 0;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -247,15 +240,15 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = flags;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -263,20 +256,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring,
-			PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*out++ = 0;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -284,11 +274,9 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
-	int ret;
+	u32 *out, flags = 0;
 
 	/*
 	 * Ensure that any following seqno writes only happen when the render
@@ -332,15 +320,15 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -349,20 +337,19 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -659,8 +646,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *out;
 	int ret, i;
 
 	if (w->count == 0)
@@ -670,18 +657,18 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, (w->count * 2 + 2));
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, (w->count * 2 + 2));
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*out++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*out++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*out++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1427,24 +1414,21 @@ static int
 gen8_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_WAIT |
-			MI_SEMAPHORE_GLOBAL_GTT |
-			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->global_seqno);
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_advance(ring);
+	*out++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+		 MI_SEMAPHORE_SAD_GTE_SDD;
+	*out++ = signal->global_seqno;
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	intel_ring_advance(req, out);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
@@ -1461,28 +1445,27 @@ static int
 gen6_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
 	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
-	int ret;
+	u32 *out;
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, dw1 | wait_mbox);
+	*out++ = dw1 | wait_mbox;
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->global_seqno - 1);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = signal->global_seqno - 1;
+	*out++ = 0;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1583,16 +1566,15 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 static int
 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_FLUSH;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 	return 0;
 }
 
@@ -1658,20 +1640,17 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 length,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			MI_BATCH_GTT |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE_I965);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1685,59 +1664,56 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
-	int ret;
+	u32 *out, cs_offset = i915_ggtt_offset(req->engine->scratch);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(ring, cs_offset);
-	intel_ring_emit(ring, 0xdeadbeef);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+	*out++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+	*out++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+	*out++ = cs_offset;
+	*out++ = 0xdeadbeef;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(req, 6 + 2);
-		if (ret)
-			return ret;
+		out = intel_ring_begin(req, 6 + 2);
+		if (IS_ERR(out))
+			return PTR_ERR(out);
 
 		/* Blit the batch (which has now all relocs applied) to the
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(ring,
-				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
-
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*out++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+		*out++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+		*out++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+		*out++ = cs_offset;
+		*out++ = 4096;
+		*out++ = offset;
+
+		*out++ = MI_FLUSH;
+		*out++ = MI_NOOP;
+		intel_ring_advance(req, out);
 
 		/* ... and execute it. */
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*out++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1747,17 +1723,16 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*out++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2171,7 +2146,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
-	int ret;
+	u32 *out;
 
 	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
 
@@ -2184,9 +2159,9 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!request->engine->buffer);
 	request->ring = request->engine->buffer;
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, 0);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
@@ -2241,7 +2216,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	return 0;
 }
 
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
 	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
@@ -2249,6 +2224,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
+	u32 *out;
 
 	total_bytes = bytes + req->reserved_space;
 
@@ -2275,7 +2251,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
-			return ret;
+			return ERR_PTR(ret);
 	}
 
 	if (unlikely(need_wrap)) {
@@ -2288,32 +2264,33 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		ring->space -= remain_actual;
 	}
 
+	out = ring->vaddr + ring->tail;
+	ring->tail += bytes;
 	ring->space -= bytes;
 	GEM_BUG_ON(ring->space < 0);
-	GEM_BUG_ONLY(ring->advance = ring->tail + bytes);
-	return 0;
+
+	return out;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	int num_dwords =
-		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
-	int ret;
+		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	u32 *out;
 
 	if (num_dwords == 0)
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(req, num_dwords);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, num_dwords);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	while (num_dwords--)
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2357,13 +2334,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2385,16 +2360,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*out++ = 0; /* upper addr */
+		*out++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = 0;
+		*out++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 	return 0;
 }
 
@@ -2403,23 +2378,22 @@ gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2429,22 +2403,20 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
 		  u64 offset, u32 len,
 		  unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
+	*out++ = MI_BATCH_BUFFER_START |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2454,20 +2426,18 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
+	*out++ = MI_BATCH_BUFFER_START |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2476,13 +2446,11 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 
 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2503,17 +2471,16 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*out++ = 0; /* upper addr */
+		*out++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = 0;
+		*out++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 896838ca502c..8303fa7abc05 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -144,7 +144,6 @@ struct intel_ring {
 
 	u32 head;
 	u32 tail;
-	GEM_BUG_ONLY_DECLARE(u32 advance);
 
 	int space;
 	int size;
@@ -495,21 +494,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
-int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
-{
-	*(uint32_t *)(ring->vaddr + ring->tail) = data;
-	ring->tail += 4;
-}
-
-static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
-{
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
 
-static inline void intel_ring_advance(struct intel_ring *ring)
+static inline void
+intel_ring_advance(struct drm_i915_gem_request *req, u32 *out)
 {
 	/* Dummy function.
 	 *
@@ -519,7 +509,7 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 	 * reserved for the command packet (i.e. the value passed to
 	 * intel_ring_begin()).
 	 */
-	GEM_BUG_ONLY_ON(ring->tail != ring->advance);
+	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != out);
 }
 
 static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
-- 
2.7.4
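
In case it helps review, the core of the new contract is that begin
now does all the tail accounting up front, roughly (condensed from
the intel_ring_begin hunk above, not a literal quote):

	out = ring->vaddr + ring->tail;	/* packet starts here */
	ring->tail += bytes;		/* tail moves immediately... */
	ring->space -= bytes;
	GEM_BUG_ON(ring->space < 0);
	return out;			/* ...caller just fills in dwords */

so intel_ring_advance degenerates into a pure debug assertion that
the caller wrote exactly the number of dwords it asked for.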


* Re: [PATCH v5] drm/i915: Emit to ringbuffer directly
  2017-02-08 14:14   ` [PATCH v5] " Tvrtko Ursulin
@ 2017-02-08 17:49     ` Chris Wilson
  2017-02-08 18:04       ` [PATCH v6] " Tvrtko Ursulin
  0 siblings, 1 reply; 28+ messages in thread
From: Chris Wilson @ 2017-02-08 17:49 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: Intel-gfx

Just recording a couple of BUG_ONs for posterity; they would be nice
additions to this patch:

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 11d355d314e1..702c023cd502 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2264,6 +2264,7 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
                ring->space -= remain_actual;
        }
 
+       GEM_BUG_ON(ring->tail > ring->size - bytes);
        out = (u32 *)(ring->vaddr + ring->tail);
        ring->tail += bytes;
        ring->space -= bytes;

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 896838ca502c..117e1e735fc7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -526,6 +526,7 @@ static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
 {
        /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
        u32 offset = addr - ring->vaddr;
+       GEM_BUG_ON(offset > ring->size);
        return offset & (ring->size - 1);
 }


On Wed, Feb 08, 2017 at 02:14:34PM +0000, Tvrtko Ursulin wrote:
> @@ -862,10 +863,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
>  	 * GPU processing the request, we never over-estimate the
>  	 * position of the ring's HEAD.
>  	 */
> -	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
> -	GEM_BUG_ON(err);
>  	request->postfix = ring->tail;
> -	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
> +	out = intel_ring_begin(request, engine->emit_breadcrumb_sz);
> +	GEM_BUG_ON(IS_ERR(out));

A test for BAT :)
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* [PATCH v6] drm/i915: Emit to ringbuffer directly
  2017-02-08 17:49     ` Chris Wilson
@ 2017-02-08 18:04       ` Tvrtko Ursulin
  2017-02-08 18:21         ` Chris Wilson
  2017-02-09  8:00         ` [PATCH v6] " Joonas Lahtinen
  0 siblings, 2 replies; 28+ messages in thread
From: Tvrtko Ursulin @ 2017-02-08 18:04 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

This removes the usage of intel_ring_emit in favour of
directly writing to the ring buffer.

intel_ring_emit was preventing the compiler from optimising
the fetch and increment of the current ring buffer pointer,
and was therefore generating very verbose code for every
write.

It served no useful purpose since all ringbuffer operations
are bracketed by intel_ring_begin and intel_ring_advance,
with no possibility of bailing out in the middle, so it is
fine to increment the tail in intel_ring_begin and let the
emitting code manage the pointer itself.
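
A minimal before/after sketch of an emission sequence (simplified
from the bsd_ring_flush conversion; illustrative, not a real hunk):

	/* Before: each helper call re-read and bumped ring->tail. */
	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* After: begin returns a pointer and advances the tail once. */
	out = intel_ring_begin(req, 2);
	if (IS_ERR(out))
		return PTR_ERR(out);
	*out++ = MI_FLUSH;
	*out++ = MI_NOOP;
	intel_ring_advance(req, out);	/* debug-only dword count check */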

Removing these useless instructions saves approximately
two and a half kilobytes of text on my build.

I am not sure whether this has any measurable performance
impact, but executing a ton of useless instructions on fast
paths cannot be good.

Patch is not fully polished, but it compiles and runs
on Gen9 at least.

v2:
 * Change return from intel_ring_begin to error pointer by
   popular demand.
 * Move tail increment to intel_ring_advance to enable some
   error checking.

v3:
 * Move tail advance back into intel_ring_begin.
 * Rebase and tidy.

v4:
 * Complete rebase after a few months since v3.

v5:
 * Remove unnecessary cast and fix !debug compile. (Chris Wilson)

v6:
 * Make intel_ring_offset take request as well.
 * Fix recording of request postfix plus a sprinkle of asserts.
   (Chris Wilson)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  76 ++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  40 ++-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  69 ++--
 drivers/gpu/drm/i915/i915_gem_request.c    |  10 +-
 drivers/gpu/drm/i915/intel_display.c       | 132 ++++----
 drivers/gpu/drm/i915/intel_lrc.c           | 162 +++++-----
 drivers/gpu/drm/i915/intel_mocs.c          |  51 ++-
 drivers/gpu/drm/i915/intel_overlay.c       |  79 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 492 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  26 +-
 10 files changed, 527 insertions(+), 610 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 680105421bb9..49070b7aad30 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -584,10 +584,9 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	enum intel_engine_id id;
-	u32 flags = hw_flags | MI_MM_SPACE_GTT;
+	u32 *out, flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915.semaphores ?
@@ -617,99 +616,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		*out++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*out++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring,
-						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring,
-						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*out++ = i915_mmio_reg_offset(
+					   RING_PSMI_CTL(signaller->mmio_base));
+				*out++ = _MASKED_BIT_ENABLE(
+					 GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 		}
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring,
-			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
+	*out++ = MI_NOOP;
+	*out++ = MI_SET_CONTEXT;
+	*out++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*out++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(ring, last_reg);
-				intel_ring_emit(ring,
-						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*out++ = i915_mmio_reg_offset(last_reg);
+				*out++ = _MASKED_BIT_DISABLE(
+					 GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(ring,
-					MI_STORE_REGISTER_MEM |
-					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring,
-					i915_ggtt_offset(engine->scratch));
-			intel_ring_emit(ring, MI_NOOP);
+			*out++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+			*out++ = i915_mmio_reg_offset(last_reg);
+			*out++ = i915_ggtt_offset(engine->scratch);
+			*out++ = MI_NOOP;
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		*out++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return ret;
 }
 
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
-	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ring *ring = req->ring;
-	int i, ret;
+	u32 *out, *remap_info = req->i915->l3_parity.remap_info[slice];
+	int i;
 
 	if (!remap_info)
 		return 0;
 
-	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/*
 	 * Note: We do not worry about the concurrent register cacheline hang
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		*out++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+		*out++ = remap_info[i];
 	}
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8b60a14af9e2..8ff0f186f558 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1336,25 +1336,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret, i;
+	u32 *out;
+	int i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(req, 4 * 3);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4 * 3);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
+		*out++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1415,7 +1415,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
-	u32 instp_mask;
+	u32 instp_mask, *out;
 	int ret;
 
 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1461,17 +1461,15 @@ execbuf_submit(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ring *ring = params->request->ring;
-
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		out = intel_ring_begin(params->request, 4);
+		if (IS_ERR(out))
+			return PTR_ERR(out);
+
+		*out++ = MI_NOOP;
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(INSTPM);
+		*out++ = instp_mask << 16 | instp_mode;
+		intel_ring_advance(params->request, out);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 22b3374096e8..a62a233ebc58 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -680,23 +680,22 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
-	int ret;
+	u32 *out;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(ring, upper_32_bits(addr));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(ring, lower_32_bits(addr));
-	intel_ring_advance(ring);
+	*out++ = MI_LOAD_REGISTER_IMM(1);
+	*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
+	*out++ = upper_32_bits(addr);
+	*out++ = MI_LOAD_REGISTER_IMM(1);
+	*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
+	*out++ = lower_32_bits(addr);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1723,8 +1722,8 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *out;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1732,17 +1731,17 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_LOAD_REGISTER_IMM(2);
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*out++ = PP_DIR_DCLV_2G;
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*out++ = get_pd_offset(ppgtt);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1750,8 +1749,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *out;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1759,17 +1758,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
+
+	*out++ = MI_LOAD_REGISTER_IMM(2);
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*out++ = PP_DIR_DCLV_2G;
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*out++ = get_pd_offset(ppgtt);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 6cf3b3e7c0fd..63d7f2fb5018 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -830,6 +830,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	struct intel_ring *ring = request->ring;
 	struct intel_timeline *timeline = request->timeline;
 	struct drm_i915_gem_request *prev;
+	u32 *out;
 	int err;
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -868,10 +869,11 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * GPU processing the request, we never over-estimate the
 	 * position of the ring's HEAD.
 	 */
-	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
-	GEM_BUG_ON(err);
-	request->postfix = ring->tail;
-	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+	out = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+	GEM_BUG_ON(IS_ERR(out));
+	GEM_BUG_ON(engine->emit_breadcrumb_sz * sizeof(u32) > ring->tail);
+	request->postfix = ring->tail -
+			   engine->emit_breadcrumb_sz * sizeof(u32);
 
 	/* Seal the request and mark it as pending execution. Note that
 	 * we may inspect this state, without holding any locks, during
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d0d042495dc7..285a356e9e42 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11762,14 +11762,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Can't queue multiple flips, so wait for the previous
 	 * one to finish before executing the next.
@@ -11778,13 +11776,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	*out++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*out++ = MI_NOOP;
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = 0; /* aux display base address, unused */
 
 	return 0;
 }
@@ -11796,26 +11793,24 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	if (intel_crtc->plane)
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*out++ = MI_NOOP;
+	*out++ = MI_DISPLAY_FLIP_I915 |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = MI_NOOP;
 
 	return 0;
 }
@@ -11827,25 +11822,22 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* i965+ uses the linear or tiled offsets from the
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
-			intel_fb_modifier_to_tiling(fb->modifier));
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset |
+		 intel_fb_modifier_to_tiling(fb->modifier);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -11853,7 +11845,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*out++ = pf | pipesrc;
 
 	return 0;
 }
@@ -11865,21 +11857,17 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*out++ = intel_crtc->flip_work->gtt_offset;
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11889,7 +11877,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*out++ = pf | pipesrc;
 
 	return 0;
 }
@@ -11902,9 +11890,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t plane_bit = 0;
+	u32 *out, plane_bit = 0;
 	int len, ret;
 
 	switch (intel_crtc->plane) {
@@ -11948,9 +11935,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Unmask the flip-done completion message. Note that the bspec says that
 	 * we should do this for both the BCS and RCS, and that we must not unmask
@@ -11962,31 +11949,28 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
 	if (req->engine->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					  DERRMR_PIPEB_PRI_FLIP_DONE |
-					  DERRMR_PIPEC_PRI_FLIP_DONE));
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(DERRMR);
+		*out++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+			 DERRMR_PIPEB_PRI_FLIP_DONE |
+			 DERRMR_PIPEC_PRI_FLIP_DONE);
 		if (IS_GEN8(dev_priv))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
-					      MI_SRM_LRM_GLOBAL_GTT);
+			*out++ = MI_STORE_REGISTER_MEM_GEN8 |
+				 MI_SRM_LRM_GLOBAL_GTT;
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
-					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring,
-				i915_ggtt_offset(req->engine->scratch) + 256);
+			*out++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+		*out++ = i915_mmio_reg_offset(DERRMR);
+		*out++ = i915_ggtt_offset(req->engine->scratch) + 256;
 		if (IS_GEN8(dev_priv)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			*out++ = 0;
+			*out++ = MI_NOOP;
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	*out++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+	*out++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = MI_NOOP;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e071bcdc2833..1d863e8a55e0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -850,6 +850,7 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
+	u32 *out;
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -874,9 +875,11 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 			goto err;
 	}
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
+	out = intel_ring_begin(request, 0);
+	if (IS_ERR(out)) {
+		ret = PTR_ERR(out);
 		goto err_unreserve;
+	}
 
 	if (!ce->initialised) {
 		ret = engine->init_context(request);
@@ -905,9 +908,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	int ret, i;
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *out;
+	int ret, i;
 
 	if (w->count == 0)
 		return 0;
@@ -916,18 +919,18 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, w->count * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, w->count * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*out++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*out++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*out++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1420,27 +1423,27 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
-	int i, ret;
+	u32 *out;
+	int i;
 
-	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	*out++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
-		intel_ring_emit(ring, upper_32_bits(pd_daddr));
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
-		intel_ring_emit(ring, lower_32_bits(pd_daddr));
+		*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+		*out++ = upper_32_bits(pd_daddr);
+		*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+		*out++ = lower_32_bits(pd_daddr);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1449,8 +1452,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+	u32 *out;
 	int ret;
 
 	/* Don't rely in hw updating PDPs, specially in lite-restore.
@@ -1471,19 +1474,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
-			(ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1504,13 +1506,11 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
-	struct intel_ring *ring = request->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(request, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW + 1;
 
@@ -1527,13 +1527,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR |
-			MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0); /* upper addr */
-	intel_ring_emit(ring, 0); /* value */
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+	*out++ = 0; /* upper addr */
+	*out++ = 0; /* value */
+	intel_ring_advance(request, out);
 
 	return 0;
 }
@@ -1541,13 +1539,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 mode)
 {
-	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
 		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
-	u32 flags = 0;
-	int ret;
+	u32 *out, flags = 0;
 	int len;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -1589,45 +1585,45 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	if (dc_flush_wa)
 		len += 12;
 
-	ret = intel_ring_begin(request, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	if (vf_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = 0;
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = PIPE_CONTROL_CS_STALL;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(request, out);
 
 	return 0;
 }
@@ -1641,7 +1637,7 @@ static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
 {
 	*out++ = MI_NOOP;
 	*out++ = MI_NOOP;
-	request->wa_tail = intel_ring_offset(request->ring, out);
+	request->wa_tail = intel_ring_offset(request, out);
 }
 
 static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
@@ -1656,7 +1652,7 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
 	*out++ = request->global_seqno;
 	*out++ = MI_USER_INTERRUPT;
 	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	request->tail = intel_ring_offset(request, out);
 
 	gen8_emit_wa_tail(request, out);
 }
@@ -1684,7 +1680,7 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
 	*out++ = 0;
 	*out++ = MI_USER_INTERRUPT;
 	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	request->tail = intel_ring_offset(request, out);
 
 	gen8_emit_wa_tail(request, out);
 }
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 773e36253e7c..a7a28c496890 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
-	int ret;
+	u32 *out;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[index].control_value);
+		*out++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*out++ = table->table[index].control_value;
 	}
 
 	/*
@@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[0].control_value);
+		*out++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*out++ = table->table[0].control_value;
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	unsigned int i;
-	int ret;
+	u32 *out;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 2 * i, 2 * i + 1);
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 2 * i, 0);
 		i++;
 	}
 
@@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 0, 0);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 0608fad7f593..f612a02ddfbe 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	int ret;
+	u32 *out;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret) {
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
 	overlay->active = true;
@@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_I830(dev_priv))
 		i830_overlay_clock_gating(dev_priv, false);
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+	*out++ = overlay->flip_addr | OFC_UPDATE;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
-	u32 tmp;
-	int ret;
+	u32 tmp, *out;
 
 	WARN_ON(!overlay->active);
 
@@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 2);
-	if (ret) {
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*out++ = flip_addr;
+	intel_ring_advance(req, out);
 
 	intel_overlay_flip_prepare(overlay, vma);
 
@@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	u32 flip_addr = overlay->flip_addr;
-	int ret;
+	u32 *out, flip_addr = overlay->flip_addr;
 
 	WARN_ON(!overlay->active);
 
@@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret) {
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
-	ring = req->ring;
-
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*out++ = flip_addr;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
 	/* turn overlay off */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+	*out++ = flip_addr;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	intel_overlay_flip_prepare(overlay, NULL);
 
@@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
+	u32 *out;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ring *ring;
 
 		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-		ret = intel_ring_begin(req, 2);
-		if (ret) {
+		out = intel_ring_begin(req, 2);
+		if (IS_ERR(out)) {
 			i915_add_request_no_flush(req);
-			return ret;
+			return PTR_ERR(out);
 		}
 
-		ring = req->ring;
-		intel_ring_emit(ring,
-				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+		*out++ = MI_NOOP;
+		intel_ring_advance(req, out);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d3d1e64f2498..f36d03f7ff6d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,22 +61,20 @@ void intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
 	cmd = MI_FLUSH;
 
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -84,9 +82,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
 	/*
 	 * read/write caches:
@@ -123,13 +119,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 			cmd |= MI_INVALIDATE_ISP;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -174,35 +170,33 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* low dword */
-	intel_ring_emit(ring, 0); /* high dword */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(5);
+	*out++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0; /* low dword */
+	*out++ = 0; /* high dword */
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(5);
+	*out++ = PIPE_CONTROL_QW_WRITE;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -210,10 +204,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
+	u32 *out, flags = 0;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -247,15 +240,15 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = flags;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -263,20 +256,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring,
-			PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*out++ = 0;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -284,11 +274,9 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
-	int ret;
+	u32 *out, flags = 0;
 
 	/*
 	 * Ensure that any following seqno writes only happen when the render
@@ -332,15 +320,15 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -349,20 +337,19 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -659,8 +646,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *out;
 	int ret, i;
 
 	if (w->count == 0)
@@ -670,18 +657,18 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, (w->count * 2 + 2));
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, (w->count * 2 + 2));
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*out++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*out++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*out++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1370,7 +1357,7 @@ static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
 	*out++ = req->global_seqno;
 	*out++ = MI_USER_INTERRUPT;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, out);
 }
 
 static const int i9xx_emit_breadcrumb_sz = 4;
@@ -1410,7 +1397,7 @@ static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
 	*out++ = MI_USER_INTERRUPT;
 	*out++ = MI_NOOP;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, out);
 }
 
 static const int gen8_render_emit_breadcrumb_sz = 8;
@@ -1427,24 +1414,21 @@ static int
 gen8_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_WAIT |
-			MI_SEMAPHORE_GLOBAL_GTT |
-			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->global_seqno);
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_advance(ring);
+	*out++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+		 MI_SEMAPHORE_SAD_GTE_SDD;
+	*out++ = signal->global_seqno;
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	intel_ring_advance(req, out);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
@@ -1461,28 +1445,27 @@ static int
 gen6_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
 	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
-	int ret;
+	u32 *out;
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, dw1 | wait_mbox);
+	*out++ = dw1 | wait_mbox;
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->global_seqno - 1);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = signal->global_seqno - 1;
+	*out++ = 0;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1583,16 +1566,15 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 static int
 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_FLUSH;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 	return 0;
 }
 
@@ -1658,20 +1640,17 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 length,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			MI_BATCH_GTT |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE_I965);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1685,59 +1664,56 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
-	int ret;
+	u32 *out, cs_offset = i915_ggtt_offset(req->engine->scratch);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(ring, cs_offset);
-	intel_ring_emit(ring, 0xdeadbeef);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+	*out++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+	*out++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+	*out++ = cs_offset;
+	*out++ = 0xdeadbeef;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(req, 6 + 2);
-		if (ret)
-			return ret;
+		out = intel_ring_begin(req, 6 + 2);
+		if (IS_ERR(out))
+			return PTR_ERR(out);
 
 		/* Blit the batch (which has now all relocs applied) to the
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(ring,
-				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
-
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*out++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+		*out++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+		*out++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+		*out++ = cs_offset;
+		*out++ = 4096;
+		*out++ = offset;
+
+		*out++ = MI_FLUSH;
+		*out++ = MI_NOOP;
+		intel_ring_advance(req, out);
 
 		/* ... and execute it. */
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*out++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1747,17 +1723,16 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*out++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2171,7 +2146,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
-	int ret;
+	u32 *out;
 
 	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
 
@@ -2184,9 +2159,9 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!request->engine->buffer);
 	request->ring = request->engine->buffer;
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, 0);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
@@ -2241,7 +2216,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	return 0;
 }
 
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
 	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
@@ -2249,6 +2224,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
+	u32 *out;
 
 	total_bytes = bytes + req->reserved_space;
 
@@ -2275,7 +2251,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
-			return ret;
+			return ERR_PTR(ret);
 	}
 
 	if (unlikely(need_wrap)) {
@@ -2288,32 +2264,34 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		ring->space -= remain_actual;
 	}
 
+	GEM_BUG_ON(ring->tail > ring->size - bytes);
+	out = ring->vaddr + ring->tail;
+	ring->tail += bytes;
 	ring->space -= bytes;
 	GEM_BUG_ON(ring->space < 0);
-	GEM_BUG_ONLY(ring->advance = ring->tail + bytes);
-	return 0;
+
+	return out;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	int num_dwords =
-		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
-	int ret;
+		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	u32 *out;
 
 	if (num_dwords == 0)
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(req, num_dwords);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, num_dwords);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	while (num_dwords--)
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2357,13 +2335,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2385,16 +2361,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*out++ = 0; /* upper addr */
+		*out++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = 0;
+		*out++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 	return 0;
 }
 
@@ -2403,23 +2379,22 @@ gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2429,22 +2404,20 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
 		  u64 offset, u32 len,
 		  unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
+	*out++ = MI_BATCH_BUFFER_START |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2454,20 +2427,18 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
+	*out++ = MI_BATCH_BUFFER_START |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2476,13 +2447,11 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 
 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2503,17 +2472,16 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*out++ = 0; /* upper addr */
+		*out++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = 0;
+		*out++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 896838ca502c..77960db0dde5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -144,7 +144,6 @@ struct intel_ring {
 
 	u32 head;
 	u32 tail;
-	GEM_BUG_ONLY_DECLARE(u32 advance);
 
 	int space;
 	int size;
@@ -495,21 +494,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
-int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
-{
-	*(uint32_t *)(ring->vaddr + ring->tail) = data;
-	ring->tail += 4;
-}
-
-static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
-{
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
 
-static inline void intel_ring_advance(struct intel_ring *ring)
+static inline void
+intel_ring_advance(struct drm_i915_gem_request *req, u32 *out)
 {
 	/* Dummy function.
 	 *
@@ -519,14 +509,16 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 	 * reserved for the command packet (i.e. the value passed to
 	 * intel_ring_begin()).
 	 */
-	GEM_BUG_ONLY_ON(ring->tail != ring->advance);
+	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != out);
 }
 
-static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
+static inline
+u32 intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
 {
 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
-	u32 offset = addr - ring->vaddr;
-	return offset & (ring->size - 1);
+	u32 offset = addr - req->ring->vaddr;
+	GEM_BUG_ON(offset > req->ring->size);
+	return offset & (req->ring->size - 1);
 }
 
 int __intel_ring_space(int head, int tail, int size);
-- 
2.7.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 28+ messages in thread

* Re: [PATCH v6] drm/i915: Emit to ringbuffer directly
  2017-02-08 18:04       ` [PATCH v6] " Tvrtko Ursulin
@ 2017-02-08 18:21         ` Chris Wilson
  2017-02-09  9:02           ` [PATCH v7] " Tvrtko Ursulin
  2017-02-09  8:00         ` [PATCH v6] " Joonas Lahtinen
  1 sibling, 1 reply; 28+ messages in thread
From: Chris Wilson @ 2017-02-08 18:21 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: Intel-gfx

On Wed, Feb 08, 2017 at 06:04:20PM +0000, Tvrtko Ursulin wrote:
> @@ -868,10 +869,11 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
>  	 * GPU processing the request, we never over-estimate the
>  	 * position of the ring's HEAD.
>  	 */
> -	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
> -	GEM_BUG_ON(err);
> -	request->postfix = ring->tail;
> -	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
> +	out = intel_ring_begin(request, engine->emit_breadcrumb_sz);
> +	GEM_BUG_ON(IS_ERR(out));
> +	GEM_BUG_ON(engine->emit_breadcrumb_sz * sizeof(u32) > ring->tail);
> +	request->postfix = ring->tail -
> +			   engine->emit_breadcrumb_sz * sizeof(u32);

request->postfix = intel_ring_offset(req, out); ?
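
(An illustrative aside on why the helper suffices -- a self-contained
toy model, not the driver code; names and values are hypothetical:
after intel_ring_begin() has advanced the tail past the reservation,
the returned cursor still points at its start, which is exactly the
postfix the open-coded subtraction computes.)

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;

/* Toy intel_ring_offset(): byte offset of addr within the ring. */
static u32 toy_ring_offset(const char *vaddr, u32 size, const void *addr)
{
	return (u32)((const char *)addr - vaddr) & (size - 1);
}

int main(void)
{
	static char vaddr[4096];		/* ring backing store */
	u32 tail = 256, sz = 8;			/* hypothetical values */
	u32 *out = (u32 *)(vaddr + tail);	/* begin() return value */

	tail += sz * sizeof(u32);		/* begin() advances tail */

	assert(toy_ring_offset(vaddr, sizeof(vaddr), out) ==
	       tail - sz * sizeof(u32));	/* the open-coded postfix */
	return 0;
}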

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev7)
  2017-02-08 13:10 [PATCH] drm/i915: Emit to ringbuffer directly Tvrtko Ursulin
  2017-02-08 13:36 ` Chris Wilson
@ 2017-02-08 22:32 ` Patchwork
  2017-02-09 10:22 ` ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev8) Patchwork
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 28+ messages in thread
From: Patchwork @ 2017-02-08 22:32 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Emit to ringbuffer directly (rev7)
URL   : https://patchwork.freedesktop.org/series/12186/
State : failure

== Summary ==

  LD      net/ipv6/built-in.o
  LD [M]  sound/pci/hda/snd-hda-codec-generic.o
  LD      sound/pci/built-in.o
  LD      drivers/iommu/built-in.o
  LD      drivers/usb/storage/usb-storage.o
  LD      drivers/tty/serial/8250/8250.o
  LD      drivers/usb/storage/built-in.o
  LD      drivers/pci/pcie/aer/aerdriver.o
  LD      drivers/pci/pcie/aer/built-in.o
  LD      drivers/md/dm-mod.o
  LD      drivers/pci/pcie/built-in.o
  LD      drivers/video/fbdev/core/fb.o
  LD      sound/built-in.o
  LD      drivers/gpu/drm/drm.o
  LD      drivers/video/fbdev/core/built-in.o
  LD [M]  drivers/mmc/core/mmc_block.o
  LD      drivers/thermal/thermal_sys.o
  AR      lib/lib.a
  LD      drivers/mmc/built-in.o
  LD      drivers/thermal/built-in.o
  EXPORTS lib/lib-ksyms.o
  LD [M]  drivers/net/ethernet/broadcom/genet/genet.o
  LD      drivers/video/fbdev/built-in.o
  LD      lib/built-in.o
  LD [M]  drivers/usb/serial/usbserial.o
  LD      drivers/pci/built-in.o
  LD [M]  drivers/net/ethernet/intel/igbvf/igbvf.o
  LD      drivers/tty/serial/8250/8250_base.o
  LD      drivers/tty/serial/8250/built-in.o
  LD      drivers/spi/built-in.o
  LD      drivers/video/console/built-in.o
  LD      drivers/video/built-in.o
drivers/gpu/drm/i915/gvt/cmd_parser.c: In function ‘shadow_workload_ring_buffer’:
drivers/gpu/drm/i915/gvt/cmd_parser.c:2611:6: error: assignment makes integer from pointer without a cast [-Werror=int-conversion]
  ret = intel_ring_begin(workload->req, workload->rb_len / 4);
      ^
drivers/gpu/drm/i915/gvt/cmd_parser.c:2640:21: error: passing argument 1 of ‘intel_ring_advance’ from incompatible pointer type [-Werror=incompatible-pointer-types]
  intel_ring_advance(ring);
                     ^
In file included from drivers/gpu/drm/i915/intel_uc.h:29:0,
                 from drivers/gpu/drm/i915/i915_drv.h:60,
                 from drivers/gpu/drm/i915/gvt/cmd_parser.c:38:
drivers/gpu/drm/i915/intel_ringbuffer.h:502:1: note: expected ‘struct drm_i915_gem_request *’ but argument is of type ‘struct intel_ring *’
 intel_ring_advance(struct drm_i915_gem_request *req, u32 *out)
 ^
drivers/gpu/drm/i915/gvt/cmd_parser.c:2640:2: error: too few arguments to function ‘intel_ring_advance’
  intel_ring_advance(ring);
  ^
In file included from drivers/gpu/drm/i915/intel_uc.h:29:0,
                 from drivers/gpu/drm/i915/i915_drv.h:60,
                 from drivers/gpu/drm/i915/gvt/cmd_parser.c:38:
drivers/gpu/drm/i915/intel_ringbuffer.h:502:1: note: declared here
 intel_ring_advance(struct drm_i915_gem_request *req, u32 *out)
 ^
cc1: all warnings being treated as errors
scripts/Makefile.build:294: recipe for target 'drivers/gpu/drm/i915/gvt/cmd_parser.o' failed
make[4]: *** [drivers/gpu/drm/i915/gvt/cmd_parser.o] Error 1
make[4]: *** Waiting for unfinished jobs....
  LD      drivers/tty/serial/built-in.o
  LD      drivers/scsi/scsi_mod.o
  LD      drivers/usb/gadget/udc/udc-core.o
  LD      net/ipv4/built-in.o
  LD      drivers/usb/gadget/udc/built-in.o
  LD      drivers/usb/gadget/libcomposite.o
  LD      drivers/usb/gadget/built-in.o
  LD      drivers/scsi/sd_mod.o
  LD      drivers/scsi/built-in.o
  LD [M]  drivers/net/ethernet/intel/e1000/e1000.o
  LD      fs/btrfs/btrfs.o
  LD      fs/btrfs/built-in.o
  CC      arch/x86/kernel/cpu/capflags.o
  LD      arch/x86/kernel/cpu/built-in.o
  LD      arch/x86/kernel/built-in.o
  LD [M]  drivers/net/ethernet/intel/igb/igb.o
  LD      drivers/tty/vt/built-in.o
  LD      drivers/tty/built-in.o
  LD      arch/x86/built-in.o
  LD      drivers/usb/host/xhci-hcd.o
  LD      drivers/usb/core/usbcore.o
  LD      net/core/built-in.o
  LD      drivers/usb/core/built-in.o
  LD      net/built-in.o
  LD      fs/ext4/ext4.o
  LD      drivers/md/md-mod.o
  LD      drivers/md/built-in.o
  LD      fs/ext4/built-in.o
  LD      fs/built-in.o
  LD      drivers/usb/host/built-in.o
  LD      drivers/usb/built-in.o
  LD [M]  drivers/net/ethernet/intel/e1000e/e1000e.o
  LD      drivers/net/ethernet/built-in.o
  LD      drivers/net/built-in.o
scripts/Makefile.build:553: recipe for target 'drivers/gpu/drm/i915' failed
make[3]: *** [drivers/gpu/drm/i915] Error 2
scripts/Makefile.build:553: recipe for target 'drivers/gpu/drm' failed
make[2]: *** [drivers/gpu/drm] Error 2
scripts/Makefile.build:553: recipe for target 'drivers/gpu' failed
make[1]: *** [drivers/gpu] Error 2
Makefile:988: recipe for target 'drivers' failed
make: *** [drivers] Error 2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v6] drm/i915: Emit to ringbuffer directly
  2017-02-08 18:04       ` [PATCH v6] " Tvrtko Ursulin
  2017-02-08 18:21         ` Chris Wilson
@ 2017-02-09  8:00         ` Joonas Lahtinen
  2017-02-09  9:10           ` Chris Wilson
  1 sibling, 1 reply; 28+ messages in thread
From: Joonas Lahtinen @ 2017-02-09  8:00 UTC (permalink / raw)
  To: Tvrtko Ursulin, Intel-gfx

On ke, 2017-02-08 at 18:04 +0000, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> This removes the usage of intel_ring_emit in favour of
> directly writing to the ring buffer.
> 
> intel_ring_emit was preventing the compiler from optimising
> the fetch and increment of the current ring buffer pointer and
> therefore generating very verbose code for every write.
> 
> It had no useful purpose since all ringbuffer operations
> are started and ended with intel_ring_begin and
> intel_ring_advance respectively, with no bail-out possible
> in the middle, so it is fine to increment the tail in
> intel_ring_begin and let the code manage the pointer
> itself.
> 
> Useless instruction removal amounts to approximately
> two and a half kilobytes of saved text on my build.
> 
> Not sure if this has any measurable performance
> implications, but executing a ton of useless instructions
> on fast paths cannot be good.
> 
> Patch is not fully polished, but it compiles and runs
> on Gen9 at least.
> 
> v2:
>  * Change return from intel_ring_begin to error pointer by
>    popular demand.
>  * Move tail increment to intel_ring_advance to enable some
>    error checking.
> 
> v3:
>  * Move tail advance back into intel_ring_begin.
>  * Rebase and tidy.
> 
> v4:
>  * Complete rebase after a few months since v3.
> 
> v5:
>  * Remove unnecessary cast and fix !debug compile. (Chris Wilson)
> 
> v6:
>  * Make intel_ring_offset take request as well.
>  * Fix recording of request postfix plus a sprinkle of asserts.
>    (Chris Wilson)
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>

<SNIP>

> @@ -617,99 +616,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>  	if (INTEL_GEN(dev_priv) >= 7)
>  		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
>  
> -	ret = intel_ring_begin(req, len);
> -	if (ret)
> -		return ret;
> +	out = intel_ring_begin(req, len);
> +	if (IS_ERR(out))
> +		return PTR_ERR(out);
>  
>  	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
>  	if (INTEL_GEN(dev_priv) >= 7) {
> -		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
> +		*out++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

I expressed my concern in the previous iteration of this series months
ago, and here it goes again: let's try to keep the writes easily greppable.

So intel_ring_emit (or a better name) could remain as a wrapper:

#define (something something)_emit(x, y) *(x)++ = (y)

Or we could make the name distinctive; "*ring++" would work for that.
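
(For illustration only -- a minimal sketch of such a wrapper; the name
and the assumption that the cursor stays a plain local are mine, not
part of the patch:)

#define ring_emit(out, dword) (*(out)++ = (dword))

/*
 * Call sites stay greppable while the compiler can still keep `out`
 * in a register across consecutive writes, e.g.:
 *
 *	ring_emit(out, MI_NOOP);
 *	ring_emit(out, MI_SET_CONTEXT);
 */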

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH v7] drm/i915: Emit to ringbuffer directly
  2017-02-08 18:21         ` Chris Wilson
@ 2017-02-09  9:02           ` Tvrtko Ursulin
  2017-02-09 13:35             ` Chris Wilson
  2017-02-13 12:53             ` [PATCH v8] " Tvrtko Ursulin
  0 siblings, 2 replies; 28+ messages in thread
From: Tvrtko Ursulin @ 2017-02-09  9:02 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

This removes the usage of intel_ring_emit in favour of
directly writing to the ring buffer.

intel_ring_emit was preventing the compiler from optimising
the fetch and increment of the current ring buffer pointer and
therefore generating very verbose code for every write.

It had no useful purpose since all ringbuffer operations
are started and ended with intel_ring_begin and
intel_ring_advance respectively, with no bail-out possible
in the middle, so it is fine to increment the tail in
intel_ring_begin and let the code manage the pointer
itself.

Useless instruction removal amounts to approximately
two and a half kilobytes of saved text on my build.

Not sure if this has any measurable performance
implications, but executing a ton of useless instructions
on fast paths cannot be good.

Patch is not fully polished, but it compiles and runs
on Gen9 at least.

v2:
 * Change return from intel_ring_begin to error pointer by
   popular demand.
 * Move tail increment to intel_ring_advance to enable some
   error checking.

v3:
 * Move tail advance back into intel_ring_begin.
 * Rebase and tidy.

v4:
 * Complete rebase after a few months since v3.

v5:
 * Remove unnecessary cast and fix !debug compile. (Chris Wilson)

v6:
 * Make intel_ring_offset take request as well.
 * Fix recording of request postfix plus a sprinkle of asserts.
   (Chris Wilson)

v7:
 * Use intel_ring_offset to get the postfix. (Chris Wilson)
 * Convert GVT code as well.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
---

Zhi, could you please check that the way I handled the GVT
code makes sense.

I changed copy_gma_to_hva to return an error or the number of bytes
copied, since that fits more naturally with the new flavour of
intel_ring_begin and intel_ring_advance.
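
(A minimal sketch of the return convention assumed here -- negative
errno on failure, bytes copied on success; the function name is
illustrative and not the GVT code:)

#include <errno.h>
#include <string.h>

static int copy_len_or_err(void *dst, const void *src, size_t len)
{
	if (!dst || !src)
		return -EINVAL;

	memcpy(dst, src, len);
	return (int)len;	/* caller advances its cursor by this */
}

/*
 * Usage mirrors the shadow_workload_ring_buffer() pattern:
 *
 *	ret = copy_len_or_err(out, guest, bytes);
 *	if (ret < 0)
 *		return ret;
 *	out += ret / sizeof(u32);
 */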

I was only able to compile-test it.
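
(And, for the rationale in the commit message, a toy model of the
difference -- self-contained and purely illustrative, not the driver
code: the old helper forces a load and store of the tail around every
write, while a local cursor lets consecutive writes compile down to
plain stores:)

#include <stdint.h>

typedef uint32_t u32;

struct toy_ring {
	u32 *vaddr;
	u32 tail;	/* in bytes */
};

/* Old style: every emit reloads and writes back ring->tail. */
static inline void toy_emit(struct toy_ring *ring, u32 data)
{
	*(u32 *)((char *)ring->vaddr + ring->tail) = data;
	ring->tail += 4;
}

/* New style: reserve once, advance the tail once, then write through
 * a local cursor the compiler can keep in a register.
 */
static inline u32 *toy_begin(struct toy_ring *ring, int num_dwords)
{
	u32 *out = (u32 *)((char *)ring->vaddr + ring->tail);

	ring->tail += num_dwords * sizeof(u32);
	return out;
}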

Thanks,

Tvrtko
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c      |  36 +--
 drivers/gpu/drm/i915/i915_gem_context.c    |  76 ++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  40 ++-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  69 ++--
 drivers/gpu/drm/i915/i915_gem_request.c    |   8 +-
 drivers/gpu/drm/i915/intel_display.c       | 132 ++++----
 drivers/gpu/drm/i915/intel_lrc.c           | 162 +++++-----
 drivers/gpu/drm/i915/intel_mocs.c          |  51 ++-
 drivers/gpu/drm/i915/intel_overlay.c       |  79 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 492 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  26 +-
 11 files changed, 540 insertions(+), 631 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ee1db7447ce..44a66aa8a4c3 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1512,7 +1512,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 		len += copy_len;
 		gma += copy_len;
 	}
-	return 0;
+	return len;
 }
 
 
@@ -1626,7 +1626,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
 			      gma, gma + bb_size,
 			      dst);
-	if (ret) {
+	if (ret < 0) {
 		gvt_err("fail to copy guest ring buffer\n");
 		goto unmap_src;
 	}
@@ -2590,11 +2590,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
-	struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
-	unsigned int copy_len = 0;
+	u32 *out;
 	int ret;
 
 	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2608,36 +2605,33 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_top = workload->rb_start + guest_rb_size;
 
 	/* allocate shadow ring buffer */
-	ret = intel_ring_begin(workload->req, workload->rb_len / 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* get shadow ring buffer va */
-	workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+	workload->shadow_ring_buffer_va = out;
 
 	/* head > tail --> copy head <-> top */
 	if (gma_head > gma_tail) {
 		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
-				gma_head, gma_top,
-				workload->shadow_ring_buffer_va);
-		if (ret) {
+				      gma_head, gma_top, out);
+		if (ret < 0) {
 			gvt_err("fail to copy guest ring buffer\n");
 			return ret;
 		}
-		copy_len = gma_top - gma_head;
+		out += ret / sizeof(u32);
 		gma_head = workload->rb_start;
 	}
 
 	/* copy head or start <-> tail */
-	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
-			gma_head, gma_tail,
-			workload->shadow_ring_buffer_va + copy_len);
-	if (ret) {
+	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, out);
+	if (ret < 0) {
 		gvt_err("fail to copy guest ring buffer\n");
 		return ret;
 	}
-	ring->tail += workload->rb_len;
-	intel_ring_advance(ring);
+	out += ret / sizeof(u32);
+	intel_ring_advance(workload->req, out);
 	return 0;
 }
 
@@ -2691,7 +2685,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 				wa_ctx->workload->vgpu->gtt.ggtt_mm,
 				guest_gma, guest_gma + ctx_size,
 				map);
-	if (ret) {
+	if (ret < 0) {
 		gvt_err("fail to copy guest indirect ctx\n");
 		goto unmap_src;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 680105421bb9..49070b7aad30 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -584,10 +584,9 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	enum intel_engine_id id;
-	u32 flags = hw_flags | MI_MM_SPACE_GTT;
+	u32 *out, flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915.semaphores ?
@@ -617,99 +616,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		*out++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*out++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring,
-						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring,
-						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*out++ = i915_mmio_reg_offset(
+					   RING_PSMI_CTL(signaller->mmio_base));
+				*out++ = _MASKED_BIT_ENABLE(
+					 GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 		}
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring,
-			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
+	*out++ = MI_NOOP;
+	*out++ = MI_SET_CONTEXT;
+	*out++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*out++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(ring, last_reg);
-				intel_ring_emit(ring,
-						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*out++ = i915_mmio_reg_offset(last_reg);
+				*out++ = _MASKED_BIT_DISABLE(
+					 GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(ring,
-					MI_STORE_REGISTER_MEM |
-					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring,
-					i915_ggtt_offset(engine->scratch));
-			intel_ring_emit(ring, MI_NOOP);
+			*out++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+			*out++ = i915_mmio_reg_offset(last_reg);
+			*out++ = i915_ggtt_offset(engine->scratch);
+			*out++ = MI_NOOP;
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		*out++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return ret;
 }
 
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
-	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ring *ring = req->ring;
-	int i, ret;
+	u32 *out, *remap_info = req->i915->l3_parity.remap_info[slice];
+	int i;
 
 	if (!remap_info)
 		return 0;
 
-	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/*
 	 * Note: We do not worry about the concurrent register cacheline hang
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		*out++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+		*out++ = remap_info[i];
 	}
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8b60a14af9e2..8ff0f186f558 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1336,25 +1336,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret, i;
+	u32 *out;
+	int i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(req, 4 * 3);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4 * 3);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
+		*out++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1415,7 +1415,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
-	u32 instp_mask;
+	u32 instp_mask, *out;
 	int ret;
 
 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1461,17 +1461,15 @@ execbuf_submit(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ring *ring = params->request->ring;
-
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		out = intel_ring_begin(params->request, 4);
+		if (IS_ERR(out))
+			return PTR_ERR(out);
+
+		*out++ = MI_NOOP;
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(INSTPM);
+		*out++ = instp_mask << 16 | instp_mode;
+		intel_ring_advance(params->request, out);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 22b3374096e8..a62a233ebc58 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -680,23 +680,22 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
-	int ret;
+	u32 *out;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(ring, upper_32_bits(addr));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(ring, lower_32_bits(addr));
-	intel_ring_advance(ring);
+	*out++ = MI_LOAD_REGISTER_IMM(1);
+	*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
+	*out++ = upper_32_bits(addr);
+	*out++ = MI_LOAD_REGISTER_IMM(1);
+	*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
+	*out++ = lower_32_bits(addr);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1723,8 +1722,8 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *out;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1732,17 +1731,17 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_LOAD_REGISTER_IMM(2);
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*out++ = PP_DIR_DCLV_2G;
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*out++ = get_pd_offset(ppgtt);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1750,8 +1749,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *out;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1759,17 +1758,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
+
+	*out++ = MI_LOAD_REGISTER_IMM(2);
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*out++ = PP_DIR_DCLV_2G;
+	*out++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*out++ = get_pd_offset(ppgtt);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 6cf3b3e7c0fd..844c83265cab 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -830,6 +830,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	struct intel_ring *ring = request->ring;
 	struct intel_timeline *timeline = request->timeline;
 	struct drm_i915_gem_request *prev;
+	u32 *out;
 	int err;
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -868,10 +869,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * GPU processing the request, we never over-estimate the
 	 * position of the ring's HEAD.
 	 */
-	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
-	GEM_BUG_ON(err);
-	request->postfix = ring->tail;
-	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+	out = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+	GEM_BUG_ON(IS_ERR(out));
+	request->postfix = intel_ring_offset(request, out);
 
 	/* Seal the request and mark it as pending execution. Note that
 	 * we may inspect this state, without holding any locks, during
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d0d042495dc7..285a356e9e42 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11762,14 +11762,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Can't queue multiple flips, so wait for the previous
 	 * one to finish before executing the next.
@@ -11778,13 +11776,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	*out++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*out++ = MI_NOOP;
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = 0; /* aux display base address, unused */
 
 	return 0;
 }
@@ -11796,26 +11793,24 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	if (intel_crtc->plane)
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*out++ = MI_NOOP;
+	*out++ = MI_DISPLAY_FLIP_I915 |
+		 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = MI_NOOP;
 
 	return 0;
 }
@@ -11827,25 +11822,22 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* i965+ uses the linear or tiled offsets from the
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
-			intel_fb_modifier_to_tiling(fb->modifier));
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0];
+	*out++ = intel_crtc->flip_work->gtt_offset |
+		 intel_fb_modifier_to_tiling(fb->modifier);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -11853,7 +11845,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*out++ = pf | pipesrc;
 
 	return 0;
 }
@@ -11865,21 +11857,17 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	*out++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*out++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*out++ = intel_crtc->flip_work->gtt_offset;
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11889,7 +11877,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*out++ = pf | pipesrc;
 
 	return 0;
 }
@@ -11902,9 +11890,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t plane_bit = 0;
+	u32 *out, plane_bit = 0;
 	int len, ret;
 
 	switch (intel_crtc->plane) {
@@ -11948,9 +11935,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Unmask the flip-done completion message. Note that the bspec says that
 	 * we should do this for both the BCS and RCS, and that we must not unmask
@@ -11962,31 +11949,28 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
 	if (req->engine->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					  DERRMR_PIPEB_PRI_FLIP_DONE |
-					  DERRMR_PIPEC_PRI_FLIP_DONE));
+		*out++ = MI_LOAD_REGISTER_IMM(1);
+		*out++ = i915_mmio_reg_offset(DERRMR);
+		*out++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+			 DERRMR_PIPEB_PRI_FLIP_DONE |
+			 DERRMR_PIPEC_PRI_FLIP_DONE);
 		if (IS_GEN8(dev_priv))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
-					      MI_SRM_LRM_GLOBAL_GTT);
+			*out++ = MI_STORE_REGISTER_MEM_GEN8 |
+				 MI_SRM_LRM_GLOBAL_GTT;
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
-					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring,
-				i915_ggtt_offset(req->engine->scratch) + 256);
+			*out++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+		*out++ = i915_mmio_reg_offset(DERRMR);
+		*out++ = i915_ggtt_offset(req->engine->scratch) + 256;
 		if (IS_GEN8(dev_priv)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			*out++ = 0;
+			*out++ = MI_NOOP;
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	*out++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+	*out++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*out++ = intel_crtc->flip_work->gtt_offset;
+	*out++ = MI_NOOP;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e071bcdc2833..1d863e8a55e0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -850,6 +850,7 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
+	u32 *out;
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -874,9 +875,11 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 			goto err;
 	}
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
+	out = intel_ring_begin(request, 0);
+	if (IS_ERR(out)) {
+		ret = PTR_ERR(out);
 		goto err_unreserve;
+	}
 
 	if (!ce->initialised) {
 		ret = engine->init_context(request);
@@ -905,9 +908,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	int ret, i;
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *out;
+	int ret, i;
 
 	if (w->count == 0)
 		return 0;
@@ -916,18 +919,18 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, w->count * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, w->count * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*out++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*out++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*out++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1420,27 +1423,27 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
-	int i, ret;
+	u32 *out;
+	int i;
 
-	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	*out++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
-		intel_ring_emit(ring, upper_32_bits(pd_daddr));
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
-		intel_ring_emit(ring, lower_32_bits(pd_daddr));
+		*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+		*out++ = upper_32_bits(pd_daddr);
+		*out++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+		*out++ = lower_32_bits(pd_daddr);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1449,8 +1452,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+	u32 *out;
 	int ret;
 
 	/* Don't rely in hw updating PDPs, specially in lite-restore.
@@ -1471,19 +1474,18 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
-			(ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1504,13 +1506,11 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
-	struct intel_ring *ring = request->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(request, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW + 1;
 
@@ -1527,13 +1527,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR |
-			MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0); /* upper addr */
-	intel_ring_emit(ring, 0); /* value */
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+	*out++ = 0; /* upper addr */
+	*out++ = 0; /* value */
+	intel_ring_advance(request, out);
 
 	return 0;
 }
@@ -1541,13 +1539,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 mode)
 {
-	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
 		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
-	u32 flags = 0;
-	int ret;
+	u32 *out, flags = 0;
 	int len;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -1589,45 +1585,45 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	if (dc_flush_wa)
 		len += 12;
 
-	ret = intel_ring_begin(request, len);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, len);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	if (vf_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = 0;
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*out++ = GFX_OP_PIPE_CONTROL(6);
+		*out++ = PIPE_CONTROL_CS_STALL;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
+		*out++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(request, out);
 
 	return 0;
 }
@@ -1641,7 +1637,7 @@ static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
 {
 	*out++ = MI_NOOP;
 	*out++ = MI_NOOP;
-	request->wa_tail = intel_ring_offset(request->ring, out);
+	request->wa_tail = intel_ring_offset(request, out);
 }
 
 static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
@@ -1656,7 +1652,7 @@ static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
 	*out++ = request->global_seqno;
 	*out++ = MI_USER_INTERRUPT;
 	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	request->tail = intel_ring_offset(request, out);
 
 	gen8_emit_wa_tail(request, out);
 }
@@ -1684,7 +1680,7 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
 	*out++ = 0;
 	*out++ = MI_USER_INTERRUPT;
 	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	request->tail = intel_ring_offset(request, out);
 
 	gen8_emit_wa_tail(request, out);
 }
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 773e36253e7c..a7a28c496890 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
-	int ret;
+	u32 *out;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[index].control_value);
+		*out++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*out++ = table->table[index].control_value;
 	}
 
 	/*
@@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[0].control_value);
+		*out++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*out++ = table->table[0].control_value;
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	unsigned int i;
-	int ret;
+	u32 *out;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+	*out++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 2 * i, 2 * i + 1);
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 2 * i, 0);
 		i++;
 	}
 
@@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
+		*out++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*out++ = l3cc_combine(table, 0, 0);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 0608fad7f593..f612a02ddfbe 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	int ret;
+	u32 *out;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret) {
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
 	overlay->active = true;
@@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_I830(dev_priv))
 		i830_overlay_clock_gating(dev_priv, false);
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+	*out++ = overlay->flip_addr | OFC_UPDATE;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
-	u32 tmp;
-	int ret;
+	u32 tmp, *out;
 
 	WARN_ON(!overlay->active);
 
@@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 2);
-	if (ret) {
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*out++ = flip_addr;
+	intel_ring_advance(req, out);
 
 	intel_overlay_flip_prepare(overlay, vma);
 
@@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	u32 flip_addr = overlay->flip_addr;
-	int ret;
+	u32 *out, flip_addr = overlay->flip_addr;
 
 	WARN_ON(!overlay->active);
 
@@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret) {
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(out);
 	}
 
-	ring = req->ring;
-
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*out++ = flip_addr;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
 	/* turn overlay off */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*out++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+	*out++ = flip_addr;
+	*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	intel_overlay_flip_prepare(overlay, NULL);
 
@@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
+	u32 *out;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ring *ring;
 
 		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-		ret = intel_ring_begin(req, 2);
-		if (ret) {
+		out = intel_ring_begin(req, 2);
+		if (IS_ERR(out)) {
 			i915_add_request_no_flush(req);
-			return ret;
+			return PTR_ERR(out);
 		}
 
-		ring = req->ring;
-		intel_ring_emit(ring,
-				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*out++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+		*out++ = MI_NOOP;
+		intel_ring_advance(req, out);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d3d1e64f2498..f36d03f7ff6d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,22 +61,20 @@ void intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
 	cmd = MI_FLUSH;
 
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -84,9 +82,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *out;
 
 	/*
 	 * read/write caches:
@@ -123,13 +119,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 			cmd |= MI_INVALIDATE_ISP;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = cmd;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -174,35 +170,33 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* low dword */
-	intel_ring_emit(ring, 0); /* high dword */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(5);
+	*out++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0; /* low dword */
+	*out++ = 0; /* high dword */
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(5);
+	*out++ = PIPE_CONTROL_QW_WRITE;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -210,10 +204,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
+	u32 *out, flags = 0;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -247,15 +240,15 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = flags;
+	*out++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -263,20 +256,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring,
-			PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*out++ = 0;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -284,11 +274,9 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
-	int ret;
+	u32 *out, flags = 0;
 
 	/*
 	 * Ensure that any following seqno writes only happen when the render
@@ -332,15 +320,15 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(4);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -349,20 +337,19 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*out++ = GFX_OP_PIPE_CONTROL(6);
+	*out++ = flags;
+	*out++ = scratch_addr;
+	*out++ = 0;
+	*out++ = 0;
+	*out++ = 0;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -659,8 +646,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *out;
 	int ret, i;
 
 	if (w->count == 0)
@@ -670,18 +657,18 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, (w->count * 2 + 2));
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, (w->count * 2 + 2));
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*out++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*out++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*out++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1370,7 +1357,7 @@ static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
 	*out++ = req->global_seqno;
 	*out++ = MI_USER_INTERRUPT;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, out);
 }
 
 static const int i9xx_emit_breadcrumb_sz = 4;
@@ -1410,7 +1397,7 @@ static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
 	*out++ = MI_USER_INTERRUPT;
 	*out++ = MI_NOOP;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, out);
 }
 
 static const int gen8_render_emit_breadcrumb_sz = 8;
@@ -1427,24 +1414,21 @@ static int
 gen8_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_WAIT |
-			MI_SEMAPHORE_GLOBAL_GTT |
-			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->global_seqno);
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_advance(ring);
+	*out++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+		 MI_SEMAPHORE_SAD_GTE_SDD;
+	*out++ = signal->global_seqno;
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	intel_ring_advance(req, out);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
@@ -1461,28 +1445,27 @@ static int
 gen6_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
 	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
-	int ret;
+	u32 *out;
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, dw1 | wait_mbox);
+	*out++ = dw1 | wait_mbox;
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->global_seqno - 1);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = signal->global_seqno - 1;
+	*out++ = 0;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1583,16 +1566,15 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 static int
 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_FLUSH;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 	return 0;
 }
 
@@ -1658,20 +1640,17 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 length,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			MI_BATCH_GTT |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE_I965);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1685,59 +1664,56 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
-	int ret;
+	u32 *out, cs_offset = i915_ggtt_offset(req->engine->scratch);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 6);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(ring, cs_offset);
-	intel_ring_emit(ring, 0xdeadbeef);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+	*out++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+	*out++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+	*out++ = cs_offset;
+	*out++ = 0xdeadbeef;
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(req, 6 + 2);
-		if (ret)
-			return ret;
+		out = intel_ring_begin(req, 6 + 2);
+		if (IS_ERR(out))
+			return PTR_ERR(out);
 
 		/* Blit the batch (which has now all relocs applied) to the
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(ring,
-				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
-
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*out++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+		*out++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+		*out++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+		*out++ = cs_offset;
+		*out++ = 4096;
+		*out++ = offset;
+
+		*out++ = MI_FLUSH;
+		*out++ = MI_NOOP;
+		intel_ring_advance(req, out);
 
 		/* ... and execute it. */
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*out++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -1747,17 +1723,16 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*out++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2171,7 +2146,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
-	int ret;
+	u32 *out;
 
 	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
 
@@ -2184,9 +2159,9 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!request->engine->buffer);
 	request->ring = request->engine->buffer;
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(request, 0);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
@@ -2241,7 +2216,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	return 0;
 }
 
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
 	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
@@ -2249,6 +2224,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
+	u32 *out;
 
 	total_bytes = bytes + req->reserved_space;
 
@@ -2275,7 +2251,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
-			return ret;
+			return ERR_PTR(ret);
 	}
 
 	if (unlikely(need_wrap)) {
@@ -2288,32 +2264,34 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		ring->space -= remain_actual;
 	}
 
+	GEM_BUG_ON(ring->tail > ring->size - bytes);
+	out = ring->vaddr + ring->tail;
+	ring->tail += bytes;
 	ring->space -= bytes;
 	GEM_BUG_ON(ring->space < 0);
-	GEM_BUG_ONLY(ring->advance = ring->tail + bytes);
-	return 0;
+
+	return out;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	int num_dwords =
-		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
-	int ret;
+		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	u32 *out;
 
 	if (num_dwords == 0)
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(req, num_dwords);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, num_dwords);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	while (num_dwords--)
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2357,13 +2335,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2385,16 +2361,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*out++ = 0; /* upper addr */
+		*out++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = 0;
+		*out++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 	return 0;
 }
 
@@ -2403,23 +2379,22 @@ gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*out++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
+	*out++ = lower_32_bits(offset);
+	*out++ = upper_32_bits(offset);
+	*out++ = MI_NOOP;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2429,22 +2404,20 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
 		  u64 offset, u32 len,
 		  unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
+	*out++ = MI_BATCH_BUFFER_START |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+		 (dispatch_flags & I915_DISPATCH_RS ?
+		 MI_BATCH_RESOURCE_STREAMER : 0);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2454,20 +2427,18 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *out;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 2);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
+	*out++ = MI_BATCH_BUFFER_START |
+		 (dispatch_flags & I915_DISPATCH_SECURE ?
+		 0 : MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*out++ = offset;
+	intel_ring_advance(req, out);
 
 	return 0;
 }
@@ -2476,13 +2447,11 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 
 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *out;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	out = intel_ring_begin(req, 4);
+	if (IS_ERR(out))
+		return PTR_ERR(out);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2503,17 +2472,16 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*out++ = cmd;
+	*out++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*out++ = 0; /* upper addr */
+		*out++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*out++ = 0;
+		*out++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, out);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 896838ca502c..77960db0dde5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -144,7 +144,6 @@ struct intel_ring {
 
 	u32 head;
 	u32 tail;
-	GEM_BUG_ONLY_DECLARE(u32 advance);
 
 	int space;
 	int size;
@@ -495,21 +494,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
-int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
-{
-	*(uint32_t *)(ring->vaddr + ring->tail) = data;
-	ring->tail += 4;
-}
-
-static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
-{
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
 
-static inline void intel_ring_advance(struct intel_ring *ring)
+static inline void
+intel_ring_advance(struct drm_i915_gem_request *req, u32 *out)
 {
 	/* Dummy function.
 	 *
@@ -519,14 +509,16 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 	 * reserved for the command packet (i.e. the value passed to
 	 * intel_ring_begin()).
 	 */
-	GEM_BUG_ONLY_ON(ring->tail != ring->advance);
+	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != out);
 }
 
-static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
+static inline
+u32 intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
 {
 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
-	u32 offset = addr - ring->vaddr;
-	return offset & (ring->size - 1);
+	u32 offset = addr - req->ring->vaddr;
+	GEM_BUG_ON(offset > req->ring->size);
+	return offset & (req->ring->size - 1);
 }
 
 int __intel_ring_space(int head, int tail, int size);
-- 
2.7.4
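
A minimal before/after sketch of the emission pattern this patch introduces,
distilled from the bsd_ring_flush() hunk above (illustrative only, not an
extra hunk):

	/* Before: each dword goes through intel_ring_emit(), which
	 * reloads and stores ring->tail around every single write.
	 */
	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* After: intel_ring_begin() bumps ring->tail up front and
	 * returns a pointer into the ring, so every write becomes a
	 * plain store through a local pointer the compiler can keep
	 * in a register; intel_ring_advance() is reduced to a
	 * debug-only check that exactly the reserved dwords were
	 * written.
	 */
	out = intel_ring_begin(req, 2);
	if (IS_ERR(out))
		return PTR_ERR(out);
	*out++ = MI_FLUSH;
	*out++ = MI_NOOP;
	intel_ring_advance(req, out);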



* Re: [PATCH v6] drm/i915: Emit to ringbuffer directly
  2017-02-09  8:00         ` [PATCH v6] " Joonas Lahtinen
@ 2017-02-09  9:10           ` Chris Wilson
  2017-02-09 10:37             ` Mika Kuoppala
  0 siblings, 1 reply; 28+ messages in thread
From: Chris Wilson @ 2017-02-09  9:10 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: Intel-gfx

On Thu, Feb 09, 2017 at 10:00:35AM +0200, Joonas Lahtinen wrote:
> On ke, 2017-02-08 at 18:04 +0000, Tvrtko Ursulin wrote:
> > From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > 
> > This removes the usage of intel_ring_emit in favour of
> > directly writing to the ring buffer.
> > 
> > intel_ring_emit was preventing the compiler from optimising
> > the fetch and increment of the current ring buffer pointer and
> > therefore generating very verbose code for every write.
> > 
> > It had no useful purpose since all ringbuffer operations
> > are started and ended with intel_ring_begin and
> > intel_ring_advance respectively, with no bail-out possible in
> > the middle, so it is fine to increment the tail in
> > intel_ring_begin and let the code manage the pointer
> > itself.
> > 
> > Useless instruction removal amounts to approximately
> > two and a half kilobytes of saved text on my build.
> > 
> > Not sure if this has any measurable performance
> > implications, but executing a ton of useless instructions
> > on fast paths cannot be good.
> > 
> > Patch is not fully polished, but it compiles and runs
> > on Gen9 at least.
> > 
> > v2:
> >  * Change return from intel_ring_begin to error pointer by
> >    popular demand.
> >  * Move tail increment to intel_ring_advance to enable some
> >    error checking.
> > 
> > v3:
> >  * Move tail advance back into intel_ring_begin.
> >  * Rebase and tidy.
> > 
> > v4:
> >  * Complete rebase after a few months since v3.
> > 
> > v5:
> >  * Remove unnecessary cast and fix !debug compile. (Chris Wilson)
> > 
> > v6:
> >  * Make intel_ring_offset take request as well.
> >  * Fix recording of request postfix plus a sprinkle of asserts.
> >    (Chris Wilson)
> > 
> > Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > Cc: Chris Wilson <chris@chris-wilson.co.uk>
> 
> <SNIP>
> 
> > @@ -617,99 +616,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
> >  	if (INTEL_GEN(dev_priv) >= 7)
> >  		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
> >  
> > -	ret = intel_ring_begin(req, len);
> > -	if (ret)
> > -		return ret;
> > +	out = intel_ring_begin(req, len);
> > +	if (IS_ERR(out))
> > +		return PTR_ERR(out);
> >  
> >  	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
> >  	if (INTEL_GEN(dev_priv) >= 7) {
> > -		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
> > +		*out++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
> 
> I expressed my concern in the previous iteration of this series months
> ago, and here goes again: let's try to keep the writes easily greppable.
> 
> So intel_ring_emit (or a better name) could remain as a wrapper:
> 
> #define (something something)_emit(x, y) *(x)++ = (y)

My concern with intel_ring_emit() remaining is that we are no longer
operating on the ring. The pointer to use for emitting is retrieved from
the request, so I think pointer = i915_gem_request_emit(rq, num_dwords)
is what we want in the near future.

I suppose if that was

	ring = i915_gem_request_emit(rq, num_dwords);
	intel_ring_emit(ring, blah);
	intel_ring_advance(rq, ring); /* this still needs polish */

It'll just about do, the problem being that intel_ring_foo() is not
operating on a struct intel_ring. :|

s/intel_ring_emit/ring_emit/ ?
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
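
To make the wrapper being discussed concrete, a minimal sketch (ring_emit()
is a placeholder name floated in this thread, not an existing function;
intel_ring_begin()/intel_ring_advance() are the forms added by the patch):

	/* Hypothetical greppable wrapper that still compiles down to a
	 * plain post-incremented store, per the suggestion above.
	 */
	#define ring_emit(p, dword) (*(p)++ = (dword))

	out = intel_ring_begin(req, 2);
	if (IS_ERR(out))
		return PTR_ERR(out);
	ring_emit(out, MI_FLUSH);
	ring_emit(out, MI_NOOP);
	intel_ring_advance(req, out);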


* ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev8)
  2017-02-08 13:10 [PATCH] drm/i915: Emit to ringbuffer directly Tvrtko Ursulin
  2017-02-08 13:36 ` Chris Wilson
  2017-02-08 22:32 ` ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev7) Patchwork
@ 2017-02-09 10:22 ` Patchwork
  2017-02-09 21:52 ` Patchwork
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 28+ messages in thread
From: Patchwork @ 2017-02-09 10:22 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Emit to ringbuffer directly (rev8)
URL   : https://patchwork.freedesktop.org/series/12186/
State : failure

== Summary ==

Series 12186v8 drm/i915: Emit to ringbuffer directly
https://patchwork.freedesktop.org/api/1.0/series/12186/revisions/8/mbox/

Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-a:
                pass       -> INCOMPLETE (fi-snb-2600)

fi-bdw-5557u     total:252  pass:238  dwarn:0   dfail:0   fail:0   skip:14 
fi-bsw-n3050     total:252  pass:213  dwarn:0   dfail:0   fail:0   skip:39 
fi-bxt-j4205     total:252  pass:230  dwarn:0   dfail:0   fail:0   skip:22 
fi-bxt-t5700     total:83   pass:70   dwarn:0   dfail:0   fail:0   skip:12 
fi-byt-j1900     total:252  pass:225  dwarn:0   dfail:0   fail:0   skip:27 
fi-byt-n2820     total:252  pass:221  dwarn:0   dfail:0   fail:0   skip:31 
fi-hsw-4770      total:252  pass:233  dwarn:0   dfail:0   fail:0   skip:19 
fi-hsw-4770r     total:252  pass:233  dwarn:0   dfail:0   fail:0   skip:19 
fi-ilk-650       total:252  pass:199  dwarn:0   dfail:0   fail:0   skip:53 
fi-ivb-3520m     total:252  pass:231  dwarn:0   dfail:0   fail:0   skip:21 
fi-ivb-3770      total:252  pass:231  dwarn:0   dfail:0   fail:0   skip:21 
fi-kbl-7500u     total:252  pass:229  dwarn:0   dfail:0   fail:2   skip:21 
fi-skl-6260u     total:252  pass:239  dwarn:0   dfail:0   fail:0   skip:13 
fi-skl-6700hq    total:252  pass:232  dwarn:0   dfail:0   fail:0   skip:20 
fi-skl-6700k     total:252  pass:227  dwarn:4   dfail:0   fail:0   skip:21 
fi-skl-6770hq    total:252  pass:239  dwarn:0   dfail:0   fail:0   skip:13 
fi-snb-2520m     total:252  pass:221  dwarn:0   dfail:0   fail:0   skip:31 
fi-snb-2600      total:209  pass:182  dwarn:0   dfail:0   fail:0   skip:26 

d55644c47e7068dcebf82c60332ea6d268897758 drm-tip: 2017y-02m-09d-08h-22m-32s UTC integration manifest
b26b969 drm/i915: Emit to ringbuffer directly

== Logs ==

For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_3750/


* Re: [PATCH v6] drm/i915: Emit to ringbuffer directly
  2017-02-09  9:10           ` Chris Wilson
@ 2017-02-09 10:37             ` Mika Kuoppala
  2017-02-09 11:49               ` Tvrtko Ursulin
  0 siblings, 1 reply; 28+ messages in thread
From: Mika Kuoppala @ 2017-02-09 10:37 UTC (permalink / raw)
  To: Chris Wilson, Joonas Lahtinen; +Cc: Intel-gfx

Chris Wilson <chris@chris-wilson.co.uk> writes:

> On Thu, Feb 09, 2017 at 10:00:35AM +0200, Joonas Lahtinen wrote:
>> On ke, 2017-02-08 at 18:04 +0000, Tvrtko Ursulin wrote:
>> > From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>> > 
>> > This removes the usage of intel_ring_emit in favour of
>> > directly writing to the ring buffer.
>> > 
>> > intel_ring_emit was preventing the compiler from optimising
>> > fetch and increment of the current ring buffer pointer and
>> > therefore generating very verbose code for every write.
>> > 
>> > It had no useful purpose since all ringbuffer operations
>> > are started and ended with intel_ring_begin and
>> > intel_ring_advance respectively, with no bail out in the
>> > middle possible, so it is fine to increment the tail in
>> > intel_ring_begin and let the code manage the pointer
>> > itself.
>> > 
>> > Useless instruction removal amounts to approximately
>> > two and a half kilobytes of saved text on my build.
>> > 
>> > Not sure if this has any measurable performance
>> > implications but executing a ton of useless instructions
>> > on fast paths cannot be good.
>> > 
>> > Patch is not fully polished, but it compiles and runs
>> > on Gen9 at least.
>> > 
>> > v2:
>> >  * Change return from intel_ring_begin to error pointer by
>> >    popular demand.
>> >  * Move tail increment to intel_ring_advance to enable some
>> >    error checking.
>> > 
>> > v3:
>> >  * Move tail advance back into intel_ring_begin.
>> >  * Rebase and tidy.
>> > 
>> > v4:
>> >  * Complete rebase after a few months since v3.
>> > 
>> > v5:
>> >  * Remove unnecessary cast and fix !debug compile. (Chris Wilson)
>> > 
>> > v6:
>> >  * Make intel_ring_offset take request as well.
>> >  * Fix recording of request postfix plus a sprinkle of asserts.
>> >    (Chris Wilson)
>> > 
>> > Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>> > Cc: Chris Wilson <chris@chris-wilson.co.uk>
>> 
>> <SNIP>
>> 
>> > @@ -617,99 +616,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>> >  	if (INTEL_GEN(dev_priv) >= 7)
>> >  		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
>> >  
>> > -	ret = intel_ring_begin(req, len);
>> > -	if (ret)
>> > -		return ret;
>> > +	out = intel_ring_begin(req, len);
>> > +	if (IS_ERR(out))
>> > +		return PTR_ERR(out);
>> >  
>> >  	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
>> >  	if (INTEL_GEN(dev_priv) >= 7) {
>> > -		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
>> > +		*out++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
>> 
>> I expressed my concern in the previous iteration of this series months
>> ago, and here goes again; Let's try to keep the writes easily greppable.
>> 
>> So intel_ring_emit (or better name) could remain as a wrapper
>> 
>> #define (something something)_emit(x, y) *(x)++ = (y)
>
> My concern with intel_ring_emit() remaining is that we are no longer
> operating on the ring. The pointer to use for emitting is retrieved from
> the request, so I think pointer = i915_gem_request_emit(rq, num_dwords)
> is what we want in the near future.
>
> I suppose if that was
>
> 	ring = i915_gem_request_emit(rq, num_dwords);
> 	intel_ring_emit(ring, blah)
> 	intel_ring_advance(rq, ring); /* this still needs polish */
>

Going through request feels right. For ring_emit
we could use shorter:

cs_emit and cs_advance.

They are rings but for users at this level the distinction
feels unimportant.

Just my few bikesheds.

-Mika

> It'll just about do, problem being that intel_ring_foo() is not
> operating on a struct intel_ring. :|
>
> s/intel_ring_emit/ring_emit/ ?
> -Chris
>
> -- 
> Chris Wilson, Intel Open Source Technology Centre
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v6] drm/i915: Emit to ringbuffer directly
  2017-02-09 10:37             ` Mika Kuoppala
@ 2017-02-09 11:49               ` Tvrtko Ursulin
  2017-02-09 13:29                 ` Chris Wilson
  0 siblings, 1 reply; 28+ messages in thread
From: Tvrtko Ursulin @ 2017-02-09 11:49 UTC (permalink / raw)
  To: Mika Kuoppala, Chris Wilson, Joonas Lahtinen; +Cc: Intel-gfx


On 09/02/2017 10:37, Mika Kuoppala wrote:
> Chris Wilson <chris@chris-wilson.co.uk> writes:
>
>> On Thu, Feb 09, 2017 at 10:00:35AM +0200, Joonas Lahtinen wrote:
>>> On ke, 2017-02-08 at 18:04 +0000, Tvrtko Ursulin wrote:
>>>> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>>>
>>>> This removes the usage of intel_ring_emit in favour of
>>>> directly writing to the ring buffer.
>>>>
>>>> intel_ring_emit was preventing the compiler from optimising
>>>> fetch and increment of the current ring buffer pointer and
>>>> therefore generating very verbose code for every write.
>>>>
>>>> It had no useful purpose since all ringbuffer operations
>>>> are started and ended with intel_ring_begin and
>>>> intel_ring_advance respectively, with no bail out in the
>>>> middle possible, so it is fine to increment the tail in
>>>> intel_ring_begin and let the code manage the pointer
>>>> itself.
>>>>
>>>> Useless instruction removal amounts to approximately
>>>> two and a half kilobytes of saved text on my build.
>>>>
>>>> Not sure if this has any measurable performance
>>>> implications but executing a ton of useless instructions
>>>> on fast paths cannot be good.
>>>>
>>>> Patch is not fully polished, but it compiles and runs
>>>> on Gen9 at least.
>>>>
>>>> v2:
>>>>  * Change return from intel_ring_begin to error pointer by
>>>>    popular demand.
>>>>  * Move tail increment to intel_ring_advance to enable some
>>>>    error checking.
>>>>
>>>> v3:
>>>>  * Move tail advance back into intel_ring_begin.
>>>>  * Rebase and tidy.
>>>>
>>>> v4:
>>>>  * Complete rebase after a few months since v3.
>>>>
>>>> v5:
>>>>  * Remove unnecessary cast and fix !debug compile. (Chris Wilson)
>>>>
>>>> v6:
>>>>  * Make intel_ring_offset take request as well.
>>>>  * Fix recording of request postfix plus a sprinkle of asserts.
>>>>    (Chris Wilson)
>>>>
>>>> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>>> Cc: Chris Wilson <chris@chris-wilson.co.uk>
>>>
>>> <SNIP>
>>>
>>>> @@ -617,99 +616,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
>>>>  	if (INTEL_GEN(dev_priv) >= 7)
>>>>  		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
>>>>
>>>> -	ret = intel_ring_begin(req, len);
>>>> -	if (ret)
>>>> -		return ret;
>>>> +	out = intel_ring_begin(req, len);
>>>> +	if (IS_ERR(out))
>>>> +		return PTR_ERR(out);
>>>>
>>>>  	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
>>>>  	if (INTEL_GEN(dev_priv) >= 7) {
>>>> -		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
>>>> +		*out++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
>>>
>>> I expressed my concern in the previous iteration of this series months
>>> ago, and here goes again; Let's try to keep the writes easily greppable.
>>>
>>> So intel_ring_emit (or better name) could remain as a wrapper
>>>
>>> #define (something something)_emit(x, y) *(x)++ = (y)
>>
>> My concern with intel_ring_emit() remaining is that we are no longer
>> operating on the ring. The pointer to use for emitting is retrieved from
>> the request, so I think pointer = i915_gem_request_emit(rq, num_dwords)
>> is what we want in the near future.
>>
>> I suppose if that was
>>
>> 	ring = i915_gem_request_emit(rq, num_dwords);
>> 	intel_ring_emit(ring, blah)
>> 	intel_ring_advance(rq, ring); /* this still needs polish */
>>
>
> Going through request feels right. For ring_emit
> we could use shorter:
>
> cs_emit and cs_advance.
>
> They are rings but for users at this level the distinction
> feels unimportant.
>
> Just my few bikesheds.

If the main concern is "grepability" then I am not sure that there is a 
problem with "*out++", be it out, batch, or something (just not ring!). 
There are no other such statements in the code base at the moment. So it 
would be completely unique for ring emission.

On top of that, intel_ring_begin is also a current grep marker when 
looking for ring emission. The only exception is, I think, breadcrumb 
emission.

I can see one argument for the helper being future proof: if one day 
we decide to allow wrapping or something, there would be no need to change 
it all back. It would mean having it as "out = intel_ring_emit(rq, out, 
dw)" though, so still a churny patch.

If the last thing is something we want to consider then OK, otherwise I 
am ambivalent about whether we need a helper.

I am also not sure a macro which increments the argument is that great.

1.
*out++ = MI_USER_INTERRUPT;
*out++ = MI_NOOP;

I don't have a problem with this one.

2.
ring_emit(out, MI_USER_INTERRUPT);
ring_emit(out, MI_NOOP);

Don't like the automagical increment.

3.
ring_emit(out++, MI_USER_INTERRUPT);
ring_emit(out++, MI_NOOP);

Better, but for me it doesn't beat 1 by a huge margin. Would it be a bit 
of an unsafe macro, since we could put the argument in parentheses?

#define ring_emit(out, dw) *((out) - 1) = dw ?

Wonder what would happen with this one. :)

4.
out = ring_emit(out, MI_USER_INTERRUPT);
out = ring_emit(out, MI_NOOP);

Alternative to 3., but not sure if GCC would be able to completely 
optimize this. It should? (See the sketch after the list.)

5.
out = ring_emit(rq, out, MI_USER_INTERRUPT);
out = ring_emit(rq, out, MI_NOOP);

Future proof version?
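
For 3., note that ring_emit(out++, dw) would expand to
*((out++) - 1) = dw, and since out++ yields the old pointer the store
would land one dword before the intended slot; only the pre-increment
form, ring_emit(++out, dw), would hit the right one. Too fragile.

To make 4. (and by extension 5.) concrete, a minimal sketch --
illustrative only, not the actual i915 API:

#include <linux/types.h>	/* u32 */

static inline u32 *ring_emit(u32 *cs, u32 dw)
{
	*cs++ = dw;		/* one store, one increment */
	return cs;
}

Once inlined, GCC should collapse "out = ring_emit(out, MI_NOOP);"
into the same store-and-increment as "*out++ = MI_NOOP;", so 4. ought
to cost nothing; 5. would be the same helper with the request threaded
through for a possible future wrap check.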

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v6] drm/i915: Emit to ringbuffer directly
  2017-02-09 11:49               ` Tvrtko Ursulin
@ 2017-02-09 13:29                 ` Chris Wilson
  0 siblings, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2017-02-09 13:29 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: Intel-gfx

On Thu, Feb 09, 2017 at 11:49:28AM +0000, Tvrtko Ursulin wrote:
> 
> On 09/02/2017 10:37, Mika Kuoppala wrote:
> >Chris Wilson <chris@chris-wilson.co.uk> writes:
> >
> >>On Thu, Feb 09, 2017 at 10:00:35AM +0200, Joonas Lahtinen wrote:
> >>>I expressed my concern in the previous iteration of this series months
> >>>ago, and here goes again; Let's try to keep the writes easily greppable.
> >>>
> >>>So intel_ring_emit (or better name) could remain as a wrapper
> >>>
> >>>#define (something something)_emit(x, y) *(x)++ = (y)
> >>
> >>My concern with intel_ring_emit() remaining is that we are no longer
> >>operating on the ring. The pointer to use for emitting is retrieved from
> >>the request, so I think pointer = i915_gem_request_emit(rq, num_dwords)
> >>is what we want in the near future.
> >>
> >>I suppose if that was
> >>
> >>	ring = i915_gem_request_emit(rq, num_dwords);
> >>	intel_ring_emit(ring, blah)
> >>	intel_ring_advance(rq, ring); /* this still needs polish */
> >>
> >
> >Going through request feels right. For ring_emit
> >we could use shorter:
> >
> >cs_emit and cs_advance.
> >
> >They are rings but for users at this level the distinction
> >feels unimportant.
> >
> >Just my few bikesheds.
> 
> If the main concern is "grepability" then I am not sure that there
> is a problem with "*out++", be it out, batch, or something (just not
> ring!). There are no other such statements in the code base at the
> moment. So it would be completely unique for ring emission.
> 
> On top of that, intel_ring_begin is also a current grep marker when
> looking for ring emission. The only exception is, I think, breadcrumb
> emission.
> 
> I can see one argument for the helper being future proof: if one
> day we decide to allow wrapping or something, there would be no need to
> change it all back. It would mean having it as "out =
> intel_ring_emit(rq, out, dw)" though, so still a churny patch.
> 
> If the last thing is something we want to consider then OK,
> otherwise I am ambivalent about whether we need a helper.
> 
> I am also not sure a macro which increments the argument is that great.
> 
> 1.
> *out++ = MI_USER_INTERRUPT;
> *out++ = MI_NOOP;

I am quite happy with just a normal pointer assignment, and share the
concern over the simplest macro (2).

out = ring_emit(out, MI_USER_INTERRUPT);

if pushed, but the magic should never be in ring_emit itself but in the
preparation of the CS mapping for the request. I think ring_emit() is a
distraction.
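
For reference, the shape the direct-write scheme takes in the patch
(this is the execbuffer hunk from the series, lightly abbreviated with
req standing in for params->request):

	cs = intel_ring_begin(req, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_NOOP;
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(INSTPM);
	*cs++ = instp_mask << 16 | instp_mode;
	intel_ring_advance(req, cs);

All the preparation lives in intel_ring_begin(), which reserves the
dwords and hands back the pointer (or an ERR_PTR), while
intel_ring_advance() essentially just checks the final pointer against
the reservation.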
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* Re: [PATCH v7] drm/i915: Emit to ringbuffer directly
  2017-02-09  9:02           ` [PATCH v7] " Tvrtko Ursulin
@ 2017-02-09 13:35             ` Chris Wilson
  2017-02-13 12:53             ` [PATCH v8] " Tvrtko Ursulin
  1 sibling, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2017-02-09 13:35 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: Intel-gfx

On Thu, Feb 09, 2017 at 09:02:06AM +0000, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> This removes the usage of intel_ring_emit in favour of
> directly writing to the ring buffer.
> 
> intel_ring_emit was preventing the compiler from optimising
> fetch and increment of the current ring buffer pointer and
> therefore generating very verbose code for every write.
> 
> It had no useful purpose since all ringbuffer operations
> are started and ended with intel_ring_begin and
> intel_ring_advance respectively, with no bail out in the
> middle possible, so it is fine to increment the tail in
> intel_ring_begin and let the code manage the pointer
> itself.
> 
> Useless instruction removal amounts to approximately
> two and a half kilobytes of saved text on my build.
> 
> Not sure if this has any measurable performance
> implications but executing a ton of useless instructions
> on fast paths cannot be good.
> 
> Patch is not fully polished, but it compiles and runs
> on Gen9 at least.
> 
> v2:
>  * Change return from intel_ring_begin to error pointer by
>    popular demand.
>  * Move tail increment to intel_ring_advance to enable some
>    error checking.
> 
> v3:
>  * Move tail advance back into intel_ring_begin.
>  * Rebase and tidy.
> 
> v4:
>  * Complete rebase after a few months since v3.
> 
> v5:
>  * Remove unnecessary cast and fix !debug compile. (Chris Wilson)
> 
> v6:
>  * Make intel_ring_offset take request as well.
>  * Fix recording of request postfix plus a sprinkle of asserts.
>    (Chris Wilson)
> 
> v7:
>  * Use intel_ring_offset to get the postfix. (Chris Wilson)
>  * Convert GVT code as well.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Zhi Wang <zhi.a.wang@intel.com>
> ---
> 
> Zhi, could you please check that the way I handled the GVT
> code makes sense.
> 
> I changed copy_gma_to_hva to return an error or the number of bytes
> copied since that makes it easier for the new flavour of
> intel_ring_begin and intel_ring_advance to work.
> 
> I was only able to compile test it.
> 
> Thanks,
> 
> Tvrtko
> ---
> @@ -2608,36 +2605,33 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
>  	gma_top = workload->rb_start + guest_rb_size;
>  
>  	/* allocate shadow ring buffer */
> -	ret = intel_ring_begin(workload->req, workload->rb_len / 4);
> -	if (ret)
> -		return ret;
> +	out = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
> +	if (IS_ERR(out))
> +		return PTR_ERR(out);
>  
>  	/* get shadow ring buffer va */
> -	workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
> +	workload->shadow_ring_buffer_va = out;
>  
>  	/* head > tail --> copy head <-> top */
>  	if (gma_head > gma_tail) {
>  		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
> -				gma_head, gma_top,
> -				workload->shadow_ring_buffer_va);
> -		if (ret) {
> +				      gma_head, gma_top, out);
> +		if (ret < 0) {
>  			gvt_err("fail to copy guest ring buffer\n");
>  			return ret;
>  		}
> -		copy_len = gma_top - gma_head;
> +		out += ret / sizeof(u32);
>  		gma_head = workload->rb_start;
>  	}
>  
>  	/* copy head or start <-> tail */
> -	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
> -			gma_head, gma_tail,
> -			workload->shadow_ring_buffer_va + copy_len);
> -	if (ret) {
> +	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, out);

I would have done
	out = copy_gma_to_hva(... out);
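
i.e. have copy_gma_to_hva() return the advanced destination pointer
(or an ERR_PTR) rather than a byte count; hypothetically the caller
would then read (a sketch of the suggestion, not what v7 does):

	out = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_top, out);
	if (IS_ERR(out))
		return PTR_ERR(out);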

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev8)
  2017-02-08 13:10 [PATCH] drm/i915: Emit to ringbuffer directly Tvrtko Ursulin
                   ` (2 preceding siblings ...)
  2017-02-09 10:22 ` ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev8) Patchwork
@ 2017-02-09 21:52 ` Patchwork
  2017-02-10  8:22 ` ✗ Fi.CI.BAT: warning " Patchwork
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 28+ messages in thread
From: Patchwork @ 2017-02-09 21:52 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Emit to ringbuffer directly (rev8)
URL   : https://patchwork.freedesktop.org/series/12186/
State : failure

== Summary ==

Series 12186v8 drm/i915: Emit to ringbuffer directly
https://patchwork.freedesktop.org/api/1.0/series/12186/revisions/8/mbox/

Test kms_flip:
        Subgroup basic-flip-vs-wf_vblank:
                pass       -> FAIL       (fi-skl-6770hq)

fi-bdw-5557u     total:252  pass:238  dwarn:0   dfail:0   fail:0   skip:14 
fi-bsw-n3050     total:252  pass:213  dwarn:0   dfail:0   fail:0   skip:39 
fi-bxt-j4205     total:252  pass:230  dwarn:0   dfail:0   fail:0   skip:22 
fi-bxt-t5700     total:83   pass:70   dwarn:0   dfail:0   fail:0   skip:12 
fi-byt-j1900     total:252  pass:225  dwarn:0   dfail:0   fail:0   skip:27 
fi-byt-n2820     total:252  pass:221  dwarn:0   dfail:0   fail:0   skip:31 
fi-hsw-4770      total:252  pass:233  dwarn:0   dfail:0   fail:0   skip:19 
fi-hsw-4770r     total:252  pass:233  dwarn:0   dfail:0   fail:0   skip:19 
fi-ilk-650       total:252  pass:199  dwarn:0   dfail:0   fail:0   skip:53 
fi-ivb-3520m     total:252  pass:231  dwarn:0   dfail:0   fail:0   skip:21 
fi-ivb-3770      total:252  pass:231  dwarn:0   dfail:0   fail:0   skip:21 
fi-kbl-7500u     total:252  pass:229  dwarn:0   dfail:0   fail:2   skip:21 
fi-skl-6260u     total:252  pass:239  dwarn:0   dfail:0   fail:0   skip:13 
fi-skl-6700hq    total:252  pass:232  dwarn:0   dfail:0   fail:0   skip:20 
fi-skl-6700k     total:252  pass:227  dwarn:4   dfail:0   fail:0   skip:21 
fi-skl-6770hq    total:252  pass:238  dwarn:0   dfail:0   fail:1   skip:13 
fi-snb-2520m     total:252  pass:221  dwarn:0   dfail:0   fail:0   skip:31 
fi-snb-2600      total:252  pass:220  dwarn:0   dfail:0   fail:0   skip:32 

8ca0c2e06c85344b8fee2bd5c48d6f3571fc870a drm-tip: 2017y-02m-09d-13h-35m-52s UTC integration manifest
69ff6e6 drm/i915: Emit to ringbuffer directly

== Logs ==

For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_3761/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* ✗ Fi.CI.BAT: warning for drm/i915: Emit to ringbuffer directly (rev8)
  2017-02-08 13:10 [PATCH] drm/i915: Emit to ringbuffer directly Tvrtko Ursulin
                   ` (3 preceding siblings ...)
  2017-02-09 21:52 ` Patchwork
@ 2017-02-10  8:22 ` Patchwork
  2017-02-13 13:02 ` ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev9) Patchwork
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 28+ messages in thread
From: Patchwork @ 2017-02-10  8:22 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Emit to ringbuffer directly (rev8)
URL   : https://patchwork.freedesktop.org/series/12186/
State : warning

== Summary ==

Series 12186v8 drm/i915: Emit to ringbuffer directly
https://patchwork.freedesktop.org/api/1.0/series/12186/revisions/8/mbox/

Test gem_busy:
        Subgroup basic-hang-default:
                pass       -> DMESG-WARN (fi-hsw-4770r)

fi-bdw-5557u     total:252  pass:241  dwarn:0   dfail:0   fail:0   skip:11 
fi-bsw-n3050     total:252  pass:213  dwarn:0   dfail:0   fail:0   skip:39 
fi-bxt-j4205     total:252  pass:233  dwarn:0   dfail:0   fail:0   skip:19 
fi-bxt-t5700     total:83   pass:70   dwarn:0   dfail:0   fail:0   skip:12 
fi-byt-j1900     total:252  pass:225  dwarn:0   dfail:0   fail:0   skip:27 
fi-byt-n2820     total:252  pass:221  dwarn:0   dfail:0   fail:0   skip:31 
fi-hsw-4770      total:252  pass:236  dwarn:0   dfail:0   fail:0   skip:16 
fi-hsw-4770r     total:252  pass:235  dwarn:1   dfail:0   fail:0   skip:16 
fi-ilk-650       total:252  pass:202  dwarn:0   dfail:0   fail:0   skip:50 
fi-ivb-3520m     total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18 
fi-ivb-3770      total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18 
fi-kbl-7500u     total:252  pass:232  dwarn:0   dfail:0   fail:2   skip:18 
fi-skl-6260u     total:252  pass:242  dwarn:0   dfail:0   fail:0   skip:10 
fi-skl-6700hq    total:252  pass:235  dwarn:0   dfail:0   fail:0   skip:17 
fi-skl-6700k     total:252  pass:230  dwarn:4   dfail:0   fail:0   skip:18 
fi-skl-6770hq    total:252  pass:242  dwarn:0   dfail:0   fail:0   skip:10 
fi-snb-2520m     total:252  pass:224  dwarn:0   dfail:0   fail:0   skip:28 
fi-snb-2600      total:252  pass:223  dwarn:0   dfail:0   fail:0   skip:29 

489a7596c9ab4d38c388ab583b571fd7874ead48 drm-tip: 2017y-02m-09d-22h-55m-24s UTC integration manifest
29c1451 drm/i915: Emit to ringbuffer directly

== Logs ==

For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_3763/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 28+ messages in thread

* [PATCH v8] drm/i915: Emit to ringbuffer directly
  2017-02-09  9:02           ` [PATCH v7] " Tvrtko Ursulin
  2017-02-09 13:35             ` Chris Wilson
@ 2017-02-13 12:53             ` Tvrtko Ursulin
  2017-02-13 13:06               ` [PATCH v9] " Tvrtko Ursulin
  1 sibling, 1 reply; 28+ messages in thread
From: Tvrtko Ursulin @ 2017-02-13 12:53 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

This removes the usage of intel_ring_emit in favour of
directly writing to the ring buffer.

intel_ring_emit was preventing the compiler from optimising
fetch and increment of the current ring buffer pointer and
therefore generating very verbose code for every write.

It had no useful purpose since all ringbuffer operations
are started and ended with intel_ring_begin and
intel_ring_advance respectively, with no bail out in the
middle possible, so it is fine to increment the tail in
intel_ring_begin and let the code manage the pointer
itself.

Useless instruction removal amounts to approximately
two and a half kilobytes of saved text on my build.

Not sure if this has any measurable performance
implications but executing a ton of useless instructions
on fast paths cannot be good.

Patch is not fully polished, but it compiles and runs
on Gen9 at least.

v2:
 * Change return from intel_ring_begin to error pointer by
   popular demand.
 * Move tail increment to intel_ring_advance to enable some
   error checking.

v3:
 * Move tail advance back into intel_ring_begin.
 * Rebase and tidy.

v4:
 * Complete rebase after a few months since v3.

v5:
 * Remove unnecessary cast and fix !debug compile. (Chris Wilson)

v6:
 * Make intel_ring_offset take request as well.
 * Fix recording of request postfix plus a sprinkle of asserts.
   (Chris Wilson)

v7:
 * Use intel_ring_offset to get the postfix. (Chris Wilson)
 * Convert GVT code as well.

v8:
 * Rename *out++ to *cs++.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v7)
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c      |  36 +-
 drivers/gpu/drm/i915/i915_gem_context.c    |  76 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  40 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  69 ++--
 drivers/gpu/drm/i915/i915_gem_request.c    |   8 +-
 drivers/gpu/drm/i915/intel_display.c       | 131 +++----
 drivers/gpu/drm/i915/intel_lrc.c           | 207 +++++-----
 drivers/gpu/drm/i915/intel_mocs.c          |  51 ++-
 drivers/gpu/drm/i915/intel_overlay.c       |  79 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 592 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  30 +-
 11 files changed, 608 insertions(+), 711 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ee1db7447ce..eefb52baf548 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1512,7 +1512,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 		len += copy_len;
 		gma += copy_len;
 	}
-	return 0;
+	return len;
 }
 
 
@@ -1626,7 +1626,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
 			      gma, gma + bb_size,
 			      dst);
-	if (ret) {
+	if (ret < 0) {
 		gvt_err("fail to copy guest ring buffer\n");
 		goto unmap_src;
 	}
@@ -2590,11 +2590,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
-	struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
-	unsigned int copy_len = 0;
+	u32 *cs;
 	int ret;
 
 	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2608,36 +2605,33 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_top = workload->rb_start + guest_rb_size;
 
 	/* allocate shadow ring buffer */
-	ret = intel_ring_begin(workload->req, workload->rb_len / 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* get shadow ring buffer va */
-	workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+	workload->shadow_ring_buffer_va = cs;
 
 	/* head > tail --> copy head <-> top */
 	if (gma_head > gma_tail) {
 		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
-				gma_head, gma_top,
-				workload->shadow_ring_buffer_va);
-		if (ret) {
+				      gma_head, gma_top, cs);
+		if (ret < 0) {
 			gvt_err("fail to copy guest ring buffer\n");
 			return ret;
 		}
-		copy_len = gma_top - gma_head;
+		cs += ret / sizeof(u32);
 		gma_head = workload->rb_start;
 	}
 
 	/* copy head or start <-> tail */
-	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
-			gma_head, gma_tail,
-			workload->shadow_ring_buffer_va + copy_len);
-	if (ret) {
+	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+	if (ret < 0) {
 		gvt_err("fail to copy guest ring buffer\n");
 		return ret;
 	}
-	ring->tail += workload->rb_len;
-	intel_ring_advance(ring);
+	cs += ret / sizeof(u32);
+	intel_ring_advance(workload->req, cs);
 	return 0;
 }
 
@@ -2691,7 +2685,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 				wa_ctx->workload->vgpu->gtt.ggtt_mm,
 				guest_gma, guest_gma + ctx_size,
 				map);
-	if (ret) {
+	if (ret < 0) {
 		gvt_err("fail to copy guest indirect ctx\n");
 		goto unmap_src;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index fb11b674f319..0745a1c24f9c 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -596,10 +596,9 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	enum intel_engine_id id;
-	u32 flags = hw_flags | MI_MM_SPACE_GTT;
+	u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915.semaphores ?
@@ -629,99 +628,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring,
-						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring,
-						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*cs++ = i915_mmio_reg_offset(
+					   RING_PSMI_CTL(signaller->mmio_base));
+				*cs++ = _MASKED_BIT_ENABLE(
+						GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 		}
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring,
-			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
+	*cs++ = MI_NOOP;
+	*cs++ = MI_SET_CONTEXT;
+	*cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_NOOP;
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(ring, last_reg);
-				intel_ring_emit(ring,
-						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*cs++ = i915_mmio_reg_offset(last_reg);
+				*cs++ = _MASKED_BIT_DISABLE(
+						GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(ring,
-					MI_STORE_REGISTER_MEM |
-					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring,
-					i915_ggtt_offset(engine->scratch));
-			intel_ring_emit(ring, MI_NOOP);
+			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+			*cs++ = i915_mmio_reg_offset(last_reg);
+			*cs++ = i915_ggtt_offset(engine->scratch);
+			*cs++ = MI_NOOP;
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return ret;
 }
 
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
-	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ring *ring = req->ring;
-	int i, ret;
+	u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
+	int i;
 
 	if (!remap_info)
 		return 0;
 
-	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/*
 	 * Note: We do not worry about the concurrent register cacheline hang
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+		*cs++ = remap_info[i];
 	}
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 111b7f8c617a..da0846fe2ad6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1336,25 +1336,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret, i;
+	u32 *cs;
+	int i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(req, 4 * 3);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4 * 3);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
+		*cs++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1415,7 +1415,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
-	u32 instp_mask;
+	u32 instp_mask, *cs;
 	int ret;
 
 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1461,17 +1461,15 @@ execbuf_submit(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ring *ring = params->request->ring;
-
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		cs = intel_ring_begin(params->request, 4);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		*cs++ = MI_NOOP;
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = i915_mmio_reg_offset(INSTPM);
+		*cs++ = instp_mask << 16 | instp_mode;
+		intel_ring_advance(params->request, cs);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d580e2b4081b..841bcdfbf16e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -680,23 +680,22 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
-	int ret;
+	u32 *cs;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(ring, upper_32_bits(addr));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(ring, lower_32_bits(addr));
-	intel_ring_advance(ring);
+	*cs++ = MI_LOAD_REGISTER_IMM(1);
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
+	*cs++ = upper_32_bits(addr);
+	*cs++ = MI_LOAD_REGISTER_IMM(1);
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
+	*cs++ = lower_32_bits(addr);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1724,8 +1723,8 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *cs;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1733,17 +1732,17 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_LOAD_REGISTER_IMM(2);
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*cs++ = PP_DIR_DCLV_2G;
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*cs++ = get_pd_offset(ppgtt);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1751,8 +1750,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *cs;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1760,17 +1759,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(2);
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*cs++ = PP_DIR_DCLV_2G;
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*cs++ = get_pd_offset(ppgtt);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index f31deeb72703..3478f90a6ee0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -824,6 +824,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	struct intel_ring *ring = request->ring;
 	struct intel_timeline *timeline = request->timeline;
 	struct drm_i915_gem_request *prev;
+	u32 *cs;
 	int err;
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -862,10 +863,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * GPU processing the request, we never over-estimate the
 	 * position of the ring's HEAD.
 	 */
-	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
-	GEM_BUG_ON(err);
-	request->postfix = ring->tail;
-	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+	GEM_BUG_ON(IS_ERR(cs));
+	request->postfix = intel_ring_offset(request, cs);
 
 	/* Seal the request and mark it as pending execution. Note that
 	 * we may inspect this state, without holding any locks, during
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 88b7d96c46a4..9d14dc384292 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10158,14 +10158,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *cs;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* Can't queue multiple flips, so wait for the previous
 	 * one to finish before executing the next.
@@ -10174,13 +10172,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*cs++ = MI_NOOP;
+	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0];
+	*cs++ = intel_crtc->flip_work->gtt_offset;
+	*cs++ = 0; /* aux display base address, unused */
 
 	return 0;
 }
@@ -10192,26 +10189,23 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *cs;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	if (intel_crtc->plane)
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*cs++ = MI_NOOP;
+	*cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0];
+	*cs++ = intel_crtc->flip_work->gtt_offset;
+	*cs++ = MI_NOOP;
 
 	return 0;
 }
@@ -10223,25 +10217,22 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* i965+ uses the linear or tiled offsets from the
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
-			intel_fb_modifier_to_tiling(fb->modifier));
+	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0];
+	*cs++ = intel_crtc->flip_work->gtt_offset |
+		intel_fb_modifier_to_tiling(fb->modifier);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -10249,7 +10240,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*cs++ = pf | pipesrc;
 
 	return 0;
 }
@@ -10261,21 +10252,17 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*cs++ = intel_crtc->flip_work->gtt_offset;
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -10285,7 +10272,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*cs++ = pf | pipesrc;
 
 	return 0;
 }
@@ -10298,9 +10285,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t plane_bit = 0;
+	u32 *cs, plane_bit = 0;
 	int len, ret;
 
 	switch (intel_crtc->plane) {
@@ -10344,9 +10330,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* Unmask the flip-done completion message. Note that the bspec says that
 	 * we should do this for both the BCS and RCS, and that we must not unmask
@@ -10358,31 +10344,28 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
 	if (req->engine->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					  DERRMR_PIPEB_PRI_FLIP_DONE |
-					  DERRMR_PIPEC_PRI_FLIP_DONE));
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = i915_mmio_reg_offset(DERRMR);
+		*cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+			  DERRMR_PIPEB_PRI_FLIP_DONE |
+			  DERRMR_PIPEC_PRI_FLIP_DONE);
 		if (IS_GEN8(dev_priv))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
-					      MI_SRM_LRM_GLOBAL_GTT);
+			*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+				MI_SRM_LRM_GLOBAL_GTT;
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
-					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring,
-				i915_ggtt_offset(req->engine->scratch) + 256);
+			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+		*cs++ = i915_mmio_reg_offset(DERRMR);
+		*cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
 		if (IS_GEN8(dev_priv)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			*cs++ = 0;
+			*cs++ = MI_NOOP;
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	*cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*cs++ = intel_crtc->flip_work->gtt_offset;
+	*cs++ = MI_NOOP;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index af717e1356a9..f567d4b08863 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -834,6 +834,7 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
+	u32 *cs;
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -858,9 +859,11 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 			goto err;
 	}
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
+	cs = intel_ring_begin(request, 0);
+	if (IS_ERR(cs)) {
+		ret = PTR_ERR(cs);
 		goto err_unreserve;
+	}
 
 	if (!ce->initialised) {
 		ret = engine->init_context(request);
@@ -889,9 +892,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	int ret, i;
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *cs;
+	int ret, i;
 
 	if (w->count == 0)
 		return 0;
@@ -900,18 +903,18 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, w->count * 2 + 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, w->count * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*cs++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1404,27 +1407,27 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
-	int i, ret;
+	u32 *cs;
+	int i;
 
-	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	*cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
-		intel_ring_emit(ring, upper_32_bits(pd_daddr));
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
-		intel_ring_emit(ring, lower_32_bits(pd_daddr));
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+		*cs++ = upper_32_bits(pd_daddr);
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+		*cs++ = lower_32_bits(pd_daddr);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1433,8 +1436,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+	u32 *cs;
 	int ret;
 
 	/* Don't rely in hw updating PDPs, specially in lite-restore.
@@ -1455,19 +1458,17 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
-			(ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
+		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1488,13 +1489,11 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
-	struct intel_ring *ring = request->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *cs;
 
-	ret = intel_ring_begin(request, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(request, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	cmd = MI_FLUSH_DW + 1;
 
@@ -1511,13 +1510,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR |
-			MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0); /* upper addr */
-	intel_ring_emit(ring, 0); /* value */
-	intel_ring_advance(ring);
+	*cs++ = cmd;
+	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+	*cs++ = 0; /* upper addr */
+	*cs++ = 0; /* value */
+	intel_ring_advance(request, cs);
 
 	return 0;
 }
@@ -1525,13 +1522,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 mode)
 {
-	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
 		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
-	u32 flags = 0;
-	int ret;
+	u32 *cs, flags = 0;
 	int len;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -1573,45 +1568,45 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	if (dc_flush_wa)
 		len += 12;
 
-	ret = intel_ring_begin(request, len);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(request, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	if (vf_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
 	}
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
 	}
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = flags;
+	*cs++ = scratch_addr;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_CS_STALL;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(request, cs);
 
 	return 0;
 }
@@ -1621,34 +1616,33 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
  * used as a workaround for not being allowed to do lite
  * restore with HEAD==TAIL (WaIdleLiteRestore).
  */
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
 {
-	*out++ = MI_NOOP;
-	*out++ = MI_NOOP;
-	request->wa_tail = intel_ring_offset(request->ring, out);
+	*cs++ = MI_NOOP;
+	*cs++ = MI_NOOP;
+	request->wa_tail = intel_ring_offset(request, cs);
 }
 
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
-				 u32 *out)
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
 {
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-	*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
-	*out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
-	*out++ = 0;
-	*out++ = request->global_seqno;
-	*out++ = MI_USER_INTERRUPT;
-	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+	*cs++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+	*cs++ = 0;
+	*cs++ = request->global_seqno;
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+	request->tail = intel_ring_offset(request, cs);
 
-	gen8_emit_wa_tail(request, out);
+	gen8_emit_wa_tail(request, cs);
 }
 
 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
 
 static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
-					u32 *out)
+					u32 *cs)
 {
 	/* We're using qword write, seqno should be aligned to 8 bytes. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1657,20 +1651,19 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	*out++ = GFX_OP_PIPE_CONTROL(6);
-	*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
-		  PIPE_CONTROL_CS_STALL |
-		  PIPE_CONTROL_QW_WRITE);
-	*out++ = intel_hws_seqno_address(request->engine);
-	*out++ = 0;
-	*out++ = request->global_seqno;
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+		PIPE_CONTROL_QW_WRITE;
+	*cs++ = intel_hws_seqno_address(request->engine);
+	*cs++ = 0;
+	*cs++ = request->global_seqno;
 	/* We're thrashing one dword of HWS. */
-	*out++ = 0;
-	*out++ = MI_USER_INTERRUPT;
-	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	*cs++ = 0;
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+	request->tail = intel_ring_offset(request, cs);
 
-	gen8_emit_wa_tail(request, out);
+	gen8_emit_wa_tail(request, cs);
 }
 
 static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 773e36253e7c..92e461c68385 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
-	int ret;
+	u32 *cs;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[index].control_value);
+		*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*cs++ = table->table[index].control_value;
 	}
 
 	/*
@@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[0].control_value);
+		*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*cs++ = table->table[0].control_value;
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	unsigned int i;
-	int ret;
+	u32 *cs;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*cs++ = l3cc_combine(table, 2 * i, 2 * i + 1);
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*cs++ = l3cc_combine(table, 2 * i, 0);
 		i++;
 	}
 
@@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*cs++ = l3cc_combine(table, 0, 0);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 0608fad7f593..5ef9f5bfb92c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	int ret;
+	u32 *cs;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret) {
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(cs);
 	}
 
 	overlay->active = true;
@@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_I830(dev_priv))
 		i830_overlay_clock_gating(dev_priv, false);
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+	*cs++ = overlay->flip_addr | OFC_UPDATE;
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
-	u32 tmp;
-	int ret;
+	u32 tmp, *cs;
 
 	WARN_ON(!overlay->active);
 
@@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 2);
-	if (ret) {
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(cs);
 	}
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*cs++ = flip_addr;
+	intel_ring_advance(req, cs);
 
 	intel_overlay_flip_prepare(overlay, vma);
 
@@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	u32 flip_addr = overlay->flip_addr;
-	int ret;
+	u32 *cs, flip_addr = overlay->flip_addr;
 
 	WARN_ON(!overlay->active);
 
@@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret) {
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(cs);
 	}
 
-	ring = req->ring;
-
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*cs++ = flip_addr;
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
 	/* turn overlay off */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+	*cs++ = flip_addr;
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	intel_overlay_flip_prepare(overlay, NULL);
 
@@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
+	u32 *cs;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ring *ring;
 
 		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-		ret = intel_ring_begin(req, 2);
-		if (ret) {
+		cs = intel_ring_begin(req, 2);
+		if (IS_ERR(cs)) {
 			i915_add_request_no_flush(req);
-			return ret;
+			return PTR_ERR(cs);
 		}
 
-		ring = req->ring;
-		intel_ring_emit(ring,
-				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+		*cs++ = MI_NOOP;
+		intel_ring_advance(req, cs);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9fba5664376e..0b9030c36625 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,22 +61,20 @@ void intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *cs;
 
 	cmd = MI_FLUSH;
 
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = cmd;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -84,9 +82,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *cs;
 
 	/*
 	 * read/write caches:
@@ -123,13 +119,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 			cmd |= MI_INVALIDATE_ISP;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = cmd;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -174,35 +170,33 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	int ret;
-
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* low dword */
-	intel_ring_emit(ring, 0); /* high dword */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
-
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	u32 *cs;
+
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(5);
+	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0; /* low dword */
+	*cs++ = 0; /* high dword */
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(5);
+	*cs++ = PIPE_CONTROL_QW_WRITE;
+	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -210,10 +204,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
+	u32 *cs, flags = 0;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -247,15 +240,15 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = flags;
+	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -263,20 +256,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring,
-			PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*cs++ = 0;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -284,11 +274,9 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
-	int ret;
+	u32 *cs, flags = 0;
 
 	/*
 	 * Ensure that any following seqno writes only happen when the render
@@ -332,15 +320,15 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = flags;
+	*cs++ = scratch_addr;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -349,20 +337,19 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = flags;
+	*cs++ = scratch_addr;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -659,8 +646,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *cs;
 	int ret, i;
 
 	if (w->count == 0)
@@ -670,18 +657,18 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, (w->count * 2 + 2));
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, (w->count * 2 + 2));
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*cs++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1276,7 +1263,7 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 	i915_vma_unpin_and_release(&dev_priv->semaphore);
 }
 
-static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
@@ -1287,23 +1274,22 @@ static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		*out++ = GFX_OP_PIPE_CONTROL(6);
-		*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
-			  PIPE_CONTROL_QW_WRITE |
-			  PIPE_CONTROL_CS_STALL);
-		*out++ = lower_32_bits(gtt_offset);
-		*out++ = upper_32_bits(gtt_offset);
-		*out++ = req->global_seqno;
-		*out++ = 0;
-		*out++ = (MI_SEMAPHORE_SIGNAL |
-			  MI_SEMAPHORE_TARGET(waiter->hw_id));
-		*out++ = 0;
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_CS_STALL;
+		*cs++ = lower_32_bits(gtt_offset);
+		*cs++ = upper_32_bits(gtt_offset);
+		*cs++ = req->global_seqno;
+		*cs++ = 0;
+		*cs++ = MI_SEMAPHORE_SIGNAL |
+			MI_SEMAPHORE_TARGET(waiter->hw_id);
+		*cs++ = 0;
 	}
 
-	return out;
+	return cs;
 }
 
-static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
@@ -1314,19 +1300,19 @@ static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
-		*out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
-		*out++ = upper_32_bits(gtt_offset);
-		*out++ = req->global_seqno;
-		*out++ = (MI_SEMAPHORE_SIGNAL |
-			  MI_SEMAPHORE_TARGET(waiter->hw_id));
-		*out++ = 0;
+		*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+		*cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+		*cs++ = upper_32_bits(gtt_offset);
+		*cs++ = req->global_seqno;
+		*cs++ = MI_SEMAPHORE_SIGNAL |
+			MI_SEMAPHORE_TARGET(waiter->hw_id);
+		*cs++ = 0;
 	}
 
-	return out;
+	return cs;
 }
 
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *engine;
@@ -1341,16 +1327,16 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
 
 		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
 		if (i915_mmio_reg_valid(mbox_reg)) {
-			*out++ = MI_LOAD_REGISTER_IMM(1);
-			*out++ = i915_mmio_reg_offset(mbox_reg);
-			*out++ = req->global_seqno;
+			*cs++ = MI_LOAD_REGISTER_IMM(1);
+			*cs++ = i915_mmio_reg_offset(mbox_reg);
+			*cs++ = req->global_seqno;
 			num_rings++;
 		}
 	}
 	if (num_rings & 1)
-		*out++ = MI_NOOP;
+		*cs++ = MI_NOOP;
 
-	return out;
+	return cs;
 }
 
 static void i9xx_submit_request(struct drm_i915_gem_request *request)
@@ -1362,15 +1348,14 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
 	I915_WRITE_TAIL(request->engine, request->tail);
 }
 
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
-				 u32 *out)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
 {
-	*out++ = MI_STORE_DWORD_INDEX;
-	*out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
-	*out++ = req->global_seqno;
-	*out++ = MI_USER_INTERRUPT;
+	*cs++ = MI_STORE_DWORD_INDEX;
+	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+	*cs++ = req->global_seqno;
+	*cs++ = MI_USER_INTERRUPT;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, cs);
 }
 
 static const int i9xx_emit_breadcrumb_sz = 4;
@@ -1383,34 +1368,32 @@ static const int i9xx_emit_breadcrumb_sz = 4;
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
-				      u32 *out)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
 {
 	return i9xx_emit_breadcrumb(req,
-				    req->engine->semaphore.signal(req, out));
+				    req->engine->semaphore.signal(req, cs));
 }
 
 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
-					u32 *out)
+					u32 *cs)
 {
 	struct intel_engine_cs *engine = req->engine;
 
 	if (engine->semaphore.signal)
-		out = engine->semaphore.signal(req, out);
-
-	*out++ = GFX_OP_PIPE_CONTROL(6);
-	*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
-			       PIPE_CONTROL_CS_STALL |
-			       PIPE_CONTROL_QW_WRITE);
-	*out++ = intel_hws_seqno_address(engine);
-	*out++ = 0;
-	*out++ = req->global_seqno;
+		cs = engine->semaphore.signal(req, cs);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+		PIPE_CONTROL_QW_WRITE;
+	*cs++ = intel_hws_seqno_address(engine);
+	*cs++ = 0;
+	*cs++ = req->global_seqno;
 	/* We're thrashing one dword of HWS. */
-	*out++ = 0;
-	*out++ = MI_USER_INTERRUPT;
-	*out++ = MI_NOOP;
+	*cs++ = 0;
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, cs);
 }
 
 static const int gen8_render_emit_breadcrumb_sz = 8;
@@ -1427,24 +1410,21 @@ static int
 gen8_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_WAIT |
-			MI_SEMAPHORE_GLOBAL_GTT |
-			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->global_seqno);
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_advance(ring);
+	*cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_SAD_GTE_SDD;
+	*cs++ = signal->global_seqno;
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	intel_ring_advance(req, cs);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
@@ -1461,28 +1441,27 @@ static int
 gen6_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
 	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
-	int ret;
+	u32 *cs;
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, dw1 | wait_mbox);
+	*cs++ = dw1 | wait_mbox;
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->global_seqno - 1);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = signal->global_seqno - 1;
+	*cs++ = 0;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1583,16 +1562,15 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 static int
 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_FLUSH;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 	return 0;
 }
 
@@ -1658,20 +1636,16 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 length,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			MI_BATCH_GTT |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
+		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
+	*cs++ = offset;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1685,59 +1659,56 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
-	int ret;
+	u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(ring, cs_offset);
-	intel_ring_emit(ring, 0xdeadbeef);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+	*cs++ = cs_offset;
+	*cs++ = 0xdeadbeef;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(req, 6 + 2);
-		if (ret)
-			return ret;
+		cs = intel_ring_begin(req, 6 + 2);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
 
 		/* Blit the batch (which has now all relocs applied) to the
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(ring,
-				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
-
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+		*cs++ = cs_offset;
+		*cs++ = 4096;
+		*cs++ = offset;
+
+		*cs++ = MI_FLUSH;
+		*cs++ = MI_NOOP;
+		intel_ring_advance(req, cs);
 
 		/* ... and execute it. */
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+		MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1747,17 +1718,16 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+		MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2165,7 +2135,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
-	int ret;
+	u32 *cs;
 
 	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
 
@@ -2178,9 +2148,9 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!request->engine->buffer);
 	request->ring = request->engine->buffer;
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(request, 0);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
@@ -2235,7 +2205,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	return 0;
 }
 
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
 	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
@@ -2243,6 +2213,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
+	u32 *cs;
 
 	total_bytes = bytes + req->reserved_space;
 
@@ -2269,7 +2240,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
-			return ret;
+			return ERR_PTR(ret);
 	}
 
 	if (unlikely(need_wrap)) {
@@ -2282,32 +2253,34 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		ring->space -= remain_actual;
 	}
 
+	GEM_BUG_ON(ring->tail > ring->size - bytes);
+	cs = ring->vaddr + ring->tail;
+	ring->tail += bytes;
 	ring->space -= bytes;
 	GEM_BUG_ON(ring->space < 0);
-	GEM_DEBUG_EXEC(ring->advance = ring->tail + bytes);
-	return 0;
+
+	return cs;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	int num_dwords =
-		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
-	int ret;
+		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	u32 *cs;
 
 	if (num_dwords == 0)
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(req, num_dwords);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, num_dwords);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	while (num_dwords--)
-		intel_ring_emit(ring, MI_NOOP);
+		*cs++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2351,13 +2324,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2379,16 +2350,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*cs++ = cmd;
+	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*cs++ = 0; /* upper addr */
+		*cs++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*cs++ = 0;
+		*cs++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 	return 0;
 }
 
@@ -2397,23 +2368,21 @@ gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
+		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2423,22 +2392,19 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
 		  u64 offset, u32 len,
 		  unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
+	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+		(dispatch_flags & I915_DISPATCH_RS ?
+		MI_BATCH_RESOURCE_STREAMER : 0);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*cs++ = offset;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2448,20 +2414,17 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
+	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+		0 : MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*cs++ = offset;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2470,13 +2433,11 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 
 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2497,17 +2458,16 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*cs++ = cmd;
+	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*cs++ = 0; /* upper addr */
+		*cs++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*cs++ = 0;
+		*cs++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b122c3cc6dbb..fe31201b9671 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -144,7 +144,6 @@ struct intel_ring {
 
 	u32 head;
 	u32 tail;
-	GEM_DEBUG_DECL(u32 advance);
 
 	int space;
 	int size;
@@ -290,7 +289,7 @@ struct intel_engine_cs {
 #define I915_DISPATCH_PINNED BIT(1)
 #define I915_DISPATCH_RS     BIT(2)
 	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
-					   u32 *out);
+					   u32 *cs);
 	int		emit_breadcrumb_sz;
 
 	/* Pass the request to the hardware queue (e.g. directly into
@@ -373,7 +372,7 @@ struct intel_engine_cs {
 		/* AKA wait() */
 		int	(*sync_to)(struct drm_i915_gem_request *req,
 				   struct drm_i915_gem_request *signal);
-		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *out);
+		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
 	} semaphore;
 
 	/* Execlists */
@@ -495,21 +494,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
-int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
-{
-	*(uint32_t *)(ring->vaddr + ring->tail) = data;
-	ring->tail += 4;
-}
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
 
-static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
-{
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
-
-static inline void intel_ring_advance(struct intel_ring *ring)
+static inline void
+intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
 {
 	/* Dummy function.
 	 *
@@ -519,14 +509,16 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 	 * reserved for the command packet (i.e. the value passed to
 	 * intel_ring_begin()).
 	 */
-	GEM_DEBUG_BUG_ON(ring->tail != ring->advance);
+	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
 }
 
-static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
+static inline u32
+intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
 {
 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
-	u32 offset = addr - ring->vaddr;
-	return offset & (ring->size - 1);
+	u32 offset = addr - req->ring->vaddr;
+	GEM_BUG_ON(offset > req->ring->size);
+	return offset & (req->ring->size - 1);
 }
 
 int __intel_ring_space(int head, int tail, int size);
-- 
2.7.4


* ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev9)
  2017-02-08 13:10 [PATCH] drm/i915: Emit to ringbuffer directly Tvrtko Ursulin
                   ` (4 preceding siblings ...)
  2017-02-10  8:22 ` ✗ Fi.CI.BAT: warning " Patchwork
@ 2017-02-13 13:02 ` Patchwork
  2017-02-13 13:52 ` ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev10) Patchwork
  2017-02-14 13:52 ` ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev11) Patchwork
  7 siblings, 0 replies; 28+ messages in thread
From: Patchwork @ 2017-02-13 13:02 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Emit to ringbuffer directly (rev9)
URL   : https://patchwork.freedesktop.org/series/12186/
State : failure

== Summary ==

  CC [M]  drivers/gpu/drm/i915/gvt/gvt.o
  CC [M]  drivers/gpu/drm/i915/gvt/trace_points.o
  CC [M]  drivers/gpu/drm/i915/gvt/firmware.o
  CC [M]  drivers/gpu/drm/i915/gvt/interrupt.o
  CC [M]  drivers/gpu/drm/i915/gvt/cfg_space.o
  CC [M]  drivers/gpu/drm/i915/gvt/gtt.o
  CC [M]  drivers/gpu/drm/i915/gvt/opregion.o
  CC [M]  drivers/gpu/drm/i915/gvt/mmio.o
  CC [M]  drivers/gpu/drm/i915/gvt/scheduler.o
  CC [M]  drivers/gpu/drm/i915/gvt/edid.o
  CC [M]  drivers/gpu/drm/i915/gvt/display.o
  CC [M]  drivers/gpu/drm/i915/gvt/execlist.o
  CC [M]  drivers/gpu/drm/i915/gvt/sched_policy.o
  LD      drivers/pci/pcie/aer/aerdriver.o
  CC [M]  drivers/gpu/drm/i915/intel_lpe_audio.o
  CC [M]  drivers/gpu/drm/i915/gvt/render.o
  CC [M]  drivers/gpu/drm/i915/gvt/cmd_parser.o
  LD      drivers/pci/pcie/aer/built-in.o
  LD      drivers/pci/pcie/built-in.o
  LD      net/packet/built-in.o
  LD [M]  drivers/misc/mei/mei-me.o
  LD [M]  drivers/mmc/core/mmc_block.o
  LD      drivers/misc/built-in.o
  LD      drivers/mmc/built-in.o
  LD      drivers/video/fbdev/core/fb.o
  LD      drivers/acpi/acpica/acpi.o
  LD      drivers/scsi/scsi_mod.o
  LD      net/xfrm/built-in.o
  LD [M]  sound/pci/hda/snd-hda-codec-generic.o
  LD      sound/pci/built-in.o
  LD      drivers/video/fbdev/core/built-in.o
  LD      drivers/tty/serial/8250/8250.o
  LD      drivers/rtc/built-in.o
  LD      drivers/acpi/acpica/built-in.o
  LD      drivers/video/fbdev/built-in.o
  LD      drivers/usb/storage/usb-storage.o
  LD      drivers/usb/storage/built-in.o
  LD      drivers/spi/built-in.o
  LD      sound/built-in.o
  LD      drivers/acpi/built-in.o
  LD      drivers/pci/built-in.o
  LD      drivers/usb/gadget/libcomposite.o
  LD      drivers/iommu/built-in.o
  LD [M]  drivers/net/ethernet/intel/igbvf/igbvf.o
  LD      net/ipv6/ipv6.o
drivers/gpu/drm/i915/gvt/cmd_parser.c: In function ‘shadow_workload_ring_buffer’:
drivers/gpu/drm/i915/gvt/cmd_parser.c:2628:69: error: ‘out’ undeclared (first use in this function)
  ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, out);
                                                                     ^
drivers/gpu/drm/i915/gvt/cmd_parser.c:2628:69: note: each undeclared identifier is reported only once for each function it appears in
  LD      drivers/usb/gadget/udc/udc-core.o
  LD      drivers/usb/gadget/udc/built-in.o
  LD      drivers/video/console/built-in.o
  LD      net/ipv6/built-in.o
  LD      drivers/usb/gadget/built-in.o
  LD      drivers/video/built-in.o
  LD      drivers/scsi/sd_mod.o
scripts/Makefile.build:294: recipe for target 'drivers/gpu/drm/i915/gvt/cmd_parser.o' failed
make[4]: *** [drivers/gpu/drm/i915/gvt/cmd_parser.o] Error 1
make[4]: *** Waiting for unfinished jobs....
  LD      drivers/scsi/built-in.o
  LD      drivers/tty/serial/8250/8250_base.o
  LD      drivers/tty/serial/8250/built-in.o
  LD      drivers/tty/serial/built-in.o
  LD [M]  drivers/net/ethernet/intel/e1000/e1000.o
  LD      fs/btrfs/btrfs.o
  LD      drivers/usb/core/usbcore.o
  LD      drivers/usb/core/built-in.o
  LD      fs/btrfs/built-in.o
  CC      arch/x86/kernel/cpu/capflags.o
  LD      drivers/tty/vt/built-in.o
  LD      arch/x86/kernel/cpu/built-in.o
  LD      drivers/tty/built-in.o
  LD      arch/x86/kernel/built-in.o
  LD [M]  drivers/net/ethernet/broadcom/genet/genet.o
  LD      net/ipv4/built-in.o
  LD      arch/x86/built-in.o
  LD      drivers/usb/host/xhci-hcd.o
  LD      drivers/md/md-mod.o
  LD      drivers/usb/host/built-in.o
  LD      drivers/md/built-in.o
  LD      drivers/usb/built-in.o
  LD      net/core/built-in.o
  LD [M]  drivers/net/ethernet/intel/igb/igb.o
  LD      net/built-in.o
  LD      fs/ext4/ext4.o
  LD      fs/ext4/built-in.o
  LD      fs/built-in.o
  LD [M]  drivers/net/ethernet/intel/e1000e/e1000e.o
scripts/Makefile.build:553: recipe for target 'drivers/gpu/drm/i915' failed
make[3]: *** [drivers/gpu/drm/i915] Error 2
scripts/Makefile.build:553: recipe for target 'drivers/gpu/drm' failed
make[2]: *** [drivers/gpu/drm] Error 2
scripts/Makefile.build:553: recipe for target 'drivers/gpu' failed
make[1]: *** [drivers/gpu] Error 2
make[1]: *** Waiting for unfinished jobs....
  LD      drivers/net/ethernet/built-in.o
  LD      drivers/net/built-in.o
Makefile:988: recipe for target 'drivers' failed
make: *** [drivers] Error 2


* [PATCH v9] drm/i915: Emit to ringbuffer directly
  2017-02-13 12:53             ` [PATCH v8] " Tvrtko Ursulin
@ 2017-02-13 13:06               ` Tvrtko Ursulin
  2017-02-13 21:09                 ` Chris Wilson
  0 siblings, 1 reply; 28+ messages in thread
From: Tvrtko Ursulin @ 2017-02-13 13:06 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

This removes the usage of intel_ring_emit in favour of
directly writing to the ring buffer.

intel_ring_emit was preventing the compiler from optimising
the fetch and increment of the current ring buffer pointer,
and was therefore generating very verbose code for every write.

It had no useful purpose since all ringbuffer operations
are started and ended with intel_ring_begin and
intel_ring_advance respectively, with no bail-out possible
in the middle, so it is fine to increment the tail in
intel_ring_begin and let the code manage the pointer
itself.

Useless instruction removal amounts to approximately
two and a half kilobytes of saved text on my build.

Not sure if this has any measurable performance
implications, but executing a ton of useless instructions
on fast paths cannot be good.

Patch is not fully polished, but it compiles and runs
on Gen9 at least.
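
To illustrate the conversion applied throughout the driver, a
minimal sketch based on the gen2 flush hunk in
intel_ringbuffer.c (emission pattern only, not a complete
function). Before, every dword went through a helper that
re-read and re-wrote ring->tail:

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;
	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

After, intel_ring_begin() advances the tail up front and returns
a pointer into the ringbuffer, so each emit becomes a plain
post-incremented store the compiler can keep in a register, and
intel_ring_advance() reduces to a sanity check that exactly the
reserved number of dwords was written:

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);
	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);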

v2:
 * Change return from intel_ring_begin to error pointer by
   popular demand.
 * Move tail increment to intel_ring_advance to enable some
   error checking.

v3:
 * Move tail advance back into intel_ring_begin.
 * Rebase and tidy.

v4:
 * Complete rebase after a few months since v3.

v5:
 * Remove unnecessary cast and fix !debug compile. (Chris Wilson)

v6:
 * Make intel_ring_offset take request as well.
 * Fix recording of request postfix plus a sprinkle of asserts.
   (Chris Wilson)

v7:
 * Use intel_ring_offset to get the postfix. (Chris Wilson)
 * Convert GVT code as well.

v8:
 * Rename *out++ to *cs++.

v9:
 * Fix the out-to-cs conversion in GVT (see the sketch below).
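
   As a sketch of the resulting GVT calling convention (simplified
   from the cmd_parser.c hunks below): copy_gma_to_hva() now
   returns the number of bytes copied, or a negative errno, instead
   of zero on success, so callers advance their cs pointer by the
   returned length:

	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
			      gma_head, gma_tail, cs);
	if (ret < 0)
		return ret;		/* negative errno on failure */
	cs += ret / sizeof(u32);	/* advance by dwords copied */
	intel_ring_advance(workload->req, cs);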

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v7)
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c      |  36 +-
 drivers/gpu/drm/i915/i915_gem_context.c    |  76 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  40 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  69 ++--
 drivers/gpu/drm/i915/i915_gem_request.c    |   8 +-
 drivers/gpu/drm/i915/intel_display.c       | 131 +++----
 drivers/gpu/drm/i915/intel_lrc.c           | 207 +++++-----
 drivers/gpu/drm/i915/intel_mocs.c          |  51 ++-
 drivers/gpu/drm/i915/intel_overlay.c       |  79 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 592 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  30 +-
 11 files changed, 608 insertions(+), 711 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ee1db7447ce..59c3045af252 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1512,7 +1512,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 		len += copy_len;
 		gma += copy_len;
 	}
-	return 0;
+	return len;
 }
 
 
@@ -1626,7 +1626,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
 			      gma, gma + bb_size,
 			      dst);
-	if (ret) {
+	if (ret < 0) {
 		gvt_err("fail to copy guest ring buffer\n");
 		goto unmap_src;
 	}
@@ -2590,11 +2590,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
-	struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
-	unsigned int copy_len = 0;
+	u32 *cs;
 	int ret;
 
 	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2608,36 +2605,33 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_top = workload->rb_start + guest_rb_size;
 
 	/* allocate shadow ring buffer */
-	ret = intel_ring_begin(workload->req, workload->rb_len / 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* get shadow ring buffer va */
-	workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+	workload->shadow_ring_buffer_va = cs;
 
 	/* head > tail --> copy head <-> top */
 	if (gma_head > gma_tail) {
 		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
-				gma_head, gma_top,
-				workload->shadow_ring_buffer_va);
-		if (ret) {
+				      gma_head, gma_top, cs);
+		if (ret < 0) {
 			gvt_err("fail to copy guest ring buffer\n");
 			return ret;
 		}
-		copy_len = gma_top - gma_head;
+		cs += ret / sizeof(u32);
 		gma_head = workload->rb_start;
 	}
 
 	/* copy head or start <-> tail */
-	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
-			gma_head, gma_tail,
-			workload->shadow_ring_buffer_va + copy_len);
-	if (ret) {
+	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+	if (ret < 0) {
 		gvt_err("fail to copy guest ring buffer\n");
 		return ret;
 	}
-	ring->tail += workload->rb_len;
-	intel_ring_advance(ring);
+	cs += ret / sizeof(u32);
+	intel_ring_advance(workload->req, cs);
 	return 0;
 }
 
@@ -2691,7 +2685,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 				wa_ctx->workload->vgpu->gtt.ggtt_mm,
 				guest_gma, guest_gma + ctx_size,
 				map);
-	if (ret) {
+	if (ret < 0) {
 		gvt_err("fail to copy guest indirect ctx\n");
 		goto unmap_src;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index fb11b674f319..0745a1c24f9c 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -596,10 +596,9 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	enum intel_engine_id id;
-	u32 flags = hw_flags | MI_MM_SPACE_GTT;
+	u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915.semaphores ?
@@ -629,99 +628,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring,
-						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring,
-						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*cs++ = i915_mmio_reg_offset(
+					   RING_PSMI_CTL(signaller->mmio_base));
+				*cs++ = _MASKED_BIT_ENABLE(
+						GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 		}
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring,
-			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
+	*cs++ = MI_NOOP;
+	*cs++ = MI_SET_CONTEXT;
+	*cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_NOOP;
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(ring, last_reg);
-				intel_ring_emit(ring,
-						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*cs++ = i915_mmio_reg_offset(last_reg);
+				*cs++ = _MASKED_BIT_DISABLE(
+						GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(ring,
-					MI_STORE_REGISTER_MEM |
-					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring,
-					i915_ggtt_offset(engine->scratch));
-			intel_ring_emit(ring, MI_NOOP);
+			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+			*cs++ = i915_mmio_reg_offset(last_reg);
+			*cs++ = i915_ggtt_offset(engine->scratch);
+			*cs++ = MI_NOOP;
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return ret;
 }
 
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
-	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ring *ring = req->ring;
-	int i, ret;
+	u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
+	int i;
 
 	if (!remap_info)
 		return 0;
 
-	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/*
 	 * Note: We do not worry about the concurrent register cacheline hang
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+		*cs++ = remap_info[i];
 	}
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 111b7f8c617a..da0846fe2ad6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1336,25 +1336,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret, i;
+	u32 *cs;
+	int i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(req, 4 * 3);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4 * 3);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
+		*cs++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1415,7 +1415,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
-	u32 instp_mask;
+	u32 instp_mask, *cs;
 	int ret;
 
 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1461,17 +1461,15 @@ execbuf_submit(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ring *ring = params->request->ring;
-
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		cs = intel_ring_begin(params->request, 4);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		*cs++ = MI_NOOP;
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = i915_mmio_reg_offset(INSTPM);
+		*cs++ = instp_mask << 16 | instp_mode;
+		intel_ring_advance(params->request, cs);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index d580e2b4081b..841bcdfbf16e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -680,23 +680,22 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
-	int ret;
+	u32 *cs;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(ring, upper_32_bits(addr));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(ring, lower_32_bits(addr));
-	intel_ring_advance(ring);
+	*cs++ = MI_LOAD_REGISTER_IMM(1);
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
+	*cs++ = upper_32_bits(addr);
+	*cs++ = MI_LOAD_REGISTER_IMM(1);
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
+	*cs++ = lower_32_bits(addr);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1724,8 +1723,8 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *cs;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1733,17 +1732,17 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_LOAD_REGISTER_IMM(2);
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*cs++ = PP_DIR_DCLV_2G;
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*cs++ = get_pd_offset(ppgtt);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1751,8 +1750,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *cs;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1760,17 +1759,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(2);
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*cs++ = PP_DIR_DCLV_2G;
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*cs++ = get_pd_offset(ppgtt);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index f31deeb72703..3478f90a6ee0 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -824,6 +824,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	struct intel_ring *ring = request->ring;
 	struct intel_timeline *timeline = request->timeline;
 	struct drm_i915_gem_request *prev;
+	u32 *cs;
 	int err;
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -862,10 +863,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * GPU processing the request, we never over-estimate the
 	 * position of the ring's HEAD.
 	 */
-	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
-	GEM_BUG_ON(err);
-	request->postfix = ring->tail;
-	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+	GEM_BUG_ON(IS_ERR(cs));
+	request->postfix = intel_ring_offset(request, cs);
 
 	/* Seal the request and mark it as pending execution. Note that
 	 * we may inspect this state, without holding any locks, during
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 88b7d96c46a4..9d14dc384292 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10158,14 +10158,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *cs;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* Can't queue multiple flips, so wait for the previous
 	 * one to finish before executing the next.
@@ -10174,13 +10172,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*cs++ = MI_NOOP;
+	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0];
+	*cs++ = intel_crtc->flip_work->gtt_offset;
+	*cs++ = 0; /* aux display base address, unused */
 
 	return 0;
 }
@@ -10192,26 +10189,23 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *cs;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	if (intel_crtc->plane)
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*cs++ = MI_NOOP;
+	*cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0];
+	*cs++ = intel_crtc->flip_work->gtt_offset;
+	*cs++ = MI_NOOP;
 
 	return 0;
 }
@@ -10223,25 +10217,22 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* i965+ uses the linear or tiled offsets from the
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
-			intel_fb_modifier_to_tiling(fb->modifier));
+	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0];
+	*cs++ = intel_crtc->flip_work->gtt_offset |
+		intel_fb_modifier_to_tiling(fb->modifier);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -10249,7 +10240,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*cs++ = pf | pipesrc;
 
 	return 0;
 }
@@ -10261,21 +10252,17 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*cs++ = intel_crtc->flip_work->gtt_offset;
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -10285,7 +10272,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*cs++ = pf | pipesrc;
 
 	return 0;
 }
@@ -10298,9 +10285,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t plane_bit = 0;
+	u32 *cs, plane_bit = 0;
 	int len, ret;
 
 	switch (intel_crtc->plane) {
@@ -10344,9 +10330,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* Unmask the flip-done completion message. Note that the bspec says that
 	 * we should do this for both the BCS and RCS, and that we must not unmask
@@ -10358,31 +10344,28 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
 	if (req->engine->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					  DERRMR_PIPEB_PRI_FLIP_DONE |
-					  DERRMR_PIPEC_PRI_FLIP_DONE));
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = i915_mmio_reg_offset(DERRMR);
+		*cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+			  DERRMR_PIPEB_PRI_FLIP_DONE |
+			  DERRMR_PIPEC_PRI_FLIP_DONE);
 		if (IS_GEN8(dev_priv))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
-					      MI_SRM_LRM_GLOBAL_GTT);
+			*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+				MI_SRM_LRM_GLOBAL_GTT;
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
-					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring,
-				i915_ggtt_offset(req->engine->scratch) + 256);
+			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+		*cs++ = i915_mmio_reg_offset(DERRMR);
+		*cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
 		if (IS_GEN8(dev_priv)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			*cs++ = 0;
+			*cs++ = MI_NOOP;
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	*cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*cs++ = intel_crtc->flip_work->gtt_offset;
+	*cs++ = MI_NOOP;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index af717e1356a9..f567d4b08863 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -834,6 +834,7 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
+	u32 *cs;
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -858,9 +859,11 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 			goto err;
 	}
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
+	cs = intel_ring_begin(request, 0);
+	if (IS_ERR(cs)) {
+		ret = PTR_ERR(cs);
 		goto err_unreserve;
+	}
 
 	if (!ce->initialised) {
 		ret = engine->init_context(request);
@@ -889,9 +892,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	int ret, i;
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *cs;
+	int ret, i;
 
 	if (w->count == 0)
 		return 0;
@@ -900,18 +903,18 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, w->count * 2 + 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, w->count * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*cs++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1404,27 +1407,27 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
-	int i, ret;
+	u32 *cs;
+	int i;
 
-	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	*cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
-		intel_ring_emit(ring, upper_32_bits(pd_daddr));
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
-		intel_ring_emit(ring, lower_32_bits(pd_daddr));
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+		*cs++ = upper_32_bits(pd_daddr);
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+		*cs++ = lower_32_bits(pd_daddr);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1433,8 +1436,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+	u32 *cs;
 	int ret;
 
 	/* Don't rely in hw updating PDPs, specially in lite-restore.
@@ -1455,19 +1458,17 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
-			(ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
+		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1488,13 +1489,11 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
-	struct intel_ring *ring = request->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *cs;
 
-	ret = intel_ring_begin(request, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(request, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	cmd = MI_FLUSH_DW + 1;
 
@@ -1511,13 +1510,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR |
-			MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0); /* upper addr */
-	intel_ring_emit(ring, 0); /* value */
-	intel_ring_advance(ring);
+	*cs++ = cmd;
+	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+	*cs++ = 0; /* upper addr */
+	*cs++ = 0; /* value */
+	intel_ring_advance(request, cs);
 
 	return 0;
 }
@@ -1525,13 +1522,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 mode)
 {
-	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
 		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
-	u32 flags = 0;
-	int ret;
+	u32 *cs, flags = 0;
 	int len;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -1573,45 +1568,45 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	if (dc_flush_wa)
 		len += 12;
 
-	ret = intel_ring_begin(request, len);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(request, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	if (vf_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
 	}
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
 	}
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = flags;
+	*cs++ = scratch_addr;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_CS_STALL;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(request, cs);
 
 	return 0;
 }
@@ -1621,34 +1616,33 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
  * used as a workaround for not being allowed to do lite
  * restore with HEAD==TAIL (WaIdleLiteRestore).
  */
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
 {
-	*out++ = MI_NOOP;
-	*out++ = MI_NOOP;
-	request->wa_tail = intel_ring_offset(request->ring, out);
+	*cs++ = MI_NOOP;
+	*cs++ = MI_NOOP;
+	request->wa_tail = intel_ring_offset(request, cs);
 }
 
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
-				 u32 *out)
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
 {
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-	*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
-	*out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
-	*out++ = 0;
-	*out++ = request->global_seqno;
-	*out++ = MI_USER_INTERRUPT;
-	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+	*cs++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+	*cs++ = 0;
+	*cs++ = request->global_seqno;
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+	request->tail = intel_ring_offset(request, cs);
 
-	gen8_emit_wa_tail(request, out);
+	gen8_emit_wa_tail(request, cs);
 }
 
 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
 
 static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
-					u32 *out)
+					u32 *cs)
 {
 	/* We're using qword write, seqno should be aligned to 8 bytes. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1657,20 +1651,19 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	*out++ = GFX_OP_PIPE_CONTROL(6);
-	*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
-		  PIPE_CONTROL_CS_STALL |
-		  PIPE_CONTROL_QW_WRITE);
-	*out++ = intel_hws_seqno_address(request->engine);
-	*out++ = 0;
-	*out++ = request->global_seqno;
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+		PIPE_CONTROL_QW_WRITE;
+	*cs++ = intel_hws_seqno_address(request->engine);
+	*cs++ = 0;
+	*cs++ = request->global_seqno;
 	/* We're thrashing one dword of HWS. */
-	*out++ = 0;
-	*out++ = MI_USER_INTERRUPT;
-	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	*cs++ = 0;
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+	request->tail = intel_ring_offset(request, cs);
 
-	gen8_emit_wa_tail(request, out);
+	gen8_emit_wa_tail(request, cs);
 }
 
 static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 773e36253e7c..92e461c68385 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
-	int ret;
+	u32 *cs;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[index].control_value);
+		*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*cs++ = table->table[index].control_value;
 	}
 
 	/*
@@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[0].control_value);
+		*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*cs++ = table->table[0].control_value;
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	unsigned int i;
-	int ret;
+	u32 *cs;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*cs++ = l3cc_combine(table, 2 * i, 2 * i + 1);
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*cs++ = l3cc_combine(table, 2 * i, 0);
 		i++;
 	}
 
@@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*cs++ = l3cc_combine(table, 0, 0);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 0608fad7f593..5ef9f5bfb92c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	int ret;
+	u32 *cs;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret) {
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(cs);
 	}
 
 	overlay->active = true;
@@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_I830(dev_priv))
 		i830_overlay_clock_gating(dev_priv, false);
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+	*cs++ = overlay->flip_addr | OFC_UPDATE;
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
-	u32 tmp;
-	int ret;
+	u32 tmp, *cs;
 
 	WARN_ON(!overlay->active);
 
@@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 2);
-	if (ret) {
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(cs);
 	}
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*cs++ = flip_addr;
+	intel_ring_advance(req, cs);
 
 	intel_overlay_flip_prepare(overlay, vma);
 
@@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	u32 flip_addr = overlay->flip_addr;
-	int ret;
+	u32 *cs, flip_addr = overlay->flip_addr;
 
 	WARN_ON(!overlay->active);
 
@@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret) {
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(cs);
 	}
 
-	ring = req->ring;
-
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*cs++ = flip_addr;
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
 	/* turn overlay off */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+	*cs++ = flip_addr;
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	intel_overlay_flip_prepare(overlay, NULL);
 
@@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
+	u32 *cs;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ring *ring;
 
 		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-		ret = intel_ring_begin(req, 2);
-		if (ret) {
+		cs = intel_ring_begin(req, 2);
+		if (IS_ERR(cs)) {
 			i915_add_request_no_flush(req);
-			return ret;
+			return PTR_ERR(cs);
 		}
 
-		ring = req->ring;
-		intel_ring_emit(ring,
-				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+		*cs++ = MI_NOOP;
+		intel_ring_advance(req, cs);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9fba5664376e..0b9030c36625 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,22 +61,20 @@ void intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *cs;
 
 	cmd = MI_FLUSH;
 
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = cmd;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -84,9 +82,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *cs;
 
 	/*
 	 * read/write caches:
@@ -123,13 +119,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 			cmd |= MI_INVALIDATE_ISP;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = cmd;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -174,35 +170,33 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	int ret;
-
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* low dword */
-	intel_ring_emit(ring, 0); /* high dword */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
-
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	u32 *cs;
+
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(5);
+	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0; /* low dword */
+	*cs++ = 0; /* high dword */
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(5);
+	*cs++ = PIPE_CONTROL_QW_WRITE;
+	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -210,10 +204,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
+	u32 *cs, flags = 0;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -247,15 +240,15 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = flags;
+	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -263,20 +256,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring,
-			PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*cs++ = 0;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -284,11 +274,9 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
-	int ret;
+	u32 *cs, flags = 0;
 
 	/*
 	 * Ensure that any following seqno writes only happen when the render
@@ -332,15 +320,15 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = flags;
+	*cs++ = scratch_addr;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -349,20 +337,19 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = flags;
+	*cs++ = scratch_addr;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -659,8 +646,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *cs;
 	int ret, i;
 
 	if (w->count == 0)
@@ -670,18 +657,18 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, (w->count * 2 + 2));
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, (w->count * 2 + 2));
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*cs++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1276,7 +1263,7 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 	i915_vma_unpin_and_release(&dev_priv->semaphore);
 }
 
-static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
@@ -1287,23 +1274,22 @@ static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		*out++ = GFX_OP_PIPE_CONTROL(6);
-		*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
-			  PIPE_CONTROL_QW_WRITE |
-			  PIPE_CONTROL_CS_STALL);
-		*out++ = lower_32_bits(gtt_offset);
-		*out++ = upper_32_bits(gtt_offset);
-		*out++ = req->global_seqno;
-		*out++ = 0;
-		*out++ = (MI_SEMAPHORE_SIGNAL |
-			  MI_SEMAPHORE_TARGET(waiter->hw_id));
-		*out++ = 0;
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_CS_STALL;
+		*cs++ = lower_32_bits(gtt_offset);
+		*cs++ = upper_32_bits(gtt_offset);
+		*cs++ = req->global_seqno;
+		*cs++ = 0;
+		*cs++ = MI_SEMAPHORE_SIGNAL |
+			MI_SEMAPHORE_TARGET(waiter->hw_id);
+		*cs++ = 0;
 	}
 
-	return out;
+	return cs;
 }
 
-static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
@@ -1314,19 +1300,19 @@ static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
-		*out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
-		*out++ = upper_32_bits(gtt_offset);
-		*out++ = req->global_seqno;
-		*out++ = (MI_SEMAPHORE_SIGNAL |
-			  MI_SEMAPHORE_TARGET(waiter->hw_id));
-		*out++ = 0;
+		*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+		*cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+		*cs++ = upper_32_bits(gtt_offset);
+		*cs++ = req->global_seqno;
+		*cs++ = MI_SEMAPHORE_SIGNAL |
+			MI_SEMAPHORE_TARGET(waiter->hw_id);
+		*cs++ = 0;
 	}
 
-	return out;
+	return cs;
 }
 
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *engine;
@@ -1341,16 +1327,16 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
 
 		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
 		if (i915_mmio_reg_valid(mbox_reg)) {
-			*out++ = MI_LOAD_REGISTER_IMM(1);
-			*out++ = i915_mmio_reg_offset(mbox_reg);
-			*out++ = req->global_seqno;
+			*cs++ = MI_LOAD_REGISTER_IMM(1);
+			*cs++ = i915_mmio_reg_offset(mbox_reg);
+			*cs++ = req->global_seqno;
 			num_rings++;
 		}
 	}
 	if (num_rings & 1)
-		*out++ = MI_NOOP;
+		*cs++ = MI_NOOP;
 
-	return out;
+	return cs;
 }
 
 static void i9xx_submit_request(struct drm_i915_gem_request *request)
@@ -1362,15 +1348,14 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
 	I915_WRITE_TAIL(request->engine, request->tail);
 }
 
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
-				 u32 *out)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
 {
-	*out++ = MI_STORE_DWORD_INDEX;
-	*out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
-	*out++ = req->global_seqno;
-	*out++ = MI_USER_INTERRUPT;
+	*cs++ = MI_STORE_DWORD_INDEX;
+	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+	*cs++ = req->global_seqno;
+	*cs++ = MI_USER_INTERRUPT;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, cs);
 }
 
 static const int i9xx_emit_breadcrumb_sz = 4;
@@ -1383,34 +1368,32 @@ static const int i9xx_emit_breadcrumb_sz = 4;
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
-				      u32 *out)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
 {
 	return i9xx_emit_breadcrumb(req,
-				    req->engine->semaphore.signal(req, out));
+				    req->engine->semaphore.signal(req, cs));
 }
 
 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
-					u32 *out)
+					u32 *cs)
 {
 	struct intel_engine_cs *engine = req->engine;
 
 	if (engine->semaphore.signal)
-		out = engine->semaphore.signal(req, out);
-
-	*out++ = GFX_OP_PIPE_CONTROL(6);
-	*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
-			       PIPE_CONTROL_CS_STALL |
-			       PIPE_CONTROL_QW_WRITE);
-	*out++ = intel_hws_seqno_address(engine);
-	*out++ = 0;
-	*out++ = req->global_seqno;
+		cs = engine->semaphore.signal(req, cs);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+		PIPE_CONTROL_QW_WRITE;
+	*cs++ = intel_hws_seqno_address(engine);
+	*cs++ = 0;
+	*cs++ = req->global_seqno;
 	/* We're thrashing one dword of HWS. */
-	*out++ = 0;
-	*out++ = MI_USER_INTERRUPT;
-	*out++ = MI_NOOP;
+	*cs++ = 0;
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, cs);
 }
 
 static const int gen8_render_emit_breadcrumb_sz = 8;
@@ -1427,24 +1410,21 @@ static int
 gen8_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_WAIT |
-			MI_SEMAPHORE_GLOBAL_GTT |
-			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->global_seqno);
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_advance(ring);
+	*cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_SAD_GTE_SDD;
+	*cs++ = signal->global_seqno;
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	intel_ring_advance(req, cs);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
@@ -1461,28 +1441,27 @@ static int
 gen6_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
 	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
-	int ret;
+	u32 *cs;
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, dw1 | wait_mbox);
+	*cs++ = dw1 | wait_mbox;
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->global_seqno - 1);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = signal->global_seqno - 1;
+	*cs++ = 0;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1583,16 +1562,15 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 static int
 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_FLUSH;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 	return 0;
 }
 
@@ -1658,20 +1636,16 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 length,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			MI_BATCH_GTT |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
+		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
+	*cs++ = offset;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1685,59 +1659,56 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
-	int ret;
+	u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(ring, cs_offset);
-	intel_ring_emit(ring, 0xdeadbeef);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+	*cs++ = cs_offset;
+	*cs++ = 0xdeadbeef;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(req, 6 + 2);
-		if (ret)
-			return ret;
+		cs = intel_ring_begin(req, 6 + 2);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
 
 		/* Blit the batch (which has now all relocs applied) to the
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(ring,
-				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
-
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+		*cs++ = cs_offset;
+		*cs++ = 4096;
+		*cs++ = offset;
+
+		*cs++ = MI_FLUSH;
+		*cs++ = MI_NOOP;
+		intel_ring_advance(req, cs);
 
 		/* ... and execute it. */
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+		MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1747,17 +1718,16 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+		MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2165,7 +2135,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
-	int ret;
+	u32 *cs;
 
 	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
 
@@ -2178,9 +2148,9 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!request->engine->buffer);
 	request->ring = request->engine->buffer;
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(request, 0);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
@@ -2235,7 +2205,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	return 0;
 }
 
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
 	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
@@ -2243,6 +2213,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
+	u32 *cs;
 
 	total_bytes = bytes + req->reserved_space;
 
@@ -2269,7 +2240,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
-			return ret;
+			return ERR_PTR(ret);
 	}
 
 	if (unlikely(need_wrap)) {
@@ -2282,32 +2253,34 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		ring->space -= remain_actual;
 	}
 
+	GEM_BUG_ON(ring->tail > ring->size - bytes);
+	cs = ring->vaddr + ring->tail;
+	ring->tail += bytes;
 	ring->space -= bytes;
 	GEM_BUG_ON(ring->space < 0);
-	GEM_DEBUG_EXEC(ring->advance = ring->tail + bytes);
-	return 0;
+
+	return cs;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	int num_dwords =
-		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
-	int ret;
+		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	u32 *cs;
 
 	if (num_dwords == 0)
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(req, num_dwords);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, num_dwords);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	while (num_dwords--)
-		intel_ring_emit(ring, MI_NOOP);
+		*cs++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2351,13 +2324,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2379,16 +2350,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*cs++ = cmd;
+	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*cs++ = 0; /* upper addr */
+		*cs++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*cs++ = 0;
+		*cs++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 	return 0;
 }
 
@@ -2397,23 +2368,21 @@ gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
+		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2423,22 +2392,19 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
 		  u64 offset, u32 len,
 		  unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
+	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+		(dispatch_flags & I915_DISPATCH_RS ?
+		MI_BATCH_RESOURCE_STREAMER : 0);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*cs++ = offset;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2448,20 +2414,17 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
+	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+		0 : MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*cs++ = offset;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2470,13 +2433,11 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 
 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2497,17 +2458,16 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*cs++ = cmd;
+	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*cs++ = 0; /* upper addr */
+		*cs++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*cs++ = 0;
+		*cs++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index b122c3cc6dbb..fe31201b9671 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -144,7 +144,6 @@ struct intel_ring {
 
 	u32 head;
 	u32 tail;
-	GEM_DEBUG_DECL(u32 advance);
 
 	int space;
 	int size;
@@ -290,7 +289,7 @@ struct intel_engine_cs {
 #define I915_DISPATCH_PINNED BIT(1)
 #define I915_DISPATCH_RS     BIT(2)
 	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
-					   u32 *out);
+					   u32 *cs);
 	int		emit_breadcrumb_sz;
 
 	/* Pass the request to the hardware queue (e.g. directly into
@@ -373,7 +372,7 @@ struct intel_engine_cs {
 		/* AKA wait() */
 		int	(*sync_to)(struct drm_i915_gem_request *req,
 				   struct drm_i915_gem_request *signal);
-		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *out);
+		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
 	} semaphore;
 
 	/* Execlists */
@@ -495,21 +494,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
-int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
-{
-	*(uint32_t *)(ring->vaddr + ring->tail) = data;
-	ring->tail += 4;
-}
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
 
-static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
-{
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
-
-static inline void intel_ring_advance(struct intel_ring *ring)
+static inline void
+intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
 {
 	/* Dummy function.
 	 *
@@ -519,14 +509,16 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 	 * reserved for the command packet (i.e. the value passed to
 	 * intel_ring_begin()).
 	 */
-	GEM_DEBUG_BUG_ON(ring->tail != ring->advance);
+	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
 }
 
-static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
+static inline u32
+intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
 {
 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
-	u32 offset = addr - ring->vaddr;
-	return offset & (ring->size - 1);
+	u32 offset = addr - req->ring->vaddr;
+	GEM_BUG_ON(offset > req->ring->size);
+	return offset & (req->ring->size - 1);
 }
 
 int __intel_ring_space(int head, int tail, int size);
-- 
2.7.4


* ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev10)
  2017-02-08 13:10 [PATCH] drm/i915: Emit to ringbuffer directly Tvrtko Ursulin
                   ` (5 preceding siblings ...)
  2017-02-13 13:02 ` ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev9) Patchwork
@ 2017-02-13 13:52 ` Patchwork
  2017-02-14 13:52 ` ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev11) Patchwork
  7 siblings, 0 replies; 28+ messages in thread
From: Patchwork @ 2017-02-13 13:52 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Emit to ringbuffer directly (rev10)
URL   : https://patchwork.freedesktop.org/series/12186/
State : success

== Summary ==

Series 12186v10 drm/i915: Emit to ringbuffer directly
https://patchwork.freedesktop.org/api/1.0/series/12186/revisions/10/mbox/

fi-bdw-5557u     total:252  pass:241  dwarn:0   dfail:0   fail:0   skip:11 
fi-bsw-n3050     total:252  pass:213  dwarn:0   dfail:0   fail:0   skip:39 
fi-bxt-j4205     total:252  pass:233  dwarn:0   dfail:0   fail:0   skip:19 
fi-bxt-t5700     total:83   pass:70   dwarn:0   dfail:0   fail:0   skip:12 
fi-byt-j1900     total:252  pass:225  dwarn:0   dfail:0   fail:0   skip:27 
fi-byt-n2820     total:252  pass:221  dwarn:0   dfail:0   fail:0   skip:31 
fi-hsw-4770      total:252  pass:236  dwarn:0   dfail:0   fail:0   skip:16 
fi-hsw-4770r     total:252  pass:236  dwarn:0   dfail:0   fail:0   skip:16 
fi-ilk-650       total:252  pass:202  dwarn:0   dfail:0   fail:0   skip:50 
fi-ivb-3520m     total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18 
fi-ivb-3770      total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18 
fi-kbl-7500u     total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18 
fi-skl-6260u     total:252  pass:242  dwarn:0   dfail:0   fail:0   skip:10 
fi-skl-6700hq    total:252  pass:235  dwarn:0   dfail:0   fail:0   skip:17 
fi-skl-6700k     total:252  pass:230  dwarn:4   dfail:0   fail:0   skip:18 
fi-skl-6770hq    total:252  pass:242  dwarn:0   dfail:0   fail:0   skip:10 
fi-snb-2520m     total:252  pass:224  dwarn:0   dfail:0   fail:0   skip:28 
fi-snb-2600      total:252  pass:223  dwarn:0   dfail:0   fail:0   skip:29 

58294e4063fcc710bd7bc97349c9753f1978d5c1 drm-tip: 2017y-02m-13d-11h-20m-27s UTC integration manifest
28debe1 drm/i915: Emit to ringbuffer directly

== Logs ==

For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_3790/

* Re: [PATCH v9] drm/i915: Emit to ringbuffer directly
  2017-02-13 13:06               ` [PATCH v9] " Tvrtko Ursulin
@ 2017-02-13 21:09                 ` Chris Wilson
  2017-02-14 11:32                   ` [PATCH v10] " Tvrtko Ursulin
  0 siblings, 1 reply; 28+ messages in thread
From: Chris Wilson @ 2017-02-13 21:09 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: Intel-gfx

On Mon, Feb 13, 2017 at 01:06:42PM +0000, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> This removes the usage of intel_ring_emit in favour of
> directly writing to the ring buffer.
> 
> intel_ring_emit was preventing the compiler from optimising
> fetch and increment of the current ring buffer pointer and
> therefore generating very verbose code for every write.
> 
> It had no useful purpose since all ringbuffer operations
> are started and ended with intel_ring_begin and
> intel_ring_advance respectively, with no bail out in the
> middle possible, so it is fine to increment the tail in
> intel_ring_begin and let the code manage the pointer
> itself.
> 
> Useless instruction removal amounts to approximately
> two and a half kilobytes of saved text on my build.
> 
> Not sure if this has any measurable performance
> implications but executing a ton of useless instructions
> on fast paths cannot be good.
> 
> Patch is not fully polished, but it compiles and runs
> on Gen9 at least.
> 
> v2:
>  * Change return from intel_ring_begin to error pointer by
>    popular demand.
>  * Move tail increment to intel_ring_advance to enable some
>    error checking.
> 
> v3:
>  * Move tail advance back into intel_ring_begin.
>  * Rebase and tidy.
> 
> v4:
>  * Complete rebase after a few months since v3.
> 
> v5:
>  * Remove unnecessary cast and fix !debug compile. (Chris Wilson)
> 
> v6:
>  * Make intel_ring_offset take request as well.
>  * Fix recording of request postfix plus a sprinkle of asserts.
>    (Chris Wilson)
> 
> v7:
>  * Use intel_ring_offset to get the postfix. (Chris Wilson)
>  * Convert GVT code as well.
> 
> v8:
>  * Rename *out++ to *cs++.
> 
> v9:
>  * Fix the out to cs conversion in GVT.

LGTM. Just to warn you, I've added a new intel_ring_begin caller
in drivers/gpu/drm/i915/selftests/i915_gem_coherency.c.
 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Zhi Wang <zhi.a.wang@intel.com>
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v7)
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* [PATCH v10] drm/i915: Emit to ringbuffer directly
  2017-02-13 21:09                 ` Chris Wilson
@ 2017-02-14 11:32                   ` Tvrtko Ursulin
  2017-02-14 12:03                     ` Joonas Lahtinen
  2017-02-14 12:06                     ` Chris Wilson
  0 siblings, 2 replies; 28+ messages in thread
From: Tvrtko Ursulin @ 2017-02-14 11:32 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

This removes the usage of intel_ring_emit in favour of
directly writing to the ring buffer.

intel_ring_emit was preventing the compiler from optimising
fetch and increment of the current ring buffer pointer and
therefore generating very verbose code for every write.

It had no useful purpose since all ringbuffer operations
are started and ended with intel_ring_begin and
intel_ring_advance respectively, with no bail out in the
middle possible, so it is fine to increment the tail in
intel_ring_begin and let the code manage the pointer
itself.
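
For illustration, a converted call site now takes roughly this shape
(a minimal sketch using only the helpers as changed by this patch;
the function name and dword count are invented for the example):

	static int emit_two_noops(struct drm_i915_gem_request *req)
	{
		u32 *cs;

		/* intel_ring_begin now advances the tail itself and
		 * returns a pointer into the ring, or an error pointer
		 * if reserving space failed. */
		cs = intel_ring_begin(req, 2);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/* Plain post-increment stores replace intel_ring_emit,
		 * letting the compiler keep cs in a register. */
		*cs++ = MI_NOOP;
		*cs++ = MI_NOOP;

		/* Reduced to a debug assert that exactly the reserved
		 * dwords were written. */
		intel_ring_advance(req, cs);

		return 0;
	}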

Useless instruction removal amounts to approximately
two and a half kilobytes of saved text on my build.

Not sure if this has any measurable performance
implications but executing a ton of useless instructions
on fast paths cannot be good.

Patch is not fully polished, but it compiles and runs
on Gen9 at least.

v2:
 * Change return from intel_ring_begin to error pointer by
   popular demand.
 * Move tail increment to intel_ring_advance to enable some
   error checking.

v3:
 * Move tail advance back into intel_ring_begin.
 * Rebase and tidy.

v4:
 * Complete rebase after a few months since v3.

v5:
 * Remove unnecessary cast and fix !debug compile. (Chris Wilson)

v6:
 * Make intel_ring_offset take request as well.
 * Fix recording of request postfix plus a sprinkle of asserts.
   (Chris Wilson)

v7:
 * Use intel_ring_offset to get the postfix. (Chris Wilson)
 * Convert GVT code as well.

v8:
 * Rename *out++ to *cs++.

v9:
 * Fix the out to cs conversion in GVT.

v10:
 * Rebase for new intel_ring_begin in selftests.
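
As a sketch of the v6/v7 postfix handling above: the breadcrumb
space is reserved up front, so the recorded postfix is simply the
offset of the returned write pointer (condensed from the
__i915_add_request hunk below; the helper name is invented):

	static u32 record_postfix(struct drm_i915_gem_request *request)
	{
		u32 *cs;

		/* Breadcrumb space was reserved when the request was
		 * allocated, so intel_ring_begin cannot fail here. */
		cs = intel_ring_begin(request,
				      request->engine->emit_breadcrumb_sz);
		GEM_BUG_ON(IS_ERR(cs));

		/* Derive the postfix from the write pointer instead of
		 * open-coding ring->tail arithmetic. */
		return intel_ring_offset(request, cs);
	}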

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Zhi Wang <zhi.a.wang@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v9)
---
 drivers/gpu/drm/i915/gvt/cmd_parser.c              |  36 +-
 drivers/gpu/drm/i915/i915_gem_context.c            |  76 ++-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c         |  40 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c                |  69 ++-
 drivers/gpu/drm/i915/i915_gem_request.c            |   8 +-
 drivers/gpu/drm/i915/intel_display.c               | 131 ++---
 drivers/gpu/drm/i915/intel_lrc.c                   | 207 ++++---
 drivers/gpu/drm/i915/intel_mocs.c                  |  51 +-
 drivers/gpu/drm/i915/intel_overlay.c               |  79 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.c            | 592 ++++++++++-----------
 drivers/gpu/drm/i915/intel_ringbuffer.h            |  30 +-
 .../gpu/drm/i915/selftests/i915_gem_coherency.c    |  33 +-
 12 files changed, 625 insertions(+), 727 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 7ee1db7447ce..59c3045af252 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -1512,7 +1512,7 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
 		len += copy_len;
 		gma += copy_len;
 	}
-	return 0;
+	return len;
 }
 
 
@@ -1626,7 +1626,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
 	ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
 			      gma, gma + bb_size,
 			      dst);
-	if (ret) {
+	if (ret < 0) {
 		gvt_err("fail to copy guest ring buffer\n");
 		goto unmap_src;
 	}
@@ -2590,11 +2590,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	int ring_id = workload->ring_id;
-	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
-	struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
 	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
-	unsigned int copy_len = 0;
+	u32 *cs;
 	int ret;
 
 	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2608,36 +2605,33 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
 	gma_top = workload->rb_start + guest_rb_size;
 
 	/* allocate shadow ring buffer */
-	ret = intel_ring_begin(workload->req, workload->rb_len / 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* get shadow ring buffer va */
-	workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;
+	workload->shadow_ring_buffer_va = cs;
 
 	/* head > tail --> copy head <-> top */
 	if (gma_head > gma_tail) {
 		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
-				gma_head, gma_top,
-				workload->shadow_ring_buffer_va);
-		if (ret) {
+				      gma_head, gma_top, cs);
+		if (ret < 0) {
 			gvt_err("fail to copy guest ring buffer\n");
 			return ret;
 		}
-		copy_len = gma_top - gma_head;
+		cs += ret / sizeof(u32);
 		gma_head = workload->rb_start;
 	}
 
 	/* copy head or start <-> tail */
-	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
-			gma_head, gma_tail,
-			workload->shadow_ring_buffer_va + copy_len);
-	if (ret) {
+	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail, cs);
+	if (ret < 0) {
 		gvt_err("fail to copy guest ring buffer\n");
 		return ret;
 	}
-	ring->tail += workload->rb_len;
-	intel_ring_advance(ring);
+	cs += ret / sizeof(u32);
+	intel_ring_advance(workload->req, cs);
 	return 0;
 }
 
@@ -2691,7 +2685,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 				wa_ctx->workload->vgpu->gtt.ggtt_mm,
 				guest_gma, guest_gma + ctx_size,
 				map);
-	if (ret) {
+	if (ret < 0) {
 		gvt_err("fail to copy guest indirect ctx\n");
 		goto unmap_src;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c73bf02870d5..9037356536dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -596,10 +596,9 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	enum intel_engine_id id;
-	u32 flags = hw_flags | MI_MM_SPACE_GTT;
+	u32 *cs, flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
 		/* Use an extended w/a on ivb+ if signalling from other rings */
 		i915.semaphores ?
@@ -629,99 +628,92 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	if (INTEL_GEN(dev_priv) >= 7)
 		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(ring,
-						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(ring,
-						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*cs++ = i915_mmio_reg_offset(
+					   RING_PSMI_CTL(signaller->mmio_base));
+				*cs++ = _MASKED_BIT_ENABLE(
+						GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 		}
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_SET_CONTEXT);
-	intel_ring_emit(ring,
-			i915_ggtt_offset(req->ctx->engine[RCS].state) | flags);
+	*cs++ = MI_NOOP;
+	*cs++ = MI_SET_CONTEXT;
+	*cs++ = i915_ggtt_offset(req->ctx->engine[RCS].state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_NOOP;
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(ring,
-					MI_LOAD_REGISTER_IMM(num_rings));
+			*cs++ = MI_LOAD_REGISTER_IMM(num_rings);
 			for_each_engine(signaller, dev_priv, id) {
 				if (signaller == engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(ring, last_reg);
-				intel_ring_emit(ring,
-						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+				*cs++ = i915_mmio_reg_offset(last_reg);
+				*cs++ = _MASKED_BIT_DISABLE(
+						GEN6_PSMI_SLEEP_MSG_DISABLE);
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(ring,
-					MI_STORE_REGISTER_MEM |
-					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(ring, last_reg);
-			intel_ring_emit(ring,
-					i915_ggtt_offset(engine->scratch));
-			intel_ring_emit(ring, MI_NOOP);
+			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+			*cs++ = i915_mmio_reg_offset(last_reg);
+			*cs++ = i915_ggtt_offset(engine->scratch);
+			*cs++ = MI_NOOP;
 		}
-		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return ret;
 }
 
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
-	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ring *ring = req->ring;
-	int i, ret;
+	u32 *cs, *remap_info = req->i915->l3_parity.remap_info[slice];
+	int i;
 
 	if (!remap_info)
 		return 0;
 
-	ret = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, GEN7_L3LOG_SIZE/4 * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/*
 	 * Note: We do not worry about the concurrent register cacheline hang
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
-		intel_ring_emit(ring, remap_info[i]);
+		*cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
+		*cs++ = remap_info[i];
 	}
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 111b7f8c617a..da0846fe2ad6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1336,25 +1336,25 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret, i;
+	u32 *cs;
+	int i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
 
-	ret = intel_ring_begin(req, 4 * 3);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4 * 3);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(ring, 0);
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = i915_mmio_reg_offset(GEN7_SO_WRITE_OFFSET(i));
+		*cs++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1415,7 +1415,7 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
-	u32 instp_mask;
+	u32 instp_mask, *cs;
 	int ret;
 
 	ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas);
@@ -1461,17 +1461,15 @@ execbuf_submit(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ring *ring = params->request->ring;
-
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
+		cs = intel_ring_begin(params->request, 4);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
+
+		*cs++ = MI_NOOP;
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = i915_mmio_reg_offset(INSTPM);
+		*cs++ = instp_mask << 16 | instp_mode;
+		intel_ring_advance(params->request, cs);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index eebbffdb9a0b..cb5ea5d38f11 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -686,23 +686,22 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
-	int ret;
+	u32 *cs;
 
 	BUG_ON(entry >= 4);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(ring, upper_32_bits(addr));
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(ring, lower_32_bits(addr));
-	intel_ring_advance(ring);
+	*cs++ = MI_LOAD_REGISTER_IMM(1);
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
+	*cs++ = upper_32_bits(addr);
+	*cs++ = MI_LOAD_REGISTER_IMM(1);
+	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
+	*cs++ = lower_32_bits(addr);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1730,8 +1729,8 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *cs;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1739,17 +1738,17 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_LOAD_REGISTER_IMM(2);
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*cs++ = PP_DIR_DCLV_2G;
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*cs++ = get_pd_offset(ppgtt);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1757,8 +1756,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
+	u32 *cs;
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
@@ -1766,17 +1765,17 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(ring, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(ring, get_pd_offset(ppgtt));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = MI_LOAD_REGISTER_IMM(2);
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
+	*cs++ = PP_DIR_DCLV_2G;
+	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
+	*cs++ = get_pd_offset(ppgtt);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 24571a5289a4..2f6cfa47dc61 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -824,6 +824,7 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	struct intel_ring *ring = request->ring;
 	struct intel_timeline *timeline = request->timeline;
 	struct drm_i915_gem_request *prev;
+	u32 *cs;
 	int err;
 
 	lockdep_assert_held(&request->i915->drm.struct_mutex);
@@ -862,10 +863,9 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 	 * GPU processing the request, we never over-estimate the
 	 * position of the ring's HEAD.
 	 */
-	err = intel_ring_begin(request, engine->emit_breadcrumb_sz);
-	GEM_BUG_ON(err);
-	request->postfix = ring->tail;
-	ring->tail += engine->emit_breadcrumb_sz * sizeof(u32);
+	cs = intel_ring_begin(request, engine->emit_breadcrumb_sz);
+	GEM_BUG_ON(IS_ERR(cs));
+	request->postfix = intel_ring_offset(request, cs);
 
 	/* Seal the request and mark it as pending execution. Note that
 	 * we may inspect this state, without holding any locks, during
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 88b7d96c46a4..9d14dc384292 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -10158,14 +10158,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *cs;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* Can't queue multiple flips, so wait for the previous
 	 * one to finish before executing the next.
@@ -10174,13 +10172,12 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, 0); /* aux display base address, unused */
+	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*cs++ = MI_NOOP;
+	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0];
+	*cs++ = intel_crtc->flip_work->gtt_offset;
+	*cs++ = 0; /* aux display base address, unused */
 
 	return 0;
 }
@@ -10192,26 +10189,23 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	u32 flip_mask;
-	int ret;
+	u32 flip_mask, *cs;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	if (intel_crtc->plane)
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_WAIT_FOR_EVENT | flip_mask;
+	*cs++ = MI_NOOP;
+	*cs++ = MI_DISPLAY_FLIP_I915 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0];
+	*cs++ = intel_crtc->flip_work->gtt_offset;
+	*cs++ = MI_NOOP;
 
 	return 0;
 }
@@ -10223,25 +10217,22 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* i965+ uses the linear or tiled offsets from the
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0]);
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
-			intel_fb_modifier_to_tiling(fb->modifier));
+	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0];
+	*cs++ = intel_crtc->flip_work->gtt_offset |
+		intel_fb_modifier_to_tiling(fb->modifier);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
 	 * untested on non-native modes, so ignore it for now.
@@ -10249,7 +10240,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*cs++ = pf | pipesrc;
 
 	return 0;
 }
@@ -10261,21 +10252,17 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t pf, pipesrc;
-	int ret;
+	u32 pf, pipesrc, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP |
-			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	*cs++ = MI_DISPLAY_FLIP | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane);
+	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*cs++ = intel_crtc->flip_work->gtt_offset;
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -10285,7 +10272,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(ring, pf | pipesrc);
+	*cs++ = pf | pipesrc;
 
 	return 0;
 }
@@ -10298,9 +10285,8 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 uint32_t flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-	uint32_t plane_bit = 0;
+	u32 *cs, plane_bit = 0;
 	int len, ret;
 
 	switch (intel_crtc->plane) {
@@ -10344,9 +10330,9 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, len);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* Unmask the flip-done completion message. Note that the bspec says that
 	 * we should do this for both the BCS and RCS, and that we must not unmask
@@ -10358,31 +10344,28 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
 	if (req->engine->id == RCS) {
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
-					  DERRMR_PIPEB_PRI_FLIP_DONE |
-					  DERRMR_PIPEC_PRI_FLIP_DONE));
+		*cs++ = MI_LOAD_REGISTER_IMM(1);
+		*cs++ = i915_mmio_reg_offset(DERRMR);
+		*cs++ = ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+			  DERRMR_PIPEB_PRI_FLIP_DONE |
+			  DERRMR_PIPEC_PRI_FLIP_DONE);
 		if (IS_GEN8(dev_priv))
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
-					      MI_SRM_LRM_GLOBAL_GTT);
+			*cs++ = MI_STORE_REGISTER_MEM_GEN8 |
+				MI_SRM_LRM_GLOBAL_GTT;
 		else
-			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
-					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(ring, DERRMR);
-		intel_ring_emit(ring,
-				i915_ggtt_offset(req->engine->scratch) + 256);
+			*cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
+		*cs++ = i915_mmio_reg_offset(DERRMR);
+		*cs++ = i915_ggtt_offset(req->engine->scratch) + 256;
 		if (IS_GEN8(dev_priv)) {
-			intel_ring_emit(ring, 0);
-			intel_ring_emit(ring, MI_NOOP);
+			*cs++ = 0;
+			*cs++ = MI_NOOP;
 		}
 	}
 
-	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(ring, fb->pitches[0] |
-			intel_fb_modifier_to_tiling(fb->modifier));
-	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(ring, (MI_NOOP));
+	*cs++ = MI_DISPLAY_FLIP_I915 | plane_bit;
+	*cs++ = fb->pitches[0] | intel_fb_modifier_to_tiling(fb->modifier);
+	*cs++ = intel_crtc->flip_work->gtt_offset;
+	*cs++ = MI_NOOP;
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index af717e1356a9..f567d4b08863 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -834,6 +834,7 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
+	u32 *cs;
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -858,9 +859,11 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 			goto err;
 	}
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
+	cs = intel_ring_begin(request, 0);
+	if (IS_ERR(cs)) {
+		ret = PTR_ERR(cs);
 		goto err_unreserve;
+	}
 
 	if (!ce->initialised) {
 		ret = engine->init_context(request);
@@ -889,9 +892,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	int ret, i;
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *cs;
+	int ret, i;
 
 	if (w->count == 0)
 		return 0;
@@ -900,18 +903,18 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, w->count * 2 + 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, w->count * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*cs++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1404,27 +1407,27 @@ static void reset_common_ring(struct intel_engine_cs *engine,
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
-	int i, ret;
+	u32 *cs;
+	int i;
 
-	ret = intel_ring_begin(req, num_lri_cmds * 2 + 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	*cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
-		intel_ring_emit(ring, upper_32_bits(pd_daddr));
-		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
-		intel_ring_emit(ring, lower_32_bits(pd_daddr));
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
+		*cs++ = upper_32_bits(pd_daddr);
+		*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
+		*cs++ = lower_32_bits(pd_daddr);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1433,8 +1436,8 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
+	u32 *cs;
 	int ret;
 
 	/* Don't rely in hw updating PDPs, specially in lite-restore.
@@ -1455,19 +1458,17 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
-			(ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
+		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1488,13 +1489,11 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 
 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
-	struct intel_ring *ring = request->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *cs;
 
-	ret = intel_ring_begin(request, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(request, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	cmd = MI_FLUSH_DW + 1;
 
@@ -1511,13 +1510,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR |
-			MI_FLUSH_DW_USE_GTT);
-	intel_ring_emit(ring, 0); /* upper addr */
-	intel_ring_emit(ring, 0); /* value */
-	intel_ring_advance(ring);
+	*cs++ = cmd;
+	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
+	*cs++ = 0; /* upper addr */
+	*cs++ = 0; /* value */
+	intel_ring_advance(request, cs);
 
 	return 0;
 }
@@ -1525,13 +1522,11 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 mode)
 {
-	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr =
 		i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
-	u32 flags = 0;
-	int ret;
+	u32 *cs, flags = 0;
 	int len;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -1573,45 +1568,45 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	if (dc_flush_wa)
 		len += 12;
 
-	ret = intel_ring_begin(request, len);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(request, len);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	if (vf_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
 	}
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_DC_FLUSH_ENABLE;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
 	}
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = flags;
+	*cs++ = scratch_addr;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
 
 	if (dc_flush_wa) {
-		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, 0);
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_CS_STALL;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
+		*cs++ = 0;
 	}
 
-	intel_ring_advance(ring);
+	intel_ring_advance(request, cs);
 
 	return 0;
 }
@@ -1621,34 +1616,33 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
  * used as a workaround for not being allowed to do lite
  * restore with HEAD==TAIL (WaIdleLiteRestore).
  */
-static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *out)
+static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
 {
-	*out++ = MI_NOOP;
-	*out++ = MI_NOOP;
-	request->wa_tail = intel_ring_offset(request->ring, out);
+	*cs++ = MI_NOOP;
+	*cs++ = MI_NOOP;
+	request->wa_tail = intel_ring_offset(request, cs);
 }
 
-static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request,
-				 u32 *out)
+static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
 {
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-	*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
-	*out++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
-	*out++ = 0;
-	*out++ = request->global_seqno;
-	*out++ = MI_USER_INTERRUPT;
-	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+	*cs++ = intel_hws_seqno_address(request->engine) | MI_FLUSH_DW_USE_GTT;
+	*cs++ = 0;
+	*cs++ = request->global_seqno;
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+	request->tail = intel_ring_offset(request, cs);
 
-	gen8_emit_wa_tail(request, out);
+	gen8_emit_wa_tail(request, cs);
 }
 
 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
 
 static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
-					u32 *out)
+					u32 *cs)
 {
 	/* We're using qword write, seqno should be aligned to 8 bytes. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
@@ -1657,20 +1651,19 @@ static void gen8_emit_breadcrumb_render(struct drm_i915_gem_request *request,
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	*out++ = GFX_OP_PIPE_CONTROL(6);
-	*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
-		  PIPE_CONTROL_CS_STALL |
-		  PIPE_CONTROL_QW_WRITE);
-	*out++ = intel_hws_seqno_address(request->engine);
-	*out++ = 0;
-	*out++ = request->global_seqno;
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+		PIPE_CONTROL_QW_WRITE;
+	*cs++ = intel_hws_seqno_address(request->engine);
+	*cs++ = 0;
+	*cs++ = request->global_seqno;
 	/* We're thrashing one dword of HWS. */
-	*out++ = 0;
-	*out++ = MI_USER_INTERRUPT;
-	*out++ = MI_NOOP;
-	request->tail = intel_ring_offset(request->ring, out);
+	*cs++ = 0;
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
+	request->tail = intel_ring_offset(request, cs);
 
-	gen8_emit_wa_tail(request, out);
+	gen8_emit_wa_tail(request, cs);
 }
 
 static const int gen8_emit_breadcrumb_render_sz = 8 + WA_TAIL_DWORDS;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 773e36253e7c..92e461c68385 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,23 +276,22 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
-	int ret;
+	u32 *cs;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2 + 2 * GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES);
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[index].control_value);
+		*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*cs++ = table->table[index].control_value;
 	}
 
 	/*
@@ -304,12 +303,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ring, mocs_register(engine, index));
-		intel_ring_emit(ring, table->table[0].control_value);
+		*cs++ = i915_mmio_reg_offset(mocs_register(engine, index));
+		*cs++ = table->table[0].control_value;
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -336,29 +335,27 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ring *ring = req->ring;
 	unsigned int i;
-	int ret;
+	u32 *cs;
 
 	if (WARN_ON(table->size > GEN9_NUM_MOCS_ENTRIES))
 		return -ENODEV;
 
-	ret = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2 + GEN9_NUM_MOCS_ENTRIES);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
+	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2);
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*cs++ = l3cc_combine(table, 2 * i, 2 * i + 1);
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*cs++ = l3cc_combine(table, 2 * i, 0);
 		i++;
 	}
 
@@ -368,12 +365,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
+		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i));
+		*cs++ = l3cc_combine(table, 0, 0);
 	}
 
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 0608fad7f593..5ef9f5bfb92c 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -267,8 +267,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	int ret;
+	u32 *cs;
 
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
@@ -277,10 +276,10 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret) {
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(cs);
 	}
 
 	overlay->active = true;
@@ -288,12 +287,11 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	if (IS_I830(dev_priv))
 		i830_overlay_clock_gating(dev_priv, false);
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+	*cs++ = overlay->flip_addr | OFC_UPDATE;
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -326,10 +324,8 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
-	u32 tmp;
-	int ret;
+	u32 tmp, *cs;
 
 	WARN_ON(!overlay->active);
 
@@ -345,16 +341,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 2);
-	if (ret) {
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(cs);
 	}
 
-	ring = req->ring;
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_advance(ring);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*cs++ = flip_addr;
+	intel_ring_advance(req, cs);
 
 	intel_overlay_flip_prepare(overlay, vma);
 
@@ -408,9 +403,7 @@ static void intel_overlay_off_tail(struct i915_gem_active *active,
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_request *req;
-	struct intel_ring *ring;
-	u32 flip_addr = overlay->flip_addr;
-	int ret;
+	u32 *cs, flip_addr = overlay->flip_addr;
 
 	WARN_ON(!overlay->active);
 
@@ -424,25 +417,23 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret) {
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs)) {
 		i915_add_request_no_flush(req);
-		return ret;
+		return PTR_ERR(cs);
 	}
 
-	ring = req->ring;
-
 	/* wait for overlay to go idle */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+	*cs++ = flip_addr;
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
 	/* turn overlay off */
-	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-	intel_ring_emit(ring, flip_addr);
-	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	*cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+	*cs++ = flip_addr;
+	*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	intel_overlay_flip_prepare(overlay, NULL);
 
@@ -465,6 +456,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
+	u32 *cs;
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -478,23 +470,20 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ring *ring;
 
 		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-		ret = intel_ring_begin(req, 2);
-		if (ret) {
+		cs = intel_ring_begin(req, 2);
+		if (IS_ERR(cs)) {
 			i915_add_request_no_flush(req);
-			return ret;
+			return PTR_ERR(cs);
 		}
 
-		ring = req->ring;
-		intel_ring_emit(ring,
-				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+		*cs++ = MI_NOOP;
+		intel_ring_advance(req, cs);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9fba5664376e..0b9030c36625 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -61,22 +61,20 @@ void intel_ring_update_space(struct intel_ring *ring)
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *cs;
 
 	cmd = MI_FLUSH;
 
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = cmd;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -84,9 +82,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cmd;
-	int ret;
+	u32 cmd, *cs;
 
 	/*
 	 * read/write caches:
@@ -123,13 +119,13 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 			cmd |= MI_INVALIDATE_ISP;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = cmd;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -174,35 +170,33 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	int ret;
-
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0); /* low dword */
-	intel_ring_emit(ring, 0); /* high dword */
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
-
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	u32 *cs;
+
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(5);
+	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0; /* low dword */
+	*cs++ = 0; /* high dword */
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
+
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(5);
+	*cs++ = PIPE_CONTROL_QW_WRITE;
+	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -210,10 +204,9 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
+	u32 *cs, flags = 0;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -247,15 +240,15 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = flags;
+	*cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -263,20 +256,17 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring,
-			PIPE_CONTROL_CS_STALL |
-			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+	*cs++ = 0;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -284,11 +274,9 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
-	u32 flags = 0;
-	int ret;
+	u32 *cs, flags = 0;
 
 	/*
 	 * Ensure that any following seqno writes only happen when the render
@@ -332,15 +320,15 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 		gen7_render_ring_cs_stall_wa(req);
 	}
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(4);
+	*cs++ = flags;
+	*cs++ = scratch_addr;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -349,20 +337,19 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(ring, flags);
-	intel_ring_emit(ring, scratch_addr);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, 0);
-	intel_ring_advance(ring);
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = flags;
+	*cs++ = scratch_addr;
+	*cs++ = 0;
+	*cs++ = 0;
+	*cs++ = 0;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -659,8 +646,8 @@ static void reset_ring_common(struct intel_engine_cs *engine,
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
+	u32 *cs;
 	int ret, i;
 
 	if (w->count == 0)
@@ -670,18 +657,18 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	ret = intel_ring_begin(req, (w->count * 2 + 2));
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, (w->count * 2 + 2));
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
+	*cs++ = MI_LOAD_REGISTER_IMM(w->count);
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ring, w->reg[i].addr);
-		intel_ring_emit(ring, w->reg[i].value);
+		*cs++ = i915_mmio_reg_offset(w->reg[i].addr);
+		*cs++ = w->reg[i].value;
 	}
-	intel_ring_emit(ring, MI_NOOP);
+	*cs++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
@@ -1276,7 +1263,7 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 	i915_vma_unpin_and_release(&dev_priv->semaphore);
 }
 
-static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
@@ -1287,23 +1274,22 @@ static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		*out++ = GFX_OP_PIPE_CONTROL(6);
-		*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
-			  PIPE_CONTROL_QW_WRITE |
-			  PIPE_CONTROL_CS_STALL);
-		*out++ = lower_32_bits(gtt_offset);
-		*out++ = upper_32_bits(gtt_offset);
-		*out++ = req->global_seqno;
-		*out++ = 0;
-		*out++ = (MI_SEMAPHORE_SIGNAL |
-			  MI_SEMAPHORE_TARGET(waiter->hw_id));
-		*out++ = 0;
+		*cs++ = GFX_OP_PIPE_CONTROL(6);
+		*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_CS_STALL;
+		*cs++ = lower_32_bits(gtt_offset);
+		*cs++ = upper_32_bits(gtt_offset);
+		*cs++ = req->global_seqno;
+		*cs++ = 0;
+		*cs++ = MI_SEMAPHORE_SIGNAL |
+			MI_SEMAPHORE_TARGET(waiter->hw_id);
+		*cs++ = 0;
 	}
 
-	return out;
+	return cs;
 }
 
-static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
@@ -1314,19 +1300,19 @@ static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		*out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
-		*out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
-		*out++ = upper_32_bits(gtt_offset);
-		*out++ = req->global_seqno;
-		*out++ = (MI_SEMAPHORE_SIGNAL |
-			  MI_SEMAPHORE_TARGET(waiter->hw_id));
-		*out++ = 0;
+		*cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+		*cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+		*cs++ = upper_32_bits(gtt_offset);
+		*cs++ = req->global_seqno;
+		*cs++ = MI_SEMAPHORE_SIGNAL |
+			MI_SEMAPHORE_TARGET(waiter->hw_id);
+		*cs++ = 0;
 	}
 
-	return out;
+	return cs;
 }
 
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
 {
 	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *engine;
@@ -1341,16 +1327,16 @@ static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
 
 		mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
 		if (i915_mmio_reg_valid(mbox_reg)) {
-			*out++ = MI_LOAD_REGISTER_IMM(1);
-			*out++ = i915_mmio_reg_offset(mbox_reg);
-			*out++ = req->global_seqno;
+			*cs++ = MI_LOAD_REGISTER_IMM(1);
+			*cs++ = i915_mmio_reg_offset(mbox_reg);
+			*cs++ = req->global_seqno;
 			num_rings++;
 		}
 	}
 	if (num_rings & 1)
-		*out++ = MI_NOOP;
+		*cs++ = MI_NOOP;
 
-	return out;
+	return cs;
 }
 
 static void i9xx_submit_request(struct drm_i915_gem_request *request)
@@ -1362,15 +1348,14 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
 	I915_WRITE_TAIL(request->engine, request->tail);
 }
 
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
-				 u32 *out)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
 {
-	*out++ = MI_STORE_DWORD_INDEX;
-	*out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
-	*out++ = req->global_seqno;
-	*out++ = MI_USER_INTERRUPT;
+	*cs++ = MI_STORE_DWORD_INDEX;
+	*cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+	*cs++ = req->global_seqno;
+	*cs++ = MI_USER_INTERRUPT;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, cs);
 }
 
 static const int i9xx_emit_breadcrumb_sz = 4;
@@ -1383,34 +1368,32 @@ static const int i9xx_emit_breadcrumb_sz = 4;
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
-				      u32 *out)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
 {
 	return i9xx_emit_breadcrumb(req,
-				    req->engine->semaphore.signal(req, out));
+				    req->engine->semaphore.signal(req, cs));
 }
 
 static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
-					u32 *out)
+					u32 *cs)
 {
 	struct intel_engine_cs *engine = req->engine;
 
 	if (engine->semaphore.signal)
-		out = engine->semaphore.signal(req, out);
-
-	*out++ = GFX_OP_PIPE_CONTROL(6);
-	*out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
-			       PIPE_CONTROL_CS_STALL |
-			       PIPE_CONTROL_QW_WRITE);
-	*out++ = intel_hws_seqno_address(engine);
-	*out++ = 0;
-	*out++ = req->global_seqno;
+		cs = engine->semaphore.signal(req, cs);
+
+	*cs++ = GFX_OP_PIPE_CONTROL(6);
+	*cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+		PIPE_CONTROL_QW_WRITE;
+	*cs++ = intel_hws_seqno_address(engine);
+	*cs++ = 0;
+	*cs++ = req->global_seqno;
 	/* We're thrashing one dword of HWS. */
-	*out++ = 0;
-	*out++ = MI_USER_INTERRUPT;
-	*out++ = MI_NOOP;
+	*cs++ = 0;
+	*cs++ = MI_USER_INTERRUPT;
+	*cs++ = MI_NOOP;
 
-	req->tail = intel_ring_offset(req->ring, out);
+	req->tail = intel_ring_offset(req, cs);
 }
 
 static const int gen8_render_emit_breadcrumb_sz = 8;
@@ -1427,24 +1410,21 @@ static int
 gen8_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_WAIT |
-			MI_SEMAPHORE_GLOBAL_GTT |
-			MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(ring, signal->global_seqno);
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_advance(ring);
+	*cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+		MI_SEMAPHORE_SAD_GTE_SDD;
+	*cs++ = signal->global_seqno;
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	intel_ring_advance(req, cs);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
@@ -1461,28 +1441,27 @@ static int
 gen6_ring_sync_to(struct drm_i915_gem_request *req,
 		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
 	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
-	int ret;
+	u32 *cs;
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, dw1 | wait_mbox);
+	*cs++ = dw1 | wait_mbox;
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(ring, signal->global_seqno - 1);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = signal->global_seqno - 1;
+	*cs++ = 0;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1583,16 +1562,15 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 static int
 bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_FLUSH);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_FLUSH;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 	return 0;
 }
 
@@ -1658,20 +1636,16 @@ i965_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 length,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			MI_BATCH_GTT |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
+		I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
+	*cs++ = offset;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1685,59 +1659,56 @@ i830_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
-	int ret;
+	u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
 
-	ret = intel_ring_begin(req, 6);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 6);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(ring, cs_offset);
-	intel_ring_emit(ring, 0xdeadbeef);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+	*cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+	*cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+	*cs++ = cs_offset;
+	*cs++ = 0xdeadbeef;
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
 			return -ENOSPC;
 
-		ret = intel_ring_begin(req, 6 + 2);
-		if (ret)
-			return ret;
+		cs = intel_ring_begin(req, 6 + 2);
+		if (IS_ERR(cs))
+			return PTR_ERR(cs);
 
 		/* Blit the batch (which has now all relocs applied) to the
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(ring,
-				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(ring, cs_offset);
-		intel_ring_emit(ring, 4096);
-		intel_ring_emit(ring, offset);
-
-		intel_ring_emit(ring, MI_FLUSH);
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_advance(ring);
+		*cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+		*cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+		*cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+		*cs++ = cs_offset;
+		*cs++ = 4096;
+		*cs++ = offset;
+
+		*cs++ = MI_FLUSH;
+		*cs++ = MI_NOOP;
+		intel_ring_advance(req, cs);
 
 		/* ... and execute it. */
 		offset = cs_offset;
 	}
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+		MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -1747,17 +1718,16 @@ i915_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+	*cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+		MI_BATCH_NON_SECURE);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2165,7 +2135,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
-	int ret;
+	u32 *cs;
 
 	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
 
@@ -2178,9 +2148,9 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	GEM_BUG_ON(!request->engine->buffer);
 	request->ring = request->engine->buffer;
 
-	ret = intel_ring_begin(request, 0);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(request, 0);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
@@ -2235,7 +2205,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	return 0;
 }
 
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
 	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
@@ -2243,6 +2213,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
+	u32 *cs;
 
 	total_bytes = bytes + req->reserved_space;
 
@@ -2269,7 +2240,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
-			return ret;
+			return ERR_PTR(ret);
 	}
 
 	if (unlikely(need_wrap)) {
@@ -2282,32 +2253,34 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		ring->space -= remain_actual;
 	}
 
+	GEM_BUG_ON(ring->tail > ring->size - bytes);
+	cs = ring->vaddr + ring->tail;
+	ring->tail += bytes;
 	ring->space -= bytes;
 	GEM_BUG_ON(ring->space < 0);
-	GEM_DEBUG_EXEC(ring->advance = ring->tail + bytes);
-	return 0;
+
+	return cs;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *ring = req->ring;
 	int num_dwords =
-		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
-	int ret;
+		(req->ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	u32 *cs;
 
 	if (num_dwords == 0)
 		return 0;
 
 	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
-	ret = intel_ring_begin(req, num_dwords);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, num_dwords);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	while (num_dwords--)
-		intel_ring_emit(ring, MI_NOOP);
+		*cs++ = MI_NOOP;
 
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2351,13 +2324,11 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2379,16 +2350,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*cs++ = cmd;
+	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*cs++ = 0; /* upper addr */
+		*cs++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*cs++ = 0;
+		*cs++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 	return 0;
 }
 
@@ -2397,23 +2368,21 @@ gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(ring, lower_32_bits(offset));
-	intel_ring_emit(ring, upper_32_bits(offset));
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	*cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) | (dispatch_flags &
+		I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
+	*cs++ = lower_32_bits(offset);
+	*cs++ = upper_32_bits(offset);
+	*cs++ = MI_NOOP;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2423,22 +2392,19 @@ hsw_emit_bb_start(struct drm_i915_gem_request *req,
 		  u64 offset, u32 len,
 		  unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
-			(dispatch_flags & I915_DISPATCH_RS ?
-			 MI_BATCH_RESOURCE_STREAMER : 0));
+	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+		0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+		(dispatch_flags & I915_DISPATCH_RS ?
+		MI_BATCH_RESOURCE_STREAMER : 0);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*cs++ = offset;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2448,20 +2414,17 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 		   u64 offset, u32 len,
 		   unsigned int dispatch_flags)
 {
-	struct intel_ring *ring = req->ring;
-	int ret;
+	u32 *cs;
 
-	ret = intel_ring_begin(req, 2);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 2);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
-	intel_ring_emit(ring,
-			MI_BATCH_BUFFER_START |
-			(dispatch_flags & I915_DISPATCH_SECURE ?
-			 0 : MI_BATCH_NON_SECURE_I965));
+	*cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+		0 : MI_BATCH_NON_SECURE_I965);
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(ring, offset);
-	intel_ring_advance(ring);
+	*cs++ = offset;
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
@@ -2470,13 +2433,11 @@ gen6_emit_bb_start(struct drm_i915_gem_request *req,
 
 static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
-	struct intel_ring *ring = req->ring;
-	uint32_t cmd;
-	int ret;
+	u32 cmd, *cs;
 
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
+	cs = intel_ring_begin(req, 4);
+	if (IS_ERR(cs))
+		return PTR_ERR(cs);
 
 	cmd = MI_FLUSH_DW;
 	if (INTEL_GEN(req->i915) >= 8)
@@ -2497,17 +2458,16 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	 */
 	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(ring, cmd);
-	intel_ring_emit(ring,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	*cs++ = cmd;
+	*cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(ring, 0); /* upper addr */
-		intel_ring_emit(ring, 0); /* value */
+		*cs++ = 0; /* upper addr */
+		*cs++ = 0; /* value */
 	} else  {
-		intel_ring_emit(ring, 0);
-		intel_ring_emit(ring, MI_NOOP);
+		*cs++ = 0;
+		*cs++ = MI_NOOP;
 	}
-	intel_ring_advance(ring);
+	intel_ring_advance(req, cs);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index cc62e89010d3..4350713dbc58 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -145,7 +145,6 @@ struct intel_ring {
 
 	u32 head;
 	u32 tail;
-	GEM_DEBUG_DECL(u32 advance);
 
 	int space;
 	int size;
@@ -292,7 +291,7 @@ struct intel_engine_cs {
 #define I915_DISPATCH_PINNED BIT(1)
 #define I915_DISPATCH_RS     BIT(2)
 	void		(*emit_breadcrumb)(struct drm_i915_gem_request *req,
-					   u32 *out);
+					   u32 *cs);
 	int		emit_breadcrumb_sz;
 
 	/* Pass the request to the hardware queue (e.g. directly into
@@ -375,7 +374,7 @@ struct intel_engine_cs {
 		/* AKA wait() */
 		int	(*sync_to)(struct drm_i915_gem_request *req,
 				   struct drm_i915_gem_request *signal);
-		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *out);
+		u32	*(*signal)(struct drm_i915_gem_request *req, u32 *cs);
 	} semaphore;
 
 	/* Execlists */
@@ -497,21 +496,12 @@ void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
-int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
-{
-	*(uint32_t *)(ring->vaddr + ring->tail) = data;
-	ring->tail += 4;
-}
+u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req, int n);
 
-static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
-{
-	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
-}
-
-static inline void intel_ring_advance(struct intel_ring *ring)
+static inline void
+intel_ring_advance(struct drm_i915_gem_request *req, u32 *cs)
 {
 	/* Dummy function.
 	 *
@@ -521,14 +511,16 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 	 * reserved for the command packet (i.e. the value passed to
 	 * intel_ring_begin()).
 	 */
-	GEM_DEBUG_BUG_ON(ring->tail != ring->advance);
+	GEM_BUG_ON((req->ring->vaddr + req->ring->tail) != cs);
 }
 
-static inline u32 intel_ring_offset(struct intel_ring *ring, void *addr)
+static inline u32
+intel_ring_offset(struct drm_i915_gem_request *req, void *addr)
 {
 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
-	u32 offset = addr - ring->vaddr;
-	return offset & (ring->size - 1);
+	u32 offset = addr - req->ring->vaddr;
+	GEM_BUG_ON(offset > req->ring->size);
+	return offset & (req->ring->size - 1);
 }
 
 int __intel_ring_space(int head, int tail, int size);
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
index 483cbe02d983..ad92c60290f1 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
@@ -186,6 +186,7 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct drm_i915_gem_request *rq;
 	struct i915_vma *vma;
+	u32 *cs;
 	int err;
 
 	err = i915_gem_object_set_to_gtt_domain(obj, true);
@@ -202,30 +203,30 @@ static int gpu_set(struct drm_i915_gem_object *obj,
 		return PTR_ERR(rq);
 	}
 
-	err = intel_ring_begin(rq, 4);
-	if (err) {
+	cs = intel_ring_begin(rq, 4);
+	if (IS_ERR(cs)) {
 		__i915_add_request(rq, false);
 		i915_vma_unpin(vma);
-		return err;
+		return PTR_ERR(cs);
 	}
 
 	if (INTEL_GEN(i915) >= 8) {
-		intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM_GEN4 | 1 << 22);
-		intel_ring_emit(rq->ring, lower_32_bits(i915_ggtt_offset(vma) + offset));
-		intel_ring_emit(rq->ring, upper_32_bits(i915_ggtt_offset(vma) + offset));
-		intel_ring_emit(rq->ring, v);
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+		*cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset);
+		*cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset);
+		*cs++ = v;
 	} else if (INTEL_GEN(i915) >= 4) {
-		intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM_GEN4 | 1 << 22);
-		intel_ring_emit(rq->ring, 0);
-		intel_ring_emit(rq->ring, i915_ggtt_offset(vma) + offset);
-		intel_ring_emit(rq->ring, v);
+		*cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22;
+		*cs++ = 0;
+		*cs++ = i915_ggtt_offset(vma) + offset;
+		*cs++ = v;
 	} else {
-		intel_ring_emit(rq->ring, MI_STORE_DWORD_IMM | 1 << 22);
-		intel_ring_emit(rq->ring, i915_ggtt_offset(vma) + offset);
-		intel_ring_emit(rq->ring, v);
-		intel_ring_emit(rq->ring, MI_NOOP);
+		*cs++ = MI_STORE_DWORD_IMM | 1 << 22;
+		*cs++ = i915_ggtt_offset(vma) + offset;
+		*cs++ = v;
+		*cs++ = MI_NOOP;
 	}
-	intel_ring_advance(rq->ring);
+	intel_ring_advance(rq, cs);
 
 	i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 	i915_vma_unpin(vma);
-- 
2.9.3
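
For readers following the conversion, every hunk above applies the same
before/after pattern. A minimal sketch of an emitter written in the new
style, using only the helpers this patch introduces (example_emit_flush
is a hypothetical function for illustration, not one from the tree):

static int example_emit_flush(struct drm_i915_gem_request *req)
{
	u32 *cs;

	/*
	 * intel_ring_begin() now returns a pointer into the ring
	 * (or an ERR_PTR) and bumps ring->tail for the whole packet
	 * up front, so the stores below are plain, optimisable writes.
	 */
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_FLUSH;
	*cs++ = MI_NOOP;

	/*
	 * intel_ring_advance() is now only a debug check that exactly
	 * the dwords reserved in intel_ring_begin() were written; the
	 * tail has already been advanced.
	 */
	intel_ring_advance(req, cs);

	return 0;
}

Error handling moves from an int return code to ERR_PTR()/PTR_ERR(), so
callers propagate PTR_ERR(cs) where they previously returned ret.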

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx


* Re: [PATCH v10] drm/i915: Emit to ringbuffer directly
  2017-02-14 11:32                   ` [PATCH v10] " Tvrtko Ursulin
@ 2017-02-14 12:03                     ` Joonas Lahtinen
  2017-02-14 12:06                     ` Chris Wilson
  1 sibling, 0 replies; 28+ messages in thread
From: Joonas Lahtinen @ 2017-02-14 12:03 UTC (permalink / raw)
  To: Tvrtko Ursulin, Intel-gfx

On Tue, 2017-02-14 at 11:32 +0000, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> This removes the usage of intel_ring_emit in favour of
> directly writing to the ring buffer.
> 
> intel_ring_emit was preventing the compiler from optimising
> fetch and increment of the current ring buffer pointer and
> therefore generating very verbose code for every write.
> 
> It had no useful purpose since all ringbuffer operations
> are started and ended with intel_ring_begin and
> intel_ring_advance respectively, with no bail out in the
> middle possible, so it is fine to increment the tail in
> intel_ring_begin and let the code manage the pointer
> itself.
> 
> Useless instruction removal amounts to approximately
> two and a half kilobytes of saved text on my build.
> 
> Not sure if this has any measurable performance
> implications but executing a ton of useless instructions
> on fast paths cannot be good.
> 
> Patch is not fully polished, but it compiles and runs
> on Gen9 at least.
> 
> v2:
>  * Change return from intel_ring_begin to error pointer by
>    popular demand.
>  * Move tail increment to intel_ring_advance to enable some
>    error checking.
> 
> v3:
>  * Move tail advance back into intel_ring_begin.
>  * Rebase and tidy.
> 
> v4:
>  * Complete rebase after a few months since v3.
> 
> v5:
>  * Remove unnecessary cast and fix !debug compile. (Chris Wilson)
> 
> v6:
>  * Make intel_ring_offset take request as well.
>  * Fix recording of request postfix plus a sprinkle of asserts.
>    (Chris Wilson)
> 
> v7:
>  * Use intel_ring_offset to get the postfix. (Chris Wilson)
>  * Convert GVT code as well.
> 
> v8:
>  * Rename *out++ to *cs++.
> 
> v9:
>  * Fix GVT out to cs conversion in GVT.
> 
> v10:
>  * Rebase for new intel_ring_begin in selftests.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Zhi Wang <zhi.a.wang@intel.com>
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v9)

Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation


* Re: [PATCH v10] drm/i915: Emit to ringbuffer directly
  2017-02-14 11:32                   ` [PATCH v10] " Tvrtko Ursulin
  2017-02-14 12:03                     ` Joonas Lahtinen
@ 2017-02-14 12:06                     ` Chris Wilson
  1 sibling, 0 replies; 28+ messages in thread
From: Chris Wilson @ 2017-02-14 12:06 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: Intel-gfx

On Tue, Feb 14, 2017 at 11:32:42AM +0000, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> This removes the usage of intel_ring_emit in favour of
> directly writing to the ring buffer.
> 
> intel_ring_emit was preventing the compiler from optimising
> fetch and increment of the current ring buffer pointer and
> therefore generating very verbose code for every write.
> 
> It had no useful purpose since all ringbuffer operations
> are started and ended with intel_ring_begin and
> intel_ring_advance respectively, with no bail out in the
> middle possible, so it is fine to increment the tail in
> intel_ring_begin and let the code manage the pointer
> itself.
> 
> Useless instruction removal amounts to approximately
> two and a half kilobytes of saved text on my build.
> 
> Not sure if this has any measurable performance
> implications but executing a ton of useless instructions
> on fast paths cannot be good.
> 
> Patch is not fully polished, but it compiles and runs
> on Gen9 at least.

Probably safe to drop this comment now. :)
 
> v2:
>  * Change return from intel_ring_begin to error pointer by
>    popular demand.
>  * Move tail increment to intel_ring_advance to enable some
>    error checking.
> 
> v3:
>  * Move tail advance back into intel_ring_begin.
>  * Rebase and tidy.
> 
> v4:
>  * Complete rebase after a few months since v3.
> 
> v5:
>  * Remove unnecessary cast and fix !debug compile. (Chris Wilson)
> 
> v6:
>  * Make intel_ring_offset take request as well.
>  * Fix recording of request postfix plus a sprinkle of asserts.
>    (Chris Wilson)
> 
> v7:
>  * Use intel_ring_offset to get the postfix. (Chris Wilson)
>  * Convert GVT code as well.
> 
> v8:
>  * Rename *out++ to *cs++.
> 
> v9:
>  * Fix GVT out to cs conversion in GVT.
> 
> v10:
>  * Rebase for new intel_ring_begin in selftests.
> 
> Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Zhi Wang <zhi.a.wang@intel.com>
> Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk> (v9)

Looking good to me.

Joonas, care to ack?
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre


* ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev11)
  2017-02-08 13:10 [PATCH] drm/i915: Emit to ringbuffer directly Tvrtko Ursulin
                   ` (6 preceding siblings ...)
  2017-02-13 13:52 ` ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev10) Patchwork
@ 2017-02-14 13:52 ` Patchwork
  2017-02-14 14:39   ` Tvrtko Ursulin
  7 siblings, 1 reply; 28+ messages in thread
From: Patchwork @ 2017-02-14 13:52 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx

== Series Details ==

Series: drm/i915: Emit to ringbuffer directly (rev11)
URL   : https://patchwork.freedesktop.org/series/12186/
State : success

== Summary ==

Series 12186v11 drm/i915: Emit to ringbuffer directly
https://patchwork.freedesktop.org/api/1.0/series/12186/revisions/11/mbox/

fi-bdw-5557u     total:252  pass:241  dwarn:0   dfail:0   fail:0   skip:11 
fi-bsw-n3050     total:252  pass:213  dwarn:0   dfail:0   fail:0   skip:39 
fi-bxt-j4205     total:252  pass:233  dwarn:0   dfail:0   fail:0   skip:19 
fi-bxt-t5700     total:83   pass:70   dwarn:0   dfail:0   fail:0   skip:12 
fi-byt-j1900     total:252  pass:225  dwarn:0   dfail:0   fail:0   skip:27 
fi-byt-n2820     total:252  pass:221  dwarn:0   dfail:0   fail:0   skip:31 
fi-hsw-4770      total:252  pass:236  dwarn:0   dfail:0   fail:0   skip:16 
fi-hsw-4770r     total:252  pass:236  dwarn:0   dfail:0   fail:0   skip:16 
fi-ilk-650       total:252  pass:202  dwarn:0   dfail:0   fail:0   skip:50 
fi-ivb-3520m     total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18 
fi-ivb-3770      total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18 
fi-kbl-7500u     total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18 
fi-skl-6260u     total:252  pass:242  dwarn:0   dfail:0   fail:0   skip:10 
fi-skl-6700hq    total:252  pass:235  dwarn:0   dfail:0   fail:0   skip:17 
fi-skl-6700k     total:252  pass:230  dwarn:4   dfail:0   fail:0   skip:18 
fi-skl-6770hq    total:252  pass:242  dwarn:0   dfail:0   fail:0   skip:10 
fi-snb-2520m     total:252  pass:224  dwarn:0   dfail:0   fail:0   skip:28 
fi-snb-2600      total:252  pass:223  dwarn:0   dfail:0   fail:0   skip:29 

fd9871a3f519674bb9b29b219393a5a3d470a04b drm-tip: 2017y-02m-14d-12h-28m-14s UTC integration manifest
02cf3f5 drm/i915: Emit to ringbuffer directly

== Logs ==

For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_3800/


* Re: ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev11)
  2017-02-14 13:52 ` ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev11) Patchwork
@ 2017-02-14 14:39   ` Tvrtko Ursulin
  0 siblings, 0 replies; 28+ messages in thread
From: Tvrtko Ursulin @ 2017-02-14 14:39 UTC (permalink / raw)
  To: intel-gfx, Tvrtko Ursulin


On 14/02/2017 13:52, Patchwork wrote:
> == Series Details ==
>
> Series: drm/i915: Emit to ringbuffer directly (rev11)
> URL   : https://patchwork.freedesktop.org/series/12186/
> State : success
>
> == Summary ==
>
> Series 12186v11 drm/i915: Emit to ringbuffer directly
> https://patchwork.freedesktop.org/api/1.0/series/12186/revisions/11/mbox/
>
> fi-bdw-5557u     total:252  pass:241  dwarn:0   dfail:0   fail:0   skip:11
> fi-bsw-n3050     total:252  pass:213  dwarn:0   dfail:0   fail:0   skip:39
> fi-bxt-j4205     total:252  pass:233  dwarn:0   dfail:0   fail:0   skip:19
> fi-bxt-t5700     total:83   pass:70   dwarn:0   dfail:0   fail:0   skip:12
> fi-byt-j1900     total:252  pass:225  dwarn:0   dfail:0   fail:0   skip:27
> fi-byt-n2820     total:252  pass:221  dwarn:0   dfail:0   fail:0   skip:31
> fi-hsw-4770      total:252  pass:236  dwarn:0   dfail:0   fail:0   skip:16
> fi-hsw-4770r     total:252  pass:236  dwarn:0   dfail:0   fail:0   skip:16
> fi-ilk-650       total:252  pass:202  dwarn:0   dfail:0   fail:0   skip:50
> fi-ivb-3520m     total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18
> fi-ivb-3770      total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18
> fi-kbl-7500u     total:252  pass:234  dwarn:0   dfail:0   fail:0   skip:18
> fi-skl-6260u     total:252  pass:242  dwarn:0   dfail:0   fail:0   skip:10
> fi-skl-6700hq    total:252  pass:235  dwarn:0   dfail:0   fail:0   skip:17
> fi-skl-6700k     total:252  pass:230  dwarn:4   dfail:0   fail:0   skip:18
> fi-skl-6770hq    total:252  pass:242  dwarn:0   dfail:0   fail:0   skip:10
> fi-snb-2520m     total:252  pass:224  dwarn:0   dfail:0   fail:0   skip:28
> fi-snb-2600      total:252  pass:223  dwarn:0   dfail:0   fail:0   skip:29
>
> fd9871a3f519674bb9b29b219393a5a3d470a04b drm-tip: 2017y-02m-14d-12h-28m-14s UTC integration manifest
> 02cf3f5 drm/i915: Emit to ringbuffer directly

Pushed, thanks for the review!

Regards,

Tvrtko



Thread overview: 28+ messages
2017-02-08 13:10 [PATCH] drm/i915: Emit to ringbuffer directly Tvrtko Ursulin
2017-02-08 13:36 ` Chris Wilson
2017-02-08 13:58   ` Chris Wilson
2017-02-08 14:14   ` [PATCH v5] " Tvrtko Ursulin
2017-02-08 17:49     ` Chris Wilson
2017-02-08 18:04       ` [PATCH v6] " Tvrtko Ursulin
2017-02-08 18:21         ` Chris Wilson
2017-02-09  9:02           ` [PATCH v7] " Tvrtko Ursulin
2017-02-09 13:35             ` Chris Wilson
2017-02-13 12:53             ` [PATCH v8] " Tvrtko Ursulin
2017-02-13 13:06               ` [PATCH v9] " Tvrtko Ursulin
2017-02-13 21:09                 ` Chris Wilson
2017-02-14 11:32                   ` [PATCH v10] " Tvrtko Ursulin
2017-02-14 12:03                     ` Joonas Lahtinen
2017-02-14 12:06                     ` Chris Wilson
2017-02-09  8:00         ` [PATCH v6] " Joonas Lahtinen
2017-02-09  9:10           ` Chris Wilson
2017-02-09 10:37             ` Mika Kuoppala
2017-02-09 11:49               ` Tvrtko Ursulin
2017-02-09 13:29                 ` Chris Wilson
2017-02-08 22:32 ` ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev7) Patchwork
2017-02-09 10:22 ` ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev8) Patchwork
2017-02-09 21:52 ` Patchwork
2017-02-10  8:22 ` ✗ Fi.CI.BAT: warning " Patchwork
2017-02-13 13:02 ` ✗ Fi.CI.BAT: failure for drm/i915: Emit to ringbuffer directly (rev9) Patchwork
2017-02-13 13:52 ` ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev10) Patchwork
2017-02-14 13:52 ` ✓ Fi.CI.BAT: success for drm/i915: Emit to ringbuffer directly (rev11) Patchwork
2017-02-14 14:39   ` Tvrtko Ursulin
