* A few bug fixes leading to exporting prime fences [mostly reviewed]
@ 2016-08-01  9:10 Chris Wilson
  2016-08-01  9:10 ` [PATCH 01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
                   ` (73 more replies)
  0 siblings, 74 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

This is the current beginning of the series; almost all of the patches
have been reviewed, with only a small amount of review fallout. Perhaps
the biggest unresolved review comment is from Daniel, concerning the
intel_frontbuffer kerneldoc, which is still a mystery to me.
-Chris

* [PATCH 01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 02/73] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
                   ` (72 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Both helpers perform the same action with more or less indirection, so
just unify the code.
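
As a rough mental model, the unified helpers reduce to the sketch
below. This is an illustration only, not the exact upstream code: the
real definitions live in intel_ringbuffer.h with extra debug checks,
and the name of the CPU mapping field ("virtual_start" here) is an
assumption about the current struct intel_ringbuffer layout.

	static inline void intel_ring_emit(struct intel_ringbuffer *ring,
					   u32 data)
	{
		/* Write one dword at the software tail and advance it. */
		*(u32 *)(ring->virtual_start + ring->tail) = data;
		ring->tail += 4;
	}

	static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring,
					       i915_reg_t reg)
	{
		/* Registers are emitted as their mmio offset. */
		intel_ring_emit(ring, i915_mmio_reg_offset(reg));
	}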

v2: Add back a few intel_engine_cs locals

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-11-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  47 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  53 ++--
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  48 ++--
 drivers/gpu/drm/i915/intel_display.c       |  80 +++---
 drivers/gpu/drm/i915/intel_lrc.c           | 183 +++++++-------
 drivers/gpu/drm/i915/intel_lrc.h           |  26 --
 drivers/gpu/drm/i915/intel_mocs.c          |  38 ++-
 drivers/gpu/drm/i915/intel_overlay.c       |  50 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 382 +++++++++++++++--------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  25 +-
 10 files changed, 450 insertions(+), 482 deletions(-)
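
The conversion at each call site is mechanical, as the hunks below
show: look up the ringbuffer once from the request and pass that to
the emit helpers instead of the engine. A minimal post-patch caller
follows this pattern (hypothetical function, shown for illustration):

	static int emit_noops(struct drm_i915_gem_request *req)
	{
		struct intel_ringbuffer *ring = req->ringbuf;
		int ret;

		/* Reserve space for two dwords in the ring. */
		ret = intel_ring_begin(req, 2);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		return 0;
	}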

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index bd13d084e19c..a0e24eb5e167 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -552,6 +552,7 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
@@ -589,64 +590,64 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 	/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
 	if (INTEL_GEN(dev_priv) >= 7) {
-		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_DISABLE);
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 
-			intel_ring_emit(engine,
+			intel_ring_emit(ring,
 					MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(engine,
+				intel_ring_emit_reg(ring,
 						    RING_PSMI_CTL(signaller->mmio_base));
-				intel_ring_emit(engine,
+				intel_ring_emit(ring,
 						_MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 		}
 	}
 
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_SET_CONTEXT);
-	intel_ring_emit(engine,
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_SET_CONTEXT);
+	intel_ring_emit(ring,
 			i915_gem_obj_ggtt_offset(req->ctx->engine[RCS].state) |
 			flags);
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
 	 */
-	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
 	if (INTEL_GEN(dev_priv) >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
 			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
-			intel_ring_emit(engine,
+			intel_ring_emit(ring,
 					MI_LOAD_REGISTER_IMM(num_rings));
 			for_each_engine(signaller, dev_priv) {
 				if (signaller == engine)
 					continue;
 
 				last_reg = RING_PSMI_CTL(signaller->mmio_base);
-				intel_ring_emit_reg(engine, last_reg);
-				intel_ring_emit(engine,
+				intel_ring_emit_reg(ring, last_reg);
+				intel_ring_emit(ring,
 						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
 
 			/* Insert a delay before the next switch! */
-			intel_ring_emit(engine,
+			intel_ring_emit(ring,
 					MI_STORE_REGISTER_MEM |
 					MI_SRM_LRM_GLOBAL_GTT);
-			intel_ring_emit_reg(engine, last_reg);
-			intel_ring_emit(engine, engine->scratch.gtt_offset);
-			intel_ring_emit(engine, MI_NOOP);
+			intel_ring_emit_reg(ring, last_reg);
+			intel_ring_emit(ring, engine->scratch.gtt_offset);
+			intel_ring_emit(ring, MI_NOOP);
 		}
-		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
+		intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	}
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return ret;
 }
@@ -654,7 +655,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
 	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int i, ret;
 
 	if (!remap_info)
@@ -669,13 +670,13 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice)
 	 * here because no other code should access these registers other than
 	 * at initialization time.
 	 */
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4));
 	for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
-		intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-		intel_ring_emit(engine, remap_info[i]);
+		intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
+		intel_ring_emit(ring, remap_info[i]);
 	}
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index aa35867f3032..2f9f0daa1bc2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1171,14 +1171,12 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 }
 
 static int
-i915_reset_gen7_sol_offsets(struct drm_device *dev,
-			    struct drm_i915_gem_request *req)
+i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret, i;
 
-	if (!IS_GEN7(dev) || engine != &dev_priv->engine[RCS]) {
+	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
 		DRM_DEBUG("sol reset is gen7/rcs only\n");
 		return -EINVAL;
 	}
@@ -1188,12 +1186,12 @@ i915_reset_gen7_sol_offsets(struct drm_device *dev,
 		return ret;
 
 	for (i = 0; i < 4; i++) {
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, GEN7_SO_WRITE_OFFSET(i));
-		intel_ring_emit(engine, 0);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, GEN7_SO_WRITE_OFFSET(i));
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1256,9 +1254,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
 			       struct list_head *vmas)
 {
-	struct drm_device *dev = params->dev;
-	struct intel_engine_cs *engine = params->engine;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
 	int instp_mode;
 	u32 instp_mask;
@@ -1272,34 +1268,31 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<engine->id),
-	     "%s didn't clear reload\n", engine->name);
-
 	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
 	instp_mask = I915_EXEC_CONSTANTS_MASK;
 	switch (instp_mode) {
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
+		if (instp_mode != 0 && params->engine->id != RCS) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
 
 		if (instp_mode != dev_priv->relative_constants_mode) {
-			if (INTEL_INFO(dev)->gen < 4) {
+			if (INTEL_INFO(dev_priv)->gen < 4) {
 				DRM_DEBUG("no rel constants on pre-gen4\n");
 				return -EINVAL;
 			}
 
-			if (INTEL_INFO(dev)->gen > 5 &&
+			if (INTEL_INFO(dev_priv)->gen > 5 &&
 			    instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
 				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
 				return -EINVAL;
 			}
 
 			/* The HW changed the meaning on this bit on gen6 */
-			if (INTEL_INFO(dev)->gen >= 6)
+			if (INTEL_INFO(dev_priv)->gen >= 6)
 				instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
 		}
 		break;
@@ -1308,23 +1301,25 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 		return -EINVAL;
 	}
 
-	if (engine == &dev_priv->engine[RCS] &&
+	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
+		struct intel_ringbuffer *ring = params->request->ringbuf;
+
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, INSTPM);
-		intel_ring_emit(engine, instp_mask << 16 | instp_mode);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
 
 	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		ret = i915_reset_gen7_sol_offsets(dev, params->request);
+		ret = i915_reset_gen7_sol_offsets(params->request);
 		if (ret)
 			return ret;
 	}
@@ -1336,9 +1331,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (exec_len == 0)
 		exec_len = params->batch_obj->base.size;
 
-	ret = engine->dispatch_execbuffer(params->request,
-					exec_start, exec_len,
-					params->dispatch_flags);
+	ret = params->engine->dispatch_execbuffer(params->request,
+						  exec_start, exec_len,
+						  params->dispatch_flags);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 38e7d992a20d..b38a5311f996 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -669,6 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -678,13 +679,13 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(engine, GEN8_RING_PDP_UDW(engine, entry));
-	intel_ring_emit(engine, upper_32_bits(addr));
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-	intel_ring_emit_reg(engine, GEN8_RING_PDP_LDW(engine, entry));
-	intel_ring_emit(engine, lower_32_bits(addr));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
+	intel_ring_emit(ring, upper_32_bits(addr));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
+	intel_ring_emit(ring, lower_32_bits(addr));
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1660,6 +1661,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1672,13 +1674,13 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(engine, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(engine, get_pd_offset(ppgtt));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(ring, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(ring, get_pd_offset(ppgtt));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1686,6 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1698,17 +1701,18 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(2));
-	intel_ring_emit_reg(engine, RING_PP_DIR_DCLV(engine));
-	intel_ring_emit(engine, PP_DIR_DCLV_2G);
-	intel_ring_emit_reg(engine, RING_PP_DIR_BASE(engine));
-	intel_ring_emit(engine, get_pd_offset(ppgtt));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
+	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
+	intel_ring_emit(ring, PP_DIR_DCLV_2G);
+	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
+	intel_ring_emit(ring, get_pd_offset(ppgtt));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
-		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = engine->flush(req,
+				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index c4c1c85366de..79305f8dbe55 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11115,7 +11115,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11131,13 +11131,13 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(engine, 0); /* aux display base address, unused */
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, 0); /* aux display base address, unused */
 
 	return 0;
 }
@@ -11149,7 +11149,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11162,13 +11162,13 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
 	else
 		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, MI_NOOP);
 
 	return 0;
 }
@@ -11180,7 +11180,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11194,10 +11194,10 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 * Display Registers (which do not change across a page-flip)
 	 * so we need only reprogram the base address.
 	 */
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0]);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
+	intel_ring_emit(ring, fb->pitches[0]);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset |
 			obj->tiling_mode);
 
 	/* XXX Enabling the panel-fitter across page-flip is so far
@@ -11206,7 +11206,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(engine, pf | pipesrc);
+	intel_ring_emit(ring, pf | pipesrc);
 
 	return 0;
 }
@@ -11218,7 +11218,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11228,10 +11228,10 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_DISPLAY_FLIP |
+	intel_ring_emit(ring, MI_DISPLAY_FLIP |
 			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
 
 	/* Contrary to the suggestions in the documentation,
 	 * "Enable Panel Fitter" does not seem to be required when page
@@ -11241,7 +11241,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 	 */
 	pf = 0;
 	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
-	intel_ring_emit(engine, pf | pipesrc);
+	intel_ring_emit(ring, pf | pipesrc);
 
 	return 0;
 }
@@ -11253,7 +11253,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
@@ -11274,7 +11274,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	}
 
 	len = 4;
-	if (engine->id == RCS) {
+	if (req->engine->id == RCS) {
 		len += 6;
 		/*
 		 * On Gen 8, SRM is now taking an extra dword to accommodate
@@ -11312,30 +11312,30 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
 	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
 	 */
-	if (engine->id == RCS) {
-		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(engine, DERRMR);
-		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
+	if (req->engine->id == RCS) {
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, DERRMR);
+		intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
 					  DERRMR_PIPEB_PRI_FLIP_DONE |
 					  DERRMR_PIPEC_PRI_FLIP_DONE));
 		if (IS_GEN8(dev))
-			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM_GEN8 |
 					      MI_SRM_LRM_GLOBAL_GTT);
 		else
-			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
+			intel_ring_emit(ring, MI_STORE_REGISTER_MEM |
 					      MI_SRM_LRM_GLOBAL_GTT);
-		intel_ring_emit_reg(engine, DERRMR);
-		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
+		intel_ring_emit_reg(ring, DERRMR);
+		intel_ring_emit(ring, req->engine->scratch.gtt_offset + 256);
 		if (IS_GEN8(dev)) {
-			intel_ring_emit(engine, 0);
-			intel_ring_emit(engine, MI_NOOP);
+			intel_ring_emit(ring, 0);
+			intel_ring_emit(ring, MI_NOOP);
 		}
 	}
 
-	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
-	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
-	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
-	intel_ring_emit(engine, (MI_NOOP));
+	intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
+	intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
+	intel_ring_emit(ring, intel_crtc->flip_work->gtt_offset);
+	intel_ring_emit(ring, (MI_NOOP));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 4b6b33f21444..883af4601f61 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -773,7 +773,7 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *engine = request->engine;
 
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_advance(ringbuf);
 	request->tail = ringbuf->tail;
 
 	/*
@@ -782,9 +782,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	 *
 	 * Caller must reserve WA_TAIL_DWORDS for us!
 	 */
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	/* We keep the previous context alive until we retire the following
 	 * request. This ensures that any the context object is still pinned
@@ -868,11 +868,11 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 		if (ret)
 			return ret;
 
-		intel_logical_ring_emit(ringbuf, MI_NOOP);
-		intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_logical_ring_emit_reg(ringbuf, INSTPM);
-		intel_logical_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-		intel_logical_ring_advance(ringbuf);
+		intel_ring_emit(ringbuf, MI_NOOP);
+		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ringbuf, INSTPM);
+		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ringbuf);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -1045,14 +1045,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_logical_ring_emit_reg(ringbuf, w->reg[i].addr);
-		intel_logical_ring_emit(ringbuf, w->reg[i].value);
+		intel_ring_emit_reg(ringbuf, w->reg[i].addr);
+		intel_ring_emit(ringbuf, w->reg[i].value);
 	}
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ringbuf, MI_NOOP);
 
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_advance(ringbuf);
 
 	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
@@ -1555,8 +1555,8 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
 
@@ -1564,20 +1564,18 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(num_lri_cmds));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_lri_cmds));
 	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
 		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
 
-		intel_logical_ring_emit_reg(ringbuf,
-					    GEN8_RING_PDP_UDW(engine, i));
-		intel_logical_ring_emit(ringbuf, upper_32_bits(pd_daddr));
-		intel_logical_ring_emit_reg(ringbuf,
-					    GEN8_RING_PDP_LDW(engine, i));
-		intel_logical_ring_emit(ringbuf, lower_32_bits(pd_daddr));
+		intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, i));
+		intel_ring_emit(ring, upper_32_bits(pd_daddr));
+		intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, i));
+		intel_ring_emit(ring, lower_32_bits(pd_daddr));
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1585,7 +1583,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -1612,14 +1610,14 @@ static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_logical_ring_emit(ringbuf, MI_BATCH_BUFFER_START_GEN8 |
-				(ppgtt<<8) |
-				(dispatch_flags & I915_DISPATCH_RS ?
-				 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_logical_ring_emit(ringbuf, lower_32_bits(offset));
-	intel_logical_ring_emit(ringbuf, upper_32_bits(offset));
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 |
+			(ppgtt<<8) |
+			(dispatch_flags & I915_DISPATCH_RS ?
+			 MI_BATCH_RESOURCE_STREAMER : 0));
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1642,9 +1640,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *engine = ringbuf->engine;
-	struct drm_i915_private *dev_priv = request->i915;
+	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_engine_cs *engine = ring->engine;
 	uint32_t cmd;
 	int ret;
 
@@ -1663,17 +1660,17 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (engine == &dev_priv->engine[VCS])
+		if (engine->id == VCS)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
-	intel_logical_ring_emit(ringbuf, cmd);
-	intel_logical_ring_emit(ringbuf,
-				I915_GEM_HWS_SCRATCH_ADDR |
-				MI_FLUSH_DW_USE_GTT);
-	intel_logical_ring_emit(ringbuf, 0); /* upper addr */
-	intel_logical_ring_emit(ringbuf, 0); /* value */
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring,
+			I915_GEM_HWS_SCRATCH_ADDR |
+			MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, 0); /* upper addr */
+	intel_ring_emit(ring, 0); /* value */
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1682,8 +1679,8 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
-	struct intel_engine_cs *engine = ringbuf->engine;
+	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 flags = 0;
@@ -1734,40 +1731,40 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		return ret;
 
 	if (vf_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
 	if (dc_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, PIPE_CONTROL_DC_FLUSH_ENABLE);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-	intel_logical_ring_emit(ringbuf, flags);
-	intel_logical_ring_emit(ringbuf, scratch_addr);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, 0);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
 
 	if (dc_flush_wa) {
-		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
-		intel_logical_ring_emit(ringbuf, 0);
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring, PIPE_CONTROL_CS_STALL);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, 0);
 	}
 
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1796,7 +1793,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1806,21 +1803,20 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 	/* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
 	BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
 
-	intel_logical_ring_emit(ringbuf,
-				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
-	intel_logical_ring_emit(ringbuf,
-				intel_hws_seqno_address(request->engine) |
-				MI_FLUSH_DW_USE_GTT);
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, request->fence.seqno);
-	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+	intel_ring_emit(ring,
+			intel_hws_seqno_address(request->engine) |
+			MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, request->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
 }
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
@@ -1834,19 +1830,18 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	 * need a prior CS_STALL, which is emitted by the flush
 	 * following the batch.
 	 */
-	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
-	intel_logical_ring_emit(ringbuf,
-				(PIPE_CONTROL_GLOBAL_GTT_IVB |
-				 PIPE_CONTROL_CS_STALL |
-				 PIPE_CONTROL_QW_WRITE));
-	intel_logical_ring_emit(ringbuf,
-				intel_hws_seqno_address(request->engine));
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, i915_gem_request_get_seqno(request));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring,
+			(PIPE_CONTROL_GLOBAL_GTT_IVB |
+			 PIPE_CONTROL_CS_STALL |
+			 PIPE_CONTROL_QW_WRITE));
+	intel_ring_emit(ring, intel_hws_seqno_address(request->engine));
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(request));
 	/* We're thrashing one dword of HWS. */
-	intel_logical_ring_emit(ringbuf, 0);
-	intel_logical_ring_emit(ringbuf, MI_USER_INTERRUPT);
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
 	return intel_logical_ring_advance_and_submit(request);
 }
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 38287302c5ba..d26fb44549e5 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -73,32 +73,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
 int intel_engines_init(struct drm_device *dev);
 
 int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-/**
- * intel_logical_ring_advance() - advance the ringbuffer tail
- * @ringbuf: Ringbuffer to advance.
- *
- * The tail is only updated in our logical ringbuffer struct.
- */
-static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
-{
-	__intel_ringbuffer_advance(ringbuf);
-}
-
-/**
- * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
- * @ringbuf: Ringbuffer to write to.
- * @data: DWORD to write.
- */
-static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
-					   u32 data)
-{
-	__intel_ringbuffer_emit(ringbuf, data);
-}
-static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
-					       i915_reg_t reg)
-{
-	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
-}
 
 /* Logical Ring Contexts */
 
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index bd46968d8a07..3059c52030b4 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -288,14 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf,
-				MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_logical_ring_emit_reg(ringbuf,
-					    mocs_register(engine, index));
-		intel_logical_ring_emit(ringbuf,
-					table->table[index].control_value);
+		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
+		intel_ring_emit(ringbuf, table->table[index].control_value);
 	}
 
 	/*
@@ -307,14 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_logical_ring_emit_reg(ringbuf,
-					    mocs_register(engine, index));
-		intel_logical_ring_emit(ringbuf,
-					table->table[0].control_value);
+		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
+		intel_ring_emit(ringbuf, table->table[0].control_value);
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
@@ -352,19 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_logical_ring_emit(ringbuf,
+	intel_ring_emit(ringbuf,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf,
-					l3cc_combine(table, 2*i, 2*i+1));
+		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1));
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
 		i++;
 	}
 
@@ -374,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_logical_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_logical_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
+		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
 	}
 
-	intel_logical_ring_emit(ringbuf, MI_NOOP);
-	intel_logical_ring_advance(ringbuf);
+	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_advance(ringbuf);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index c10ce368787e..ec63b64fb202 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -235,6 +235,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
+	struct intel_ringbuffer *ring;
 	int ret;
 
 	WARN_ON(overlay->active);
@@ -252,11 +253,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	overlay->active = true;
 
-	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
-	intel_ring_emit(engine, overlay->flip_addr | OFC_UPDATE);
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	ring = req->ringbuf;
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return intel_overlay_do_wait_request(overlay, req, NULL);
 }
@@ -268,6 +270,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
+	struct intel_ringbuffer *ring;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -292,9 +295,10 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 		return ret;
 	}
 
-	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(engine, flip_addr);
-	intel_ring_advance(engine);
+	ring = req->ringbuf;
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_advance(ring);
 
 	WARN_ON(overlay->last_flip_req);
 	i915_gem_request_assign(&overlay->last_flip_req, req);
@@ -336,6 +340,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
+	struct intel_ringbuffer *ring;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -357,24 +362,25 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 		return ret;
 	}
 
+	ring = req->ringbuf;
 	/* wait for overlay to go idle */
-	intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
-	intel_ring_emit(engine, flip_addr);
-	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	intel_ring_emit(ring, flip_addr);
+	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	/* turn overlay off */
 	if (IS_I830(dev_priv)) {
 		/* Workaround: Don't disable the overlay fully, since otherwise
 		 * it dies on the next OVERLAY_ON cmd. */
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
 	} else {
-		intel_ring_emit(engine, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
-		intel_ring_emit(engine, flip_addr);
-		intel_ring_emit(engine,
+		intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+		intel_ring_emit(ring, flip_addr);
+		intel_ring_emit(ring,
 				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
 }
@@ -420,6 +426,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
+		struct intel_ringbuffer *ring;
 
 		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req))
@@ -431,10 +438,11 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 			return ret;
 		}
 
-		intel_ring_emit(engine,
+		ring = req->ringbuf;
+		intel_ring_emit(ring,
 				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		ret = intel_overlay_do_wait_request(overlay, req,
 						    intel_overlay_release_old_vid_tail);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 15acaf617303..12670068f081 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -58,7 +58,7 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
 					    ringbuf->tail, ringbuf->size);
 }
 
-static void __intel_ring_advance(struct intel_engine_cs *engine)
+static void __intel_engine_submit(struct intel_engine_cs *engine)
 {
 	struct intel_ringbuffer *ringbuf = engine->buffer;
 	ringbuf->tail &= ringbuf->size - 1;
@@ -70,7 +70,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 cmd;
 	int ret;
 
@@ -85,9 +85,9 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -97,7 +97,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	u32 cmd;
 	int ret;
 
@@ -129,23 +129,20 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 	 * are flushed at any MI_FLUSH.
 	 */
 
-	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
-		cmd &= ~MI_NO_WRITE_FLUSH;
-	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+	cmd = MI_FLUSH;
+	if (invalidate_domains) {
 		cmd |= MI_EXE_FLUSH;
-
-	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
-	    (IS_G4X(req->i915) || IS_GEN5(req->i915)))
-		cmd |= MI_INVALIDATE_ISP;
+		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
+			cmd |= MI_INVALIDATE_ISP;
+	}
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -190,34 +187,35 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 scratch_addr =
+		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
 			PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(engine, 0); /* low dword */
-	intel_ring_emit(engine, 0); /* high dword */
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0); /* low dword */
+	intel_ring_emit(ring, 0); /* high dword */
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(5));
-	intel_ring_emit(engine, PIPE_CONTROL_QW_WRITE);
-	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -226,9 +224,10 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 scratch_addr =
+		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/* Force SNB workarounds for PIPE_CONTROL flushes */
@@ -266,11 +265,11 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(engine, flags);
-	intel_ring_emit(engine, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -278,19 +277,20 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(engine, PIPE_CONTROL_CS_STALL |
-			      PIPE_CONTROL_STALL_AT_SCOREBOARD);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring,
+			PIPE_CONTROL_CS_STALL |
+			PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -299,9 +299,10 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 scratch_addr =
+		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
-	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
 	/*
@@ -350,11 +351,11 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(4));
-	intel_ring_emit(engine, flags);
-	intel_ring_emit(engine, scratch_addr);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -363,20 +364,20 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(engine, flags);
-	intel_ring_emit(engine, scratch_addr);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, 0);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -385,8 +386,8 @@ static int
 gen8_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	u32 flags = 0;
 	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
+	u32 flags = 0;
 	int ret;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -679,14 +680,14 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	struct i915_workarounds *w = &req->i915->workarounds;
 	int ret, i;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
+	req->engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -695,16 +696,16 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(engine, w->reg[i].addr);
-		intel_ring_emit(engine, w->reg[i].value);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
 	}
-	intel_ring_emit(engine, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
+	req->engine->gpu_caches_dirty = true;
 	ret = intel_ring_flush_all_caches(req);
 	if (ret)
 		return ret;
@@ -1337,7 +1338,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_engine_cs *signaller = signaller_req->engine;
+	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1352,20 +1353,23 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+		u64 gtt_offset =
+			signaller_req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
 		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(signaller, PIPE_CONTROL_GLOBAL_GTT_IVB |
-					   PIPE_CONTROL_QW_WRITE |
-					   PIPE_CONTROL_CS_STALL);
+		intel_ring_emit(signaller,
+				PIPE_CONTROL_GLOBAL_GTT_IVB |
+				PIPE_CONTROL_QW_WRITE |
+				PIPE_CONTROL_CS_STALL);
 		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
 		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
 		intel_ring_emit(signaller, signaller_req->fence.seqno);
 		intel_ring_emit(signaller, 0);
-		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-					   MI_SEMAPHORE_TARGET(waiter->hw_id));
+		intel_ring_emit(signaller,
+				MI_SEMAPHORE_SIGNAL |
+				MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
 
@@ -1376,7 +1380,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_engine_cs *signaller = signaller_req->engine;
+	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1391,18 +1395,21 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset = signaller->semaphore.signal_ggtt[id];
+		u64 gtt_offset =
+			signaller_req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(signaller, (MI_FLUSH_DW + 1) |
-					   MI_FLUSH_DW_OP_STOREDW);
-		intel_ring_emit(signaller, lower_32_bits(gtt_offset) |
-					   MI_FLUSH_DW_USE_GTT);
+		intel_ring_emit(signaller,
+				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
+		intel_ring_emit(signaller,
+				lower_32_bits(gtt_offset) |
+				MI_FLUSH_DW_USE_GTT);
 		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
 		intel_ring_emit(signaller, signaller_req->fence.seqno);
-		intel_ring_emit(signaller, MI_SEMAPHORE_SIGNAL |
-					   MI_SEMAPHORE_TARGET(waiter->hw_id));
+		intel_ring_emit(signaller,
+				MI_SEMAPHORE_SIGNAL |
+				MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
 
@@ -1412,7 +1419,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_engine_cs *signaller = signaller_req->engine;
+	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
@@ -1428,7 +1435,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		return ret;
 
 	for_each_engine_id(useless, dev_priv, id) {
-		i915_reg_t mbox_reg = signaller->semaphore.mbox.signal[id];
+		i915_reg_t mbox_reg =
+			signaller_req->engine->semaphore.mbox.signal[id];
 
 		if (i915_mmio_reg_valid(mbox_reg)) {
 			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
@@ -1456,6 +1464,7 @@ static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1466,12 +1475,11 @@ gen6_add_request(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(engine, req->fence.seqno);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	__intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, req->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	__intel_engine_submit(engine);
 
 	return 0;
 }
@@ -1480,6 +1488,7 @@ static int
 gen8_render_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1489,18 +1498,18 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, GFX_OP_PIPE_CONTROL(6));
-	intel_ring_emit(engine, (PIPE_CONTROL_GLOBAL_GTT_IVB |
-				 PIPE_CONTROL_CS_STALL |
-				 PIPE_CONTROL_QW_WRITE));
-	intel_ring_emit(engine, intel_hws_seqno_address(req->engine));
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, i915_gem_request_get_seqno(req));
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+	intel_ring_emit(ring, (PIPE_CONTROL_GLOBAL_GTT_IVB |
+			       PIPE_CONTROL_CS_STALL |
+			       PIPE_CONTROL_QW_WRITE));
+	intel_ring_emit(ring, intel_hws_seqno_address(engine));
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	/* We're thrashing one dword of HWS. */
-	intel_ring_emit(engine, 0);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	intel_ring_emit(engine, MI_NOOP);
-	__intel_ring_advance(engine);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	intel_ring_emit(ring, MI_NOOP);
+	__intel_engine_submit(engine);
 
 	return 0;
 }
@@ -1524,9 +1533,9 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->engine;
+	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
-	u64 offset = GEN8_WAIT_OFFSET(waiter, signaller->id);
+	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
@@ -1558,11 +1567,11 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_engine_cs *waiter = waiter_req->engine;
+	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id];
+	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
 	int ret;
 
 	/* Throughout all of the GEM code, seqno passed implies our current
@@ -1692,35 +1701,34 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_FLUSH);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_FLUSH);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 	return 0;
 }
 
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(engine, req->fence.seqno);
-	intel_ring_emit(engine, MI_USER_INTERRUPT);
-	__intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	intel_ring_emit(ring, req->fence.seqno);
+	intel_ring_emit(ring, MI_USER_INTERRUPT);
+	__intel_engine_submit(req->engine);
 
 	return 0;
 }
@@ -1787,20 +1795,20 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			MI_BATCH_GTT |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1814,8 +1822,8 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
-	u32 cs_offset = engine->scratch.gtt_offset;
+	struct intel_ringbuffer *ring = req->ringbuf;
+	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -1823,13 +1831,13 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* Evict the invalid PTE TLBs */
-	intel_ring_emit(engine, COLOR_BLT_CMD | BLT_WRITE_RGBA);
-	intel_ring_emit(engine, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
-	intel_ring_emit(engine, I830_TLB_ENTRIES << 16 | 4); /* load each page */
-	intel_ring_emit(engine, cs_offset);
-	intel_ring_emit(engine, 0xdeadbeef);
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
+	intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
+	intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
+	intel_ring_emit(ring, cs_offset);
+	intel_ring_emit(ring, 0xdeadbeef);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
 		if (len > I830_BATCH_LIMIT)
@@ -1843,17 +1851,17 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		 * stable batch scratch bo area (so that the CS never
 		 * stumbles over its tlb invalidation bug) ...
 		 */
-		intel_ring_emit(engine, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
-		intel_ring_emit(engine,
+		intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
+		intel_ring_emit(ring,
 				BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
-		intel_ring_emit(engine, DIV_ROUND_UP(len, 4096) << 16 | 4096);
-		intel_ring_emit(engine, cs_offset);
-		intel_ring_emit(engine, 4096);
-		intel_ring_emit(engine, offset);
+		intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
+		intel_ring_emit(ring, cs_offset);
+		intel_ring_emit(ring, 4096);
+		intel_ring_emit(ring, offset);
 
-		intel_ring_emit(engine, MI_FLUSH);
-		intel_ring_emit(engine, MI_NOOP);
-		intel_ring_advance(engine);
+		intel_ring_emit(ring, MI_FLUSH);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_advance(ring);
 
 		/* ... and execute it. */
 		offset = cs_offset;
@@ -1863,10 +1871,10 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					  0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1876,17 +1884,17 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
-	intel_ring_emit(engine, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
-					  0 : MI_BATCH_NON_SECURE));
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
+	intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
+					0 : MI_BATCH_NON_SECURE));
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2418,8 +2426,9 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
-	int num_dwords = (engine->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+	struct intel_ringbuffer *ring = req->ringbuf;
+	int num_dwords =
+		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
 
 	if (num_dwords == 0)
@@ -2431,9 +2440,9 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 		return ret;
 
 	while (num_dwords--)
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2524,7 +2533,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	uint32_t cmd;
 	int ret;
 
@@ -2552,17 +2561,16 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	if (invalidate & I915_GEM_GPU_DOMAINS)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine,
-			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(engine, 0); /* upper addr */
-		intel_ring_emit(engine, 0); /* value */
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
 	} else  {
-		intel_ring_emit(engine, 0);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 	return 0;
 }
 
@@ -2571,8 +2579,8 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
-	bool ppgtt = USES_PPGTT(engine->dev) &&
+	struct intel_ringbuffer *ring = req->ringbuf;
+	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -2581,13 +2589,13 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 		return ret;
 
 	/* FIXME(BDW): Address space and security selectors. */
-	intel_ring_emit(engine, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
-	intel_ring_emit(engine, lower_32_bits(offset));
-	intel_ring_emit(engine, upper_32_bits(offset));
-	intel_ring_emit(engine, MI_NOOP);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2597,22 +2605,22 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
 			(dispatch_flags & I915_DISPATCH_RS ?
 			 MI_BATCH_RESOURCE_STREAMER : 0));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2622,20 +2630,20 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(engine,
+	intel_ring_emit(ring,
 			MI_BATCH_BUFFER_START |
 			(dispatch_flags & I915_DISPATCH_SECURE ?
 			 0 : MI_BATCH_NON_SECURE_I965));
 	/* bit0-7 is the length on GEN6+ */
-	intel_ring_emit(engine, offset);
-	intel_ring_advance(engine);
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2645,7 +2653,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_ringbuffer *ring = req->ringbuf;
 	uint32_t cmd;
 	int ret;
 
@@ -2672,17 +2680,17 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	 */
 	if (invalidate & I915_GEM_DOMAIN_RENDER)
 		cmd |= MI_INVALIDATE_TLB;
-	intel_ring_emit(engine, cmd);
-	intel_ring_emit(engine,
+	intel_ring_emit(ring, cmd);
+	intel_ring_emit(ring,
 			I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
 	if (INTEL_GEN(req->i915) >= 8) {
-		intel_ring_emit(engine, 0); /* upper addr */
-		intel_ring_emit(engine, 0); /* value */
+		intel_ring_emit(ring, 0); /* upper addr */
+		intel_ring_emit(ring, 0); /* value */
 	} else  {
-		intel_ring_emit(engine, 0);
-		intel_ring_emit(engine, MI_NOOP);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring, MI_NOOP);
 	}
-	intel_ring_advance(engine);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 9a0a02653039..4f4b8ea4df82 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -454,32 +454,21 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void __intel_ringbuffer_emit(struct intel_ringbuffer *rb,
-					   u32 data)
+static inline void intel_ring_emit(struct intel_ringbuffer *ring, u32 data)
 {
-	*(uint32_t *)(rb->vaddr + rb->tail) = data;
-	rb->tail += 4;
+	*(uint32_t *)(ring->vaddr + ring->tail) = data;
+	ring->tail += 4;
 }
 
-static inline void __intel_ringbuffer_advance(struct intel_ringbuffer *rb)
-{
-	rb->tail &= rb->size - 1;
-}
-
-static inline void intel_ring_emit(struct intel_engine_cs *engine, u32 data)
-{
-	__intel_ringbuffer_emit(engine->buffer, data);
-}
-
-static inline void intel_ring_emit_reg(struct intel_engine_cs *engine,
+static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring,
 				       i915_reg_t reg)
 {
-	intel_ring_emit(engine, i915_mmio_reg_offset(reg));
+	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
 }
 
-static inline void intel_ring_advance(struct intel_engine_cs *engine)
+static inline void intel_ring_advance(struct intel_ringbuffer *ring)
 {
-	__intel_ringbuffer_advance(engine->buffer);
+	ring->tail &= ring->size - 1;
 }
 
 int __intel_ring_space(int head, int tail, int size);
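
As a side note for readers skimming the interface change: after this
patch the emit helpers take the ring, not the engine. Below is a minimal
sketch of a caller (illustrative only, not part of the patch; the
function name is made up, and the flow mirrors bsd_ring_flush() above):

static int emit_two_noops(struct drm_i915_gem_request *req)
{
	struct intel_ringbuffer *ring = req->ringbuf;
	int ret;

	/* reserve space for two dwords in the ring */
	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	/* write the commands, then update the cached tail pointer */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
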
-- 
2.8.1

* [PATCH 02/73] drm/i915: Rename request->ringbuf to request->ring
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
  2016-08-01  9:10 ` [PATCH 01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 03/73] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
                   ` (71 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Now that we have disambiguated ring and engine, we can use the clearer
and more consistent name for the intel_ringbuffer pointer in the
request.

@@
struct drm_i915_gem_request *r;
@@
- r->ringbuf
+ r->ring
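
For anyone replaying this conversion locally, the rule above can be fed
to Coccinelle; a minimal invocation, assuming the rule is saved as
ringbuf.cocci (the filename is illustrative), would be:

  spatch --sp-file ringbuf.cocci --in-place --dir drivers/gpu/drm/i915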

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-12-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  4 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  4 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  6 +-
 drivers/gpu/drm/i915/i915_gem_request.c    | 16 +++---
 drivers/gpu/drm/i915/i915_gem_request.h    |  2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      | 19 +++---
 drivers/gpu/drm/i915/intel_display.c       | 10 ++--
 drivers/gpu/drm/i915/intel_lrc.c           | 57 +++++++++---------
 drivers/gpu/drm/i915/intel_mocs.c          | 36 ++++++------
 drivers/gpu/drm/i915/intel_overlay.c       |  8 +--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 92 +++++++++++++++---------------
 11 files changed, 125 insertions(+), 129 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a0e24eb5e167..f7f4a8c40afe 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -552,7 +552,7 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
@@ -655,7 +655,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
 	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int i, ret;
 
 	if (!remap_info)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2f9f0daa1bc2..42389de4752a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1173,7 +1173,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret, i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
@@ -1303,7 +1303,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ringbuffer *ring = params->request->ringbuf;
+		struct intel_ringbuffer *ring = params->request->ring;
 
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b38a5311f996..46cae2a92bda 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1661,7 +1661,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1688,7 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 49396b895a36..d2133c41be13 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -170,7 +170,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	 * Note this requires that we are always called in request
 	 * completion order.
 	 */
-	request->ringbuf->last_retired_head = request->postfix;
+	request->ring->last_retired_head = request->postfix;
 
 	i915_gem_request_remove_from_client(request);
 
@@ -423,7 +423,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 			bool flush_caches)
 {
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
 	u32 request_start;
 	u32 reserved_tail;
 	int ret;
@@ -432,14 +432,14 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		return;
 
 	engine = request->engine;
-	ringbuf = request->ringbuf;
+	ring = request->ring;
 
 	/*
 	 * To ensure that this call will not fail, space for its emissions
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
-	request_start = intel_ring_get_tail(ringbuf);
+	request_start = intel_ring_get_tail(ring);
 	reserved_tail = request->reserved_space;
 	request->reserved_space = 0;
 
@@ -486,21 +486,21 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request->postfix = intel_ring_get_tail(ringbuf);
+	request->postfix = intel_ring_get_tail(ring);
 
 	if (i915.enable_execlists) {
 		ret = engine->emit_request(request);
 	} else {
 		ret = engine->add_request(request);
 
-		request->tail = intel_ring_get_tail(ringbuf);
+		request->tail = intel_ring_get_tail(ring);
 	}
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
 	/* Sanity check that the reserved size was large enough. */
-	ret = intel_ring_get_tail(ringbuf) - request_start;
+	ret = intel_ring_get_tail(ring) - request_start;
 	if (ret < 0)
-		ret += ringbuf->size;
+		ret += ring->size;
 	WARN_ONCE(ret > reserved_tail,
 		  "Not enough space reserved (%d bytes) "
 		  "for adding the request (%d bytes)\n",
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index e06e81f459df..68868d825d9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -61,7 +61,7 @@ struct drm_i915_gem_request {
 	 */
 	struct i915_gem_context *ctx;
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
 	struct intel_signal_node signaling;
 
 	/** GEM sequence number associated with the previous request,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index bc4a3ebc2662..d490a43461ae 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1097,7 +1097,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		request = i915_gem_find_active_request(engine);
 		if (request) {
 			struct i915_address_space *vm;
-			struct intel_ringbuffer *rb;
+			struct intel_ringbuffer *ring;
 
 			vm = request->ctx->ppgtt ?
 				&request->ctx->ppgtt->base : &ggtt->base;
@@ -1114,7 +1114,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			if (HAS_BROKEN_CS_TLB(dev_priv))
 				ee->wa_batchbuffer =
 					i915_error_ggtt_object_create(dev_priv,
-							     engine->scratch.obj);
+								      engine->scratch.obj);
 
 			if (request->pid) {
 				struct task_struct *task;
@@ -1131,23 +1131,20 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 			error->simulated |=
 				request->ctx->flags & CONTEXT_NO_ERROR_CAPTURE;
 
-			rb = request->ringbuf;
-			ee->cpu_ring_head = rb->head;
-			ee->cpu_ring_tail = rb->tail;
+			ring = request->ring;
+			ee->cpu_ring_head = ring->head;
+			ee->cpu_ring_tail = ring->tail;
 			ee->ringbuffer =
 				i915_error_ggtt_object_create(dev_priv,
-							      rb->obj);
+							      ring->obj);
 		}
 
 		ee->hws_page =
 			i915_error_ggtt_object_create(dev_priv,
 						      engine->status_page.obj);
 
-		if (engine->wa_ctx.obj) {
-			ee->wa_ctx =
-				i915_error_ggtt_object_create(dev_priv,
-							      engine->wa_ctx.obj);
-		}
+		ee->wa_ctx = i915_error_ggtt_object_create(dev_priv,
+							   engine->wa_ctx.obj);
 
 		i915_gem_record_active_context(engine, error, ee);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 79305f8dbe55..f3735f92ffcb 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11115,7 +11115,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11149,7 +11149,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11180,7 +11180,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11218,7 +11218,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11253,7 +11253,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 883af4601f61..ce7dcf85ea4b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -714,7 +714,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 			return ret;
 	}
 
-	request->ringbuf = ce->ringbuf;
+	request->ring = ce->ringbuf;
 
 	if (i915.enable_guc_submission) {
 		/*
@@ -770,11 +770,11 @@ err_unpin:
 static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ringbuf = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 
-	intel_ring_advance(ringbuf);
-	request->tail = ringbuf->tail;
+	intel_ring_advance(ring);
+	request->tail = ring->tail;
 
 	/*
 	 * Here we add two extra NOOPs as padding to avoid
@@ -782,9 +782,9 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	 *
 	 * Caller must reserve WA_TAIL_DWORDS for us!
 	 */
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	/* We keep the previous context alive until we retire the following
 	 * request. This ensures that the context object is still pinned
@@ -821,7 +821,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	struct drm_device       *dev = params->dev;
 	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ringbuf = params->ctx->engine[engine->id].ringbuf;
+	struct intel_ringbuffer *ring = params->request->ring;
 	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
@@ -833,7 +833,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	case I915_EXEC_CONSTANTS_REL_GENERAL:
 	case I915_EXEC_CONSTANTS_ABSOLUTE:
 	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine != &dev_priv->engine[RCS]) {
+		if (instp_mode != 0 && engine->id != RCS) {
 			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
 			return -EINVAL;
 		}
@@ -862,17 +862,17 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	if (ret)
 		return ret;
 
-	if (engine == &dev_priv->engine[RCS] &&
+	if (engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
 			return ret;
 
-		intel_ring_emit(ringbuf, MI_NOOP);
-		intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ringbuf, INSTPM);
-		intel_ring_emit(ringbuf, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ringbuf);
+		intel_ring_emit(ring, MI_NOOP);
+		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+		intel_ring_emit_reg(ring, INSTPM);
+		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
+		intel_ring_advance(ring);
 
 		dev_priv->relative_constants_mode = instp_mode;
 	}
@@ -1030,7 +1030,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
@@ -1045,14 +1045,14 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(w->count));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
 	for (i = 0; i < w->count; i++) {
-		intel_ring_emit_reg(ringbuf, w->reg[i].addr);
-		intel_ring_emit(ringbuf, w->reg[i].value);
+		intel_ring_emit_reg(ring, w->reg[i].addr);
+		intel_ring_emit(ring, w->reg[i].value);
 	}
-	intel_ring_emit(ringbuf, MI_NOOP);
+	intel_ring_emit(ring, MI_NOOP);
 
-	intel_ring_advance(ringbuf);
+	intel_ring_advance(ring);
 
 	engine->gpu_caches_dirty = true;
 	ret = logical_ring_flush_all_caches(req);
@@ -1555,7 +1555,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
@@ -1583,7 +1583,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -1640,8 +1640,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
-	struct intel_engine_cs *engine = ring->engine;
+	struct intel_ringbuffer *ring = request->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -1660,7 +1659,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 
 	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
 		cmd |= MI_INVALIDATE_TLB;
-		if (engine->id == VCS)
+		if (request->engine->id == VCS)
 			cmd |= MI_INVALIDATE_BSD;
 	}
 
@@ -1679,7 +1678,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
@@ -1793,7 +1792,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1816,7 +1815,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ringbuf;
+	struct intel_ringbuffer *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 3059c52030b4..8534ec3648bd 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
 	int ret;
@@ -288,11 +288,11 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
+	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES));
 
 	for (index = 0; index < table->size; index++) {
-		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
-		intel_ring_emit(ringbuf, table->table[index].control_value);
+		intel_ring_emit_reg(ring, mocs_register(engine, index));
+		intel_ring_emit(ring, table->table[index].control_value);
 	}
 
 	/*
@@ -304,12 +304,12 @@ static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 	 * that value to all the used entries.
 	 */
 	for (; index < GEN9_NUM_MOCS_ENTRIES; index++) {
-		intel_ring_emit_reg(ringbuf, mocs_register(engine, index));
-		intel_ring_emit(ringbuf, table->table[0].control_value);
+		intel_ring_emit_reg(ring, mocs_register(engine, index));
+		intel_ring_emit(ring, table->table[0].control_value);
 	}
 
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -336,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	unsigned int i;
 	int ret;
 
@@ -347,18 +347,18 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ringbuf,
+	intel_ring_emit(ring,
 			MI_LOAD_REGISTER_IMM(GEN9_NUM_MOCS_ENTRIES / 2));
 
 	for (i = 0; i < table->size/2; i++) {
-		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 2*i+1));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 2*i, 2*i+1));
 	}
 
 	if (table->size & 0x01) {
 		/* Odd table size - 1 left over */
-		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ringbuf, l3cc_combine(table, 2*i, 0));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 2*i, 0));
 		i++;
 	}
 
@@ -368,12 +368,12 @@ static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 	 * they are reserved by the hardware.
 	 */
 	for (; i < GEN9_NUM_MOCS_ENTRIES / 2; i++) {
-		intel_ring_emit_reg(ringbuf, GEN9_LNCFCMOCS(i));
-		intel_ring_emit(ringbuf, l3cc_combine(table, 0, 0));
+		intel_ring_emit_reg(ring, GEN9_LNCFCMOCS(i));
+		intel_ring_emit(ring, l3cc_combine(table, 0, 0));
 	}
 
-	intel_ring_emit(ringbuf, MI_NOOP);
-	intel_ring_advance(ringbuf);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index ec63b64fb202..e750c0e3a267 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -253,7 +253,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 
 	overlay->active = true;
 
-	ring = req->ringbuf;
+	ring = req->ring;
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_ON);
 	intel_ring_emit(ring, overlay->flip_addr | OFC_UPDATE);
 	intel_ring_emit(ring, MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -295,7 +295,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 		return ret;
 	}
 
-	ring = req->ringbuf;
+	ring = req->ring;
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
 	intel_ring_advance(ring);
@@ -362,7 +362,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 		return ret;
 	}
 
-	ring = req->ringbuf;
+	ring = req->ring;
 	/* wait for overlay to go idle */
 	intel_ring_emit(ring, MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	intel_ring_emit(ring, flip_addr);
@@ -438,7 +438,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 			return ret;
 		}
 
-		ring = req->ringbuf;
+		ring = req->ring;
 		intel_ring_emit(ring,
 				MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
 		intel_ring_emit(ring, MI_NOOP);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 12670068f081..9d70d2f92de7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -70,7 +70,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -97,7 +97,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -187,7 +187,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -224,7 +224,7 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -277,7 +277,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -299,7 +299,7 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -364,7 +364,7 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -680,7 +680,7 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 	int ret, i;
 
@@ -1338,7 +1338,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1380,7 +1380,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1419,7 +1419,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_ringbuffer *signaller = signaller_req->ringbuf;
+	struct intel_ringbuffer *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
@@ -1464,7 +1464,7 @@ static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1488,7 +1488,7 @@ static int
 gen8_render_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1533,7 +1533,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+	struct intel_ringbuffer *waiter = waiter_req->ring;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
 	struct i915_hw_ppgtt *ppgtt;
@@ -1567,7 +1567,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ringbuf;
+	struct intel_ringbuffer *waiter = waiter_req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
@@ -1701,7 +1701,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1717,7 +1717,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -1795,7 +1795,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1822,7 +1822,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
@@ -1884,7 +1884,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2312,7 +2312,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 	 */
 	request->reserved_space += LEGACY_REQUEST_SIZE;
 
-	request->ringbuf = request->engine->buffer;
+	request->ring = request->engine->buffer;
 
 	ret = intel_ring_begin(request, 0);
 	if (ret)
@@ -2324,12 +2324,12 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *target;
 
-	intel_ring_update_space(ringbuf);
-	if (ringbuf->space >= bytes)
+	intel_ring_update_space(ring);
+	if (ring->space >= bytes)
 		return 0;
 
 	/*
@@ -2351,12 +2351,12 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 		 * from multiple ringbuffers. Here, we must ignore any that
 		 * aren't from the ringbuffer we're considering.
 		 */
-		if (target->ringbuf != ringbuf)
+		if (target->ring != ring)
 			continue;
 
 		/* Would completion of this request free enough space? */
-		space = __intel_ring_space(target->postfix, ringbuf->tail,
-					   ringbuf->size);
+		space = __intel_ring_space(target->postfix, ring->tail,
+					   ring->size);
 		if (space >= bytes)
 			break;
 	}
@@ -2369,9 +2369,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_ringbuffer *ringbuf = req->ringbuf;
-	int remain_actual = ringbuf->size - ringbuf->tail;
-	int remain_usable = ringbuf->effective_size - ringbuf->tail;
+	struct intel_ringbuffer *ring = req->ring;
+	int remain_actual = ring->size - ring->tail;
+	int remain_usable = ring->effective_size - ring->tail;
 	int bytes = num_dwords * sizeof(u32);
 	int total_bytes, wait_bytes;
 	bool need_wrap = false;
@@ -2398,35 +2398,35 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		wait_bytes = total_bytes;
 	}
 
-	if (wait_bytes > ringbuf->space) {
+	if (wait_bytes > ring->space) {
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
 
-		intel_ring_update_space(ringbuf);
-		if (unlikely(ringbuf->space < wait_bytes))
+		intel_ring_update_space(ring);
+		if (unlikely(ring->space < wait_bytes))
 			return -EAGAIN;
 	}
 
 	if (unlikely(need_wrap)) {
-		GEM_BUG_ON(remain_actual > ringbuf->space);
-		GEM_BUG_ON(ringbuf->tail + remain_actual > ringbuf->size);
+		GEM_BUG_ON(remain_actual > ring->space);
+		GEM_BUG_ON(ring->tail + remain_actual > ring->size);
 
 		/* Fill the tail with MI_NOOP */
-		memset(ringbuf->vaddr + ringbuf->tail, 0, remain_actual);
-		ringbuf->tail = 0;
-		ringbuf->space -= remain_actual;
+		memset(ring->vaddr + ring->tail, 0, remain_actual);
+		ring->tail = 0;
+		ring->space -= remain_actual;
 	}
 
-	ringbuf->space -= bytes;
-	GEM_BUG_ON(ringbuf->space < 0);
+	ring->space -= bytes;
+	GEM_BUG_ON(ring->space < 0);
 	return 0;
 }
 
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int num_dwords =
 		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
@@ -2533,7 +2533,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2579,7 +2579,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
@@ -2605,7 +2605,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2630,7 +2630,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2653,7 +2653,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ringbuf;
+	struct intel_ringbuffer *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
-- 
2.8.1

* [PATCH 03/73] drm/i915: Rename intel_context[engine].ringbuf
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
  2016-08-01  9:10 ` [PATCH 01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
  2016-08-01  9:10 ` [PATCH 02/73] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 04/73] drm/i915: Rename struct intel_ringbuffer to struct intel_ring Chris Wilson
                   ` (70 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Perform s/ringbuf/ring/ on the context struct for consistency with the
ring/engine split.

v2: Kill an outdated error_ringbuf label
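
No semantic patch was included this time; an untested Coccinelle sketch
covering the typed-pointer cases, analogous to the rule in the previous
patch, would be:

@@
struct intel_context *ce;
@@
- ce->ringbuf
+ ce->ring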

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-14-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  8 +++----
 drivers/gpu/drm/i915/i915_drv.h            |  2 +-
 drivers/gpu/drm/i915/i915_gem_context.c    |  4 ++--
 drivers/gpu/drm/i915/i915_guc_submission.c |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 37 ++++++++++++++----------------
 5 files changed, 25 insertions(+), 28 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9aa62c5b5f65..bde68741809b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -425,8 +425,8 @@ static int per_file_ctx_stats(int id, void *ptr, void *data)
 	for (n = 0; n < ARRAY_SIZE(ctx->engine); n++) {
 		if (ctx->engine[n].state)
 			per_file_stats(0, ctx->engine[n].state, data);
-		if (ctx->engine[n].ringbuf)
-			per_file_stats(0, ctx->engine[n].ringbuf->obj, data);
+		if (ctx->engine[n].ring)
+			per_file_stats(0, ctx->engine[n].ring->obj, data);
 	}
 
 	return 0;
@@ -2066,8 +2066,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			seq_putc(m, ce->initialised ? 'I' : 'i');
 			if (ce->state)
 				describe_obj(m, ce->state);
-			if (ce->ringbuf)
-				describe_ctx_ringbuf(m, ce->ringbuf);
+			if (ce->ring)
+				describe_ctx_ringbuf(m, ce->ring);
 			seq_putc(m, '\n');
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 65ada5d2f88c..db43ced6f5e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -894,7 +894,7 @@ struct i915_gem_context {
 
 	struct intel_context {
 		struct drm_i915_gem_object *state;
-		struct intel_ringbuffer *ringbuf;
+		struct intel_ringbuffer *ring;
 		struct i915_vma *lrc_vma;
 		uint32_t *lrc_reg_state;
 		u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f7f4a8c40afe..f825b1e4aadf 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -173,8 +173,8 @@ void i915_gem_context_free(struct kref *ctx_ref)
 			continue;
 
 		WARN_ON(ce->pin_count);
-		if (ce->ringbuf)
-			intel_ringbuffer_free(ce->ringbuf);
+		if (ce->ring)
+			intel_ringbuffer_free(ce->ring);
 
 		i915_gem_object_put(ce->state);
 	}
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 01c1c1671811..eccd34832fe6 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -363,7 +363,7 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
-		obj = ce->ringbuf->obj;
+		obj = ce->ring->obj;
 		gfx_addr = i915_gem_obj_ggtt_offset(obj);
 
 		lrc->ring_begin = gfx_addr;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ce7dcf85ea4b..97ce2e32a507 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -482,11 +482,8 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
 		 * resubmit the request. See gen8_emit_request() for where we
 		 * prepare the padding after the end of the request.
 		 */
-		struct intel_ringbuffer *ringbuf;
-
-		ringbuf = req0->ctx->engine[engine->id].ringbuf;
 		req0->tail += 8;
-		req0->tail &= ringbuf->size - 1;
+		req0->tail &= req0->ring->size - 1;
 	}
 
 	execlists_submit_requests(req0, req1);
@@ -714,7 +711,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 			return ret;
 	}
 
-	request->ring = ce->ringbuf;
+	request->ring = ce->ring;
 
 	if (i915.enable_guc_submission) {
 		/*
@@ -976,14 +973,14 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ringbuf);
+	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ring);
 	if (ret)
 		goto unpin_map;
 
 	ce->lrc_vma = i915_gem_obj_to_ggtt(ce->state);
 	intel_lr_context_descriptor_update(ctx, engine);
 
-	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ringbuf->vma->node.start;
+	lrc_reg_state[CTX_RING_BUFFER_START+1] = ce->ring->vma->node.start;
 	ce->lrc_reg_state = lrc_reg_state;
 	ce->state->dirty = true;
 
@@ -1014,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	if (--ce->pin_count)
 		return;
 
-	intel_unpin_ringbuffer_obj(ce->ringbuf);
+	intel_unpin_ringbuffer_obj(ce->ring);
 
 	i915_gem_object_unpin_map(ce->state);
 	i915_gem_object_ggtt_unpin(ce->state);
@@ -2348,7 +2345,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	struct drm_i915_gem_object *ctx_obj;
 	struct intel_context *ce = &ctx->engine[engine->id];
 	uint32_t context_size;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ringbuffer *ring;
 	int ret;
 
 	WARN_ON(ce->state);
@@ -2364,29 +2361,29 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		return PTR_ERR(ctx_obj);
 	}
 
-	ringbuf = intel_engine_create_ringbuffer(engine, ctx->ring_size);
-	if (IS_ERR(ringbuf)) {
-		ret = PTR_ERR(ringbuf);
+	ring = intel_engine_create_ringbuffer(engine, ctx->ring_size);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
 		goto error_deref_obj;
 	}
 
-	ret = populate_lr_context(ctx, ctx_obj, engine, ringbuf);
+	ret = populate_lr_context(ctx, ctx_obj, engine, ring);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
-		goto error_ringbuf;
+		goto error_ring_free;
 	}
 
-	ce->ringbuf = ringbuf;
+	ce->ring = ring;
 	ce->state = ctx_obj;
 	ce->initialised = engine->init_context == NULL;
 
 	return 0;
 
-error_ringbuf:
-	intel_ringbuffer_free(ringbuf);
+error_ring_free:
+	intel_ringbuffer_free(ring);
 error_deref_obj:
 	i915_gem_object_put(ctx_obj);
-	ce->ringbuf = NULL;
+	ce->ring = NULL;
 	ce->state = NULL;
 	return ret;
 }
@@ -2417,7 +2414,7 @@ void intel_lr_context_reset(struct drm_i915_private *dev_priv,
 
 		i915_gem_object_unpin_map(ctx_obj);
 
-		ce->ringbuf->head = 0;
-		ce->ringbuf->tail = 0;
+		ce->ring->head = 0;
+		ce->ring->tail = 0;
 	}
 }
-- 
2.8.1

* [PATCH 04/73] drm/i915: Rename struct intel_ringbuffer to struct intel_ring
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (2 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 03/73] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 05/73] drm/i915: Rename residual ringbuf parameters Chris Wilson
                   ` (69 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

The state stored in this struct covers not only the backing buffer
object, but also the ring used to communicate with the hardware.
Calling it a buffer is overly specific and, for me at least, conflates
it with the notion of buffer objects themselves.

s/struct intel_ringbuffer/struct intel_ring/
s/enum intel_ring_hangcheck/enum intel_engine_hangcheck/
s/describe_ctx_ringbuf()/describe_ctx_ring()/
s/intel_ring_get_active_head()/intel_engine_get_active_head()/
s/intel_ring_sync_index()/intel_engine_sync_index()/
s/intel_ring_init_seqno()/intel_engine_init_seqno()/
s/ring_stuck()/engine_stuck()/
s/intel_cleanup_engine()/intel_engine_cleanup()/
s/intel_stop_engine()/intel_engine_stop()/
s/intel_pin_and_map_ringbuffer_obj()/intel_pin_and_map_ring()/
s/intel_unpin_ringbuffer()/intel_unpin_ring()/
s/intel_engine_create_ringbuffer()/intel_engine_create_ring()/
s/intel_ring_flush_all_caches()/intel_engine_flush_all_caches()/
s/intel_ring_invalidate_all_caches()/intel_engine_invalidate_all_caches()/
s/intel_ringbuffer_free()/intel_ring_free()/
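
As a quick illustration of the resulting convention (a hypothetical
sketch, not part of the patch itself): per-request emitters now take
the request's ring directly, while engine-wide operations keep the
intel_engine_* prefix.

static int example_emit_flush(struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring; /* was struct intel_ringbuffer */
	int ret;

	ret = intel_ring_begin(req, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}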

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-15-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  11 ++-
 drivers/gpu/drm/i915/i915_drv.h            |   4 +-
 drivers/gpu/drm/i915/i915_gem.c            |  16 ++--
 drivers/gpu/drm/i915/i915_gem_context.c    |   6 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   6 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        |   6 +-
 drivers/gpu/drm/i915/i915_gem_request.c    |   6 +-
 drivers/gpu/drm/i915/i915_gem_request.h    |   2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      |   8 +-
 drivers/gpu/drm/i915/i915_irq.c            |  14 ++--
 drivers/gpu/drm/i915/intel_display.c       |  10 +--
 drivers/gpu/drm/i915/intel_engine_cs.c     |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c           |  34 ++++----
 drivers/gpu/drm/i915/intel_mocs.c          |   4 +-
 drivers/gpu/drm/i915/intel_overlay.c       |   8 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 128 ++++++++++++++---------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  51 ++++++------
 17 files changed, 157 insertions(+), 159 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index bde68741809b..dccc72d63dd0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1419,7 +1419,7 @@ static int i915_hangcheck_info(struct seq_file *m, void *unused)
 	intel_runtime_pm_get(dev_priv);
 
 	for_each_engine_id(engine, dev_priv, id) {
-		acthd[id] = intel_ring_get_active_head(engine);
+		acthd[id] = intel_engine_get_active_head(engine);
 		seqno[id] = intel_engine_get_seqno(engine);
 	}
 
@@ -2017,12 +2017,11 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-static void describe_ctx_ringbuf(struct seq_file *m,
-				 struct intel_ringbuffer *ringbuf)
+static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
 {
 	seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, last head: %d)",
-		   ringbuf->space, ringbuf->head, ringbuf->tail,
-		   ringbuf->last_retired_head);
+		   ring->space, ring->head, ring->tail,
+		   ring->last_retired_head);
 }
 
 static int i915_context_status(struct seq_file *m, void *unused)
@@ -2067,7 +2066,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 			if (ce->state)
 				describe_obj(m, ce->state);
 			if (ce->ring)
-				describe_ctx_ringbuf(m, ce->ring);
+				describe_ctx_ring(m, ce->ring);
 			seq_putc(m, '\n');
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index db43ced6f5e6..4c43bd3d74b1 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -518,7 +518,7 @@ struct drm_i915_error_state {
 		bool waiting;
 		int num_waiters;
 		int hangcheck_score;
-		enum intel_ring_hangcheck_action hangcheck_action;
+		enum intel_engine_hangcheck_action hangcheck_action;
 		int num_requests;
 
 		/* our own tracking of ring head and tail */
@@ -894,7 +894,7 @@ struct i915_gem_context {
 
 	struct intel_context {
 		struct drm_i915_gem_object *state;
-		struct intel_ringbuffer *ring;
+		struct intel_ring *ring;
 		struct i915_vma *lrc_vma;
 		uint32_t *lrc_reg_state;
 		u64 lrc_desc;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7bfce1d5c61b..59890f523c5f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2486,7 +2486,7 @@ static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
 
 static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
-	struct intel_ringbuffer *buffer;
+	struct intel_ring *ring;
 
 	while (!list_empty(&engine->active_list)) {
 		struct drm_i915_gem_object *obj;
@@ -2502,7 +2502,7 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	 * (lockless) lookup doesn't try and wait upon the request as we
 	 * reset it.
 	 */
-	intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+	intel_engine_init_seqno(engine, engine->last_submitted_seqno);
 
 	/*
 	 * Clear the execlists queue up before freeing the requests, as those
@@ -2541,9 +2541,9 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 	 * upon reset is less than when we start. Do one more pass over
 	 * all the ringbuffers to reset last_retired_head.
 	 */
-	list_for_each_entry(buffer, &engine->buffers, link) {
-		buffer->last_retired_head = buffer->tail;
-		intel_ring_update_space(buffer);
+	list_for_each_entry(ring, &engine->buffers, link) {
+		ring->last_retired_head = ring->tail;
+		intel_ring_update_space(ring);
 	}
 
 	engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
@@ -2870,7 +2870,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 		i915_gem_object_retire_request(obj, from_req);
 	} else {
-		int idx = intel_ring_sync_index(from, to);
+		int idx = intel_engine_sync_index(from, to);
 		u32 seqno = i915_gem_request_get_seqno(from_req);
 
 		WARN_ON(!to_req);
@@ -4570,8 +4570,8 @@ int i915_gem_init(struct drm_device *dev)
 
 	if (!i915.enable_execlists) {
 		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-		dev_priv->gt.cleanup_engine = intel_cleanup_engine;
-		dev_priv->gt.stop_engine = intel_stop_engine;
+		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
+		dev_priv->gt.stop_engine = intel_engine_stop;
 	} else {
 		dev_priv->gt.execbuf_submit = intel_execlists_submission;
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index f825b1e4aadf..3336a5fcd029 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -174,7 +174,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
 		WARN_ON(ce->pin_count);
 		if (ce->ring)
-			intel_ringbuffer_free(ce->ring);
+			intel_ring_free(ce->ring);
 
 		i915_gem_object_put(ce->state);
 	}
@@ -552,7 +552,7 @@ static inline int
 mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 {
 	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	u32 flags = hw_flags | MI_MM_SPACE_GTT;
 	const int num_rings =
@@ -655,7 +655,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 static int remap_l3(struct drm_i915_gem_request *req, int slice)
 {
 	u32 *remap_info = req->i915->l3_parity.remap_info[slice];
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int i, ret;
 
 	if (!remap_info)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 42389de4752a..d0ef675fb169 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1001,7 +1001,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return intel_ring_invalidate_all_caches(req);
+	return intel_engine_invalidate_all_caches(req);
 }
 
 static bool
@@ -1173,7 +1173,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret, i;
 
 	if (!IS_GEN7(req->i915) || req->engine->id != RCS) {
@@ -1303,7 +1303,7 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 
 	if (params->engine->id == RCS &&
 	    instp_mode != dev_priv->relative_constants_mode) {
-		struct intel_ringbuffer *ring = params->request->ring;
+		struct intel_ring *ring = params->request->ring;
 
 		ret = intel_ring_begin(params->request, 4);
 		if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 46cae2a92bda..ebfa0406a6a1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -669,7 +669,7 @@ static int gen8_write_pdp(struct drm_i915_gem_request *req,
 			  unsigned entry,
 			  dma_addr_t addr)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1661,7 +1661,7 @@ static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			 struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
@@ -1688,7 +1688,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 			  struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index d2133c41be13..942b5b1f1602 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -244,7 +244,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
 	/* Finally reset hw state */
 	for_each_engine(engine, dev_priv)
-		intel_ring_init_seqno(engine, seqno);
+		intel_engine_init_seqno(engine, seqno);
 
 	return 0;
 }
@@ -423,7 +423,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 			bool flush_caches)
 {
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	u32 request_start;
 	u32 reserved_tail;
 	int ret;
@@ -454,7 +454,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		if (i915.enable_execlists)
 			ret = logical_ring_flush_all_caches(request);
 		else
-			ret = intel_ring_flush_all_caches(request);
+			ret = intel_engine_flush_all_caches(request);
 		/* Not allowed to fail! */
 		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 68868d825d9d..382ca5a163eb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -61,7 +61,7 @@ struct drm_i915_gem_request {
 	 */
 	struct i915_gem_context *ctx;
 	struct intel_engine_cs *engine;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	struct intel_signal_node signaling;
 
 	/** GEM sequence number associated with the previous request,
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d490a43461ae..226b28efe9ab 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -221,7 +221,7 @@ static void print_error_buffers(struct drm_i915_error_state_buf *m,
 	}
 }
 
-static const char *hangcheck_action_to_str(enum intel_ring_hangcheck_action a)
+static const char *hangcheck_action_to_str(enum intel_engine_hangcheck_action a)
 {
 	switch (a) {
 	case HANGCHECK_IDLE:
@@ -879,7 +879,7 @@ static void gen8_record_semaphore_state(struct drm_i915_error_state *error,
 		signal_offset =
 			(GEN8_SIGNAL_OFFSET(engine, id) & (PAGE_SIZE - 1)) / 4;
 		tmp = error->semaphore_obj->pages[0];
-		idx = intel_ring_sync_index(engine, to);
+		idx = intel_engine_sync_index(engine, to);
 
 		ee->semaphore_mboxes[idx] = tmp[signal_offset];
 		ee->semaphore_seqno[idx] = engine->semaphore.sync_seqno[idx];
@@ -981,7 +981,7 @@ static void error_record_engine_registers(struct drm_i915_error_state *error,
 
 	ee->waiting = intel_engine_has_waiter(engine);
 	ee->instpm = I915_READ(RING_INSTPM(engine->mmio_base));
-	ee->acthd = intel_ring_get_active_head(engine);
+	ee->acthd = intel_engine_get_active_head(engine);
 	ee->seqno = intel_engine_get_seqno(engine);
 	ee->last_seqno = engine->last_submitted_seqno;
 	ee->start = I915_READ_START(engine);
@@ -1097,7 +1097,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		request = i915_gem_find_active_request(engine);
 		if (request) {
 			struct i915_address_space *vm;
-			struct intel_ringbuffer *ring;
+			struct intel_ring *ring;
 
 			vm = request->ctx->ppgtt ?
 				&request->ctx->ppgtt->base : &ggtt->base;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f5bf4f913a91..e58650096426 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -2993,7 +2993,7 @@ static bool subunits_stuck(struct intel_engine_cs *engine)
 	return stuck;
 }
 
-static enum intel_ring_hangcheck_action
+static enum intel_engine_hangcheck_action
 head_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
 	if (acthd != engine->hangcheck.acthd) {
@@ -3011,11 +3011,11 @@ head_stuck(struct intel_engine_cs *engine, u64 acthd)
 	return HANGCHECK_HUNG;
 }
 
-static enum intel_ring_hangcheck_action
-ring_stuck(struct intel_engine_cs *engine, u64 acthd)
+static enum intel_engine_hangcheck_action
+engine_stuck(struct intel_engine_cs *engine, u64 acthd)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	enum intel_ring_hangcheck_action ha;
+	enum intel_engine_hangcheck_action ha;
 	u32 tmp;
 
 	ha = head_stuck(engine, acthd);
@@ -3124,7 +3124,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 		if (engine->irq_seqno_barrier)
 			engine->irq_seqno_barrier(engine);
 
-		acthd = intel_ring_get_active_head(engine);
+		acthd = intel_engine_get_active_head(engine);
 		seqno = intel_engine_get_seqno(engine);
 
 		/* Reset stuck interrupts between batch advances */
@@ -3154,8 +3154,8 @@ static void i915_hangcheck_elapsed(struct work_struct *work)
 				 * being repeatedly kicked and so responsible
 				 * for stalling the machine.
 				 */
-				engine->hangcheck.action = ring_stuck(engine,
-								      acthd);
+				engine->hangcheck.action =
+					engine_stuck(engine, acthd);
 
 				switch (engine->hangcheck.action) {
 				case HANGCHECK_IDLE:
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f3735f92ffcb..abd0a484e5ae 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11115,7 +11115,7 @@ static int intel_gen2_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11149,7 +11149,7 @@ static int intel_gen3_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	u32 flip_mask;
 	int ret;
@@ -11180,7 +11180,7 @@ static int intel_gen4_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11218,7 +11218,7 @@ static int intel_gen6_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t pf, pipesrc;
@@ -11253,7 +11253,7 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
 				 struct drm_i915_gem_request *req,
 				 uint32_t flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	uint32_t plane_bit = 0;
 	int len, ret;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index b90dd2f500bd..a5f2128dd5a3 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -155,7 +155,7 @@ cleanup:
 		if (i915.enable_execlists)
 			intel_logical_ring_cleanup(&dev_priv->engine[i]);
 		else
-			intel_cleanup_engine(&dev_priv->engine[i]);
+			intel_engine_cleanup(&dev_priv->engine[i]);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 97ce2e32a507..9668331f973a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -767,7 +767,7 @@ err_unpin:
 static int
 intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 
 	intel_ring_advance(ring);
@@ -818,7 +818,7 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	struct drm_device       *dev = params->dev;
 	struct intel_engine_cs *engine = params->engine;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ringbuffer *ring = params->request->ring;
+	struct intel_ring *ring = params->request->ring;
 	u64 exec_start;
 	int instp_mode;
 	u32 instp_mask;
@@ -973,7 +973,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ce->ring);
+	ret = intel_pin_and_map_ring(dev_priv, ce->ring);
 	if (ret)
 		goto unpin_map;
 
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	if (--ce->pin_count)
 		return;
 
-	intel_unpin_ringbuffer_obj(ce->ring);
+	intel_unpin_ring(ce->ring);
 
 	i915_gem_object_unpin_map(ce->state);
 	i915_gem_object_ggtt_unpin(ce->state);
@@ -1027,7 +1027,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
@@ -1552,7 +1552,7 @@ static int gen9_init_render_ring(struct intel_engine_cs *engine)
 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 {
 	struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	const int num_lri_cmds = GEN8_LEGACY_PDPES * 2;
 	int i, ret;
@@ -1580,7 +1580,7 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
 			      u64 offset, unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
 
@@ -1637,8 +1637,8 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 			   u32 invalidate_domains,
 			   u32 unused)
 {
-	struct intel_ringbuffer *ring = request->ring;
-	uint32_t cmd;
+	struct intel_ring *ring = request->ring;
+	u32 cmd;
 	int ret;
 
 	ret = intel_ring_begin(request, 4);
@@ -1675,7 +1675,7 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 				  u32 invalidate_domains,
 				  u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	bool vf_flush_wa = false, dc_flush_wa = false;
@@ -1789,7 +1789,7 @@ static void bxt_a_seqno_barrier(struct intel_engine_cs *engine)
 
 static int gen8_emit_request(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 6 + WA_TAIL_DWORDS);
@@ -1812,7 +1812,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 {
-	struct intel_ringbuffer *ring = request->ring;
+	struct intel_ring *ring = request->ring;
 	int ret;
 
 	ret = intel_ring_begin(request, 8 + WA_TAIL_DWORDS);
@@ -2164,7 +2164,7 @@ static int
 populate_lr_context(struct i915_gem_context *ctx,
 		    struct drm_i915_gem_object *ctx_obj,
 		    struct intel_engine_cs *engine,
-		    struct intel_ringbuffer *ringbuf)
+		    struct intel_ring *ring)
 {
 	struct drm_i915_private *dev_priv = ctx->i915;
 	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
@@ -2217,7 +2217,7 @@ populate_lr_context(struct i915_gem_context *ctx,
 		       RING_START(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_RING_BUFFER_CONTROL,
 		       RING_CTL(engine->mmio_base),
-		       ((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
+		       ((ring->size - PAGE_SIZE) & RING_NR_PAGES) | RING_VALID);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_U,
 		       RING_BBADDR_UDW(engine->mmio_base), 0);
 	ASSIGN_CTX_REG(reg_state, CTX_BB_HEAD_L,
@@ -2345,7 +2345,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	struct drm_i915_gem_object *ctx_obj;
 	struct intel_context *ce = &ctx->engine[engine->id];
 	uint32_t context_size;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(ce->state);
@@ -2361,7 +2361,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 		return PTR_ERR(ctx_obj);
 	}
 
-	ring = intel_engine_create_ringbuffer(engine, ctx->ring_size);
+	ring = intel_engine_create_ring(engine, ctx->ring_size);
 	if (IS_ERR(ring)) {
 		ret = PTR_ERR(ring);
 		goto error_deref_obj;
@@ -2380,7 +2380,7 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 	return 0;
 
 error_ring_free:
-	intel_ringbuffer_free(ring);
+	intel_ring_free(ring);
 error_deref_obj:
 	i915_gem_object_put(ctx_obj);
 	ce->ring = NULL;
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c
index 8534ec3648bd..80bb9247ce66 100644
--- a/drivers/gpu/drm/i915/intel_mocs.c
+++ b/drivers/gpu/drm/i915/intel_mocs.c
@@ -276,7 +276,7 @@ int intel_mocs_init_engine(struct intel_engine_cs *engine)
 static int emit_mocs_control_table(struct drm_i915_gem_request *req,
 				   const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	enum intel_engine_id engine = req->engine->id;
 	unsigned int index;
 	int ret;
@@ -336,7 +336,7 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table,
 static int emit_mocs_l3cc_table(struct drm_i915_gem_request *req,
 				const struct drm_i915_mocs_table *table)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	unsigned int i;
 	int ret;
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index e750c0e3a267..8f1d4d9ef345 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -235,7 +235,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(overlay->active);
@@ -270,7 +270,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
 	int ret;
@@ -340,7 +340,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	struct drm_i915_private *dev_priv = overlay->i915;
 	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
 	int ret;
 
@@ -426,7 +426,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 	if (I915_READ(ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT) {
 		/* synchronous slowpath */
 		struct drm_i915_gem_request *req;
-		struct intel_ringbuffer *ring;
+		struct intel_ring *ring;
 
 		req = i915_gem_request_alloc(engine, NULL);
 		if (IS_ERR(req))
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9d70d2f92de7..acbabbdececd 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -47,7 +47,7 @@ int __intel_ring_space(int head, int tail, int size)
 	return space - I915_RING_FREE_SPACE;
 }
 
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
+void intel_ring_update_space(struct intel_ring *ringbuf)
 {
 	if (ringbuf->last_retired_head != -1) {
 		ringbuf->head = ringbuf->last_retired_head;
@@ -60,9 +60,10 @@ void intel_ring_update_space(struct intel_ringbuffer *ringbuf)
 
 static void __intel_engine_submit(struct intel_engine_cs *engine)
 {
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	ringbuf->tail &= ringbuf->size - 1;
-	engine->write_tail(engine, ringbuf->tail);
+	struct intel_ring *ring = engine->buffer;
+
+	ring->tail &= ring->size - 1;
+	engine->write_tail(engine, ring->tail);
 }
 
 static int
@@ -70,7 +71,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -97,7 +98,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32	invalidate_domains,
 		       u32	flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
@@ -187,7 +188,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
@@ -224,7 +225,7 @@ static int
 gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -277,7 +278,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 static int
 gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -299,7 +300,7 @@ static int
 gen7_render_ring_flush(struct drm_i915_gem_request *req,
 		       u32 invalidate_domains, u32 flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
 		req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -364,7 +365,7 @@ static int
 gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 		       u32 flags, u32 scratch_addr)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 6);
@@ -427,7 +428,7 @@ static void ring_write_tail(struct intel_engine_cs *engine,
 	I915_WRITE_TAIL(engine, value);
 }
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine)
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	u64 acthd;
@@ -553,8 +554,8 @@ static bool stop_ring(struct intel_engine_cs *engine)
 static int init_ring_common(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ringbuffer *ringbuf = engine->buffer;
-	struct drm_i915_gem_object *obj = ringbuf->obj;
+	struct intel_ring *ring = engine->buffer;
+	struct drm_i915_gem_object *obj = ring->obj;
 	int ret = 0;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -604,7 +605,7 @@ static int init_ring_common(struct intel_engine_cs *engine)
 	(void)I915_READ_HEAD(engine);
 
 	I915_WRITE_CTL(engine,
-			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)
+			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_VALID);
 
 	/* If the head is still not zero, the ring is dead */
@@ -623,10 +624,10 @@ static int init_ring_common(struct intel_engine_cs *engine)
 		goto out;
 	}
 
-	ringbuf->last_retired_head = -1;
-	ringbuf->head = I915_READ_HEAD(engine);
-	ringbuf->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
-	intel_ring_update_space(ringbuf);
+	ring->last_retired_head = -1;
+	ring->head = I915_READ_HEAD(engine);
+	ring->tail = I915_READ_TAIL(engine) & TAIL_ADDR;
+	intel_ring_update_space(ring);
 
 	intel_engine_init_hangcheck(engine);
 
@@ -680,7 +681,7 @@ err:
 
 static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 	int ret, i;
 
@@ -688,7 +689,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 		return 0;
 
 	req->engine->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(req);
+	ret = intel_engine_flush_all_caches(req);
 	if (ret)
 		return ret;
 
@@ -706,7 +707,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	intel_ring_advance(ring);
 
 	req->engine->gpu_caches_dirty = true;
-	ret = intel_ring_flush_all_caches(req);
+	ret = intel_engine_flush_all_caches(req);
 	if (ret)
 		return ret;
 
@@ -1338,7 +1339,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 8
-	struct intel_ringbuffer *signaller = signaller_req->ring;
+	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1380,7 +1381,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 			   unsigned int num_dwords)
 {
 #define MBOX_UPDATE_DWORDS 6
-	struct intel_ringbuffer *signaller = signaller_req->ring;
+	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
@@ -1419,7 +1420,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 		       unsigned int num_dwords)
 {
-	struct intel_ringbuffer *signaller = signaller_req->ring;
+	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
@@ -1464,7 +1465,7 @@ static int
 gen6_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1488,7 +1489,7 @@ static int
 gen8_render_add_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	if (engine->semaphore.signal)
@@ -1533,7 +1534,7 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ring;
+	struct intel_ring *waiter = waiter_req->ring;
 	struct drm_i915_private *dev_priv = waiter_req->i915;
 	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
 	struct i915_hw_ppgtt *ppgtt;
@@ -1567,7 +1568,7 @@ gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
 	       struct intel_engine_cs *signaller,
 	       u32 seqno)
 {
-	struct intel_ringbuffer *waiter = waiter_req->ring;
+	struct intel_ring *waiter = waiter_req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
@@ -1701,7 +1702,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 	       u32     invalidate_domains,
 	       u32     flush_domains)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1717,7 +1718,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 static int
 i9xx_add_request(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 4);
@@ -1795,7 +1796,7 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 length,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1822,7 +1823,7 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	u32 cs_offset = req->engine->scratch.gtt_offset;
 	int ret;
 
@@ -1884,7 +1885,7 @@ i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			 u64 offset, u32 len,
 			 unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -1992,7 +1993,7 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+void intel_unpin_ring(struct intel_ring *ringbuf)
 {
 	GEM_BUG_ON(!ringbuf->vma);
 	GEM_BUG_ON(!ringbuf->vaddr);
@@ -2007,8 +2008,8 @@ void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
 	ringbuf->vma = NULL;
 }
 
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-				     struct intel_ringbuffer *ringbuf)
+int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
+			   struct intel_ring *ringbuf)
 {
 	struct drm_i915_gem_object *obj = ringbuf->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -2060,14 +2061,14 @@ err_unpin:
 	return ret;
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ringbuffer *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
 {
 	i915_gem_object_put(ringbuf->obj);
 	ringbuf->obj = NULL;
 }
 
 static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ringbuffer *ringbuf)
+				      struct intel_ring *ringbuf)
 {
 	struct drm_i915_gem_object *obj;
 
@@ -2087,10 +2088,10 @@ static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
 	return 0;
 }
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 {
-	struct intel_ringbuffer *ring;
+	struct intel_ring *ring;
 	int ret;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
@@ -2128,7 +2129,7 @@ intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size)
 }
 
 void
-intel_ringbuffer_free(struct intel_ringbuffer *ring)
+intel_ring_free(struct intel_ring *ring)
 {
 	intel_destroy_ringbuffer_obj(ring);
 	list_del(&ring->link);
@@ -2189,7 +2190,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ringbuffer *ringbuf;
+	struct intel_ring *ringbuf;
 	int ret;
 
 	WARN_ON(engine->buffer);
@@ -2214,7 +2215,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	if (ret)
 		goto error;
 
-	ringbuf = intel_engine_create_ringbuffer(engine, 32 * PAGE_SIZE);
+	ringbuf = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
 	if (IS_ERR(ringbuf)) {
 		ret = PTR_ERR(ringbuf);
 		goto error;
@@ -2232,7 +2233,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 			goto error;
 	}
 
-	ret = intel_pin_and_map_ringbuffer_obj(dev_priv, ringbuf);
+	ret = intel_pin_and_map_ring(dev_priv, ringbuf);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
 				engine->name, ret);
@@ -2243,11 +2244,11 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	return 0;
 
 error:
-	intel_cleanup_engine(engine);
+	intel_engine_cleanup(engine);
 	return ret;
 }
 
-void intel_cleanup_engine(struct intel_engine_cs *engine)
+void intel_engine_cleanup(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv;
 
@@ -2257,11 +2258,11 @@ void intel_cleanup_engine(struct intel_engine_cs *engine)
 	dev_priv = engine->i915;
 
 	if (engine->buffer) {
-		intel_stop_engine(engine);
+		intel_engine_stop(engine);
 		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-		intel_unpin_ringbuffer_obj(engine->buffer);
-		intel_ringbuffer_free(engine->buffer);
+		intel_unpin_ring(engine->buffer);
+		intel_ring_free(engine->buffer);
 		engine->buffer = NULL;
 	}
 
@@ -2324,7 +2325,7 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *target;
 
@@ -2369,7 +2370,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int remain_actual = ring->size - ring->tail;
 	int remain_usable = ring->effective_size - ring->tail;
 	int bytes = num_dwords * sizeof(u32);
@@ -2426,7 +2427,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 /* Align the ring tail to a cacheline boundary */
 int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int num_dwords =
 		(ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
 	int ret;
@@ -2447,7 +2448,7 @@ int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno)
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 
@@ -2533,7 +2534,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 			       u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2579,7 +2580,7 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
 			!(dispatch_flags & I915_DISPATCH_SECURE);
 	int ret;
@@ -2605,7 +2606,7 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			     u64 offset, u32 len,
 			     unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2630,7 +2631,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 			      u64 offset, u32 len,
 			      unsigned dispatch_flags)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(req, 2);
@@ -2653,7 +2654,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 static int gen6_ring_flush(struct drm_i915_gem_request *req,
 			   u32 invalidate, u32 flush)
 {
-	struct intel_ringbuffer *ring = req->ring;
+	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
 	int ret;
 
@@ -2970,7 +2971,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 }
 
 int
-intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
+intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	int ret;
@@ -2989,7 +2990,7 @@ intel_ring_flush_all_caches(struct drm_i915_gem_request *req)
 }
 
 int
-intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
+intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	uint32_t flush_domains;
@@ -3009,8 +3010,7 @@ intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-void
-intel_stop_engine(struct intel_engine_cs *engine)
+void intel_engine_stop(struct intel_engine_cs *engine)
 {
 	int ret;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 4f4b8ea4df82..8f94e9345819 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -62,7 +62,7 @@ struct  intel_hw_status_page {
 	(i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj) + \
 	 GEN8_SEMAPHORE_OFFSET(from, (__ring)->id))
 
-enum intel_ring_hangcheck_action {
+enum intel_engine_hangcheck_action {
 	HANGCHECK_IDLE = 0,
 	HANGCHECK_WAIT,
 	HANGCHECK_ACTIVE,
@@ -72,17 +72,17 @@ enum intel_ring_hangcheck_action {
 
 #define HANGCHECK_SCORE_RING_HUNG 31
 
-struct intel_ring_hangcheck {
+struct intel_engine_hangcheck {
 	u64 acthd;
 	unsigned long user_interrupts;
 	u32 seqno;
 	int score;
-	enum intel_ring_hangcheck_action action;
+	enum intel_engine_hangcheck_action action;
 	int deadlock;
 	u32 instdone[I915_NUM_INSTDONE_REG];
 };
 
-struct intel_ringbuffer {
+struct intel_ring {
 	struct drm_i915_gem_object *obj;
 	void *vaddr;
 	struct i915_vma *vma;
@@ -149,7 +149,7 @@ struct intel_engine_cs {
 	u64 fence_context;
 	u32		mmio_base;
 	unsigned int irq_shift;
-	struct intel_ringbuffer *buffer;
+	struct intel_ring *buffer;
 	struct list_head buffers;
 
 	/* Rather than have every client wait upon all user interrupts,
@@ -329,7 +329,7 @@ struct intel_engine_cs {
 
 	struct i915_gem_context *last_context;
 
-	struct intel_ring_hangcheck hangcheck;
+	struct intel_engine_hangcheck hangcheck;
 
 	struct {
 		struct drm_i915_gem_object *obj;
@@ -376,8 +376,8 @@ intel_engine_flag(const struct intel_engine_cs *engine)
 }
 
 static inline u32
-intel_ring_sync_index(struct intel_engine_cs *engine,
-		      struct intel_engine_cs *other)
+intel_engine_sync_index(struct intel_engine_cs *engine,
+			struct intel_engine_cs *other)
 {
 	int idx;
 
@@ -439,45 +439,44 @@ intel_write_status_page(struct intel_engine_cs *engine,
 #define I915_GEM_HWS_SCRATCH_INDEX	0x40
 #define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
 
-struct intel_ringbuffer *
-intel_engine_create_ringbuffer(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ringbuffer_obj(struct drm_i915_private *dev_priv,
-				     struct intel_ringbuffer *ringbuf);
-void intel_unpin_ringbuffer_obj(struct intel_ringbuffer *ringbuf);
-void intel_ringbuffer_free(struct intel_ringbuffer *ring);
+struct intel_ring *
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
+			   struct intel_ring *ring);
+void intel_unpin_ring(struct intel_ring *ring);
+void intel_ring_free(struct intel_ring *ring);
 
-void intel_stop_engine(struct intel_engine_cs *engine);
-void intel_cleanup_engine(struct intel_engine_cs *engine);
+void intel_engine_stop(struct intel_engine_cs *engine);
+void intel_engine_cleanup(struct intel_engine_cs *engine);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
 int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
-static inline void intel_ring_emit(struct intel_ringbuffer *ring, u32 data)
+static inline void intel_ring_emit(struct intel_ring *ring, u32 data)
 {
 	*(uint32_t *)(ring->vaddr + ring->tail) = data;
 	ring->tail += 4;
 }
 
-static inline void intel_ring_emit_reg(struct intel_ringbuffer *ring,
-				       i915_reg_t reg)
+static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
 {
 	intel_ring_emit(ring, i915_mmio_reg_offset(reg));
 }
 
-static inline void intel_ring_advance(struct intel_ringbuffer *ring)
+static inline void intel_ring_advance(struct intel_ring *ring)
 {
 	ring->tail &= ring->size - 1;
 }
 
 int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ringbuffer *ringbuf);
+void intel_ring_update_space(struct intel_ring *ringbuf);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
-void intel_ring_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_ring_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_ring_invalidate_all_caches(struct drm_i915_gem_request *req);
+void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
+int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
+int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
@@ -491,7 +490,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine);
 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine);
 
-u64 intel_ring_get_active_head(struct intel_engine_cs *engine);
+u64 intel_engine_get_active_head(struct intel_engine_cs *engine);
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 {
 	return intel_read_status_page(engine, I915_GEM_HWS_INDEX);
@@ -499,7 +498,7 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ringbuffer *ringbuf)
+static inline u32 intel_ring_get_tail(struct intel_ring *ringbuf)
 {
 	return ringbuf->tail;
 }
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 05/73] drm/i915: Rename residual ringbuf parameters
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (3 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 04/73] drm/i915: Rename struct intel_ringbuffer to struct intel_ring Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 06/73] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
                   ` (68 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Now that we have a clear ring/engine split and a struct intel_ring, we
no longer need the stopgap ringbuf names.
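
For reference, a worked sketch of the wrap-around space computation
that intel_ring_update_space() delegates to; the I915_RING_FREE_SPACE
reserve (64 bytes at the time of writing) keeps head from ever fully
catching tail. The values below are made up for illustration:

	int space = head - tail;	/* bytes from tail up to head */
	if (space <= 0)
		space += size;		/* tail is ahead of head: wrap */
	space -= I915_RING_FREE_SPACE;	/* never let head == tail */

	/* e.g. size=4096, head=512, tail=3584:
	 * 512 - 3584 + 4096 - 64 = 960 bytes available
	 */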

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-16-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 66 ++++++++++++++++-----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  6 +--
 2 files changed, 36 insertions(+), 36 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index acbabbdececd..5dd720e7feaa 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -47,15 +47,15 @@ int __intel_ring_space(int head, int tail, int size)
 	return space - I915_RING_FREE_SPACE;
 }
 
-void intel_ring_update_space(struct intel_ring *ringbuf)
+void intel_ring_update_space(struct intel_ring *ring)
 {
-	if (ringbuf->last_retired_head != -1) {
-		ringbuf->head = ringbuf->last_retired_head;
-		ringbuf->last_retired_head = -1;
+	if (ring->last_retired_head != -1) {
+		ring->head = ring->last_retired_head;
+		ring->last_retired_head = -1;
 	}
 
-	ringbuf->space = __intel_ring_space(ringbuf->head & HEAD_ADDR,
-					    ringbuf->tail, ringbuf->size);
+	ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
+					 ring->tail, ring->size);
 }
 
 static void __intel_engine_submit(struct intel_engine_cs *engine)
@@ -1993,25 +1993,25 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
-void intel_unpin_ring(struct intel_ring *ringbuf)
+void intel_unpin_ring(struct intel_ring *ring)
 {
-	GEM_BUG_ON(!ringbuf->vma);
-	GEM_BUG_ON(!ringbuf->vaddr);
+	GEM_BUG_ON(!ring->vma);
+	GEM_BUG_ON(!ring->vaddr);
 
-	if (HAS_LLC(ringbuf->obj->base.dev) && !ringbuf->obj->stolen)
-		i915_gem_object_unpin_map(ringbuf->obj);
+	if (HAS_LLC(ring->obj->base.dev) && !ring->obj->stolen)
+		i915_gem_object_unpin_map(ring->obj);
 	else
-		i915_vma_unpin_iomap(ringbuf->vma);
-	ringbuf->vaddr = NULL;
+		i915_vma_unpin_iomap(ring->vma);
+	ring->vaddr = NULL;
 
-	i915_gem_object_ggtt_unpin(ringbuf->obj);
-	ringbuf->vma = NULL;
+	i915_gem_object_ggtt_unpin(ring->obj);
+	ring->vma = NULL;
 }
 
 int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
-			   struct intel_ring *ringbuf)
+			   struct intel_ring *ring)
 {
-	struct drm_i915_gem_object *obj = ringbuf->obj;
+	struct drm_i915_gem_object *obj = ring->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
 	unsigned flags = PIN_OFFSET_BIAS | 4096;
 	void *addr;
@@ -2052,8 +2052,8 @@ int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
 		}
 	}
 
-	ringbuf->vaddr = addr;
-	ringbuf->vma = i915_gem_obj_to_ggtt(obj);
+	ring->vaddr = addr;
+	ring->vma = i915_gem_obj_to_ggtt(obj);
 	return 0;
 
 err_unpin:
@@ -2061,29 +2061,29 @@ err_unpin:
 	return ret;
 }
 
-static void intel_destroy_ringbuffer_obj(struct intel_ring *ringbuf)
+static void intel_destroy_ringbuffer_obj(struct intel_ring *ring)
 {
-	i915_gem_object_put(ringbuf->obj);
-	ringbuf->obj = NULL;
+	i915_gem_object_put(ring->obj);
+	ring->obj = NULL;
 }
 
 static int intel_alloc_ringbuffer_obj(struct drm_device *dev,
-				      struct intel_ring *ringbuf)
+				      struct intel_ring *ring)
 {
 	struct drm_i915_gem_object *obj;
 
 	obj = NULL;
 	if (!HAS_LLC(dev))
-		obj = i915_gem_object_create_stolen(dev, ringbuf->size);
+		obj = i915_gem_object_create_stolen(dev, ring->size);
 	if (obj == NULL)
-		obj = i915_gem_object_create(dev, ringbuf->size);
+		obj = i915_gem_object_create(dev, ring->size);
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
 	/* mark ring buffers as read-only from GPU side by default */
 	obj->gt_ro = 1;
 
-	ringbuf->obj = obj;
+	ring->obj = obj;
 
 	return 0;
 }
@@ -2190,7 +2190,7 @@ static void intel_ring_context_unpin(struct i915_gem_context *ctx,
 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
-	struct intel_ring *ringbuf;
+	struct intel_ring *ring;
 	int ret;
 
 	WARN_ON(engine->buffer);
@@ -2215,12 +2215,12 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 	if (ret)
 		goto error;
 
-	ringbuf = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
-	if (IS_ERR(ringbuf)) {
-		ret = PTR_ERR(ringbuf);
+	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
+	if (IS_ERR(ring)) {
+		ret = PTR_ERR(ring);
 		goto error;
 	}
-	engine->buffer = ringbuf;
+	engine->buffer = ring;
 
 	if (I915_NEED_GFX_HWS(dev_priv)) {
 		ret = init_status_page(engine);
@@ -2233,11 +2233,11 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 			goto error;
 	}
 
-	ret = intel_pin_and_map_ring(dev_priv, ringbuf);
+	ret = intel_pin_and_map_ring(dev_priv, ring);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
 				engine->name, ret);
-		intel_destroy_ringbuffer_obj(ringbuf);
+		intel_destroy_ringbuffer_obj(ring);
 		goto error;
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8f94e9345819..2dfc418c5102 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -471,7 +471,7 @@ static inline void intel_ring_advance(struct intel_ring *ring)
 }
 
 int __intel_ring_space(int head, int tail, int size);
-void intel_ring_update_space(struct intel_ring *ringbuf);
+void intel_ring_update_space(struct intel_ring *ring);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
@@ -498,9 +498,9 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ring *ringbuf)
+static inline u32 intel_ring_get_tail(struct intel_ring *ring)
 {
-	return ringbuf->tail;
+	return ring->tail;
 }
 
 /*
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 06/73] drm/i915: Rename intel_pin_and_map_ring()
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (4 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 05/73] drm/i915: Rename residual ringbuf parameters Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 07/73] drm/i915: Remove obsolete engine->gpu_caches_dirty Chris Wilson
                   ` (67 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

For more consistent object-oriented naming of the form intel_ring_verb(),
pick intel_ring_pin() and intel_ring_unpin().
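
With the rename, the lifecycle pairing reads naturally at the call
sites (a hypothetical sketch assuming a freshly created ring; the
error handling is illustrative only):

	struct intel_ring *ring;
	int ret;

	ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
	if (IS_ERR(ring))
		return PTR_ERR(ring);

	ret = intel_ring_pin(ring);	/* was intel_pin_and_map_ring() */
	if (ret) {
		intel_ring_free(ring);
		return ret;
	}

	/* ... emit commands ... */

	intel_ring_unpin(ring);
	intel_ring_free(ring);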

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-17-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_lrc.c        |  4 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c | 38 ++++++++++++++++-----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  5 ++---
 3 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 9668331f973a..e4f809f879c6 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -973,7 +973,7 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 
 	lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
 
-	ret = intel_pin_and_map_ring(dev_priv, ce->ring);
+	ret = intel_ring_pin(ce->ring);
 	if (ret)
 		goto unpin_map;
 
@@ -1011,7 +1011,7 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 	if (--ce->pin_count)
 		return;
 
-	intel_unpin_ring(ce->ring);
+	intel_ring_unpin(ce->ring);
 
 	i915_gem_object_unpin_map(ce->state);
 	i915_gem_object_ggtt_unpin(ce->state);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5dd720e7feaa..e7a7f67ab06d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1993,24 +1993,9 @@ static int init_phys_status_page(struct intel_engine_cs *engine)
 	return 0;
 }
 
-void intel_unpin_ring(struct intel_ring *ring)
-{
-	GEM_BUG_ON(!ring->vma);
-	GEM_BUG_ON(!ring->vaddr);
-
-	if (HAS_LLC(ring->obj->base.dev) && !ring->obj->stolen)
-		i915_gem_object_unpin_map(ring->obj);
-	else
-		i915_vma_unpin_iomap(ring->vma);
-	ring->vaddr = NULL;
-
-	i915_gem_object_ggtt_unpin(ring->obj);
-	ring->vma = NULL;
-}
-
-int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
-			   struct intel_ring *ring)
+int intel_ring_pin(struct intel_ring *ring)
 {
+	struct drm_i915_private *dev_priv = ring->engine->i915;
 	struct drm_i915_gem_object *obj = ring->obj;
 	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
 	unsigned flags = PIN_OFFSET_BIAS | 4096;
@@ -2061,6 +2046,21 @@ err_unpin:
 	return ret;
 }
 
+void intel_ring_unpin(struct intel_ring *ring)
+{
+	GEM_BUG_ON(!ring->vma);
+	GEM_BUG_ON(!ring->vaddr);
+
+	if (HAS_LLC(ring->engine->i915) && !ring->obj->stolen)
+		i915_gem_object_unpin_map(ring->obj);
+	else
+		i915_vma_unpin_iomap(ring->vma);
+	ring->vaddr = NULL;
+
+	i915_gem_object_ggtt_unpin(ring->obj);
+	ring->vma = NULL;
+}
+
 static void intel_destroy_ringbuffer_obj(struct intel_ring *ring)
 {
 	i915_gem_object_put(ring->obj);
@@ -2233,7 +2233,7 @@ static int intel_init_ring_buffer(struct intel_engine_cs *engine)
 			goto error;
 	}
 
-	ret = intel_pin_and_map_ring(dev_priv, ring);
+	ret = intel_ring_pin(ring);
 	if (ret) {
 		DRM_ERROR("Failed to pin and map ringbuffer %s: %d\n",
 				engine->name, ret);
@@ -2261,7 +2261,7 @@ void intel_engine_cleanup(struct intel_engine_cs *engine)
 		intel_engine_stop(engine);
 		WARN_ON(!IS_GEN2(dev_priv) && (I915_READ_MODE(engine) & MODE_IDLE) == 0);
 
-		intel_unpin_ring(engine->buffer);
+		intel_ring_unpin(engine->buffer);
 		intel_ring_free(engine->buffer);
 		engine->buffer = NULL;
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2dfc418c5102..ba54ffcdd55a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -441,9 +441,8 @@ intel_write_status_page(struct intel_engine_cs *engine,
 
 struct intel_ring *
 intel_engine_create_ring(struct intel_engine_cs *engine, int size);
-int intel_pin_and_map_ring(struct drm_i915_private *dev_priv,
-			   struct intel_ring *ring);
-void intel_unpin_ring(struct intel_ring *ring);
+int intel_ring_pin(struct intel_ring *ring);
+void intel_ring_unpin(struct intel_ring *ring);
 void intel_ring_free(struct intel_ring *ring);
 
 void intel_engine_stop(struct intel_engine_cs *engine);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 07/73] drm/i915: Remove obsolete engine->gpu_caches_dirty
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (5 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 06/73] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 08/73] drm/i915: Reduce engine->emit_flush() to a single mode parameter Chris Wilson
                   ` (66 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Space for flushing the GPU cache prior to completing the request is
preallocated, so emitting that flush cannot fail - the GPU caches will
always be flushed along with the completed request. This means we no
longer have to track whether the GPU cache is dirty between batches,
as we had to with the outstanding_lazy_seqno.

With the removal of the duplicated per-backend entry points for
emitting the obsolete lazy flush, we can then further unify
engine->emit_flush.
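
A condensed before/after sketch, taken from the __i915_add_request()
hunk below:

	/* before: per-backend lazy-flush helpers, gated on a dirty flag */
	if (i915.enable_execlists)
		ret = logical_ring_flush_all_caches(request);
	else
		ret = intel_engine_flush_all_caches(request);

	/* after: one explicit flush of everything the GPU may have
	 * written; its ring space was preallocated, so it cannot fail
	 */
	ret = engine->emit_flush(request, 0, I915_GEM_GPU_DOMAINS);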

v2: Expand a bit on the legacy of gpu_caches_dirty

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-18-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  9 +---
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 11 +++--
 drivers/gpu/drm/i915/i915_gem_request.c    |  8 ++--
 drivers/gpu/drm/i915/intel_lrc.c           | 47 +++----------------
 drivers/gpu/drm/i915/intel_lrc.h           |  2 -
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 72 +++++++-----------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  7 ---
 8 files changed, 37 insertions(+), 121 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3336a5fcd029..beece8feb8fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -568,7 +568,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(dev_priv)) {
-		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d0ef675fb169..35c4c595e5ba 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -998,10 +998,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
 
-	/* Unconditionally invalidate gpu caches and ensure that we do flush
-	 * any residual writes from the previous batch.
-	 */
-	return intel_engine_invalidate_all_caches(req);
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 static bool
@@ -1163,9 +1161,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
-	/* Unconditionally force add_request to emit a full flush. */
-	params->engine->gpu_caches_dirty = true;
-
 	/* Add a breadcrumb for the completion of the batch buffer */
 	__i915_add_request(params->request, params->batch_obj, true);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ebfa0406a6a1..39fa9eb10514 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1666,7 +1666,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req,
+				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1693,7 +1694,8 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req,
+				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1711,8 +1713,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
-		ret = engine->flush(req,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = engine->emit_flush(req,
+					 I915_GEM_GPU_DOMAINS,
+					 I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 942b5b1f1602..7e3206051ced 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -451,12 +451,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * what.
 	 */
 	if (flush_caches) {
-		if (i915.enable_execlists)
-			ret = logical_ring_flush_all_caches(request);
-		else
-			ret = intel_engine_flush_all_caches(request);
+		ret = engine->emit_flush(request, 0, I915_GEM_GPU_DOMAINS);
+
 		/* Not allowed to fail! */
-		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
 	}
 
 	trace_i915_gem_request_add(request);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e4f809f879c6..33ff9f6d588f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -642,24 +642,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	spin_unlock_bh(&engine->execlist_lock);
 }
 
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
@@ -690,7 +672,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(req);
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -930,22 +912,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
@@ -1026,15 +992,15 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1051,8 +1017,9 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index d26fb44549e5..33e0193e5451 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -72,8 +72,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
 
 int intel_engines_init(struct drm_device *dev);
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-
 /* Logical Ring Contexts */
 
 /* One extra page is added before LRC for GuC as shared data */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e7a7f67ab06d..9e4b49644553 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -688,8 +688,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -706,8 +707,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -2860,21 +2862,21 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
 		engine->add_request = gen8_render_add_request;
-		engine->flush = gen8_render_ring_flush;
+		engine->emit_flush = gen8_render_ring_flush;
 		if (i915.semaphores)
 			engine->semaphore.signal = gen8_rcs_signal;
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->flush = gen7_render_ring_flush;
+		engine->emit_flush = gen7_render_ring_flush;
 		if (IS_GEN6(dev_priv))
-			engine->flush = gen6_render_ring_flush;
+			engine->emit_flush = gen6_render_ring_flush;
 	} else if (IS_GEN5(dev_priv)) {
-		engine->flush = gen4_render_ring_flush;
+		engine->emit_flush = gen4_render_ring_flush;
 	} else {
 		if (INTEL_GEN(dev_priv) < 4)
-			engine->flush = gen2_render_ring_flush;
+			engine->emit_flush = gen2_render_ring_flush;
 		else
-			engine->flush = gen4_render_ring_flush;
+			engine->emit_flush = gen4_render_ring_flush;
 		engine->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 
@@ -2911,12 +2913,12 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev_priv))
 			engine->write_tail = gen6_bsd_ring_write_tail;
-		engine->flush = gen6_bsd_ring_flush;
+		engine->emit_flush = gen6_bsd_ring_flush;
 		if (INTEL_GEN(dev_priv) < 8)
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 	} else {
 		engine->mmio_base = BSD_RING_BASE;
-		engine->flush = bsd_ring_flush;
+		engine->emit_flush = bsd_ring_flush;
 		if (IS_GEN5(dev_priv))
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
 		else
@@ -2935,7 +2937,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_bsd_ring_flush;
+	engine->emit_flush = gen6_bsd_ring_flush;
 
 	return intel_init_ring_buffer(engine);
 }
@@ -2946,7 +2948,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 	if (INTEL_GEN(dev_priv) < 8)
 		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
@@ -2959,7 +2961,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 
 	if (INTEL_GEN(dev_priv) < 8) {
 		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
@@ -2970,46 +2972,6 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 	return intel_init_ring_buffer(engine);
 }
 
-int
-intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
-int
-intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 void intel_engine_stop(struct intel_engine_cs *engine)
 {
 	int ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ba54ffcdd55a..00723401f98c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -206,9 +206,6 @@ struct intel_engine_cs {
 
 	void		(*write_tail)(struct intel_engine_cs *engine,
 				      u32 value);
-	int __must_check (*flush)(struct drm_i915_gem_request *req,
-				  u32	invalidate_domains,
-				  u32	flush_domains);
 	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
@@ -325,8 +322,6 @@ struct intel_engine_cs {
 	 */
 	u32 last_submitted_seqno;
 
-	bool gpu_caches_dirty;
-
 	struct i915_gem_context *last_context;
 
 	struct intel_engine_hangcheck hangcheck;
@@ -474,8 +469,6 @@ void intel_ring_update_space(struct intel_ring *ring);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 08/73] drm/i915: Reduce engine->emit_flush() to a single mode parameter
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (6 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 07/73] drm/i915: Remove obsolete engine->gpu_caches_dirty Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 09/73] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
                   ` (65 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Rather than passing a complete set of GPU cache domains for
invalidation, for flushing, or both, just pass a single mode parameter
to engine->emit_flush to determine the required operations.

engine->emit_flush(GPU, 0) -> engine->emit_flush(EMIT_INVALIDATE)
engine->emit_flush(0, GPU) -> engine->emit_flush(EMIT_FLUSH)
engine->emit_flush(GPU, GPU) -> engine->emit_flush(EMIT_FLUSH | EMIT_INVALIDATE)

This allows us to extend the behaviour easily in future, for example if
we want just a command barrier without the overhead of flushing.
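
A sketch of the resulting interface (flag names as added to
intel_ringbuffer.h below):

	int (*emit_flush)(struct drm_i915_gem_request *request, u32 mode);
	#define EMIT_INVALIDATE	BIT(0)
	#define EMIT_FLUSH	BIT(1)
	#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)

	/* implementations then test only the bits they care about, e.g. */
	if (mode & EMIT_FLUSH)
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	if (mode & EMIT_INVALIDATE)
		flags |= PIPE_CONTROL_TLB_INVALIDATE;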

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dave Gordon <david.s.gordon@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 10 ++----
 drivers/gpu/drm/i915/i915_gem_request.c    |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c           | 23 +++++-------
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 57 +++++++++++-------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  6 ++--
 7 files changed, 38 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index beece8feb8fe..edde8411c478 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -568,7 +568,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(dev_priv)) {
-		ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = engine->emit_flush(req, EMIT_INVALIDATE);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 35c4c595e5ba..e49776e34eed 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -999,7 +999,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		wmb();
 
 	/* Unconditionally invalidate GPU caches and TLBs. */
-	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
+	return req->engine->emit_flush(req, EMIT_INVALIDATE);
 }
 
 static bool
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 39fa9eb10514..671b1cab5e54 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1666,8 +1666,7 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->emit_flush(req,
-				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
 	if (ret)
 		return ret;
 
@@ -1694,8 +1693,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->emit_flush(req,
-				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
 	if (ret)
 		return ret;
 
@@ -1713,9 +1711,7 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
-		ret = engine->emit_flush(req,
-					 I915_GEM_GPU_DOMAINS,
-					 I915_GEM_GPU_DOMAINS);
+		ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 7e3206051ced..67f16feb0552 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -451,7 +451,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * what.
 	 */
 	if (flush_caches) {
-		ret = engine->emit_flush(request, 0, I915_GEM_GPU_DOMAINS);
+		ret = engine->emit_flush(request, EMIT_FLUSH);
 
 		/* Not allowed to fail! */
 		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 33ff9f6d588f..3f1c5cdd38a0 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -672,7 +672,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
+	return req->engine->emit_flush(req, EMIT_INVALIDATE);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -998,9 +998,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1017,9 +1015,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1600,9 +1596,7 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
-static int gen8_emit_flush(struct drm_i915_gem_request *request,
-			   u32 invalidate_domains,
-			   u32 unused)
+static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
 	struct intel_ring *ring = request->ring;
 	u32 cmd;
@@ -1621,7 +1615,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 	 */
 	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_INVALIDATE_TLB;
 		if (request->engine->id == VCS)
 			cmd |= MI_INVALIDATE_BSD;
@@ -1639,8 +1633,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 }
 
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
-				  u32 invalidate_domains,
-				  u32 flush_domains)
+				  u32 mode)
 {
 	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
@@ -1652,14 +1645,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 
 	flags |= PIPE_CONTROL_CS_STALL;
 
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
 
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9e4b49644553..1f876e7ce582 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -67,19 +67,15 @@ static void __intel_engine_submit(struct intel_engine_cs *engine)
 }
 
 static int
-gen2_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32	invalidate_domains,
-		       u32	flush_domains)
+gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 cmd;
 	int ret;
 
 	cmd = MI_FLUSH;
-	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
-		cmd |= MI_NO_WRITE_FLUSH;
 
-	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_READ_FLUSH;
 
 	ret = intel_ring_begin(req, 2);
@@ -94,9 +90,7 @@ gen2_render_ring_flush(struct drm_i915_gem_request *req,
 }
 
 static int
-gen4_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32	invalidate_domains,
-		       u32	flush_domains)
+gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 cmd;
@@ -131,7 +125,7 @@ gen4_render_ring_flush(struct drm_i915_gem_request *req,
 	 */
 
 	cmd = MI_FLUSH;
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_EXE_FLUSH;
 		if (IS_G4X(req->i915) || IS_GEN5(req->i915))
 			cmd |= MI_INVALIDATE_ISP;
@@ -222,8 +216,7 @@ intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
 }
 
 static int
-gen6_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains, u32 flush_domains)
+gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
@@ -240,7 +233,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		/*
@@ -249,7 +242,7 @@ gen6_render_ring_flush(struct drm_i915_gem_request *req,
 		 */
 		flags |= PIPE_CONTROL_CS_STALL;
 	}
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -297,8 +290,7 @@ gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
 }
 
 static int
-gen7_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains, u32 flush_domains)
+gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	u32 scratch_addr =
@@ -320,13 +312,13 @@ gen7_render_ring_flush(struct drm_i915_gem_request *req,
 	 * number of bits based on the write domains has little performance
 	 * impact.
 	 */
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -384,8 +376,7 @@ gen8_emit_pipe_control(struct drm_i915_gem_request *req,
 }
 
 static int
-gen8_render_ring_flush(struct drm_i915_gem_request *req,
-		       u32 invalidate_domains, u32 flush_domains)
+gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	u32 scratch_addr = req->engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	u32 flags = 0;
@@ -393,13 +384,13 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req,
 
 	flags |= PIPE_CONTROL_CS_STALL;
 
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
@@ -688,9 +679,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -707,9 +696,7 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1700,9 +1687,7 @@ i8xx_irq_disable(struct intel_engine_cs *engine)
 }
 
 static int
-bsd_ring_flush(struct drm_i915_gem_request *req,
-	       u32     invalidate_domains,
-	       u32     flush_domains)
+bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2533,8 +2518,7 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
-			       u32 invalidate, u32 flush)
+static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
@@ -2561,7 +2545,7 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req,
 	 * operation is complete. This bit is only valid when the
 	 * Post-Sync Operation field is a value of 1h or 3h."
 	 */
-	if (invalidate & I915_GEM_GPU_DOMAINS)
+	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 
 	intel_ring_emit(ring, cmd);
@@ -2653,8 +2637,7 @@ gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 
 /* Blitter support (SandyBridge+) */
 
-static int gen6_ring_flush(struct drm_i915_gem_request *req,
-			   u32 invalidate, u32 flush)
+static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
 	struct intel_ring *ring = req->ring;
 	uint32_t cmd;
@@ -2681,7 +2664,7 @@ static int gen6_ring_flush(struct drm_i915_gem_request *req,
 	 * operation is complete. This bit is only valid when the
 	 * Post-Sync Operation field is a value of 1h or 3h."
 	 */
-	if (invalidate & I915_GEM_DOMAIN_RENDER)
+	if (mode & EMIT_INVALIDATE)
 		cmd |= MI_INVALIDATE_TLB;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 00723401f98c..76d0495943c3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -292,8 +292,10 @@ struct intel_engine_cs {
 	u32 ctx_desc_template;
 	int		(*emit_request)(struct drm_i915_gem_request *request);
 	int		(*emit_flush)(struct drm_i915_gem_request *request,
-				      u32 invalidate_domains,
-				      u32 flush_domains);
+				      u32 mode);
+#define EMIT_INVALIDATE	BIT(0)
+#define EMIT_FLUSH	BIT(1)
+#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
 	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
 					 u64 offset, unsigned dispatch_flags);
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 09/73] drm/i915: Simplify request_alloc by returning the allocated request
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (7 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 08/73] drm/i915: Reduce engine->emit_flush() to a single mode parameter Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 10/73] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
                   ` (64 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

It is simpler, and leads to more readable code through the callstack,
if the allocation returns the allocated struct through the return value.

The importance of this is that it no longer looks like we accidentally
allocate requests as a side-effect of calling certain functions.
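
Condensed from the intel_crtc_page_flip() hunk below, the
caller-visible difference (error paths simplified to returns for
brevity):

	/* before: a request could be allocated behind the caller's back */
	struct drm_i915_gem_request *request = NULL;
	ret = i915_gem_object_sync(obj, engine, &request);
	if (!ret && !request) {
		request = i915_gem_request_alloc(engine, NULL);
		ret = PTR_ERR_OR_ZERO(request);
	}

	/* after: the allocation is explicit and up front */
	request = i915_gem_request_alloc(engine, engine->last_context);
	if (IS_ERR(request))
		return PTR_ERR(request);

	ret = i915_gem_object_sync(obj, request);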

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-19-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            |  3 +-
 drivers/gpu/drm/i915/i915_gem.c            | 75 ++++++++----------------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 12 ++---
 drivers/gpu/drm/i915/i915_gem_request.c    | 58 ++++++++---------------
 drivers/gpu/drm/i915/i915_trace.h          | 13 +++---
 drivers/gpu/drm/i915/intel_display.c       | 36 ++++++--------
 drivers/gpu/drm/i915/intel_lrc.c           |  2 +-
 drivers/gpu/drm/i915/intel_overlay.c       | 20 ++++----
 8 files changed, 79 insertions(+), 140 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4c43bd3d74b1..8f4edc9dd7a3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3171,8 +3171,7 @@ static inline void i915_gem_object_unpin_map(struct drm_i915_gem_object *obj)
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_engine_cs *to,
-			 struct drm_i915_gem_request **to_req);
+			 struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
 			     struct drm_i915_gem_request *req);
 int i915_gem_dumb_create(struct drm_file *file_priv,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 59890f523c5f..b6c4ff63725f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2845,51 +2845,35 @@ out:
 
 static int
 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		       struct intel_engine_cs *to,
-		       struct drm_i915_gem_request *from_req,
-		       struct drm_i915_gem_request **to_req)
+		       struct drm_i915_gem_request *to,
+		       struct drm_i915_gem_request *from)
 {
-	struct intel_engine_cs *from;
 	int ret;
 
-	from = i915_gem_request_get_engine(from_req);
-	if (to == from)
+	if (to->engine == from->engine)
 		return 0;
 
-	if (i915_gem_request_completed(from_req))
+	if (i915_gem_request_completed(from))
 		return 0;
 
 	if (!i915.semaphores) {
-		struct drm_i915_private *i915 = to_i915(obj->base.dev);
-		ret = __i915_wait_request(from_req,
-					  i915->mm.interruptible,
+		ret = __i915_wait_request(from,
+					  from->i915->mm.interruptible,
 					  NULL,
 					  NO_WAITBOOST);
 		if (ret)
 			return ret;
 
-		i915_gem_object_retire_request(obj, from_req);
+		i915_gem_object_retire_request(obj, from);
 	} else {
-		int idx = intel_engine_sync_index(from, to);
-		u32 seqno = i915_gem_request_get_seqno(from_req);
+		int idx = intel_engine_sync_index(from->engine, to->engine);
+		u32 seqno = i915_gem_request_get_seqno(from);
 
-		WARN_ON(!to_req);
-
-		if (seqno <= from->semaphore.sync_seqno[idx])
+		if (seqno <= from->engine->semaphore.sync_seqno[idx])
 			return 0;
 
-		if (*to_req == NULL) {
-			struct drm_i915_gem_request *req;
-
-			req = i915_gem_request_alloc(to, NULL);
-			if (IS_ERR(req))
-				return PTR_ERR(req);
-
-			*to_req = req;
-		}
-
-		trace_i915_gem_ring_sync_to(*to_req, from, from_req);
-		ret = to->semaphore.sync_to(*to_req, from, seqno);
+		trace_i915_gem_ring_sync_to(to, from);
+		ret = to->engine->semaphore.sync_to(to, from->engine, seqno);
 		if (ret)
 			return ret;
 
@@ -2897,8 +2881,8 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		 * might have just caused seqno wrap under
 		 * the radar.
 		 */
-		from->semaphore.sync_seqno[idx] =
-			i915_gem_request_get_seqno(obj->last_read_req[from->id]);
+		from->engine->semaphore.sync_seqno[idx] =
+			i915_gem_request_get_seqno(obj->last_read_req[from->engine->id]);
 	}
 
 	return 0;
@@ -2908,17 +2892,12 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  * i915_gem_object_sync - sync an object to a ring.
  *
  * @obj: object which may be in use on another ring.
- * @to: ring we wish to use the object on. May be NULL.
- * @to_req: request we wish to use the object for. See below.
- *          This will be allocated and returned if a request is
- *          required but not passed in.
+ * @to: request we are wishing to use
  *
  * This code is meant to abstract object synchronization with the GPU.
- * Calling with NULL implies synchronizing the object with the CPU
- * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow one engine to write
- * into a buffer at any time, but multiple readers. To ensure each has
- * a coherent view of memory, we must:
+ * Conceptually we serialise writes between engines inside the GPU.
+ * We only allow one engine to write into a buffer at any time, but
+ * multiple readers. To ensure each has a coherent view of memory, we must:
  *
  * - If there is an outstanding write request to the object, the new
  *   request must wait for it to complete (either CPU or in hw, requests
@@ -2927,22 +2906,11 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  * - If we are a write request (pending_write_domain is set), the new
  *   request must wait for outstanding read requests to complete.
  *
- * For CPU synchronisation (NULL to) no request is required. For syncing with
- * rings to_req must be non-NULL. However, a request does not have to be
- * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
- * request will be allocated automatically and returned through *to_req. Note
- * that it is not guaranteed that commands will be emitted (because the system
- * might already be idle). Hence there is no need to create a request that
- * might never have any work submitted. Note further that if a request is
- * returned in *to_req, it is the responsibility of the caller to submit
- * that request (after potentially adding more work to it).
- *
  * Returns 0 if successful, else propagates up the lower layer error.
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		     struct intel_engine_cs *to,
-		     struct drm_i915_gem_request **to_req)
+		     struct drm_i915_gem_request *to)
 {
 	const bool readonly = obj->base.pending_write_domain == 0;
 	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
@@ -2951,9 +2919,6 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (!obj->active)
 		return 0;
 
-	if (to == NULL)
-		return i915_gem_object_wait_rendering(obj, readonly);
-
 	n = 0;
 	if (readonly) {
 		if (obj->last_write_req)
@@ -2964,7 +2929,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 				req[n++] = obj->last_read_req[i];
 	}
 	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, req[i], to_req);
+		ret = __i915_gem_object_sync(obj, to, req[i]);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e49776e34eed..fe06b5886ee4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -981,7 +981,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->engine, &req);
+			ret = i915_gem_object_sync(obj, req);
 			if (ret)
 				return ret;
 		}
@@ -1427,7 +1427,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_i915_gem_request *req = NULL;
 	struct eb_vmas *eb;
 	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
@@ -1615,13 +1614,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	req = i915_gem_request_alloc(engine, ctx);
-	if (IS_ERR(req)) {
-		ret = PTR_ERR(req);
+	params->request = i915_gem_request_alloc(engine, ctx);
+	if (IS_ERR(params->request)) {
+		ret = PTR_ERR(params->request);
 		goto err_batch_unpin;
 	}
 
-	ret = i915_gem_request_add_to_client(req, file);
+	ret = i915_gem_request_add_to_client(params->request, file);
 	if (ret)
 		goto err_request;
 
@@ -1637,7 +1636,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->dispatch_flags          = dispatch_flags;
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
-	params->request                 = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
 err_request:
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 67f16feb0552..f4e6c40ac4e3 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -292,10 +292,21 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 	return 0;
 }
 
-static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *engine,
-			 struct i915_gem_context *ctx,
-			 struct drm_i915_gem_request **req_out)
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+		       struct i915_gem_context *ctx)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
 	unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
@@ -303,18 +314,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	u32 seqno;
 	int ret;
 
-	if (!req_out)
-		return -EINVAL;
-
-	*req_out = NULL;
-
 	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
 	 * and restart.
 	 */
 	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
 	if (ret)
-		return ret;
+		return ERR_PTR(ret);
 
 	/* Move the oldest request to the slab-cache (if not in use!) */
 	req = list_first_entry_or_null(&engine->request_list,
@@ -324,7 +330,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 
 	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
 	if (!req)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	ret = i915_gem_get_seqno(dev_priv, &seqno);
 	if (ret)
@@ -357,39 +363,13 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (ret)
 		goto err_ctx;
 
-	*req_out = req;
-	return 0;
+	return req;
 
 err_ctx:
 	i915_gem_context_put(ctx);
 err:
 	kmem_cache_free(dev_priv->requests, req);
-	return ret;
-}
-
-/**
- * i915_gem_request_alloc - allocate a request structure
- *
- * @engine: engine that we wish to issue the request on.
- * @ctx: context that the request will be associated with.
- *       This can be NULL if the request is not directly related to
- *       any specific user context, in which case this function will
- *       choose an appropriate context to use.
- *
- * Returns a pointer to the allocated request if successful,
- * or an error code if not.
- */
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-		       struct i915_gem_context *ctx)
-{
-	struct drm_i915_gem_request *req;
-	int err;
-
-	if (!ctx)
-		ctx = engine->i915->kernel_context;
-	err = __i915_gem_request_alloc(engine, ctx, &req);
-	return err ? ERR_PTR(err) : req;
+	return ERR_PTR(ret);
 }
 
 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 007112d1e049..9e43c0aa6e3b 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -449,10 +449,9 @@ TRACE_EVENT(i915_gem_evict_vm,
 );
 
 TRACE_EVENT(i915_gem_ring_sync_to,
-	    TP_PROTO(struct drm_i915_gem_request *to_req,
-		     struct intel_engine_cs *from,
-		     struct drm_i915_gem_request *req),
-	    TP_ARGS(to_req, from, req),
+	    TP_PROTO(struct drm_i915_gem_request *to,
+		     struct drm_i915_gem_request *from),
+	    TP_ARGS(to, from),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
@@ -463,9 +462,9 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 
 	    TP_fast_assign(
 			   __entry->dev = from->i915->drm.primary->index;
-			   __entry->sync_from = from->id;
-			   __entry->sync_to = to_req->engine->id;
-			   __entry->seqno = req->fence.seqno;
+			   __entry->sync_from = from->engine->id;
+			   __entry->sync_to = to->engine->id;
+			   __entry->seqno = from->fence.seqno;
 			   ),
 
 	    TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index abd0a484e5ae..134dcf66e17b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11575,7 +11575,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	struct intel_flip_work *work;
 	struct intel_engine_cs *engine;
 	bool mmio_flip;
-	struct drm_i915_gem_request *request = NULL;
+	struct drm_i915_gem_request *request;
 	int ret;
 
 	/*
@@ -11682,22 +11682,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	mmio_flip = use_mmio_flip(engine, obj);
 
-	/* When using CS flips, we want to emit semaphores between rings.
-	 * However, when using mmio flips we will create a task to do the
-	 * synchronisation, so all we want here is to pin the framebuffer
-	 * into the display plane and skip any waits.
-	 */
-	if (!mmio_flip) {
-		ret = i915_gem_object_sync(obj, engine, &request);
-		if (!ret && !request) {
-			request = i915_gem_request_alloc(engine, NULL);
-			ret = PTR_ERR_OR_ZERO(request);
-		}
-
-		if (ret)
-			goto cleanup_pending;
-	}
-
 	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
 	if (ret)
 		goto cleanup_pending;
@@ -11715,14 +11699,24 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 		schedule_work(&work->mmio_work);
 	} else {
-		i915_gem_request_assign(&work->flip_queued_req, request);
+		request = i915_gem_request_alloc(engine, engine->last_context);
+		if (IS_ERR(request)) {
+			ret = PTR_ERR(request);
+			goto cleanup_unpin;
+		}
+
+		ret = i915_gem_object_sync(obj, request);
+		if (ret)
+			goto cleanup_request;
+
 		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
 						   page_flip_flags);
 		if (ret)
-			goto cleanup_unpin;
+			goto cleanup_request;
 
 		intel_mark_page_flip_active(intel_crtc, work);
 
+		work->flip_queued_req = i915_gem_request_get(request);
 		i915_add_request_no_flush(request);
 	}
 
@@ -11737,11 +11731,11 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 
 	return 0;
 
+cleanup_request:
+	i915_add_request_no_flush(request);
 cleanup_unpin:
 	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
 cleanup_pending:
-	if (!IS_ERR_OR_NULL(request))
-		i915_add_request_no_flush(request);
 	atomic_dec(&intel_crtc->unpin_work_count);
 	mutex_unlock(&dev->struct_mutex);
 cleanup:
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 3f1c5cdd38a0..f4d6736bf039 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -655,7 +655,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 		struct drm_i915_gem_object *obj = vma->obj;
 
 		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req->engine, &req);
+			ret = i915_gem_object_sync(obj, req);
 			if (ret)
 				return ret;
 		}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 8f1d4d9ef345..651efe4e468e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -229,11 +229,18 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	return 0;
 }
 
+static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
+{
+	struct drm_i915_private *dev_priv = overlay->i915;
+	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
+
+	return i915_gem_request_alloc(engine, dev_priv->kernel_context);
+}
+
 /* overlay needs to be disable in OCMD reg */
 static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
 	struct intel_ring *ring;
 	int ret;
@@ -241,7 +248,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	WARN_ON(overlay->active);
 	WARN_ON(IS_I830(dev_priv) && !(dev_priv->quirks & QUIRK_PIPEA_FORCE));
 
-	req = i915_gem_request_alloc(engine, NULL);
+	req = alloc_request(overlay);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -268,7 +275,6 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 				  bool load_polyphase_filter)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
 	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
@@ -285,7 +291,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	if (tmp & (1 << 17))
 		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
 
-	req = i915_gem_request_alloc(engine, NULL);
+	req = alloc_request(overlay);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -338,7 +344,6 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	struct drm_i915_gem_request *req;
 	struct intel_ring *ring;
 	u32 flip_addr = overlay->flip_addr;
@@ -352,7 +357,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	 * of the hw. Do it in both cases */
 	flip_addr |= OFC_UPDATE;
 
-	req = i915_gem_request_alloc(engine, NULL);
+	req = alloc_request(overlay);
 	if (IS_ERR(req))
 		return PTR_ERR(req);
 
@@ -412,7 +417,6 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 {
 	struct drm_i915_private *dev_priv = overlay->i915;
-	struct intel_engine_cs *engine = &dev_priv->engine[RCS];
 	int ret;
 
 	lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -428,7 +432,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 		struct drm_i915_gem_request *req;
 		struct intel_ring *ring;
 
-		req = i915_gem_request_alloc(engine, NULL);
+		req = alloc_request(overlay);
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 10/73] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (8 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 09/73] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 11/73] drm/i915: Remove intel_ring_get_tail() Chris Wilson
                   ` (63 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Both the ->dispatch_execbuffer and ->emit_bb_start callbacks do exactly
the same thing: add MI_BATCHBUFFER_START to the request's ringbuffer.
We need only one vfunc.
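
The surviving vfunc gains the length parameter previously carried only
by ->dispatch_execbuffer (presumed header declaration sketched from the
hunks below), so legacy and execlists submission share one call:

	int (*emit_bb_start)(struct drm_i915_gem_request *req,
			     u64 offset, u32 len,
			     unsigned int dispatch_flags);

	/* e.g. the legacy execbuffer path now reads: */
	ret = params->engine->emit_bb_start(params->request,
					    exec_start, exec_len,
					    params->dispatch_flags);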

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-20-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  6 ++--
 drivers/gpu/drm/i915/i915_gem_render_state.c | 16 +++++-----
 drivers/gpu/drm/i915/intel_lrc.c             | 15 ++++++---
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 48 ++++++++++++++--------------
 drivers/gpu/drm/i915/intel_ringbuffer.h      | 12 +++----
 5 files changed, 50 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index fe06b5886ee4..ca941ff7a94b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1326,9 +1326,9 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	if (exec_len == 0)
 		exec_len = params->batch_obj->base.size;
 
-	ret = params->engine->dispatch_execbuffer(params->request,
-						  exec_start, exec_len,
-						  params->dispatch_flags);
+	ret = params->engine->emit_bb_start(params->request,
+					    exec_start, exec_len,
+					    params->dispatch_flags);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index b2be4676a5cf..2ba759f3ab6f 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -234,18 +234,18 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	if (so.rodata == NULL)
 		return 0;
 
-	ret = req->engine->dispatch_execbuffer(req, so.ggtt_offset,
-					     so.rodata->batch_items * 4,
-					     I915_DISPATCH_SECURE);
+	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
+					 so.rodata->batch_items * 4,
+					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	if (so.aux_batch_size > 8) {
-		ret = req->engine->dispatch_execbuffer(req,
-						     (so.ggtt_offset +
-						      so.aux_batch_offset),
-						     so.aux_batch_size,
-						     I915_DISPATCH_SECURE);
+		ret = req->engine->emit_bb_start(req,
+						 (so.ggtt_offset +
+						  so.aux_batch_offset),
+						 so.aux_batch_size,
+						 I915_DISPATCH_SECURE);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f4d6736bf039..bf42a66d6624 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -859,7 +859,9 @@ int intel_execlists_submission(struct i915_execbuffer_params *params,
 	exec_start = params->batch_obj_vm_offset +
 		     args->batch_start_offset;
 
-	ret = engine->emit_bb_start(params->request, exec_start, params->dispatch_flags);
+	ret = engine->emit_bb_start(params->request,
+				    exec_start, args->batch_len,
+				    params->dispatch_flags);
 	if (ret)
 		return ret;
 
@@ -1541,7 +1543,8 @@ static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
 }
 
 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
-			      u64 offset, unsigned dispatch_flags)
+			      u64 offset, u32 len,
+			      unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	bool ppgtt = !(dispatch_flags & I915_DISPATCH_SECURE);
@@ -1814,13 +1817,15 @@ static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
 		return 0;
 
 	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
-				       I915_DISPATCH_SECURE);
+					 so.rodata->batch_items * 4,
+					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
 	ret = req->engine->emit_bb_start(req,
-				       (so.ggtt_offset + so.aux_batch_offset),
-				       I915_DISPATCH_SECURE);
+					 (so.ggtt_offset + so.aux_batch_offset),
+					 so.aux_batch_size,
+					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto out;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1f876e7ce582..799a7dc02675 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1779,9 +1779,9 @@ gen8_irq_disable(struct intel_engine_cs *engine)
 }
 
 static int
-i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 length,
-			 unsigned dispatch_flags)
+i965_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 length,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1806,9 +1806,9 @@ i965_dispatch_execbuffer(struct drm_i915_gem_request *req,
 #define I830_TLB_ENTRIES (2)
 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
 static int
-i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 len,
-			 unsigned dispatch_flags)
+i830_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	u32 cs_offset = req->engine->scratch.gtt_offset;
@@ -1868,9 +1868,9 @@ i830_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 static int
-i915_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			 u64 offset, u32 len,
-			 unsigned dispatch_flags)
+i915_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2562,9 +2562,9 @@ static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 }
 
 static int
-gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			      u64 offset, u32 len,
-			      unsigned dispatch_flags)
+gen8_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	bool ppgtt = USES_PPGTT(req->i915) &&
@@ -2588,9 +2588,9 @@ gen8_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 static int
-hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			     u64 offset, u32 len,
-			     unsigned dispatch_flags)
+hsw_emit_bb_start(struct drm_i915_gem_request *req,
+		  u64 offset, u32 len,
+		  unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2613,9 +2613,9 @@ hsw_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
 }
 
 static int
-gen6_ring_dispatch_execbuffer(struct drm_i915_gem_request *req,
-			      u64 offset, u32 len,
-			      unsigned dispatch_flags)
+gen6_emit_bb_start(struct drm_i915_gem_request *req,
+		   u64 offset, u32 len,
+		   unsigned int dispatch_flags)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -2818,15 +2818,15 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 		engine->add_request = gen6_add_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
-		engine->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+		engine->emit_bb_start = gen8_emit_bb_start;
 	else if (INTEL_GEN(dev_priv) >= 6)
-		engine->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+		engine->emit_bb_start = gen6_emit_bb_start;
 	else if (INTEL_GEN(dev_priv) >= 4)
-		engine->dispatch_execbuffer = i965_dispatch_execbuffer;
+		engine->emit_bb_start = i965_emit_bb_start;
 	else if (IS_I830(dev_priv) || IS_845G(dev_priv))
-		engine->dispatch_execbuffer = i830_dispatch_execbuffer;
+		engine->emit_bb_start = i830_emit_bb_start;
 	else
-		engine->dispatch_execbuffer = i915_dispatch_execbuffer;
+		engine->emit_bb_start = i915_emit_bb_start;
 
 	intel_ring_init_irq(dev_priv, engine);
 	intel_ring_init_semaphores(dev_priv, engine);
@@ -2864,7 +2864,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 	}
 
 	if (IS_HASWELL(dev_priv))
-		engine->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
+		engine->emit_bb_start = hsw_emit_bb_start;
 
 	engine->init_hw = init_render_ring;
 	engine->cleanup = render_ring_cleanup;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 76d0495943c3..45ba29c0b20e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -214,12 +214,6 @@ struct intel_engine_cs {
 	 * monotonic, even if not coherent.
 	 */
 	void		(*irq_seqno_barrier)(struct intel_engine_cs *engine);
-	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
-					       u64 offset, u32 length,
-					       unsigned dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
 	void		(*cleanup)(struct intel_engine_cs *engine);
 
 	/* GEN8 signal/wait table - never trust comments!
@@ -297,7 +291,11 @@ struct intel_engine_cs {
 #define EMIT_FLUSH	BIT(1)
 #define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
 	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, unsigned dispatch_flags);
+					 u64 offset, u32 length,
+					 unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE 0x1
+#define I915_DISPATCH_PINNED 0x2
+#define I915_DISPATCH_RS     0x4
 
 	/**
 	 * List of objects currently involved in rendering from the
-- 
2.8.1


* [PATCH 11/73] drm/i915: Remove intel_ring_get_tail()
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (9 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 10/73] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 12/73] drm/i915: Convert engine->write_tail to operate on a request Chris Wilson
                   ` (62 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Joonas doesn't like the tiny function, especially if I go around making
it more complicated and using it elsewhere. To remove that temptation,
remove the function!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-21-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_request.c | 8 ++++----
 drivers/gpu/drm/i915/intel_ringbuffer.h | 5 -----
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index f4e6c40ac4e3..606b0b8a5f91 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -419,7 +419,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
-	request_start = intel_ring_get_tail(ring);
+	request_start = ring->tail;
 	reserved_tail = request->reserved_space;
 	request->reserved_space = 0;
 
@@ -464,19 +464,19 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * GPU processing the request, we never over-estimate the
 	 * position of the head.
 	 */
-	request->postfix = intel_ring_get_tail(ring);
+	request->postfix = ring->tail;
 
 	if (i915.enable_execlists) {
 		ret = engine->emit_request(request);
 	} else {
 		ret = engine->add_request(request);
 
-		request->tail = intel_ring_get_tail(ring);
+		request->tail = ring->tail;
 	}
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
 	/* Sanity check that the reserved size was large enough. */
-	ret = intel_ring_get_tail(ring) - request_start;
+	ret = ring->tail - request_start;
 	if (ret < 0)
 		ret += ring->size;
 	WARN_ONCE(ret > reserved_tail,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 45ba29c0b20e..0c3c7185d9ad 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -490,11 +490,6 @@ static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine)
 
 int init_workarounds_ring(struct intel_engine_cs *engine);
 
-static inline u32 intel_ring_get_tail(struct intel_ring *ring)
-{
-	return ring->tail;
-}
-
 /*
  * Arbitrary size for largest possible 'add request' sequence. The code paths
  * are complex and variable. Empirical measurement shows that the worst case
-- 
2.8.1


* [PATCH 12/73] drm/i915: Convert engine->write_tail to operate on a request
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (10 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 11/73] drm/i915: Remove intel_ring_get_tail() Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write Chris Wilson
                   ` (61 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

If we rewrite the I915_WRITE_TAIL specialisation for the legacy
ringbuffer as submitting the request onto the ringbuffer, we can unify
the vfunc with both execlists and GuC in the next patch.

v2: Drop the modulus from the I915_WRITE_TAIL as it is currently being
applied in intel_ring_advance() after every command packet, and add a
comment explaining why we need the modulus at all.
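
Condensed from the hunks below (a sketch, not a drop-in copy), every
*_add_request implementation now ends by recording where the request
finishes, and the MMIO tail write moves into the new vfunc:

    /* tail of each *_add_request */
    intel_ring_emit(ring, MI_USER_INTERRUPT);
    intel_ring_advance(ring);

    req->tail = ring->tail;
    engine->submit_request(req);

    /* the common implementation of the new vfunc */
    static void i9xx_submit_request(struct drm_i915_gem_request *request)
    {
            struct drm_i915_private *dev_priv = request->i915;

            I915_WRITE_TAIL(request->engine, request->tail);
    }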

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-22-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_request.c |  8 ++----
 drivers/gpu/drm/i915/intel_ringbuffer.c | 51 ++++++++++++++++-----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h | 10 +++++--
 3 files changed, 36 insertions(+), 33 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 606b0b8a5f91..a885905df3bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -466,15 +466,13 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->postfix = ring->tail;
 
-	if (i915.enable_execlists) {
+	if (i915.enable_execlists)
 		ret = engine->emit_request(request);
-	} else {
+	else
 		ret = engine->add_request(request);
-
-		request->tail = ring->tail;
-	}
 	/* Not allowed to fail! */
 	WARN(ret, "emit|add_request failed: %d!\n", ret);
+
 	/* Sanity check that the reserved size was large enough. */
 	ret = ring->tail - request_start;
 	if (ret < 0)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 799a7dc02675..3142085b5cc0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -58,14 +58,6 @@ void intel_ring_update_space(struct intel_ring *ring)
 					 ring->tail, ring->size);
 }
 
-static void __intel_engine_submit(struct intel_engine_cs *engine)
-{
-	struct intel_ring *ring = engine->buffer;
-
-	ring->tail &= ring->size - 1;
-	engine->write_tail(engine, ring->tail);
-}
-
 static int
 gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 {
@@ -412,13 +404,6 @@ gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	return gen8_emit_pipe_control(req, flags, scratch_addr);
 }
 
-static void ring_write_tail(struct intel_engine_cs *engine,
-			    u32 value)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	I915_WRITE_TAIL(engine, value);
-}
-
 u64 intel_engine_get_active_head(struct intel_engine_cs *engine)
 {
 	struct drm_i915_private *dev_priv = engine->i915;
@@ -532,7 +517,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 
 	I915_WRITE_CTL(engine, 0);
 	I915_WRITE_HEAD(engine, 0);
-	engine->write_tail(engine, 0);
+	I915_WRITE_TAIL(engine, 0);
 
 	if (!IS_GEN2(dev_priv)) {
 		(void)I915_READ_CTL(engine);
@@ -1469,7 +1454,10 @@ gen6_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, req->fence.seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_engine_submit(engine);
+	intel_ring_advance(ring);
+
+	req->tail = ring->tail;
+	engine->submit_request(req);
 
 	return 0;
 }
@@ -1499,7 +1487,9 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
-	__intel_engine_submit(engine);
+
+	req->tail = ring->tail;
+	engine->submit_request(req);
 
 	return 0;
 }
@@ -1716,11 +1706,21 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, req->fence.seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	__intel_engine_submit(req->engine);
+	intel_ring_advance(ring);
+
+	req->tail = ring->tail;
+	req->engine->submit_request(req);
 
 	return 0;
 }
 
+static void i9xx_submit_request(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_private *dev_priv = request->i915;
+
+	I915_WRITE_TAIL(request->engine, request->tail);
+}
+
 static void
 gen6_irq_enable(struct intel_engine_cs *engine)
 {
@@ -2479,10 +2479,9 @@ void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno)
 	rcu_read_unlock();
 }
 
-static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
-				     u32 value)
+static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 {
-	struct drm_i915_private *dev_priv = engine->i915;
+	struct drm_i915_private *dev_priv = request->i915;
 
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
@@ -2506,8 +2505,8 @@ static void gen6_bsd_ring_write_tail(struct intel_engine_cs *engine,
 		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
 	/* Now that the ring is fully powered up, update the tail */
-	I915_WRITE_FW(RING_TAIL(engine->mmio_base), value);
-	POSTING_READ_FW(RING_TAIL(engine->mmio_base));
+	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
+	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
 
 	/* Let the ring send IDLE messages to the GT again,
 	 * and so let it sleep to conserve power when idle.
@@ -2811,7 +2810,7 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 				      struct intel_engine_cs *engine)
 {
 	engine->init_hw = init_ring_common;
-	engine->write_tail = ring_write_tail;
+	engine->submit_request = i9xx_submit_request;
 
 	engine->add_request = i9xx_add_request;
 	if (INTEL_GEN(dev_priv) >= 6)
@@ -2895,7 +2894,7 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) >= 6) {
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev_priv))
-			engine->write_tail = gen6_bsd_ring_write_tail;
+			engine->submit_request = gen6_bsd_submit_request;
 		engine->emit_flush = gen6_bsd_ring_flush;
 		if (INTEL_GEN(dev_priv) < 8)
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 0c3c7185d9ad..14d2ea36fb88 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -204,8 +204,6 @@ struct intel_engine_cs {
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	void		(*write_tail)(struct intel_engine_cs *engine,
-				      u32 value);
 	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
@@ -296,6 +294,7 @@ struct intel_engine_cs {
 #define I915_DISPATCH_SECURE 0x1
 #define I915_DISPATCH_PINNED 0x2
 #define I915_DISPATCH_RS     0x4
+	void		(*submit_request)(struct drm_i915_gem_request *req);
 
 	/**
 	 * List of objects currently involved in rendering from the
@@ -461,6 +460,13 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
 
 static inline void intel_ring_advance(struct intel_ring *ring)
 {
+	/* The modulus is required so that we avoid writing
+	 * request->tail == ring->size, rather than the expected 0,
+	 * into the RING_TAIL register as that can cause a GPU hang.
+	 * As this is only strictly required for the request->tail,
+	 * and only then as we write the value into hardware, we can
+	 * one day remove the modulus after every command packet.
+	 */
 	ring->tail &= ring->size - 1;
 }
 
-- 
2.8.1


* [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (11 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 12/73] drm/i915: Convert engine->write_tail to operate on a request Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01 10:07   ` Joonas Lahtinen
  2016-08-01  9:10 ` [PATCH 14/73] drm/i915: Unify request submission Chris Wilson
                   ` (60 subsequent siblings)
  73 siblings, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Space reservation is already safe with respect to the ring->size
modulus, but hardware only expects to see values in the range
0...ring->size-1 (inclusive) and so requires the modulus to prevent us
writing the value ring->size instead of 0. As this is only required for
the register itself, we can defer the modulus to the register update and
not perform it after every command packet. We keep the
intel_ring_advance() around in the code to provide demarcation for the
end-of-packet (which then can be compared against intel_ring_begin() as
the number of dwords emitted must match the reserved space).
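
As a sketch (ring->size is always a power of two, so the modulus is a
simple mask; the worked value is illustrative):

    static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
    {
            /* avoid handing ring->size to the hardware when we mean 0 */
            return value & (ring->size - 1);
    }

    /* e.g. with ring->size == 4096, a tail of 4096 wraps to 0 - the
     * value the hardware expects - while 0..4095 pass through unchanged.
     */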

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Dave Gordon <david.s.gordon@intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c |  6 ++++--
 drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++++++++----
 3 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bf42a66d6624..824f7efe4e64 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
 	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
 
-	reg_state[CTX_RING_TAIL+1] = rq->tail;
+	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
 
 	/* True 32b PPGTT with dynamic page allocation: update PDP
 	 * registers and point the unallocated PDPs to scratch page.
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3142085b5cc0..21d5e8209400 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1718,7 +1718,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
 {
 	struct drm_i915_private *dev_priv = request->i915;
 
-	I915_WRITE_TAIL(request->engine, request->tail);
+	I915_WRITE_TAIL(request->engine,
+			intel_ring_offset(request->ring, request->tail));
 }
 
 static void
@@ -2505,7 +2506,8 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
 	/* Now that the ring is fully powered up, update the tail */
-	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
+	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
+		      intel_ring_offset(request->ring, request->tail));
 	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
 
 	/* Let the ring send IDLE messages to the GT again,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 14d2ea36fb88..9ac96ddb01ee 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -460,14 +460,23 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
 
 static inline void intel_ring_advance(struct intel_ring *ring)
 {
+	/* Dummy function.
+	 *
+	 * This serves as a placeholder in the code so that the reader
+	 * can compare against the preceding intel_ring_begin() and
+	 * check that the number of dwords emitted matches the space
+	 * reserved for the command packet (i.e. the value passed to
+	 * intel_ring_begin()).
+	 */
+}
+
+static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
+{
 	/* The modulus is required so that we avoid writing
 	 * request->tail == ring->size, rather than the expected 0,
 	 * into the RING_TAIL register as that can cause a GPU hang.
-	 * As this is only strictly required for the request->tail,
-	 * and only then as we write the value into hardware, we can
-	 * one day remove the modulus after every command packet.
 	 */
-	ring->tail &= ring->size - 1;
+	return value & (ring->size - 1);
 }
 
 int __intel_ring_space(int head, int tail, int size);
-- 
2.8.1


* [PATCH 14/73] drm/i915: Unify request submission
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (12 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01 14:30   ` Joonas Lahtinen
  2016-08-01 17:17   ` [PATCH v2] " Chris Wilson
  2016-08-01  9:10 ` [PATCH 15/73] drm/i915/lrc: Update function names to match request flow Chris Wilson
                   ` (59 subsequent siblings)
  73 siblings, 2 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Move request submission out of emit_request and into its own common
vfunc, called from i915_add_request().

v2: Convert I915_DISPATCH_flags to BIT(x) whilst passing
v3: Rename a few functions to match.
v4: Reenable execlists submission after disabling guc.
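
The net effect on __i915_add_request, condensed from the
i915_gem_request.c hunk below (error handling elided):

    /* emission and submission are now two distinct steps */
    ret = engine->emit_request(request);   /* write commands into the ring */
    WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);

    i915_gem_mark_busy(engine);
    engine->submit_request(request);       /* legacy tail write, ELSP or GuC */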

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-23-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> #v3
---
 drivers/gpu/drm/i915/i915_gem_request.c    |  8 +++-----
 drivers/gpu/drm/i915/i915_guc_submission.c | 12 +++++++++---
 drivers/gpu/drm/i915/intel_guc.h           |  1 -
 drivers/gpu/drm/i915/intel_lrc.c           | 26 +++++++++++++++-----------
 drivers/gpu/drm/i915/intel_lrc.h           |  2 ++
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 23 +++++++++--------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    | 27 +++++++++++++--------------
 7 files changed, 51 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index a885905df3bb..e378eb61979b 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -466,12 +466,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->postfix = ring->tail;
 
-	if (i915.enable_execlists)
-		ret = engine->emit_request(request);
-	else
-		ret = engine->add_request(request);
 	/* Not allowed to fail! */
-	WARN(ret, "emit|add_request failed: %d!\n", ret);
+	ret = engine->emit_request(request);
+	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
 
 	/* Sanity check that the reserved size was large enough. */
 	ret = ring->tail - request_start;
@@ -483,6 +480,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		  reserved_tail, ret);
 
 	i915_gem_mark_busy(engine);
+	engine->submit_request(request);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index eccd34832fe6..23bbd0ff86c7 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -585,7 +585,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouldn't happen.
  */
-int i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
 {
 	unsigned int engine_id = rq->engine->id;
 	struct intel_guc *guc = &rq->i915->guc;
@@ -602,8 +602,6 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
 
 	guc->submissions[engine_id] += 1;
 	guc->last_seqno[engine_id] = rq->fence.seqno;
-
-	return b_ret;
 }
 
 /*
@@ -992,6 +990,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_guc_client *client;
+	struct intel_engine_cs *engine;
 
 	/* client for execbuf submission */
 	client = guc_client_alloc(dev_priv,
@@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	host2guc_sample_forcewake(guc, client);
 	guc_init_doorbell_hw(guc);
 
+	/* Take over from manual control of ELSP (execlists) */
+	for_each_engine(engine, dev_priv)
+		engine->submit_request = i915_guc_submit;
+
 	return 0;
 }
 
@@ -1015,6 +1018,9 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
 
 	guc_client_free(dev_priv, guc->execbuf_client);
 	guc->execbuf_client = NULL;
+
+	/* Revert back to manual ELSP submission */
+	intel_execlists_enable_submission(dev_priv);
 }
 
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 3e3e743740c0..623cf26cd784 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -160,7 +160,6 @@ extern int intel_guc_resume(struct drm_device *dev);
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
 int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
-int i915_guc_submit(struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 824f7efe4e64..8a72ee86e825 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -738,7 +738,7 @@ err_unpin:
 }
 
 /*
- * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * intel_logical_ring_advance() - advance the tail and prepare for submission
  * @request: Request to advance the logical ringbuffer of.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
@@ -747,7 +747,7 @@ err_unpin:
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static int
-intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
+intel_logical_ring_advance(struct drm_i915_gem_request *request)
 {
 	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
@@ -773,12 +773,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	 */
 	request->previous_context = engine->last_context;
 	engine->last_context = request->ctx;
-
-	if (i915.enable_guc_submission)
-		i915_guc_submit(request);
-	else
-		execlists_context_queue(request);
-
 	return 0;
 }
 
@@ -1770,7 +1764,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 	intel_ring_emit(ring, request->fence.seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
-	return intel_logical_ring_advance_and_submit(request);
+	return intel_logical_ring_advance(request);
 }
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
@@ -1801,7 +1795,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
-	return intel_logical_ring_advance_and_submit(request);
+	return intel_logical_ring_advance(request);
 }
 
 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
@@ -1902,13 +1896,23 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	engine->i915 = NULL;
 }
 
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, dev_priv)
+		engine->submit_request = execlists_context_queue;
+}
+
 static void
 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
  	/* Default vfuncs which can be overridden by each engine. */
 	engine->init_hw = gen8_init_common_ring;
-	engine->emit_request = gen8_emit_request;
 	engine->emit_flush = gen8_emit_flush;
+	engine->emit_request = gen8_emit_request;
+	engine->submit_request = execlists_context_queue;
+
 	engine->irq_enable = gen8_logical_ring_enable_irq;
 	engine->irq_disable = gen8_logical_ring_disable_irq;
 	engine->emit_bb_start = gen8_emit_bb_start;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 33e0193e5451..bdd764af6d06 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -95,6 +95,8 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
+
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 21d5e8209400..abea42408b5a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1428,15 +1428,14 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 }
 
 /**
- * gen6_add_request - Update the semaphore mailbox registers
+ * gen6_emit_request - Update the semaphore mailbox registers
  *
  * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static int
-gen6_add_request(struct drm_i915_gem_request *req)
+static int gen6_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
@@ -1457,13 +1456,11 @@ gen6_add_request(struct drm_i915_gem_request *req)
 	intel_ring_advance(ring);
 
 	req->tail = ring->tail;
-	engine->submit_request(req);
 
 	return 0;
 }
 
-static int
-gen8_render_add_request(struct drm_i915_gem_request *req)
+static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
@@ -1487,9 +1484,9 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	req->tail = ring->tail;
-	engine->submit_request(req);
 
 	return 0;
 }
@@ -1692,8 +1689,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	return 0;
 }
 
-static int
-i9xx_add_request(struct drm_i915_gem_request *req)
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1709,7 +1705,6 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	intel_ring_advance(ring);
 
 	req->tail = ring->tail;
-	req->engine->submit_request(req);
 
 	return 0;
 }
@@ -2812,11 +2807,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 				      struct intel_engine_cs *engine)
 {
 	engine->init_hw = init_ring_common;
-	engine->submit_request = i9xx_submit_request;
 
-	engine->add_request = i9xx_add_request;
+	engine->emit_request = i9xx_emit_request;
 	if (INTEL_GEN(dev_priv) >= 6)
-		engine->add_request = gen6_add_request;
+		engine->emit_request = gen6_emit_request;
+	engine->submit_request = i9xx_submit_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		engine->emit_bb_start = gen8_emit_bb_start;
@@ -2845,7 +2840,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->add_request = gen8_render_add_request;
+		engine->emit_request = gen8_render_emit_request;
 		engine->emit_flush = gen8_render_ring_flush;
 		if (i915.semaphores)
 			engine->semaphore.signal = gen8_rcs_signal;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 9ac96ddb01ee..5e06d6fbd506 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -204,7 +204,19 @@ struct intel_engine_cs {
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	int		(*add_request)(struct drm_i915_gem_request *req);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
+				      u32 mode);
+#define EMIT_INVALIDATE	BIT(0)
+#define EMIT_FLUSH	BIT(1)
+#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
+					 u64 offset, u32 length,
+					 unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+#define I915_DISPATCH_RS     BIT(2)
+	int		(*emit_request)(struct drm_i915_gem_request *req);
+	void		(*submit_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@@ -282,19 +294,6 @@ struct intel_engine_cs {
 	unsigned int idle_lite_restore_wa;
 	bool disable_lite_restore_wa;
 	u32 ctx_desc_template;
-	int		(*emit_request)(struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct drm_i915_gem_request *request,
-				      u32 mode);
-#define EMIT_INVALIDATE	BIT(0)
-#define EMIT_FLUSH	BIT(1)
-#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
-	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, u32 length,
-					 unsigned int dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
-	void		(*submit_request)(struct drm_i915_gem_request *req);
 
 	/**
 	 * List of objects currently involved in rendering from the
-- 
2.8.1


* [PATCH 15/73] drm/i915/lrc: Update function names to match request flow
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (13 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 14/73] drm/i915: Unify request submission Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 16/73] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal() Chris Wilson
                   ` (58 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

With the addition of engine->submit_request, we now have a bunch of
functions with similar names used at different stages of the execlist
submission. Try a different coat of paint, to hopefully reduce confusion
between the requests, intel_engine_cs and the actual execlists
submission process.
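
For reference, the renames are:

    execlists_submit_requests()  -> execlists_elsp_submit_contexts()
    execlists_context_unqueue()  -> execlists_unqueue()
    execlists_context_queue()    -> execlists_submit_request()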

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-24-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_lrc.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8a72ee86e825..961767929fed 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -384,8 +384,8 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
 		execlists_update_context_pdps(ppgtt, reg_state);
 }
 
-static void execlists_submit_requests(struct drm_i915_gem_request *rq0,
-				      struct drm_i915_gem_request *rq1)
+static void execlists_elsp_submit_contexts(struct drm_i915_gem_request *rq0,
+					   struct drm_i915_gem_request *rq1)
 {
 	struct drm_i915_private *dev_priv = rq0->i915;
 	unsigned int fw_domains = rq0->engine->fw_domains;
@@ -418,7 +418,7 @@ static inline void execlists_context_status_change(
 	atomic_notifier_call_chain(&rq->ctx->status_notifier, status, rq);
 }
 
-static void execlists_context_unqueue(struct intel_engine_cs *engine)
+static void execlists_unqueue(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req0 = NULL, *req1 = NULL;
 	struct drm_i915_gem_request *cursor, *tmp;
@@ -486,7 +486,7 @@ static void execlists_context_unqueue(struct intel_engine_cs *engine)
 		req0->tail &= req0->ring->size - 1;
 	}
 
-	execlists_submit_requests(req0, req1);
+	execlists_elsp_submit_contexts(req0, req1);
 }
 
 static unsigned int
@@ -597,7 +597,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 	if (submit_contexts) {
 		if (!engine->disable_lite_restore_wa ||
 		    (csb[i][0] & GEN8_CTX_STATUS_ACTIVE_IDLE))
-			execlists_context_unqueue(engine);
+			execlists_unqueue(engine);
 	}
 
 	spin_unlock(&engine->execlist_lock);
@@ -606,7 +606,7 @@ static void intel_lrc_irq_handler(unsigned long data)
 		DRM_ERROR("More than two context complete events?\n");
 }
 
-static void execlists_context_queue(struct drm_i915_gem_request *request)
+static void execlists_submit_request(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct drm_i915_gem_request *cursor;
@@ -637,7 +637,7 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	list_add_tail(&request->execlist_link, &engine->execlist_queue);
 	request->ctx_hw_id = request->ctx->hw_id;
 	if (num_elements == 0)
-		execlists_context_unqueue(engine);
+		execlists_unqueue(engine);
 
 	spin_unlock_bh(&engine->execlist_lock);
 }
@@ -1901,7 +1901,7 @@ void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
 	struct intel_engine_cs *engine;
 
 	for_each_engine(engine, dev_priv)
-		engine->submit_request = execlists_context_queue;
+		engine->submit_request = execlists_submit_request;
 }
 
 static void
@@ -1911,7 +1911,7 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 	engine->init_hw = gen8_init_common_ring;
 	engine->emit_flush = gen8_emit_flush;
 	engine->emit_request = gen8_emit_request;
-	engine->submit_request = execlists_context_queue;
+	engine->submit_request = execlists_submit_request;
 
 	engine->irq_enable = gen8_logical_ring_enable_irq;
 	engine->irq_disable = gen8_logical_ring_disable_irq;
-- 
2.8.1


* [PATCH 16/73] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal()
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (14 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 15/73] drm/i915/lrc: Update function names to match request flow Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 17/73] drm/i915: Reuse legacy breadcrumbs + tail emission Chris Wilson
                   ` (57 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Rather than pass in the num_dwords that the caller wishes to use after
the signal command packet, split the breadcrumb emission into two phases
and have both the signal and the breadcrumb individually acquire space
on the ring. This makes the interface simpler for the reader, and will
simplify later patches.
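
Sketched from the gen6_emit_request hunk below, each phase now begins
and advances its own packet:

    if (engine->semaphore.signal) {
            ret = engine->semaphore.signal(req);  /* reserves its own space */
            if (ret)
                    return ret;
    }

    ret = intel_ring_begin(req, 4);  /* just the breadcrumb dwords */
    if (ret)
            return ret;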

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-25-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 51 ++++++++++++++-------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  4 +--
 2 files changed, 23 insertions(+), 32 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index abea42408b5a..9d93468cc2ce 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1309,10 +1309,8 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 	intel_fini_pipe_control(engine);
 }
 
-static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
-			   unsigned int num_dwords)
+static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req)
 {
-#define MBOX_UPDATE_DWORDS 8
 	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
@@ -1320,10 +1318,7 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
-	ret = intel_ring_begin(signaller_req, num_dwords);
+	ret = intel_ring_begin(signaller_req, (num_rings-1) * 8);
 	if (ret)
 		return ret;
 
@@ -1347,14 +1342,13 @@ static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req,
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
+	intel_ring_advance(signaller);
 
 	return 0;
 }
 
-static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
-			   unsigned int num_dwords)
+static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req)
 {
-#define MBOX_UPDATE_DWORDS 6
 	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
 	struct intel_engine_cs *waiter;
@@ -1362,10 +1356,7 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	num_dwords += (num_rings-1) * MBOX_UPDATE_DWORDS;
-#undef MBOX_UPDATE_DWORDS
-
-	ret = intel_ring_begin(signaller_req, num_dwords);
+	ret = intel_ring_begin(signaller_req, (num_rings-1) * 6);
 	if (ret)
 		return ret;
 
@@ -1387,12 +1378,12 @@ static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req,
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
 		intel_ring_emit(signaller, 0);
 	}
+	intel_ring_advance(signaller);
 
 	return 0;
 }
 
-static int gen6_signal(struct drm_i915_gem_request *signaller_req,
-		       unsigned int num_dwords)
+static int gen6_signal(struct drm_i915_gem_request *signaller_req)
 {
 	struct intel_ring *signaller = signaller_req->ring;
 	struct drm_i915_private *dev_priv = signaller_req->i915;
@@ -1400,12 +1391,8 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 	enum intel_engine_id id;
 	int ret, num_rings;
 
-#define MBOX_UPDATE_DWORDS 3
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	num_dwords += round_up((num_rings-1) * MBOX_UPDATE_DWORDS, 2);
-#undef MBOX_UPDATE_DWORDS
-
-	ret = intel_ring_begin(signaller_req, num_dwords);
+	ret = intel_ring_begin(signaller_req, round_up((num_rings-1) * 3, 2));
 	if (ret)
 		return ret;
 
@@ -1423,6 +1410,7 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 	/* If num_dwords was rounded, make sure the tail pointer is correct */
 	if (num_rings % 2 == 0)
 		intel_ring_emit(signaller, MI_NOOP);
+	intel_ring_advance(signaller);
 
 	return 0;
 }
@@ -1441,11 +1429,13 @@ static int gen6_emit_request(struct drm_i915_gem_request *req)
 	struct intel_ring *ring = req->ring;
 	int ret;
 
-	if (engine->semaphore.signal)
-		ret = engine->semaphore.signal(req, 4);
-	else
-		ret = intel_ring_begin(req, 4);
+	if (engine->semaphore.signal) {
+		ret = engine->semaphore.signal(req);
+		if (ret)
+			return ret;
+	}
 
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
@@ -1466,10 +1456,13 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 	struct intel_ring *ring = req->ring;
 	int ret;
 
-	if (engine->semaphore.signal)
-		ret = engine->semaphore.signal(req, 8);
-	else
-		ret = intel_ring_begin(req, 8);
+	if (engine->semaphore.signal) {
+		ret = engine->semaphore.signal(req);
+		if (ret)
+			return ret;
+	}
+
+	ret = intel_ring_begin(req, 8);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 5e06d6fbd506..6f2edae6ef0d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -280,9 +280,7 @@ struct intel_engine_cs {
 		int	(*sync_to)(struct drm_i915_gem_request *to_req,
 				   struct intel_engine_cs *from,
 				   u32 seqno);
-		int	(*signal)(struct drm_i915_gem_request *signaller_req,
-				  /* num_dwords needed by caller */
-				  unsigned int num_dwords);
+		int	(*signal)(struct drm_i915_gem_request *signaller_req);
 	} semaphore;
 
 	/* Execlists */
-- 
2.8.1


* [PATCH 17/73] drm/i915: Reuse legacy breadcrumbs + tail emission
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (15 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 16/73] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal() Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 18/73] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores Chris Wilson
                   ` (56 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

As GEN6+ is now a simple variant on the basic breadcrumbs + tail write,
reuse the common code.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-26-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 74 +++++++++++++--------------------
 1 file changed, 30 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9d93468cc2ce..a020d816332e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1415,26 +1415,19 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req)
 	return 0;
 }
 
-/**
- * gen6_emit_request - Update the semaphore mailbox registers
- *
- * @request - request to write to the ring
- *
- * Update the mailbox registers in the *other* rings with the current seqno.
- * This acts like a signal in the canonical semaphore.
- */
-static int gen6_emit_request(struct drm_i915_gem_request *req)
+static void i9xx_submit_request(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_private *dev_priv = request->i915;
+
+	I915_WRITE_TAIL(request->engine,
+			intel_ring_offset(request->ring, request->tail));
+}
+
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
 	int ret;
 
-	if (engine->semaphore.signal) {
-		ret = engine->semaphore.signal(req);
-		if (ret)
-			return ret;
-	}
-
 	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
@@ -1450,6 +1443,27 @@ static int gen6_emit_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
+/**
+ * gen6_emit_request - Update the semaphore mailbox registers
+ *
+ * @request - request to write to the ring
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
+static int gen6_emit_request(struct drm_i915_gem_request *req)
+{
+	if (req->engine->semaphore.signal) {
+		int ret;
+
+		ret = req->engine->semaphore.signal(req);
+		if (ret)
+			return ret;
+	}
+
+	return i9xx_emit_request(req);
+}
+
 static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
@@ -1682,34 +1696,6 @@ bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	return 0;
 }
 
-static int i9xx_emit_request(struct drm_i915_gem_request *req)
-{
-	struct intel_ring *ring = req->ring;
-	int ret;
-
-	ret = intel_ring_begin(req, 4);
-	if (ret)
-		return ret;
-
-	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
-	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, req->fence.seqno);
-	intel_ring_emit(ring, MI_USER_INTERRUPT);
-	intel_ring_advance(ring);
-
-	req->tail = ring->tail;
-
-	return 0;
-}
-
-static void i9xx_submit_request(struct drm_i915_gem_request *request)
-{
-	struct drm_i915_private *dev_priv = request->i915;
-
-	I915_WRITE_TAIL(request->engine,
-			intel_ring_offset(request->ring, request->tail));
-}
-
 static void
 gen6_irq_enable(struct intel_engine_cs *engine)
 {
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 18/73] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (16 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 17/73] drm/i915: Reuse legacy breadcrumbs + tail emission Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 19/73] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
                   ` (55 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

As gen6_emit_request() only differs from i9xx_emit_request() when
semaphores are enabled, only use the specialised vfunc in that scenario.

v2: Reorder semaphore init so as to keep engine->emit_request default
vfunc selection compact.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-27-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a020d816332e..cce6258a0c5d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1444,22 +1444,20 @@ static int i9xx_emit_request(struct drm_i915_gem_request *req)
 }
 
 /**
- * gen6_emit_request - Update the semaphore mailbox registers
+ * gen6_sema_emit_request - Update the semaphore mailbox registers
  *
  * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static int gen6_emit_request(struct drm_i915_gem_request *req)
+static int gen6_sema_emit_request(struct drm_i915_gem_request *req)
 {
-	if (req->engine->semaphore.signal) {
-		int ret;
+	int ret;
 
-		ret = req->engine->semaphore.signal(req);
-		if (ret)
-			return ret;
-	}
+	ret = req->engine->semaphore.signal(req);
+	if (ret)
+		return ret;
 
 	return i9xx_emit_request(req);
 }
@@ -2785,12 +2783,15 @@ static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 				      struct intel_engine_cs *engine)
 {
+	intel_ring_init_irq(dev_priv, engine);
+	intel_ring_init_semaphores(dev_priv, engine);
+
 	engine->init_hw = init_ring_common;
 
 	engine->emit_request = i9xx_emit_request;
-	if (INTEL_GEN(dev_priv) >= 6)
-		engine->emit_request = gen6_emit_request;
 	engine->submit_request = i9xx_submit_request;
+	if (i915.semaphores)
+		engine->emit_request = gen6_sema_emit_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		engine->emit_bb_start = gen8_emit_bb_start;
@@ -2802,9 +2803,6 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 		engine->emit_bb_start = i830_emit_bb_start;
 	else
 		engine->emit_bb_start = i915_emit_bb_start;
-
-	intel_ring_init_irq(dev_priv, engine);
-	intel_ring_init_semaphores(dev_priv, engine);
 }
 
 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
-- 
2.8.1


* [PATCH 19/73] drm/i915: Remove duplicate golden render state init from execlists
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (17 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 18/73] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 20/73] drm/i915: Refactor golden render state emission to unconfuse gcc Chris Wilson
                   ` (54 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Now that we use the same vfuncs for emitting the batch buffer in both
execlists and legacy submission, the golden render state initialisation
is identical between the two paths.

v2: gcc wants so.ggtt_offset initialised (even though it is not used)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-28-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_gem_render_state.c | 23 +++++++++++++------
 drivers/gpu/drm/i915/i915_gem_render_state.h | 18 ---------------
 drivers/gpu/drm/i915/intel_lrc.c             | 34 +---------------------------
 drivers/gpu/drm/i915/intel_renderstate.h     | 16 +++++++++----
 4 files changed, 28 insertions(+), 63 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 2ba759f3ab6f..a9b56d18a93b 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,6 +28,15 @@
 #include "i915_drv.h"
 #include "intel_renderstate.h"
 
+struct render_state {
+	const struct intel_renderstate_rodata *rodata;
+	struct drm_i915_gem_object *obj;
+	u64 ggtt_offset;
+	int gen;
+	u32 aux_batch_size;
+	u32 aux_batch_offset;
+};
+
 static const struct intel_renderstate_rodata *
 render_state_get_rodata(const int gen)
 {
@@ -51,6 +60,7 @@ static int render_state_init(struct render_state *so,
 	int ret;
 
 	so->gen = INTEL_GEN(dev_priv);
+	so->ggtt_offset = 0; /* keep gcc quiet */
 	so->rodata = render_state_get_rodata(so->gen);
 	if (so->rodata == NULL)
 		return 0;
@@ -192,14 +202,14 @@ err_out:
 
 #undef OUT_BATCH
 
-void i915_gem_render_state_fini(struct render_state *so)
+static void render_state_fini(struct render_state *so)
 {
 	i915_gem_object_ggtt_unpin(so->obj);
 	i915_gem_object_put(so->obj);
 }
 
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
-				  struct render_state *so)
+static int render_state_prepare(struct intel_engine_cs *engine,
+				struct render_state *so)
 {
 	int ret;
 
@@ -215,7 +225,7 @@ int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
 
 	ret = render_state_setup(so);
 	if (ret) {
-		i915_gem_render_state_fini(so);
+		render_state_fini(so);
 		return ret;
 	}
 
@@ -227,7 +237,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	struct render_state so;
 	int ret;
 
-	ret = i915_gem_render_state_prepare(req->engine, &so);
+	ret = render_state_prepare(req->engine, &so);
 	if (ret)
 		return ret;
 
@@ -251,8 +261,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	}
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-
 out:
-	i915_gem_render_state_fini(&so);
+	render_state_fini(&so);
 	return ret;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 6aaa3a10a630..c44fca8599bb 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -26,24 +26,6 @@
 
 #include <linux/types.h>
 
-struct intel_renderstate_rodata {
-	const u32 *reloc;
-	const u32 *batch;
-	const u32 batch_items;
-};
-
-struct render_state {
-	const struct intel_renderstate_rodata *rodata;
-	struct drm_i915_gem_object *obj;
-	u64 ggtt_offset;
-	int gen;
-	u32 aux_batch_size;
-	u32 aux_batch_offset;
-};
-
 int i915_gem_render_state_init(struct drm_i915_gem_request *req);
-void i915_gem_render_state_fini(struct render_state *so);
-int i915_gem_render_state_prepare(struct intel_engine_cs *engine,
-				  struct render_state *so);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 961767929fed..8160416f0018 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1798,38 +1798,6 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	return intel_logical_ring_advance(request);
 }
 
-static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
-{
-	struct render_state so;
-	int ret;
-
-	ret = i915_gem_render_state_prepare(req->engine, &so);
-	if (ret)
-		return ret;
-
-	if (so.rodata == NULL)
-		return 0;
-
-	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
-					 so.rodata->batch_items * 4,
-					 I915_DISPATCH_SECURE);
-	if (ret)
-		goto out;
-
-	ret = req->engine->emit_bb_start(req,
-					 (so.ggtt_offset + so.aux_batch_offset),
-					 so.aux_batch_size,
-					 I915_DISPATCH_SECURE);
-	if (ret)
-		goto out;
-
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-
-out:
-	i915_gem_render_state_fini(&so);
-	return ret;
-}
-
 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 {
 	int ret;
@@ -1846,7 +1814,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 	if (ret)
 		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
-	return intel_lr_context_render_state_init(req);
+	return i915_gem_render_state_init(req);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_renderstate.h b/drivers/gpu/drm/i915/intel_renderstate.h
index 5bd69852752c..08f6fea05a2c 100644
--- a/drivers/gpu/drm/i915/intel_renderstate.h
+++ b/drivers/gpu/drm/i915/intel_renderstate.h
@@ -24,12 +24,13 @@
 #ifndef _INTEL_RENDERSTATE_H
 #define _INTEL_RENDERSTATE_H
 
-#include "i915_drv.h"
+#include <linux/types.h>
 
-extern const struct intel_renderstate_rodata gen6_null_state;
-extern const struct intel_renderstate_rodata gen7_null_state;
-extern const struct intel_renderstate_rodata gen8_null_state;
-extern const struct intel_renderstate_rodata gen9_null_state;
+struct intel_renderstate_rodata {
+	const u32 *reloc;
+	const u32 *batch;
+	const u32 batch_items;
+};
 
 #define RO_RENDERSTATE(_g)						\
 	const struct intel_renderstate_rodata gen ## _g ## _null_state = { \
@@ -38,4 +39,9 @@ extern const struct intel_renderstate_rodata gen9_null_state;
 		.batch_items = sizeof(gen ## _g ## _null_state_batch)/4, \
 	}
 
+extern const struct intel_renderstate_rodata gen6_null_state;
+extern const struct intel_renderstate_rodata gen7_null_state;
+extern const struct intel_renderstate_rodata gen8_null_state;
+extern const struct intel_renderstate_rodata gen9_null_state;
+
 #endif /* INTEL_RENDERSTATE_H */
-- 
2.8.1


* [PATCH 20/73] drm/i915: Refactor golden render state emission to unconfuse gcc
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (18 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 19/73] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 21/73] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
                   ` (53 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

GCC was inlining the init and setup functions, but was getting itself
confused into thinking that variables could be used uninitialised. If we
do the inlining for gcc ourselves, it is happy! As a bonus we shrink the code.

v2: A couple of minor tweaks from Joonas
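
To illustrate the class of warning (a standalone userspace sketch, not
driver code; whether gcc actually warns depends on version and
optimisation level):

	/* Build with: gcc -O2 -Wmaybe-uninitialized -c demo.c
	 *
	 * Mimics the render_state init/setup split: a helper writes its
	 * output only on success, and after inlining gcc may fail to
	 * prove 'value' is initialised on the path that reads it.
	 */
	static int init(int *out, int arg)
	{
		if (arg < 0)
			return -1;	/* 'out' deliberately untouched */
		*out = 2 * arg;
		return 0;
	}

	int demo(int arg)
	{
		int value;

		if (init(&value, arg))
			return -1;	/* only path where 'value' is unset */

		return value;		/* safe, but hard for gcc to see */
	}

Folding the helpers into one function, as this patch does, removes the
false positive without needing the "keep gcc quiet" initialisation.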

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-29-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_render_state.c | 95 ++++++++--------------------
 1 file changed, 27 insertions(+), 68 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index a9b56d18a93b..f85c5505bce2 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -32,15 +32,14 @@ struct render_state {
 	const struct intel_renderstate_rodata *rodata;
 	struct drm_i915_gem_object *obj;
 	u64 ggtt_offset;
-	int gen;
 	u32 aux_batch_size;
 	u32 aux_batch_offset;
 };
 
 static const struct intel_renderstate_rodata *
-render_state_get_rodata(const int gen)
+render_state_get_rodata(const struct drm_i915_gem_request *req)
 {
-	switch (gen) {
+	switch (INTEL_GEN(req->i915)) {
 	case 6:
 		return &gen6_null_state;
 	case 7:
@@ -54,36 +53,6 @@ render_state_get_rodata(const int gen)
 	return NULL;
 }
 
-static int render_state_init(struct render_state *so,
-			     struct drm_i915_private *dev_priv)
-{
-	int ret;
-
-	so->gen = INTEL_GEN(dev_priv);
-	so->ggtt_offset = 0; /* keep gcc quiet */
-	so->rodata = render_state_get_rodata(so->gen);
-	if (so->rodata == NULL)
-		return 0;
-
-	if (so->rodata->batch_items * 4 > 4096)
-		return -EINVAL;
-
-	so->obj = i915_gem_object_create(&dev_priv->drm, 4096);
-	if (IS_ERR(so->obj))
-		return PTR_ERR(so->obj);
-
-	ret = i915_gem_obj_ggtt_pin(so->obj, 4096, 0);
-	if (ret)
-		goto free_gem;
-
-	so->ggtt_offset = i915_gem_obj_ggtt_offset(so->obj);
-	return 0;
-
-free_gem:
-	i915_gem_object_put(so->obj);
-	return ret;
-}
-
 /*
  * Macro to add commands to auxiliary batch.
  * This macro only checks for page overflow before inserting the commands,
@@ -106,6 +75,7 @@ static int render_state_setup(struct render_state *so)
 {
 	struct drm_device *dev = so->obj->base.dev;
 	const struct intel_renderstate_rodata *rodata = so->rodata;
+	const bool has_64bit_reloc = INTEL_GEN(dev) >= 8;
 	unsigned int i = 0, reloc_index = 0;
 	struct page *page;
 	u32 *d;
@@ -124,7 +94,7 @@ static int render_state_setup(struct render_state *so)
 		if (i * 4  == rodata->reloc[reloc_index]) {
 			u64 r = s + so->ggtt_offset;
 			s = lower_32_bits(r);
-			if (so->gen >= 8) {
+			if (has_64bit_reloc) {
 				if (i + 1 >= rodata->batch_items ||
 				    rodata->batch[i + 1] != 0) {
 					ret = -EINVAL;
@@ -202,53 +172,40 @@ err_out:
 
 #undef OUT_BATCH
 
-static void render_state_fini(struct render_state *so)
-{
-	i915_gem_object_ggtt_unpin(so->obj);
-	i915_gem_object_put(so->obj);
-}
-
-static int render_state_prepare(struct intel_engine_cs *engine,
-				struct render_state *so)
+int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 {
+	struct render_state so;
 	int ret;
 
-	if (WARN_ON(engine->id != RCS))
+	if (WARN_ON(req->engine->id != RCS))
 		return -ENOENT;
 
-	ret = render_state_init(so, engine->i915);
-	if (ret)
-		return ret;
-
-	if (so->rodata == NULL)
+	so.rodata = render_state_get_rodata(req);
+	if (!so.rodata)
 		return 0;
 
-	ret = render_state_setup(so);
-	if (ret) {
-		render_state_fini(so);
-		return ret;
-	}
-
-	return 0;
-}
+	if (so.rodata->batch_items * 4 > 4096)
+		return -EINVAL;
 
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
-{
-	struct render_state so;
-	int ret;
+	so.obj = i915_gem_object_create(&req->i915->drm, 4096);
+	if (IS_ERR(so.obj))
+		return PTR_ERR(so.obj);
 
-	ret = render_state_prepare(req->engine, &so);
+	ret = i915_gem_obj_ggtt_pin(so.obj, 4096, 0);
 	if (ret)
-		return ret;
+		goto err_obj;
 
-	if (so.rodata == NULL)
-		return 0;
+	so.ggtt_offset = i915_gem_obj_ggtt_offset(so.obj);
+
+	ret = render_state_setup(&so);
+	if (ret)
+		goto err_unpin;
 
 	ret = req->engine->emit_bb_start(req, so.ggtt_offset,
 					 so.rodata->batch_items * 4,
 					 I915_DISPATCH_SECURE);
 	if (ret)
-		goto out;
+		goto err_unpin;
 
 	if (so.aux_batch_size > 8) {
 		ret = req->engine->emit_bb_start(req,
@@ -257,11 +214,13 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 						 so.aux_batch_size,
 						 I915_DISPATCH_SECURE);
 		if (ret)
-			goto out;
+			goto err_unpin;
 	}
 
 	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
-out:
-	render_state_fini(&so);
+err_unpin:
+	i915_gem_object_ggtt_unpin(so.obj);
+err_obj:
+	i915_gem_object_put(so.obj);
 	return ret;
 }
-- 
2.8.1


* [PATCH 21/73] drm/i915: Unify legacy/execlists submit_execbuf callbacks
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (19 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 20/73] drm/i915: Refactor golden render state emission to unconfuse gcc Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 22/73] drm/i915: Simplify calling engine->sync_to Chris Wilson
                   ` (52 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Now that emitting requests is identical between legacy and execlists, we
can use the same function to build up the ring for submitting to either
engine. (With the exception of i915_switch_context(), but in time that
will also be handled gracefully.)
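
That remaining divergence is folded into i915_switch_context() itself,
so the unified submission path never needs to check the mode;
abbreviated from the diff below:

	int i915_switch_context(struct drm_i915_gem_request *req)
	{
		lockdep_assert_held(&req->i915->drm.struct_mutex);
		if (i915.enable_execlists)
			return 0;	/* the context is carried by the LRC */

		/* ... legacy MI_SET_CONTEXT path continues ... */
	}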

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-30-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.h            |  20 -----
 drivers/gpu/drm/i915/i915_gem.c            |   2 -
 drivers/gpu/drm/i915/i915_gem_context.c    |   7 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  24 ++++--
 drivers/gpu/drm/i915/intel_lrc.c           | 123 -----------------------------
 drivers/gpu/drm/i915/intel_lrc.h           |   5 --
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   6 +-
 7 files changed, 23 insertions(+), 164 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8f4edc9dd7a3..49ce21a82c29 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1705,18 +1705,6 @@ struct i915_virtual_gpu {
 	bool active;
 };
 
-struct i915_execbuffer_params {
-	struct drm_device               *dev;
-	struct drm_file                 *file;
-	uint32_t                        dispatch_flags;
-	uint32_t                        args_batch_start_offset;
-	uint64_t                        batch_obj_vm_offset;
-	struct intel_engine_cs *engine;
-	struct drm_i915_gem_object      *batch_obj;
-	struct i915_gem_context            *ctx;
-	struct drm_i915_gem_request     *request;
-};
-
 /* used in computing the new watermarks state */
 struct intel_wm_config {
 	unsigned int num_pipes_active;
@@ -2016,9 +2004,6 @@ struct drm_i915_private {
 
 	/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
 	struct {
-		int (*execbuf_submit)(struct i915_execbuffer_params *params,
-				      struct drm_i915_gem_execbuffer2 *args,
-				      struct list_head *vmas);
 		void (*cleanup_engine)(struct intel_engine_cs *engine);
 		void (*stop_engine)(struct intel_engine_cs *engine);
 
@@ -2993,11 +2978,6 @@ int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
-void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
-					struct drm_i915_gem_request *req);
-int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
-				   struct drm_i915_gem_execbuffer2 *args,
-				   struct list_head *vmas);
 int i915_gem_execbuffer(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_execbuffer2(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b6c4ff63725f..d79b949fb4c4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4534,11 +4534,9 @@ int i915_gem_init(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 
 	if (!i915.enable_execlists) {
-		dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
 		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
 		dev_priv->gt.stop_engine = intel_engine_stop;
 	} else {
-		dev_priv->gt.execbuf_submit = intel_execlists_submission;
 		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
 		dev_priv->gt.stop_engine = intel_logical_ring_stop;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index edde8411c478..d7a7cc8b6fa4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -894,8 +894,9 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 
-	WARN_ON(i915.enable_execlists);
 	lockdep_assert_held(&req->i915->drm.struct_mutex);
+	if (i915.enable_execlists)
+		return 0;
 
 	if (!req->ctx->engine[engine->id].state) {
 		struct i915_gem_context *to = req->ctx;
@@ -943,9 +944,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 		if (IS_ERR(req))
 			return PTR_ERR(req);
 
-		ret = 0;
-		if (!i915.enable_execlists)
-			ret = i915_switch_context(req);
+		ret = i915_switch_context(req);
 		i915_add_request_no_flush(req);
 		if (ret)
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ca941ff7a94b..a4b98afc00ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -42,6 +42,18 @@
 
 #define BATCH_OFFSET_BIAS (256*1024)
 
+struct i915_execbuffer_params {
+	struct drm_device               *dev;
+	struct drm_file                 *file;
+	u32				 dispatch_flags;
+	u32				 args_batch_start_offset;
+	u32				 batch_obj_vm_offset;
+	struct intel_engine_cs          *engine;
+	struct drm_i915_gem_object      *batch_obj;
+	struct i915_gem_context         *ctx;
+	struct drm_i915_gem_request     *request;
+};
+
 struct eb_vmas {
 	struct list_head vmas;
 	int and;
@@ -1117,7 +1129,7 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	return ctx;
 }
 
-void
+static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
@@ -1244,10 +1256,10 @@ err:
 		return ERR_PTR(ret);
 }
 
-int
-i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
-			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas)
+static int
+execbuf_submit(struct i915_execbuffer_params *params,
+	       struct drm_i915_gem_execbuffer2 *args,
+	       struct list_head *vmas)
 {
 	struct drm_i915_private *dev_priv = params->request->i915;
 	u64 exec_start, exec_len;
@@ -1637,7 +1649,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
 
-	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+	ret = execbuf_submit(params, args, &eb->vmas);
 err_request:
 	i915_gem_execbuffer_retire_commands(params);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8160416f0018..236801d9c79c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -642,39 +642,6 @@ static void execlists_submit_request(struct drm_i915_gem_request *request)
 	spin_unlock_bh(&engine->execlist_lock);
 }
 
-static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
-				 struct list_head *vmas)
-{
-	const unsigned other_rings = ~intel_engine_flag(req->engine);
-	struct i915_vma *vma;
-	uint32_t flush_domains = 0;
-	bool flush_chipset = false;
-	int ret;
-
-	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_object *obj = vma->obj;
-
-		if (obj->active & other_rings) {
-			ret = i915_gem_object_sync(obj, req);
-			if (ret)
-				return ret;
-		}
-
-		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-			flush_chipset |= i915_gem_clflush_object(obj, false);
-
-		flush_domains |= obj->base.write_domain;
-	}
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
-
-	/* Unconditionally invalidate gpu caches and ensure that we do flush
-	 * any residual writes from the previous batch.
-	 */
-	return req->engine->emit_flush(req, EMIT_INVALIDATE);
-}
-
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
@@ -776,96 +743,6 @@ intel_logical_ring_advance(struct drm_i915_gem_request *request)
 	return 0;
 }
 
-/**
- * intel_execlists_submission() - submit a batchbuffer for execution, Execlists style
- * @params: execbuffer call parameters.
- * @args: execbuffer call arguments.
- * @vmas: list of vmas.
- *
- * This is the evil twin version of i915_gem_ringbuffer_submission. It abstracts
- * away the submission details of the execbuffer ioctl call.
- *
- * Return: non-zero if the submission fails.
- */
-int intel_execlists_submission(struct i915_execbuffer_params *params,
-			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas)
-{
-	struct drm_device       *dev = params->dev;
-	struct intel_engine_cs *engine = params->engine;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct intel_ring *ring = params->request->ring;
-	u64 exec_start;
-	int instp_mode;
-	u32 instp_mask;
-	int ret;
-
-	instp_mode = args->flags & I915_EXEC_CONSTANTS_MASK;
-	instp_mask = I915_EXEC_CONSTANTS_MASK;
-	switch (instp_mode) {
-	case I915_EXEC_CONSTANTS_REL_GENERAL:
-	case I915_EXEC_CONSTANTS_ABSOLUTE:
-	case I915_EXEC_CONSTANTS_REL_SURFACE:
-		if (instp_mode != 0 && engine->id != RCS) {
-			DRM_DEBUG("non-0 rel constants mode on non-RCS\n");
-			return -EINVAL;
-		}
-
-		if (instp_mode != dev_priv->relative_constants_mode) {
-			if (instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) {
-				DRM_DEBUG("rel surface constants mode invalid on gen5+\n");
-				return -EINVAL;
-			}
-
-			/* The HW changed the meaning on this bit on gen6 */
-			instp_mask &= ~I915_EXEC_CONSTANTS_REL_SURFACE;
-		}
-		break;
-	default:
-		DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode);
-		return -EINVAL;
-	}
-
-	if (args->flags & I915_EXEC_GEN7_SOL_RESET) {
-		DRM_DEBUG("sol reset is gen7 only\n");
-		return -EINVAL;
-	}
-
-	ret = execlists_move_to_gpu(params->request, vmas);
-	if (ret)
-		return ret;
-
-	if (engine->id == RCS &&
-	    instp_mode != dev_priv->relative_constants_mode) {
-		ret = intel_ring_begin(params->request, 4);
-		if (ret)
-			return ret;
-
-		intel_ring_emit(ring, MI_NOOP);
-		intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
-		intel_ring_emit_reg(ring, INSTPM);
-		intel_ring_emit(ring, instp_mask << 16 | instp_mode);
-		intel_ring_advance(ring);
-
-		dev_priv->relative_constants_mode = instp_mode;
-	}
-
-	exec_start = params->batch_obj_vm_offset +
-		     args->batch_start_offset;
-
-	ret = engine->emit_bb_start(params->request,
-				    exec_start, args->batch_len,
-				    params->dispatch_flags);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
-
-	i915_gem_execbuffer_move_to_active(vmas, params->request);
-
-	return 0;
-}
-
 void intel_execlists_cancel_requests(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *req, *tmp;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index bdd764af6d06..a52cf57dbd40 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -97,11 +97,6 @@ int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
 void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
 
-struct i915_execbuffer_params;
-int intel_execlists_submission(struct i915_execbuffer_params *params,
-			       struct drm_i915_gem_execbuffer2 *args,
-			       struct list_head *vmas);
-
 void intel_execlists_cancel_requests(struct intel_engine_cs *engine);
 
 #endif /* _INTEL_LRC_H_ */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cce6258a0c5d..13015361f8b9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2478,9 +2478,7 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
 		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
 
 	/* Now that the ring is fully powered up, update the tail */
-	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
-		      intel_ring_offset(request->ring, request->tail));
-	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
+	i9xx_submit_request(request);
 
 	/* Let the ring send IDLE messages to the GT again,
 	 * and so let it sleep to conserve power when idle.
@@ -2789,9 +2787,9 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 	engine->init_hw = init_ring_common;
 
 	engine->emit_request = i9xx_emit_request;
-	engine->submit_request = i9xx_submit_request;
 	if (i915.semaphores)
 		engine->emit_request = gen6_sema_emit_request;
+	engine->submit_request = i9xx_submit_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		engine->emit_bb_start = gen8_emit_bb_start;
-- 
2.8.1


* [PATCH 22/73] drm/i915: Simplify calling engine->sync_to
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (20 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 21/73] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 23/73] drm/i915: Rename engine->semaphore.sync_to, engine->semaphore.signal locals Chris Wilson
                   ` (51 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Since requests can no longer be generated as a side-effect of
intel_ring_begin(), we know that the seqno will be unchanged during
ring emission. This predictability then means we do not have to check
for the seqno wrapping around whilst emitting the semaphore for
engine->sync_to().
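
A worked example of the comparison convention that survives (taken from
the gen6 path in the diff below): GEM treats a request as executed once
the mailbox value is >= its seqno, but MI_SEMAPHORE_COMPARE completes
only when the mailbox is strictly greater than the operand, so to wait
for seqno N we emit N - 1:

	/* mailbox > N - 1   is equivalent to   mailbox >= N */
	intel_ring_emit(waiter, signal->fence.seqno - 1);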

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-31-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/i915_drv.h         |  2 +-
 drivers/gpu/drm/i915/i915_gem.c         | 13 ++-----
 drivers/gpu/drm/i915/i915_gem_request.c |  9 +----
 drivers/gpu/drm/i915/intel_ringbuffer.c | 64 ++++++++++++---------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  5 ++-
 5 files changed, 30 insertions(+), 63 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 49ce21a82c29..9b18b9c875ec 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1757,7 +1757,7 @@ struct drm_i915_private {
 	struct i915_gem_context *kernel_context;
 	struct intel_engine_cs engine[I915_NUM_ENGINES];
 	struct drm_i915_gem_object *semaphore_obj;
-	uint32_t last_seqno, next_seqno;
+	u32 next_seqno;
 
 	struct drm_dma_handle *status_page_dmah;
 	struct resource mch_res;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d79b949fb4c4..3df6b485d2d4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2867,22 +2867,15 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		i915_gem_object_retire_request(obj, from);
 	} else {
 		int idx = intel_engine_sync_index(from->engine, to->engine);
-		u32 seqno = i915_gem_request_get_seqno(from);
-
-		if (seqno <= from->engine->semaphore.sync_seqno[idx])
+		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
 			return 0;
 
 		trace_i915_gem_ring_sync_to(to, from);
-		ret = to->engine->semaphore.sync_to(to, from->engine, seqno);
+		ret = to->engine->semaphore.sync_to(to, from);
 		if (ret)
 			return ret;
 
-		/* We use last_read_req because sync_to()
-		 * might have just caused seqno wrap under
-		 * the radar.
-		 */
-		from->engine->semaphore.sync_seqno[idx] =
-			i915_gem_request_get_seqno(obj->last_read_req[from->engine->id]);
+		from->engine->semaphore.sync_seqno[idx] = from->fence.seqno;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e378eb61979b..11c19e7f82fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -264,14 +264,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
 	if (ret)
 		return ret;
 
-	/* Carefully set the last_seqno value so that wrap
-	 * detection still works
-	 */
 	dev_priv->next_seqno = seqno;
-	dev_priv->last_seqno = seqno - 1;
-	if (dev_priv->last_seqno == 0)
-		dev_priv->last_seqno--;
-
 	return 0;
 }
 
@@ -288,7 +281,7 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
 		dev_priv->next_seqno = 1;
 	}
 
-	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+	*seqno = dev_priv->next_seqno++;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 13015361f8b9..6c7531c4e1cf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1496,12 +1496,6 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
-					      u32 seqno)
-{
-	return dev_priv->last_seqno < seqno;
-}
-
 /**
  * intel_ring_sync - sync the waiter to the signaller on seqno
  *
@@ -1511,24 +1505,23 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_i915_private *dev_priv,
  */
 
 static int
-gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen8_ring_sync(struct drm_i915_gem_request *wait,
+	       struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = waiter_req->ring;
-	struct drm_i915_private *dev_priv = waiter_req->i915;
-	u64 offset = GEN8_WAIT_OFFSET(waiter_req->engine, signaller->id);
+	struct intel_ring *waiter = wait->ring;
+	struct drm_i915_private *dev_priv = wait->i915;
+	u64 offset = GEN8_WAIT_OFFSET(wait->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
-	ret = intel_ring_begin(waiter_req, 4);
+	ret = intel_ring_begin(wait, 4);
 	if (ret)
 		return ret;
 
 	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
 				MI_SEMAPHORE_GLOBAL_GTT |
 				MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter, signal->fence.seqno);
 	intel_ring_emit(waiter, lower_32_bits(offset));
 	intel_ring_emit(waiter, upper_32_bits(offset));
 	intel_ring_advance(waiter);
@@ -1538,48 +1531,37 @@ gen8_ring_sync(struct drm_i915_gem_request *waiter_req,
 	 * We do this on the i915_switch_context() following the wait and
 	 * before the dispatch.
 	 */
-	ppgtt = waiter_req->ctx->ppgtt;
-	if (ppgtt && waiter_req->engine->id != RCS)
-		ppgtt->pd_dirty_rings |= intel_engine_flag(waiter_req->engine);
+	ppgtt = wait->ctx->ppgtt;
+	if (ppgtt && wait->engine->id != RCS)
+		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
 	return 0;
 }
 
 static int
-gen6_ring_sync(struct drm_i915_gem_request *waiter_req,
-	       struct intel_engine_cs *signaller,
-	       u32 seqno)
+gen6_ring_sync(struct drm_i915_gem_request *wait,
+	       struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = waiter_req->ring;
+	struct intel_ring *waiter = wait->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter_req->engine->id];
+	u32 wait_mbox = signal->engine->semaphore.mbox.wait[wait->engine->id];
 	int ret;
 
-	/* Throughout all of the GEM code, seqno passed implies our current
-	 * seqno is >= the last seqno executed. However for hardware the
-	 * comparison is strictly greater than.
-	 */
-	seqno -= 1;
-
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(waiter_req, 4);
+	ret = intel_ring_begin(wait, 4);
 	if (ret)
 		return ret;
 
-	/* If seqno wrap happened, omit the wait with no-ops */
-	if (likely(!i915_gem_has_seqno_wrapped(waiter_req->i915, seqno))) {
-		intel_ring_emit(waiter, dw1 | wait_mbox);
-		intel_ring_emit(waiter, seqno);
-		intel_ring_emit(waiter, 0);
-		intel_ring_emit(waiter, MI_NOOP);
-	} else {
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-		intel_ring_emit(waiter, MI_NOOP);
-	}
+	intel_ring_emit(waiter, dw1 | wait_mbox);
+	/* Throughout all of the GEM code, seqno passed implies our current
+	 * seqno is >= the last seqno executed. However for hardware the
+	 * comparison is strictly greater than.
+	 */
+	intel_ring_emit(waiter, signal->fence.seqno - 1);
+	intel_ring_emit(waiter, 0);
+	intel_ring_emit(waiter, MI_NOOP);
 	intel_ring_advance(waiter);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6f2edae6ef0d..fe68b12061ef 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -277,9 +277,8 @@ struct intel_engine_cs {
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct drm_i915_gem_request *to_req,
-				   struct intel_engine_cs *from,
-				   u32 seqno);
+		int	(*sync_to)(struct drm_i915_gem_request *to,
+				   struct drm_i915_gem_request *from);
 		int	(*signal)(struct drm_i915_gem_request *signaller_req);
 	} semaphore;
 
-- 
2.8.1


* [PATCH 23/73] drm/i915: Rename engine->semaphore.sync_to, engine->semaphore.signal locals
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (21 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 22/73] drm/i915: Simplify calling engine->sync_to Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 24/73] drm/i915: Amalgamate GGTT/ppGTT vma debug list walkers Chris Wilson
                   ` (50 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

In order to be more consistent with the rest of the request construction
and ring emission, use the common names for the ring and request.

Rather than using signaller_req, waiter_req, and intel_ring *wait, we use
plain req and ring.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-32-git-send-email-chris@chris-wilson.co.uk
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 132 ++++++++++++++++----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |   6 +-
 2 files changed, 68 insertions(+), 70 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6c7531c4e1cf..645454ddafea 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1309,108 +1309,105 @@ static void render_ring_cleanup(struct intel_engine_cs *engine)
 	intel_fini_pipe_control(engine);
 }
 
-static int gen8_rcs_signal(struct drm_i915_gem_request *signaller_req)
+static int gen8_rcs_signal(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *signaller = signaller_req->ring;
-	struct drm_i915_private *dev_priv = signaller_req->i915;
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	ret = intel_ring_begin(signaller_req, (num_rings-1) * 8);
+	ret = intel_ring_begin(req, (num_rings-1) * 8);
 	if (ret)
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset =
-			signaller_req->engine->semaphore.signal_ggtt[id];
+		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(signaller, GFX_OP_PIPE_CONTROL(6));
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
+		intel_ring_emit(ring,
 				PIPE_CONTROL_GLOBAL_GTT_IVB |
 				PIPE_CONTROL_QW_WRITE |
 				PIPE_CONTROL_CS_STALL);
-		intel_ring_emit(signaller, lower_32_bits(gtt_offset));
-		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-		intel_ring_emit(signaller, signaller_req->fence.seqno);
-		intel_ring_emit(signaller, 0);
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring, lower_32_bits(gtt_offset));
+		intel_ring_emit(ring, upper_32_bits(gtt_offset));
+		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring, 0);
+		intel_ring_emit(ring,
 				MI_SEMAPHORE_SIGNAL |
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
-		intel_ring_emit(signaller, 0);
+		intel_ring_emit(ring, 0);
 	}
-	intel_ring_advance(signaller);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static int gen8_xcs_signal(struct drm_i915_gem_request *signaller_req)
+static int gen8_xcs_signal(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *signaller = signaller_req->ring;
-	struct drm_i915_private *dev_priv = signaller_req->i915;
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *waiter;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	ret = intel_ring_begin(signaller_req, (num_rings-1) * 6);
+	ret = intel_ring_begin(req, (num_rings-1) * 6);
 	if (ret)
 		return ret;
 
 	for_each_engine_id(waiter, dev_priv, id) {
-		u64 gtt_offset =
-			signaller_req->engine->semaphore.signal_ggtt[id];
+		u64 gtt_offset = req->engine->semaphore.signal_ggtt[id];
 		if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
 			continue;
 
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring,
 				(MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW);
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring,
 				lower_32_bits(gtt_offset) |
 				MI_FLUSH_DW_USE_GTT);
-		intel_ring_emit(signaller, upper_32_bits(gtt_offset));
-		intel_ring_emit(signaller, signaller_req->fence.seqno);
-		intel_ring_emit(signaller,
+		intel_ring_emit(ring, upper_32_bits(gtt_offset));
+		intel_ring_emit(ring, req->fence.seqno);
+		intel_ring_emit(ring,
 				MI_SEMAPHORE_SIGNAL |
 				MI_SEMAPHORE_TARGET(waiter->hw_id));
-		intel_ring_emit(signaller, 0);
+		intel_ring_emit(ring, 0);
 	}
-	intel_ring_advance(signaller);
+	intel_ring_advance(ring);
 
 	return 0;
 }
 
-static int gen6_signal(struct drm_i915_gem_request *signaller_req)
+static int gen6_signal(struct drm_i915_gem_request *req)
 {
-	struct intel_ring *signaller = signaller_req->ring;
-	struct drm_i915_private *dev_priv = signaller_req->i915;
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
 	struct intel_engine_cs *useless;
 	enum intel_engine_id id;
 	int ret, num_rings;
 
 	num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask);
-	ret = intel_ring_begin(signaller_req, round_up((num_rings-1) * 3, 2));
+	ret = intel_ring_begin(req, round_up((num_rings-1) * 3, 2));
 	if (ret)
 		return ret;
 
 	for_each_engine_id(useless, dev_priv, id) {
-		i915_reg_t mbox_reg =
-			signaller_req->engine->semaphore.mbox.signal[id];
+		i915_reg_t mbox_reg = req->engine->semaphore.mbox.signal[id];
 
 		if (i915_mmio_reg_valid(mbox_reg)) {
-			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1));
-			intel_ring_emit_reg(signaller, mbox_reg);
-			intel_ring_emit(signaller, signaller_req->fence.seqno);
+			intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+			intel_ring_emit_reg(ring, mbox_reg);
+			intel_ring_emit(ring, req->fence.seqno);
 		}
 	}
 
 	/* If num_dwords was rounded, make sure the tail pointer is correct */
 	if (num_rings % 2 == 0)
-		intel_ring_emit(signaller, MI_NOOP);
-	intel_ring_advance(signaller);
+		intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -1505,64 +1502,65 @@ static int gen8_render_emit_request(struct drm_i915_gem_request *req)
  */
 
 static int
-gen8_ring_sync(struct drm_i915_gem_request *wait,
-	       struct drm_i915_gem_request *signal)
+gen8_ring_sync_to(struct drm_i915_gem_request *req,
+		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = wait->ring;
-	struct drm_i915_private *dev_priv = wait->i915;
-	u64 offset = GEN8_WAIT_OFFSET(wait->engine, signal->engine->id);
+	struct intel_ring *ring = req->ring;
+	struct drm_i915_private *dev_priv = req->i915;
+	u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
 
-	ret = intel_ring_begin(wait, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter, MI_SEMAPHORE_WAIT |
-				MI_SEMAPHORE_GLOBAL_GTT |
-				MI_SEMAPHORE_SAD_GTE_SDD);
-	intel_ring_emit(waiter, signal->fence.seqno);
-	intel_ring_emit(waiter, lower_32_bits(offset));
-	intel_ring_emit(waiter, upper_32_bits(offset));
-	intel_ring_advance(waiter);
+	intel_ring_emit(ring,
+			MI_SEMAPHORE_WAIT |
+			MI_SEMAPHORE_GLOBAL_GTT |
+			MI_SEMAPHORE_SAD_GTE_SDD);
+	intel_ring_emit(ring, signal->fence.seqno);
+	intel_ring_emit(ring, lower_32_bits(offset));
+	intel_ring_emit(ring, upper_32_bits(offset));
+	intel_ring_advance(ring);
 
 	/* When the !RCS engines idle waiting upon a semaphore, they lose their
 	 * pagetables and we must reload them before executing the batch.
 	 * We do this on the i915_switch_context() following the wait and
 	 * before the dispatch.
 	 */
-	ppgtt = wait->ctx->ppgtt;
-	if (ppgtt && wait->engine->id != RCS)
-		ppgtt->pd_dirty_rings |= intel_engine_flag(wait->engine);
+	ppgtt = req->ctx->ppgtt;
+	if (ppgtt && req->engine->id != RCS)
+		ppgtt->pd_dirty_rings |= intel_engine_flag(req->engine);
 	return 0;
 }
 
 static int
-gen6_ring_sync(struct drm_i915_gem_request *wait,
-	       struct drm_i915_gem_request *signal)
+gen6_ring_sync_to(struct drm_i915_gem_request *req,
+		  struct drm_i915_gem_request *signal)
 {
-	struct intel_ring *waiter = wait->ring;
+	struct intel_ring *ring = req->ring;
 	u32 dw1 = MI_SEMAPHORE_MBOX |
 		  MI_SEMAPHORE_COMPARE |
 		  MI_SEMAPHORE_REGISTER;
-	u32 wait_mbox = signal->engine->semaphore.mbox.wait[wait->engine->id];
+	u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->id];
 	int ret;
 
 	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
 
-	ret = intel_ring_begin(wait, 4);
+	ret = intel_ring_begin(req, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(waiter, dw1 | wait_mbox);
+	intel_ring_emit(ring, dw1 | wait_mbox);
 	/* Throughout all of the GEM code, seqno passed implies our current
 	 * seqno is >= the last seqno executed. However for hardware the
 	 * comparison is strictly greater than.
 	 */
-	intel_ring_emit(waiter, signal->fence.seqno - 1);
-	intel_ring_emit(waiter, 0);
-	intel_ring_emit(waiter, MI_NOOP);
-	intel_ring_advance(waiter);
+	intel_ring_emit(ring, signal->fence.seqno - 1);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	return 0;
 }
@@ -2667,7 +2665,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 	if (INTEL_GEN(dev_priv) >= 8) {
 		u64 offset = i915_gem_obj_ggtt_offset(dev_priv->semaphore_obj);
 
-		engine->semaphore.sync_to = gen8_ring_sync;
+		engine->semaphore.sync_to = gen8_ring_sync_to;
 		engine->semaphore.signal = gen8_xcs_signal;
 
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -2681,7 +2679,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 			engine->semaphore.signal_ggtt[i] = ring_offset;
 		}
 	} else if (INTEL_GEN(dev_priv) >= 6) {
-		engine->semaphore.sync_to = gen6_ring_sync;
+		engine->semaphore.sync_to = gen6_ring_sync_to;
 		engine->semaphore.signal = gen6_signal;
 
 		/*
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index fe68b12061ef..39d2e43a868c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -277,9 +277,9 @@ struct intel_engine_cs {
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct drm_i915_gem_request *to,
-				   struct drm_i915_gem_request *from);
-		int	(*signal)(struct drm_i915_gem_request *signaller_req);
+		int	(*sync_to)(struct drm_i915_gem_request *req,
+				   struct drm_i915_gem_request *signal);
+		int	(*signal)(struct drm_i915_gem_request *req);
 	} semaphore;
 
 	/* Execlists */
-- 
2.8.1


* [PATCH 24/73] drm/i915: Amalgamate GGTT/ppGTT vma debug list walkers
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (22 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 23/73] drm/i915: Rename engine->semaphore.sync_to, engine->semaphore.signal locals Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 25/73] drm/i915: Split early global GTT initialisation Chris Wilson
                   ` (49 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

As we can now have multiple VMA inside the global GTT (with partial
mappings, rotations, etc), it is no longer true that there may just be a
single GGTT entry and so we should walk the full vma_list to count up
the actual usage. In addition to unifying the two walkers, switch from
multiplying the object size for each vma to summing the bound vma sizes.
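
A worked example (hypothetical numbers): a 4 MiB object with two 1 MiB
partial GGTT mappings should account 2 MiB of global usage, not
2 x 4 MiB. Hence the unified walker sums the bound node sizes;
abbreviated from the diff below:

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;	/* vma occupies no GTT space yet */

		if (vma->is_ggtt)
			stats->global += vma->node.size; /* not obj->base.size */
	}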

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c | 42 +++++++++++++------------------------
 1 file changed, 15 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index dccc72d63dd0..044443704db5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -335,6 +335,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 	struct drm_i915_gem_object *obj = ptr;
 	struct file_stats *stats = data;
 	struct i915_vma *vma;
+	int bound = 0;
 
 	stats->count++;
 	stats->total += obj->base.size;
@@ -342,41 +343,28 @@ static int per_file_stats(int id, void *ptr, void *data)
 	if (obj->base.name || obj->base.dma_buf)
 		stats->shared += obj->base.size;
 
-	if (USES_FULL_PPGTT(obj->base.dev)) {
-		list_for_each_entry(vma, &obj->vma_list, obj_link) {
-			struct i915_hw_ppgtt *ppgtt;
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
 
-			if (!drm_mm_node_allocated(&vma->node))
-				continue;
+		bound++;
 
-			if (vma->is_ggtt) {
-				stats->global += obj->base.size;
-				continue;
-			}
+		if (vma->is_ggtt) {
+			stats->global += vma->node.size;
+		} else {
+			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
 
-			ppgtt = container_of(vma->vm, struct i915_hw_ppgtt, base);
 			if (ppgtt->file_priv != stats->file_priv)
 				continue;
-
-			if (obj->active) /* XXX per-vma statistic */
-				stats->active += obj->base.size;
-			else
-				stats->inactive += obj->base.size;
-
-			return 0;
-		}
-	} else {
-		if (i915_gem_obj_ggtt_bound(obj)) {
-			stats->global += obj->base.size;
-			if (obj->active)
-				stats->active += obj->base.size;
-			else
-				stats->inactive += obj->base.size;
-			return 0;
 		}
+
+		if (obj->active) /* XXX per-vma statistic */
+			stats->active += vma->node.size;
+		else
+			stats->inactive += vma->node.size;
 	}
 
-	if (!list_empty(&obj->global_list))
+	if (!bound)
 		stats->unbound += obj->base.size;
 
 	return 0;
-- 
2.8.1


* [PATCH 25/73] drm/i915: Split early global GTT initialisation
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (23 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 24/73] drm/i915: Amalgamate GGTT/ppGTT vma debug list walkers Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 26/73] drm/i915: Store owning file on the i915_address_space Chris Wilson
                   ` (48 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Initialising the global GTT is tricky as we wish to use the drm_mm range
manager during the modesetting initialisation (to capture stolen
allocations from the BIOS) before we actually enable GEM. To overcome
this, we currently set up the drm_mm first and then carefully rebind
the preallocated (stolen) objects.

v2: Fixup after rebasing
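
The ordering that results, sketched from the diff below (call sequence
only, error handling elided):

	i915_ggtt_init_hw():
		probe the GGTT and size ggtt->base
		i915_address_space_init()	/* drm_mm usable early */
		io_mapping_create_wc() + arch_phys_wc_add()
		i915_gem_init_stolen()		/* BIOS stolen reservations */

	/* ... modesetting init may now reserve preallocated objects ... */

	i915_gem_init():
		i915_gem_init_ggtt()		/* balloon for vGT, clear unused
						 * holes and the guard page, set
						 * up the aliasing ppGTT */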

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c        | 19 -------
 drivers/gpu/drm/i915/i915_gem.c        |  6 ++-
 drivers/gpu/drm/i915/i915_gem_gtt.c    | 95 +++++++++++++---------------------
 drivers/gpu/drm/i915/i915_gem_gtt.h    |  2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c | 17 +++---
 5 files changed, 48 insertions(+), 91 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 83afdd0597b5..478e8168ad94 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -993,8 +993,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
 static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	uint32_t aperture_size;
 	int ret;
 
 	if (i915_inject_load_failure())
@@ -1040,7 +1038,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 		}
 	}
 
-
 	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
 	 * using 32bit addressing, overwriting memory if HWS is located
 	 * above 4GB.
@@ -1059,19 +1056,6 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
 		}
 	}
 
-	aperture_size = ggtt->mappable_end;
-
-	ggtt->mappable =
-		io_mapping_create_wc(ggtt->mappable_base,
-				     aperture_size);
-	if (!ggtt->mappable) {
-		ret = -EIO;
-		goto out_ggtt;
-	}
-
-	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base,
-					      aperture_size);
-
 	pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
 			   PM_QOS_DEFAULT_VALUE);
 
@@ -1112,14 +1096,11 @@ out_ggtt:
 static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);
 
 	pm_qos_remove_request(&dev_priv->pm_qos);
-	arch_phys_wc_del(ggtt->mtrr);
-	io_mapping_free(ggtt->mappable);
 	i915_ggtt_cleanup_hw(dev);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3df6b485d2d4..a0ea1ee16f67 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4543,7 +4543,10 @@ int i915_gem_init(struct drm_device *dev)
 	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
 	i915_gem_init_userptr(dev_priv);
-	i915_gem_init_ggtt(dev);
+
+	ret = i915_gem_init_ggtt(dev);
+	if (ret)
+		goto out_unlock;
 
 	ret = i915_gem_context_init(dev);
 	if (ret)
@@ -4634,7 +4637,6 @@ i915_gem_load_init(struct drm_device *dev)
 				  SLAB_HWCACHE_ALIGN,
 				  NULL);
 
-	INIT_LIST_HEAD(&dev_priv->vm_list);
 	INIT_LIST_HEAD(&dev_priv->context_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 671b1cab5e54..b76b0213da0c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2746,10 +2746,7 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
 		*end -= 4096;
 }
 
-static int i915_gem_setup_global_gtt(struct drm_device *dev,
-				     u64 start,
-				     u64 mappable_end,
-				     u64 end)
+int i915_gem_init_ggtt(struct drm_device *dev)
 {
 	/* Let GEM Manage all of the aperture.
 	 *
@@ -2762,46 +2759,14 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 	 */
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	struct drm_mm_node *entry;
-	struct drm_i915_gem_object *obj;
 	unsigned long hole_start, hole_end;
+	struct drm_mm_node *entry;
 	int ret;
 
-	BUG_ON(mappable_end > end);
-
-	ggtt->base.start = start;
-
-	/* Subtract the guard page before address space initialization to
-	 * shrink the range used by drm_mm */
-	ggtt->base.total = end - start - PAGE_SIZE;
-	i915_address_space_init(&ggtt->base, dev_priv);
-	ggtt->base.total += PAGE_SIZE;
-
 	ret = intel_vgt_balloon(dev_priv);
 	if (ret)
 		return ret;
 
-	if (!HAS_LLC(dev))
-		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
-
-	/* Mark any preallocated objects as occupied */
-	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);
-
-		DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
-			      i915_gem_obj_ggtt_offset(obj), obj->base.size);
-
-		WARN_ON(i915_gem_obj_ggtt_bound(obj));
-		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
-		if (ret) {
-			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
-			return ret;
-		}
-		vma->bound |= GLOBAL_BIND;
-		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
-	}
-
 	/* Clear any non-preallocated blocks */
 	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
 		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
@@ -2811,9 +2776,11 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 	}
 
 	/* And finally clear the reserved guard page */
-	ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);
+	ggtt->base.clear_range(&ggtt->base,
+			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
+			       true);
 
-	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
+	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
 		struct i915_hw_ppgtt *ppgtt;
 
 		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
@@ -2850,18 +2817,6 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
 }
 
 /**
- * i915_gem_init_ggtt - Initialize GEM for Global GTT
- * @dev: DRM device
- */
-void i915_gem_init_ggtt(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-	i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
-}
-
-/**
  * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
  * @dev: DRM device
  */
@@ -2886,6 +2841,9 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
 	}
 
 	ggtt->base.cleanup(&ggtt->base);
+
+	arch_phys_wc_del(ggtt->mtrr);
+	io_mapping_free(ggtt->mappable);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3217,10 +3175,12 @@ int i915_ggtt_init_hw(struct drm_device *dev)
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	int ret;
 
-	if (INTEL_INFO(dev)->gen <= 5) {
+	INIT_LIST_HEAD(&dev_priv->vm_list);
+
+	if (INTEL_GEN(dev) <= 5) {
 		ggtt->probe = i915_gmch_probe;
 		ggtt->base.cleanup = i915_gmch_remove;
-	} else if (INTEL_INFO(dev)->gen < 8) {
+	} else if (INTEL_GEN(dev) < 8) {
 		ggtt->probe = gen6_gmch_probe;
 		ggtt->base.cleanup = gen6_gmch_remove;
 
@@ -3254,13 +3214,21 @@ int i915_ggtt_init_hw(struct drm_device *dev)
 		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
 	}
 
-	/*
-	 * Initialise stolen early so that we may reserve preallocated
-	 * objects for the BIOS to KMS transition.
+	/* Subtract the guard page before address space initialization to
+	 * shrink the range used by drm_mm.
 	 */
-	ret = i915_gem_init_stolen(dev);
-	if (ret)
-		goto out_gtt_cleanup;
+	ggtt->base.total -= PAGE_SIZE;
+	i915_address_space_init(&ggtt->base, dev_priv);
+	ggtt->base.total += PAGE_SIZE;
+	if (!HAS_LLC(dev_priv))
+		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
+
+	ggtt->mappable =
+		io_mapping_create_wc(ggtt->mappable_base, ggtt->mappable_end);
+	if (!ggtt->mappable)
+		return -EIO;
+
+	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
 
 	/* GMADR is the PCI mmio aperture into the global GTT. */
 	DRM_INFO("Memory usable by graphics device = %lluM\n",
@@ -3272,11 +3240,18 @@ int i915_ggtt_init_hw(struct drm_device *dev)
 		DRM_INFO("VT-d active for gfx access\n");
 #endif
 
+	/*
+	 * Initialise stolen early so that we may reserve preallocated
+	 * objects for the BIOS to KMS transition.
+	 */
+	ret = i915_gem_init_stolen(dev);
+	if (ret)
+		goto out_gtt_cleanup;
+
 	return 0;
 
 out_gtt_cleanup:
 	ggtt->base.cleanup(&ggtt->base);
-
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c4a657973cc6..c347e2a22c65 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -523,7 +523,7 @@ i915_page_dir_dma_addr(const struct i915_hw_ppgtt *ppgtt, const unsigned n)
 
 int i915_ggtt_init_hw(struct drm_device *dev);
 int i915_ggtt_enable_hw(struct drm_device *dev);
-void i915_gem_init_ggtt(struct drm_device *dev);
+int i915_gem_init_ggtt(struct drm_device *dev);
 void i915_ggtt_cleanup_hw(struct drm_device *dev);
 
 int i915_ppgtt_init_hw(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 310756c30723..9a8cc8c51077 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -698,18 +698,17 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	 */
 	vma->node.start = gtt_offset;
 	vma->node.size = size;
-	if (drm_mm_initialized(&ggtt->base.mm)) {
-		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
-		if (ret) {
-			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
-			goto err;
-		}
 
-		vma->bound |= GLOBAL_BIND;
-		__i915_vma_set_map_and_fenceable(vma);
-		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
+	if (ret) {
+		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
+		goto err;
 	}
 
+	vma->bound |= GLOBAL_BIND;
+	__i915_vma_set_map_and_fenceable(vma);
+	list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
+
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	i915_gem_object_pin_pages(obj);
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 26/73] drm/i915: Store owning file on the i915_address_space
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (24 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 25/73] drm/i915: Split early global GTT initialisation Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 27/73] drm/i915: Count how many VMA are bound for an object Chris Wilson
                   ` (47 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

For the global GTT (and aliasing GTT), the address space is owned by the
device (it is a global resource) and so the per-file owner field is
NULL. For per-process GTT (where we create an address space per
context), each is owned by the opening file. We can use this ownership
information both to distinguish GGTT from ppGTT address spaces and to
occasionally inspect the owner.

v2: Whitespace, tells us who owns i915_address_space
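
For illustration only (the helper below is hypothetical and not part of
this patch), the ownership rule reduces to a NULL test on the new field,
which is exactly what the reworked i915_is_ggtt() macro encodes:

	/* Sketch, not from the patch: with the owner stored on the
	 * address space, "is this the global GTT?" becomes a NULL
	 * check on the file backpointer.
	 */
	static inline bool sketch_vm_is_ggtt(const struct i915_address_space *vm)
	{
		return vm->file == NULL; /* GGTT/aliasing GTT: device-owned */
	}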

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c     |  2 +-
 drivers/gpu/drm/i915/i915_drv.h         |  1 -
 drivers/gpu/drm/i915/i915_gem_context.c |  3 ++-
 drivers/gpu/drm/i915/i915_gem_gtt.c     | 30 +++++++++++++++---------------
 drivers/gpu/drm/i915/i915_gem_gtt.h     | 17 +++++++++++------
 5 files changed, 29 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 044443704db5..5b1191030723 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -354,7 +354,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		} else {
 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
 
-			if (ppgtt->file_priv != stats->file_priv)
+			if (ppgtt->base.file != stats->file_priv)
 				continue;
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9b18b9c875ec..7452a4015956 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3300,7 +3300,6 @@ i915_vm_to_ppgtt(struct i915_address_space *vm)
 	return container_of(vm, struct i915_hw_ppgtt, base);
 }
 
-
 static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 {
 	return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d7a7cc8b6fa4..24383f0ea3a4 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -327,7 +327,8 @@ i915_gem_create_context(struct drm_device *dev,
 		return ctx;
 
 	if (USES_FULL_PPGTT(dev)) {
-		struct i915_hw_ppgtt *ppgtt = i915_ppgtt_create(dev, file_priv);
+		struct i915_hw_ppgtt *ppgtt =
+			i915_ppgtt_create(to_i915(dev), file_priv);
 
 		if (IS_ERR(ppgtt)) {
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b76b0213da0c..edae5c9862a7 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2104,11 +2104,12 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
 	return 0;
 }
 
-static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
+			   struct drm_i915_private *dev_priv)
 {
-	ppgtt->base.dev = dev;
+	ppgtt->base.dev = &dev_priv->drm;
 
-	if (INTEL_INFO(dev)->gen < 8)
+	if (INTEL_INFO(dev_priv)->gen < 8)
 		return gen6_ppgtt_init(ppgtt);
 	else
 		return gen8_ppgtt_init(ppgtt);
@@ -2143,15 +2144,17 @@ static void gtt_write_workarounds(struct drm_device *dev)
 		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
 }
 
-static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
+static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
+			   struct drm_i915_private *dev_priv,
+			   struct drm_i915_file_private *file_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	int ret = 0;
+	int ret;
 
-	ret = __hw_ppgtt_init(dev, ppgtt);
+	ret = __hw_ppgtt_init(ppgtt, dev_priv);
 	if (ret == 0) {
 		kref_init(&ppgtt->ref);
 		i915_address_space_init(&ppgtt->base, dev_priv);
+		ppgtt->base.file = file_priv;
 	}
 
 	return ret;
@@ -2183,7 +2186,8 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 }
 
 struct i915_hw_ppgtt *
-i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
+i915_ppgtt_create(struct drm_i915_private *dev_priv,
+		  struct drm_i915_file_private *fpriv)
 {
 	struct i915_hw_ppgtt *ppgtt;
 	int ret;
@@ -2192,14 +2196,12 @@ i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
 	if (!ppgtt)
 		return ERR_PTR(-ENOMEM);
 
-	ret = i915_ppgtt_init(dev, ppgtt);
+	ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
 	if (ret) {
 		kfree(ppgtt);
 		return ERR_PTR(ret);
 	}
 
-	ppgtt->file_priv = fpriv;
-
 	trace_i915_ppgtt_create(&ppgtt->base);
 
 	return ppgtt;
@@ -2787,9 +2789,8 @@ int i915_gem_init_ggtt(struct drm_device *dev)
 		if (!ppgtt)
 			return -ENOMEM;
 
-		ret = __hw_ppgtt_init(dev, ppgtt);
+		ret = __hw_ppgtt_init(ppgtt, dev_priv);
 		if (ret) {
-			ppgtt->base.cleanup(&ppgtt->base);
 			kfree(ppgtt);
 			return ret;
 		}
@@ -3200,7 +3201,6 @@ int i915_ggtt_init_hw(struct drm_device *dev)
 	}
 
 	ggtt->base.dev = dev;
-	ggtt->base.is_ggtt = true;
 
 	ret = ggtt->probe(ggtt);
 	if (ret)
@@ -3307,7 +3307,7 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
 			struct i915_hw_ppgtt *ppgtt;
 
-			if (vm->is_ggtt)
+			if (i915_is_ggtt(vm))
 				ppgtt = dev_priv->mm.aliasing_ppgtt;
 			else
 				ppgtt = i915_vm_to_ppgtt(vm);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c347e2a22c65..529fb483afc8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -272,12 +272,19 @@ struct i915_pml4 {
 struct i915_address_space {
 	struct drm_mm mm;
 	struct drm_device *dev;
+	/* Every address space belongs to a struct file - except for the global
+	 * GTT that is owned by the driver (and so @file is set to NULL). In
+	 * principle, no information should leak from one context to another
+	 * (or between files/processes etc) unless explicitly shared by the
+	 * owner. Tracking the owner is important in order to free up per-file
+ * objects along with the file, to aid resource tracking, and to
+	 * assign blame.
+	 */
+	struct drm_i915_file_private *file;
 	struct list_head global_link;
 	u64 start;		/* Start offset always 0 for dri2 */
 	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 
-	bool is_ggtt;
-
 	struct i915_page_scratch *scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -338,7 +345,7 @@ struct i915_address_space {
 			u32 flags);
 };
 
-#define i915_is_ggtt(V) ((V)->is_ggtt)
+#define i915_is_ggtt(V) (!(V)->file)
 
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
@@ -380,8 +387,6 @@ struct i915_hw_ppgtt {
 		struct i915_page_directory pd;		/* GEN6-7 */
 	};
 
-	struct drm_i915_file_private *file_priv;
-
 	gen6_pte_t __iomem *pd_addr;
 
 	int (*enable)(struct i915_hw_ppgtt *ppgtt);
@@ -528,7 +533,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev);
 
 int i915_ppgtt_init_hw(struct drm_device *dev);
 void i915_ppgtt_release(struct kref *kref);
-struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
+struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv,
 					struct drm_i915_file_private *fpriv);
 static inline void i915_ppgtt_get(struct i915_hw_ppgtt *ppgtt)
 {
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 27/73] drm/i915: Count how many VMA are bound for an object
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (25 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 26/73] drm/i915: Store owning file on the i915_address_space Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 28/73] drm/i915: Be more careful when unbinding vma Chris Wilson
                   ` (46 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Since we may have VMA allocated for an object whose binding we
interrupted, there is a disparity between having elements on the
obj->vma_list and actually being bound. i915_gem_obj_bound_any()
performs this check, but it is not rigorously observed - add an explicit
count to make the check easier.
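
As a rough sketch (the helper name is made up; the patch open-codes the
comparison at each callsite), the counter turns the old list walk into a
constant-time check:

	/* Sketch only: obj->bind_count replaces walking obj->vma_list
	 * and testing drm_mm_node_allocated() on every element.
	 */
	static inline bool sketch_obj_is_bound(const struct drm_i915_gem_object *obj)
	{
		return obj->bind_count != 0;
	}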

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c      | 12 +++++-------
 drivers/gpu/drm/i915/i915_drv.h          |  3 ++-
 drivers/gpu/drm/i915/i915_gem.c          | 30 ++++++++++--------------------
 drivers/gpu/drm/i915/i915_gem_shrinker.c | 17 ++++++++---------
 drivers/gpu/drm/i915/i915_gem_stolen.c   |  1 +
 5 files changed, 26 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 5b1191030723..62a9685f7ad1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -174,6 +174,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
+		if (!drm_mm_node_allocated(&vma->node))
+			continue;
+
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
 			   vma->is_ggtt ? "g" : "pp",
 			   vma->node.start, vma->node.size);
@@ -335,11 +338,11 @@ static int per_file_stats(int id, void *ptr, void *data)
 	struct drm_i915_gem_object *obj = ptr;
 	struct file_stats *stats = data;
 	struct i915_vma *vma;
-	int bound = 0;
 
 	stats->count++;
 	stats->total += obj->base.size;
-
+	if (!obj->bind_count)
+		stats->unbound += obj->base.size;
 	if (obj->base.name || obj->base.dma_buf)
 		stats->shared += obj->base.size;
 
@@ -347,8 +350,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		bound++;
-
 		if (vma->is_ggtt) {
 			stats->global += vma->node.size;
 		} else {
@@ -364,9 +365,6 @@ static int per_file_stats(int id, void *ptr, void *data)
 			stats->inactive += vma->node.size;
 	}
 
-	if (!bound)
-		stats->unbound += obj->base.size;
-
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7452a4015956..7a9a2d820aca 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2221,6 +2221,8 @@ struct drm_i915_gem_object {
 	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
 
 	unsigned int has_wc_mmap;
+	/** Count of VMA actually bound by this object */
+	unsigned int bind_count;
 	unsigned int pin_display;
 
 	struct sg_table *pages;
@@ -3266,7 +3268,6 @@ i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *o)
 	return i915_gem_obj_ggtt_offset_view(o, &i915_ggtt_view_normal);
 }
 
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 				  const struct i915_ggtt_view *view);
 bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a0ea1ee16f67..18ebf95899d9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2107,7 +2107,7 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	if (obj->pages_pin_count)
 		return -EBUSY;
 
-	BUG_ON(i915_gem_obj_bound_any(obj));
+	GEM_BUG_ON(obj->bind_count);
 
 	/* ->put_pages might need to allocate memory for the bit17 swizzle
 	 * array, hence protect them from being reaped by removing them from gtt
@@ -2965,7 +2965,6 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	int ret;
 
 	if (list_empty(&vma->obj_link))
@@ -2979,7 +2978,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	if (vma->pin_count)
 		return -EBUSY;
 
-	BUG_ON(obj->pages == NULL);
+	GEM_BUG_ON(obj->bind_count == 0);
+	GEM_BUG_ON(!obj->pages);
 
 	if (wait) {
 		ret = i915_gem_object_wait_rendering(obj, false);
@@ -3019,8 +3019,9 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
-	if (list_empty(&obj->vma_list))
-		list_move_tail(&obj->global_list, &dev_priv->mm.unbound_list);
+	if (--obj->bind_count == 0)
+		list_move_tail(&obj->global_list,
+			       &to_i915(obj->base.dev)->mm.unbound_list);
 
 	/* And finally now the object is completely decoupled from this vma,
 	 * we can drop its hold on the backing storage and allow it to be
@@ -3255,6 +3256,7 @@ search_free:
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_add_tail(&vma->vm_link, &vm->inactive_list);
+	obj->bind_count++;
 
 	return vma;
 
@@ -3450,7 +3452,6 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct i915_vma *vma, *next;
-	bool bound = false;
 	int ret = 0;
 
 	if (obj->cache_level == cache_level)
@@ -3474,8 +3475,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			ret = i915_vma_unbind(vma);
 			if (ret)
 				return ret;
-		} else
-			bound = true;
+		}
 	}
 
 	/* We can reuse the existing drm_mm nodes but need to change the
@@ -3485,7 +3485,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * rewrite the PTE in the belief that doing so tramples upon less
 	 * state and so involves less work.
 	 */
-	if (bound) {
+	if (obj->bind_count) {
 		/* Before we change the PTE, the GPU must not be accessing it.
 		 * If we wait upon the object, we know that all the bound
 		 * VMA are no longer active.
@@ -4223,6 +4223,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 			dev_priv->mm.interruptible = was_interruptible;
 		}
 	}
+	GEM_BUG_ON(obj->bind_count);
 
 	/* Stolen objects don't hold a ref, but do hold pin count. Fix that up
 	 * before progressing. */
@@ -4840,17 +4841,6 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 	return false;
 }
 
-bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
-{
-	struct i915_vma *vma;
-
-	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (drm_mm_node_allocated(&vma->node))
-			return true;
-
-	return false;
-}
-
 unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 {
 	struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 5d4772c146b1..b95cd9f404f6 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,19 +48,15 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	int count = 0;
 
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (drm_mm_node_allocated(&vma->node))
-			count++;
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
 		if (vma->pin_count)
-			count++;
-	}
+			return true;
 
-	return count;
+	return false;
 }
 
 static bool swap_available(void)
@@ -82,7 +78,10 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	 * to the GPU, simply unbinding from the GPU is not going to succeed
 	 * in releasing our pin count on the pages themselves.
 	 */
-	if (obj->pages_pin_count != num_vma_bound(obj))
+	if (obj->pages_pin_count > obj->bind_count)
+		return false;
+
+	if (any_vma_pinned(obj))
 		return false;
 
 	/* We can only return physical pages to the system if we can either
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 9a8cc8c51077..2c321c8df7c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -708,6 +708,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	vma->bound |= GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	obj->bind_count++;
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	i915_gem_object_pin_pages(obj);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 28/73] drm/i915: Be more careful when unbinding vma
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (26 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 27/73] drm/i915: Count how many VMA are bound for an object Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 29/73] drm/i915: Kill drop_pages() Chris Wilson
                   ` (45 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

When we call i915_vma_unbind(), we will wait upon outstanding rendering.
This will also trigger a retirement phase, which may update the object
lists. If we extend request tracking to the VMA itself (rather than
keeping it at the encompassing object), then there is a potential for the
obj->vma_list to be modified for other elements upon i915_vma_unbind(). As
a result, if we walk over the object list and call i915_vma_unbind(), we
need to be prepared for that list to change.
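
To make the hazard concrete, here is a sketch of the pre-patch pattern
and why it is insufficient (illustration only):

	/* Unsafe sketch: list_for_each_entry_safe() only caches the
	 * next element, which guards against deleting the current node,
	 * not against retirement inside i915_vma_unbind() rearranging
	 * other entries on obj->vma_list.
	 */
	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
		if (i915_vma_unbind(vma))
			break;

The patch instead consumes the list head-first, parking each element on
a private still_in_list and splicing it back when done.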

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h          |  2 ++
 drivers/gpu/drm/i915/i915_gem.c          | 57 +++++++++++++++++++++++---------
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  8 +----
 drivers/gpu/drm/i915/i915_gem_userptr.c  |  4 +--
 4 files changed, 46 insertions(+), 25 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7a9a2d820aca..a6bab90bf832 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3052,6 +3052,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
  * _guarantee_ VMA in question is _not in use_ anywhere.
  */
 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 18ebf95899d9..4da5cb7e8bab 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -283,18 +283,38 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
 	.release = i915_gem_object_release_phys,
 };
 
+int
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	LIST_HEAD(still_in_list);
+	int ret;
+
+	/* The vma will only be freed if it is marked as closed, and if we wait
+	 * upon rendering to the vma, we may unbind anything in the list.
+	 */
+	while ((vma = list_first_entry_or_null(&obj->vma_list,
+					       struct i915_vma,
+					       obj_link))) {
+		list_move_tail(&vma->obj_link, &still_in_list);
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			break;
+	}
+	list_splice(&still_in_list, &obj->vma_list);
+
+	return ret;
+}
+
 static int
 drop_pages(struct drm_i915_gem_object *obj)
 {
-	struct i915_vma *vma, *next;
 	int ret;
 
 	i915_gem_object_get(obj);
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
-		if (i915_vma_unbind(vma))
-			break;
-
-	ret = i915_gem_object_put_pages(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret == 0)
+		ret = i915_gem_object_put_pages(obj);
 	i915_gem_object_put(obj);
 
 	return ret;
@@ -3450,8 +3470,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct i915_vma *vma, *next;
+	struct i915_vma *vma;
 	int ret = 0;
 
 	if (obj->cache_level == cache_level)
@@ -3462,7 +3481,8 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	 * catch the issue of the CS prefetch crossing page boundaries and
 	 * reading an invalid PTE on older architectures.
 	 */
-	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
+restart:
+	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
@@ -3471,11 +3491,18 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 			return -EBUSY;
 		}
 
-		if (!i915_gem_valid_gtt_space(vma, cache_level)) {
-			ret = i915_vma_unbind(vma);
-			if (ret)
-				return ret;
-		}
+		if (i915_gem_valid_gtt_space(vma, cache_level))
+			continue;
+
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ret;
+
+		/* As unbinding may affect other elements in the
+		 * obj->vma_list (due to side-effects from retiring
+		 * an active vma), play safe and restart the iterator.
+		 */
+		goto restart;
 	}
 
 	/* We can reuse the existing drm_mm nodes but need to change the
@@ -3494,7 +3521,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 		if (ret)
 			return ret;
 
-		if (!HAS_LLC(dev) && cache_level != I915_CACHE_NONE) {
+		if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
 			/* Access to snoopable pages through the GTT is
 			 * incoherent and on some machines causes a hard
 			 * lockup. Relinquish the CPU mmaping to force
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index b95cd9f404f6..64d179d4f684 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -172,8 +172,6 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		       (obj = list_first_entry_or_null(phase->list,
 						       typeof(*obj),
 						       global_list))) {
-			struct i915_vma *vma, *v;
-
 			list_move_tail(&obj->global_list, &still_in_list);
 
 			if (flags & I915_SHRINK_PURGEABLE &&
@@ -193,11 +191,7 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			i915_gem_object_get(obj);
 
 			/* For the unbound phase, this should be a no-op! */
-			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, obj_link)
-				if (i915_vma_unbind(vma))
-					break;
-
+			i915_gem_object_unbind(obj);
 			if (i915_gem_object_put_pages(obj) == 0)
 				count += obj->base.size >> PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index ca8b82ab93d6..e935b327f3f9 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -104,7 +104,6 @@ static void cancel_userptr(struct work_struct *work)
 
 	if (obj->pages != NULL) {
 		struct drm_i915_private *dev_priv = to_i915(dev);
-		struct i915_vma *vma, *tmp;
 		bool was_interruptible;
 
 		wait_rendering(obj);
@@ -112,8 +111,7 @@ static void cancel_userptr(struct work_struct *work)
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
-			WARN_ON(i915_vma_unbind(vma));
+		WARN_ON(i915_gem_object_unbind(obj));
 		WARN_ON(i915_gem_object_put_pages(obj));
 
 		dev_priv->mm.interruptible = was_interruptible;
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 29/73] drm/i915: Kill drop_pages()
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (27 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 28/73] drm/i915: Be more careful when unbinding vma Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 30/73] drm/i915: Introduce i915_gem_active for request tracking Chris Wilson
                   ` (44 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

The drop_pages() function is a dangerous trap in that it can release the
passed-in object pointer and so, unless the caller is aware, it can
easily trick us into using the stale object afterwards. Move it into its
solitary callsite where we know it is safe.
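
A sketch of the trap with a hypothetical caller (not taken from the
driver):

	/* Sketch only: the put inside drop_pages() may be the final
	 * unreference, so a caller that does not hold its own reference
	 * may find obj freed by the time drop_pages() returns.
	 */
	ret = drop_pages(obj);
	if (ret == 0)
		obj->madv = I915_MADV_DONTNEED; /* potential use-after-free */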

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 20 +++++---------------
 1 file changed, 5 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4da5cb7e8bab..9cc6ec0c8dbf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -306,20 +306,6 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
-static int
-drop_pages(struct drm_i915_gem_object *obj)
-{
-	int ret;
-
-	i915_gem_object_get(obj);
-	ret = i915_gem_object_unbind(obj);
-	if (ret == 0)
-		ret = i915_gem_object_put_pages(obj);
-	i915_gem_object_put(obj);
-
-	return ret;
-}
-
 int
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 			    int align)
@@ -340,7 +326,11 @@ i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 	if (obj->base.filp == NULL)
 		return -EINVAL;
 
-	ret = drop_pages(obj);
+	ret = i915_gem_object_unbind(obj);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_object_put_pages(obj);
 	if (ret)
 		return ret;
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 30/73] drm/i915: Introduce i915_gem_active for request tracking
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (28 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 29/73] drm/i915: Kill drop_pages() Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 31/73] drm/i915: Prepare i915_gem_active for annotations Chris Wilson
                   ` (43 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

In the next patch, request tracking is made more generic and for that we
need a new expanded struct. To separate out the logic changes from
the mechanical churn, we split out the structure renaming into this
patch.

v2: Writer's block. Add some spiel about why we track requests.
v3: Now i915_gem_active.
v4: Now with i915_gem_active_set() for attaching to the active request.
v5: Use i915_gem_active_set() from inside the retirement handlers
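
Distilled from the diff below (names as introduced by this patch), the
intended usage is symmetric: point the tracker at a request when the
object becomes busy, and clear it again on retirement:

	/* Usage sketch */
	i915_gem_active_set(&obj->last_read[engine->id], req);  /* busy */
	/* ... the request executes and is retired ... */
	i915_gem_active_set(&obj->last_read[engine->id], NULL); /* idle */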

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        | 11 +++---
 drivers/gpu/drm/i915/i915_drv.h            |  9 +++--
 drivers/gpu/drm/i915/i915_gem.c            | 58 +++++++++++++++---------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  4 +--
 drivers/gpu/drm/i915/i915_gem_fence.c      |  6 ++--
 drivers/gpu/drm/i915/i915_gem_request.h    | 41 +++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c    |  2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      |  7 ++--
 drivers/gpu/drm/i915/intel_display.c       |  8 ++---
 10 files changed, 93 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 62a9685f7ad1..ea74f1c5cf36 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -155,10 +155,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.write_domain);
 	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "%x ",
-				i915_gem_request_get_seqno(obj->last_read_req[id]));
+			   i915_gem_request_get_seqno(obj->last_read[id].request));
 	seq_printf(m, "] %x %x%s%s%s",
-		   i915_gem_request_get_seqno(obj->last_write_req),
-		   i915_gem_request_get_seqno(obj->last_fenced_req),
+		   i915_gem_request_get_seqno(obj->last_write.request),
+		   i915_gem_request_get_seqno(obj->last_fence.request),
 		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -195,9 +195,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		*t = '\0';
 		seq_printf(m, " (%s mappable)", s);
 	}
-	if (obj->last_write_req != NULL)
-		seq_printf(m, " (%s)",
-			   i915_gem_request_get_engine(obj->last_write_req)->name);
+	if (obj->last_write.request)
+		seq_printf(m, " (%s)", obj->last_write.request->engine->name);
 	if (obj->frontbuffer_bits)
 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a6bab90bf832..17a206f19fcb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2242,11 +2242,10 @@ struct drm_i915_gem_object {
 	 * requests on one ring where the write request is older than the
 	 * read request. This allows for the CPU to read from an active
 	 * buffer by only waiting for the write to complete.
-	 * */
-	struct drm_i915_gem_request *last_read_req[I915_NUM_ENGINES];
-	struct drm_i915_gem_request *last_write_req;
-	/** Breadcrumb of last fenced GPU access to the buffer. */
-	struct drm_i915_gem_request *last_fenced_req;
+	 */
+	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	struct i915_gem_active last_write;
+	struct i915_gem_active last_fence;
 
 	/** Current tiling stride for the object, if it's tiled. */
 	uint32_t stride;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9cc6ec0c8dbf..8f0b48568519 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1353,23 +1353,23 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	int ret, i;
 
 	if (readonly) {
-		if (obj->last_write_req != NULL) {
-			ret = i915_wait_request(obj->last_write_req);
+		if (obj->last_write.request) {
+			ret = i915_wait_request(obj->last_write.request);
 			if (ret)
 				return ret;
 
-			i = obj->last_write_req->engine->id;
-			if (obj->last_read_req[i] == obj->last_write_req)
+			i = obj->last_write.request->engine->id;
+			if (obj->last_read[i].request == obj->last_write.request)
 				i915_gem_object_retire__read(obj, i);
 			else
 				i915_gem_object_retire__write(obj);
 		}
 	} else {
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			if (obj->last_read_req[i] == NULL)
+			if (!obj->last_read[i].request)
 				continue;
 
-			ret = i915_wait_request(obj->last_read_req[i]);
+			ret = i915_wait_request(obj->last_read[i].request);
 			if (ret)
 				return ret;
 
@@ -1397,9 +1397,9 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 {
 	int idx = req->engine->id;
 
-	if (obj->last_read_req[idx] == req)
+	if (obj->last_read[idx].request == req)
 		i915_gem_object_retire__read(obj, idx);
-	else if (obj->last_write_req == req)
+	else if (obj->last_write.request == req)
 		i915_gem_object_retire__write(obj);
 
 	if (!i915_reset_in_progress(&req->i915->gpu_error))
@@ -1428,7 +1428,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	if (readonly) {
 		struct drm_i915_gem_request *req;
 
-		req = obj->last_write_req;
+		req = obj->last_write.request;
 		if (req == NULL)
 			return 0;
 
@@ -1437,7 +1437,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			struct drm_i915_gem_request *req;
 
-			req = obj->last_read_req[i];
+			req = obj->last_read[i].request;
 			if (req == NULL)
 				continue;
 
@@ -2375,7 +2375,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	obj->active |= intel_engine_flag(engine);
 
 	list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
-	i915_gem_request_assign(&obj->last_read_req[engine->id], req);
+	i915_gem_active_set(&obj->last_read[engine->id], req);
 
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
@@ -2383,10 +2383,10 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
-	GEM_BUG_ON(obj->last_write_req == NULL);
-	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
+	GEM_BUG_ON(!obj->last_write.request);
+	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write.request->engine)));
 
-	i915_gem_request_assign(&obj->last_write_req, NULL);
+	i915_gem_active_set(&obj->last_write, NULL);
 	intel_fb_obj_flush(obj, true, ORIGIN_CS);
 }
 
@@ -2395,13 +2395,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
 {
 	struct i915_vma *vma;
 
-	GEM_BUG_ON(obj->last_read_req[idx] == NULL);
+	GEM_BUG_ON(!obj->last_read[idx].request);
 	GEM_BUG_ON(!(obj->active & (1 << idx)));
 
 	list_del_init(&obj->engine_list[idx]);
-	i915_gem_request_assign(&obj->last_read_req[idx], NULL);
+	i915_gem_active_set(&obj->last_read[idx], NULL);
 
-	if (obj->last_write_req && obj->last_write_req->engine->id == idx)
+	if (obj->last_write.request && obj->last_write.request->engine->id == idx)
 		i915_gem_object_retire__write(obj);
 
 	obj->active &= ~(1 << idx);
@@ -2420,7 +2420,7 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
 			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	}
 
-	i915_gem_request_assign(&obj->last_fenced_req, NULL);
+	i915_gem_active_set(&obj->last_fence, NULL);
 	i915_gem_object_put(obj);
 }
 
@@ -2621,7 +2621,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 				       struct drm_i915_gem_object,
 				       engine_list[engine->id]);
 
-		if (!list_empty(&obj->last_read_req[engine->id]->list))
+		if (!list_empty(&obj->last_read[engine->id].request->list))
 			break;
 
 		i915_gem_object_retire__read(obj, engine->id);
@@ -2754,7 +2754,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;
 
-		req = obj->last_read_req[i];
+		req = obj->last_read[i].request;
 		if (req == NULL)
 			continue;
 
@@ -2830,10 +2830,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	i915_gem_object_put(obj);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (obj->last_read_req[i] == NULL)
+		if (!obj->last_read[i].request)
 			continue;
 
-		req[n++] = i915_gem_request_get(obj->last_read_req[i]);
+		req[n++] = i915_gem_request_get(obj->last_read[i].request);
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -2924,12 +2924,12 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	n = 0;
 	if (readonly) {
-		if (obj->last_write_req)
-			req[n++] = obj->last_write_req;
+		if (obj->last_write.request)
+			req[n++] = obj->last_write.request;
 	} else {
 		for (i = 0; i < I915_NUM_ENGINES; i++)
-			if (obj->last_read_req[i])
-				req[n++] = obj->last_read_req[i];
+			if (obj->last_read[i].request)
+				req[n++] = obj->last_read[i].request;
 	}
 	for (i = 0; i < n; i++) {
 		ret = __i915_gem_object_sync(obj, to, req[i]);
@@ -4026,12 +4026,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			struct drm_i915_gem_request *req;
 
-			req = obj->last_read_req[i];
+			req = obj->last_read[i].request;
 			if (req)
 				args->busy |= 1 << (16 + req->engine->exec_id);
 		}
-		if (obj->last_write_req)
-			args->busy |= obj->last_write_req->engine->exec_id;
+		if (obj->last_write.request)
+			args->busy |= obj->last_write.request->engine->exec_id;
 	}
 
 unref:
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a4b98afc00ca..5e1fb85b708b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1150,7 +1150,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 
 		i915_vma_move_to_active(vma, req);
 		if (obj->base.write_domain) {
-			i915_gem_request_assign(&obj->last_write_req, req);
+			i915_gem_active_set(&obj->last_write, req);
 
 			intel_fb_obj_invalidate(obj, ORIGIN_CS);
 
@@ -1158,7 +1158,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
 		}
 		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			i915_gem_request_assign(&obj->last_fenced_req, req);
+			i915_gem_active_set(&obj->last_fence, req);
 			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
 				struct drm_i915_private *dev_priv = engine->i915;
 				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 251d7a95af89..d16b385e79e9 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -261,12 +261,12 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
-	if (obj->last_fenced_req) {
-		int ret = i915_wait_request(obj->last_fenced_req);
+	if (obj->last_fence.request) {
+		int ret = i915_wait_request(obj->last_fence.request);
 		if (ret)
 			return ret;
 
-		i915_gem_request_assign(&obj->last_fenced_req, NULL);
+		i915_gem_active_set(&obj->last_fence, NULL);
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 382ca5a163eb..cf2df33f9892 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -249,4 +249,45 @@ static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
 		__i915_spin_request(request, state, timeout_us));
 }
 
+/* We treat requests as fences. This is not to be confused with our
+ * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
+ * We use the fences to synchronize access from the CPU with activity on the
+ * GPU, for example, we should not rewrite an object's PTE whilst the GPU
+ * is reading them. We also track fences at a higher level to provide
+ * implicit synchronisation around GEM objects, e.g. set-domain will wait
+ * for outstanding GPU rendering before marking the object ready for CPU
+ * access, or a pageflip will wait until the GPU is complete before showing
+ * the frame on the scanout.
+ *
+ * In order to use a fence, the object must track the fence it needs to
+ * serialise with. For example, GEM objects want to track both read and
+ * write access so that we can perform concurrent read operations between
+ * the CPU and GPU engines, as well as waiting for all rendering to
+ * complete, or waiting for the last GPU user of a "fence register". The
+ * object then embeds a #i915_gem_active to track the most recent (in
+ * retirement order) request relevant for the desired mode of access.
+ * The #i915_gem_active is updated with i915_gem_active_set() to track the
+ * most recent fence request, typically this is done as part of
+ * i915_vma_move_to_active().
+ *
+ * When the #i915_gem_active completes (is retired), it will
+ * signal its completion to the owner through a callback as well as mark
+ * itself as idle (i915_gem_active.request == NULL). The owner
+ * can then perform any action, such as delayed freeing of an active
+ * resource including itself.
+ */
+struct i915_gem_active {
+	struct drm_i915_gem_request *request;
+};
+
+static inline void
+i915_gem_active_set(struct i915_gem_active *active,
+		    struct drm_i915_gem_request *request)
+{
+	i915_gem_request_assign(&active->request, request);
+}
+
+#define for_each_active(mask, idx) \
+	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
+
 #endif /* I915_GEM_REQUEST_H */
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index e83fc2d0433c..00d796da65fb 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -242,7 +242,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			}
 
 			obj->fence_dirty =
-				obj->last_fenced_req ||
+				obj->last_fence.request ||
 				obj->fence_reg != I915_FENCE_REG_NONE;
 
 			obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e935b327f3f9..32f50a70ea42 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -74,7 +74,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;
 
-		req = obj->last_read_req[i];
+		req = obj->last_read[i].request;
 		if (req == NULL)
 			continue;
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 226b28efe9ab..d6482e980d5e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -751,8 +751,8 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->size = obj->base.size;
 	err->name = obj->base.name;
 	for (i = 0; i < I915_NUM_ENGINES; i++)
-		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read_req[i]);
-	err->wseqno = i915_gem_request_get_seqno(obj->last_write_req);
+		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read[i].request);
+	err->wseqno = i915_gem_request_get_seqno(obj->last_write.request);
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
@@ -764,8 +764,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
-	err->engine = obj->last_write_req ?
-		i915_gem_request_get_engine(obj->last_write_req)->id : -1;
+	err->engine = obj->last_write.request ? obj->last_write.request->engine->id : -1;
 	err->cache_level = obj->cache_level;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 134dcf66e17b..fdfad33cac0a 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11370,7 +11370,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
 	if (resv && !reservation_object_test_signaled_rcu(resv, false))
 		return true;
 
-	return engine != i915_gem_request_get_engine(obj->last_write_req);
+	return engine != i915_gem_request_get_engine(obj->last_write.request);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11673,7 +11673,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		engine = &dev_priv->engine[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		engine = i915_gem_request_get_engine(obj->last_write_req);
+		engine = i915_gem_request_get_engine(obj->last_write.request);
 		if (engine == NULL || engine->id != RCS)
 			engine = &dev_priv->engine[BCS];
 	} else {
@@ -11695,7 +11695,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
 		i915_gem_request_assign(&work->flip_queued_req,
-					obj->last_write_req);
+					obj->last_write.request);
 
 		schedule_work(&work->mmio_work);
 	} else {
@@ -14043,7 +14043,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 			to_intel_plane_state(new_state);
 
 		i915_gem_request_assign(&plane_state->wait_req,
-					obj->last_write_req);
+					obj->last_write.request);
 	}
 
 	return ret;
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 31/73] drm/i915: Prepare i915_gem_active for annotations
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (29 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 30/73] drm/i915: Introduce i915_gem_active for request tracking Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 32/73] drm/i915: Mark up i915_gem_active for locking annotation Chris Wilson
                   ` (42 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

In the future, we will want to add annotations to the i915_gem_active
struct. The API is thus expanded to hide direct access to the contents
of i915_gem_active, mediating it instead through a number of helpers.
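
A sketch of the intended calling convention (the helper names are from
this patch; the surrounding code is illustrative only):

	struct drm_i915_gem_request *rq;

	/* peek: borrowed pointer, only valid whilst struct_mutex is held */
	rq = i915_gem_active_peek(&obj->last_write);

	/* get: takes a reference, so the request can outlive the lock */
	rq = i915_gem_active_get(&obj->last_write);
	mutex_unlock(&dev->struct_mutex);
	if (rq) {
		__i915_wait_request(rq, true, NULL, NULL);
		i915_gem_request_put(rq);
	}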

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c     |  13 ++--
 drivers/gpu/drm/i915/i915_gem.c         |  85 ++++++++++++---------
 drivers/gpu/drm/i915/i915_gem_fence.c   |  11 ++-
 drivers/gpu/drm/i915/i915_gem_request.h | 127 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_tiling.c  |   2 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c |   8 +-
 drivers/gpu/drm/i915/i915_gpu_error.c   |   9 ++-
 drivers/gpu/drm/i915/intel_display.c    |  15 ++--
 8 files changed, 204 insertions(+), 66 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ea74f1c5cf36..2e431cfe8bde 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -155,10 +155,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.write_domain);
 	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "%x ",
-			   i915_gem_request_get_seqno(obj->last_read[id].request));
+			   i915_gem_active_get_seqno(&obj->last_read[id]));
 	seq_printf(m, "] %x %x%s%s%s",
-		   i915_gem_request_get_seqno(obj->last_write.request),
-		   i915_gem_request_get_seqno(obj->last_fence.request),
+		   i915_gem_active_get_seqno(&obj->last_write),
+		   i915_gem_active_get_seqno(&obj->last_fence),
 		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -195,8 +195,11 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		*t = '\0';
 		seq_printf(m, " (%s mappable)", s);
 	}
-	if (obj->last_write.request)
-		seq_printf(m, " (%s)", obj->last_write.request->engine->name);
+
+	engine = i915_gem_active_get_engine(&obj->last_write);
+	if (engine)
+		seq_printf(m, " (%s)", engine->name);
+
 	if (obj->frontbuffer_bits)
 		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8f0b48568519..b55e27f21688 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1349,27 +1349,30 @@ int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly)
 {
+	struct drm_i915_gem_request *request;
 	struct reservation_object *resv;
 	int ret, i;
 
 	if (readonly) {
-		if (obj->last_write.request) {
-			ret = i915_wait_request(obj->last_write.request);
+		request = i915_gem_active_peek(&obj->last_write);
+		if (request) {
+			ret = i915_wait_request(request);
 			if (ret)
 				return ret;
 
-			i = obj->last_write.request->engine->id;
-			if (obj->last_read[i].request == obj->last_write.request)
+			i = request->engine->id;
+			if (i915_gem_active_peek(&obj->last_read[i]) == request)
 				i915_gem_object_retire__read(obj, i);
 			else
 				i915_gem_object_retire__write(obj);
 		}
 	} else {
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			if (!obj->last_read[i].request)
+			request = i915_gem_active_peek(&obj->last_read[i]);
+			if (!request)
 				continue;
 
-			ret = i915_wait_request(obj->last_read[i].request);
+			ret = i915_wait_request(request);
 			if (ret)
 				return ret;
 
@@ -1397,9 +1400,9 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 {
 	int idx = req->engine->id;
 
-	if (obj->last_read[idx].request == req)
+	if (i915_gem_active_peek(&obj->last_read[idx]) == req)
 		i915_gem_object_retire__read(obj, idx);
-	else if (obj->last_write.request == req)
+	else if (i915_gem_active_peek(&obj->last_write) == req)
 		i915_gem_object_retire__write(obj);
 
 	if (!i915_reset_in_progress(&req->i915->gpu_error))
@@ -1428,20 +1431,20 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	if (readonly) {
 		struct drm_i915_gem_request *req;
 
-		req = obj->last_write.request;
+		req = i915_gem_active_get(&obj->last_write);
 		if (req == NULL)
 			return 0;
 
-		requests[n++] = i915_gem_request_get(req);
+		requests[n++] = req;
 	} else {
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			struct drm_i915_gem_request *req;
 
-			req = obj->last_read[i].request;
+			req = i915_gem_active_get(&obj->last_read[i]);
 			if (req == NULL)
 				continue;
 
-			requests[n++] = i915_gem_request_get(req);
+			requests[n++] = req;
 		}
 	}
 
@@ -2383,8 +2386,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
-	GEM_BUG_ON(!obj->last_write.request);
-	GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write.request->engine)));
+	GEM_BUG_ON(!__i915_gem_active_is_busy(&obj->last_write));
+	GEM_BUG_ON(!(obj->active & intel_engine_flag(i915_gem_active_get_engine(&obj->last_write))));
 
 	i915_gem_active_set(&obj->last_write, NULL);
 	intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2393,15 +2396,17 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
 {
+	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
 
-	GEM_BUG_ON(!obj->last_read[idx].request);
+	GEM_BUG_ON(!__i915_gem_active_is_busy(&obj->last_read[idx]));
 	GEM_BUG_ON(!(obj->active & (1 << idx)));
 
 	list_del_init(&obj->engine_list[idx]);
 	i915_gem_active_set(&obj->last_read[idx], NULL);
 
-	if (obj->last_write.request && obj->last_write.request->engine->id == idx)
+	engine = i915_gem_active_get_engine(&obj->last_write);
+	if (engine && engine->id == idx)
 		i915_gem_object_retire__write(obj);
 
 	obj->active &= ~(1 << idx);
@@ -2621,7 +2626,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 				       struct drm_i915_gem_object,
 				       engine_list[engine->id]);
 
-		if (!list_empty(&obj->last_read[engine->id].request->list))
+		if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id])->list))
 			break;
 
 		i915_gem_object_retire__read(obj, engine->id);
@@ -2754,7 +2759,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;
 
-		req = obj->last_read[i].request;
+		req = i915_gem_active_peek(&obj->last_read[i]);
 		if (req == NULL)
 			continue;
 
@@ -2794,7 +2799,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
 	struct drm_i915_gem_wait *args = data;
 	struct drm_i915_gem_object *obj;
-	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
 	int i, n = 0;
 	int ret;
 
@@ -2830,20 +2835,21 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	i915_gem_object_put(obj);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		if (!obj->last_read[i].request)
-			continue;
+		struct drm_i915_gem_request *req;
 
-		req[n++] = i915_gem_request_get(obj->last_read[i].request);
+		req = i915_gem_active_get(&obj->last_read[i]);
+		if (req)
+			requests[n++] = req;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
-			ret = __i915_wait_request(req[i], true,
+			ret = __i915_wait_request(requests[i], true,
 						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
 						  to_rps_client(file));
-		i915_gem_request_put(req[i]);
+		i915_gem_request_put(requests[i]);
 	}
 	return ret;
 
@@ -2916,7 +2922,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_request *to)
 {
 	const bool readonly = obj->base.pending_write_domain == 0;
-	struct drm_i915_gem_request *req[I915_NUM_ENGINES];
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
 	int ret, i, n;
 
 	if (!obj->active)
@@ -2924,15 +2930,22 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	n = 0;
 	if (readonly) {
-		if (obj->last_write.request)
-			req[n++] = obj->last_write.request;
+		struct drm_i915_gem_request *req;
+
+		req = i915_gem_active_peek(&obj->last_write);
+		if (req)
+			requests[n++] = req;
 	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++)
-			if (obj->last_read[i].request)
-				req[n++] = obj->last_read[i].request;
+		for (i = 0; i < I915_NUM_ENGINES; i++) {
+			struct drm_i915_gem_request *req;
+
+			req = i915_gem_active_peek(&obj->last_read[i]);
+			if (req)
+				requests[n++] = req;
+		}
 	}
 	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, req[i]);
+		ret = __i915_gem_object_sync(obj, to, requests[i]);
 		if (ret)
 			return ret;
 	}
@@ -4021,17 +4034,17 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 
 	args->busy = 0;
 	if (obj->active) {
+		struct drm_i915_gem_request *req;
 		int i;
 
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			struct drm_i915_gem_request *req;
-
-			req = obj->last_read[i].request;
+			req = i915_gem_active_peek(&obj->last_read[i]);
 			if (req)
 				args->busy |= 1 << (16 + req->engine->exec_id);
 		}
-		if (obj->last_write.request)
-			args->busy |= obj->last_write.request->engine->exec_id;
+		req = i915_gem_active_peek(&obj->last_write);
+		if (req)
+			args->busy |= req->engine->exec_id;
 	}
 
 unref:
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index d16b385e79e9..9fdbd66128a6 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -261,14 +261,13 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
-	if (obj->last_fence.request) {
-		int ret = i915_wait_request(obj->last_fence.request);
-		if (ret)
-			return ret;
+	int ret;
 
-		i915_gem_active_set(&obj->last_fence, NULL);
-	}
+	ret = i915_gem_active_wait(&obj->last_fence);
+	if (ret)
+		return ret;
 
+	i915_gem_active_set(&obj->last_fence, NULL);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index cf2df33f9892..6280812e9c7d 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -280,6 +280,15 @@ struct i915_gem_active {
 	struct drm_i915_gem_request *request;
 };
 
+/**
+ * i915_gem_active_set - updates the tracker to watch the current request
+ * @active: the active tracker
+ * @request: the request to watch
+ *
+ * i915_gem_active_set() watches the given @request for completion. Whilst
+ * that @request is busy, the @active reports busy. When that @request is
+ * retired, the @active tracker is updated to report idle.
+ */
 static inline void
 i915_gem_active_set(struct i915_gem_active *active,
 		    struct drm_i915_gem_request *request)
@@ -287,6 +296,124 @@ i915_gem_active_set(struct i915_gem_active *active,
 	i915_gem_request_assign(&active->request, request);
 }
 
+/**
+ * i915_gem_active_peek - report the request being monitored
+ * @active: the active tracker
+ *
+ * i915_gem_active_peek() returns the current request being tracked, or NULL.
+ * It does not obtain a reference on the request for the caller, so the
+ * caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_peek(const struct i915_gem_active *active)
+{
+	return active->request;
+}
+
+/**
+ * i915_gem_active_get - return a reference to the active request
+ * @active: the active tracker
+ *
+ * i915_gem_active_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold struct_mutex.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	request = i915_gem_active_peek(active);
+	if (!request || i915_gem_request_completed(request))
+		return NULL;
+
+	return i915_gem_request_get(request);
+}
+
+/**
+ * __i915_gem_active_is_busy - report whether the active tracker is assigned
+ * @active: the active tracker
+ *
+ * __i915_gem_active_is_busy() returns true if the active tracker is currently
+ * assigned to a request. Due to the lazy retiring, that request may be idle
+ * and this may report stale information.
+ */
+static inline bool
+__i915_gem_active_is_busy(const struct i915_gem_active *active)
+{
+	return i915_gem_active_peek(active);
+}
+
+/**
+ * i915_gem_active_is_idle - report whether the active tracker is idle
+ * @active: the active tracker
+ *
+ * i915_gem_active_is_idle() returns true if the active tracker is currently
+ * unassigned or if the request is complete (but not yet retired). Requires
+ * the caller to hold struct_mutex (but that can be relaxed if desired).
+ */
+static inline bool
+i915_gem_active_is_idle(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	request = i915_gem_active_peek(active);
+	if (!request || i915_gem_request_completed(request))
+		return true;
+
+	return false;
+}
+
+/**
+ * i915_gem_active_wait - waits until the request is completed
+ * @active: the active request on which to wait
+ *
+ * i915_gem_active_wait() waits until the request is completed before
+ * returning. Note that it does not guarantee that the request is
+ * retired first, see i915_gem_active_retire().
+ */
+static inline int __must_check
+i915_gem_active_wait(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	request = i915_gem_active_peek(active);
+	if (!request)
+		return 0;
+
+	return i915_wait_request(request);
+}
+
+/**
+ * i915_gem_active_retire - waits until the request is retired
+ * @active: the active request on which to wait
+ *
+ * i915_gem_active_retire() waits until the request is completed,
+ * and then ensures that at least the retirement handler for this
+ * @active tracker is called before returning. If the @active
+ * tracker is idle, the function returns immediately.
+ */
+static inline int __must_check
+i915_gem_active_retire(const struct i915_gem_active *active)
+{
+	return i915_gem_active_wait(active);
+}
+
+/* Convenience functions for peeking at state inside active's request whilst
+ * guarded by the struct_mutex.
+ */
+
+static inline uint32_t
+i915_gem_active_get_seqno(const struct i915_gem_active *active)
+{
+	return i915_gem_request_get_seqno(i915_gem_active_peek(active));
+}
+
+static inline struct intel_engine_cs *
+i915_gem_active_get_engine(const struct i915_gem_active *active)
+{
+	return i915_gem_request_get_engine(i915_gem_active_peek(active));
+}
+
 #define for_each_active(mask, idx) \
 	for (; mask ? idx = ffs(mask) - 1, 1 : 0; mask &= ~BIT(idx))
 
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 00d796da65fb..8cef2d6b291a 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -242,7 +242,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			}
 
 			obj->fence_dirty =
-				obj->last_fence.request ||
+				!i915_gem_active_is_idle(&obj->last_fence) ||
 				obj->fence_reg != I915_FENCE_REG_NONE;
 
 			obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 32f50a70ea42..00ab5e9d2eb7 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -74,11 +74,9 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;
 
-		req = obj->last_read[i].request;
-		if (req == NULL)
-			continue;
-
-		requests[n++] = i915_gem_request_get(req);
+		req = i915_gem_active_get(&obj->last_read[i]);
+		if (req)
+			requests[n++] = req;
 	}
 
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d6482e980d5e..585fe2bdadd5 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -746,13 +746,14 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 		       struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct intel_engine_cs *engine;
 	int i;
 
 	err->size = obj->base.size;
 	err->name = obj->base.name;
 	for (i = 0; i < I915_NUM_ENGINES; i++)
-		err->rseqno[i] = i915_gem_request_get_seqno(obj->last_read[i].request);
-	err->wseqno = i915_gem_request_get_seqno(obj->last_write.request);
+		err->rseqno[i] = i915_gem_active_get_seqno(&obj->last_read[i]);
+	err->wseqno = i915_gem_active_get_seqno(&obj->last_write);
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
@@ -764,8 +765,10 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->dirty = obj->dirty;
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
-	err->engine = obj->last_write.request ? obj->last_write.request->engine->id : -1;
 	err->cache_level = obj->cache_level;
+
+	engine = i915_gem_active_get_engine(&obj->last_write);
+	err->engine = engine ? engine->id : -1;
 }
 
 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fdfad33cac0a..94259613dfa6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11370,7 +11370,7 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
 	if (resv && !reservation_object_test_signaled_rcu(resv, false))
 		return true;
 
-	return engine != i915_gem_request_get_engine(obj->last_write.request);
+	return engine != i915_gem_active_get_engine(&obj->last_write);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11673,7 +11673,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		engine = &dev_priv->engine[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		engine = i915_gem_request_get_engine(obj->last_write.request);
+		engine = i915_gem_active_get_engine(&obj->last_write);
 		if (engine == NULL || engine->id != RCS)
 			engine = &dev_priv->engine[BCS];
 	} else {
@@ -11694,9 +11694,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	if (mmio_flip) {
 		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
-		i915_gem_request_assign(&work->flip_queued_req,
-					obj->last_write.request);
-
+		work->flip_queued_req = i915_gem_active_get(&obj->last_write);
 		schedule_work(&work->mmio_work);
 	} else {
 		request = i915_gem_request_alloc(engine, engine->last_context);
@@ -14039,11 +14037,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 	}
 
 	if (ret == 0) {
-		struct intel_plane_state *plane_state =
-			to_intel_plane_state(new_state);
-
-		i915_gem_request_assign(&plane_state->wait_req,
-					obj->last_write.request);
+		to_intel_plane_state(new_state)->wait_req =
+			i915_gem_active_get(&obj->last_write);
 	}
 
 	return ret;
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 32/73] drm/i915: Mark up i915_gem_active for locking annotation
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (30 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 31/73] drm/i915: Prepare i915_gem_active for annotations Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 33/73] drm/i915: Refactor blocking waits Chris Wilson
                   ` (41 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

The future annotations will track the lock used to guard each access
and ensure that it is always sufficient. We make the preparations now
so that the API is in place ahead of time, and to confirm that GCC can
eliminate the unused parameter.

	   text	   data	    bss	     dec	    hex	filename
Before:	6298417 3619610  696320 10614347         a1f64b vmlinux
After:	6298417 3619610  696320 10614347         a1f64b vmlinux
(with i915 builtin)
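
As a sketch of where this is headed (my assumption about the follow-up,
not something this patch implements): with the mutex passed in, a later
change can have each accessor assert that the named lock is actually
held, e.g.

	static inline struct drm_i915_gem_request *
	i915_gem_active_peek(const struct i915_gem_active *active,
			     struct mutex *mutex)
	{
		lockdep_assert_held(mutex);
		return active->request;
	}

Until then the parameter is unused, which is why the before/after size
comparison is byte-identical.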

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c     | 12 +++++---
 drivers/gpu/drm/i915/i915_gem.c         | 49 ++++++++++++++++++++++-----------
 drivers/gpu/drm/i915/i915_gem_fence.c   |  3 +-
 drivers/gpu/drm/i915/i915_gem_request.h | 38 +++++++++++++++----------
 drivers/gpu/drm/i915/i915_gem_tiling.c  |  3 +-
 drivers/gpu/drm/i915/i915_gem_userptr.c |  3 +-
 drivers/gpu/drm/i915/i915_gpu_error.c   | 29 +++++++++++++++----
 drivers/gpu/drm/i915/intel_display.c    | 12 +++++---
 8 files changed, 102 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2e431cfe8bde..a22df97d3ea7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -155,10 +155,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->base.write_domain);
 	for_each_engine_id(engine, dev_priv, id)
 		seq_printf(m, "%x ",
-			   i915_gem_active_get_seqno(&obj->last_read[id]));
+			   i915_gem_active_get_seqno(&obj->last_read[id],
+						     &obj->base.dev->struct_mutex));
 	seq_printf(m, "] %x %x%s%s%s",
-		   i915_gem_active_get_seqno(&obj->last_write),
-		   i915_gem_active_get_seqno(&obj->last_fence),
+		   i915_gem_active_get_seqno(&obj->last_write,
+					     &obj->base.dev->struct_mutex),
+		   i915_gem_active_get_seqno(&obj->last_fence,
+					     &obj->base.dev->struct_mutex),
 		   i915_cache_level_str(to_i915(obj->base.dev), obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
@@ -196,7 +199,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (%s mappable)", s);
 	}
 
-	engine = i915_gem_active_get_engine(&obj->last_write);
+	engine = i915_gem_active_get_engine(&obj->last_write,
+					    &obj->base.dev->struct_mutex);
 	if (engine)
 		seq_printf(m, " (%s)", engine->name);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b55e27f21688..63d974b22b58 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1354,21 +1354,24 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	int ret, i;
 
 	if (readonly) {
-		request = i915_gem_active_peek(&obj->last_write);
+		request = i915_gem_active_peek(&obj->last_write,
+					       &obj->base.dev->struct_mutex);
 		if (request) {
 			ret = i915_wait_request(request);
 			if (ret)
 				return ret;
 
 			i = request->engine->id;
-			if (i915_gem_active_peek(&obj->last_read[i]) == request)
+			if (i915_gem_active_peek(&obj->last_read[i],
+						 &obj->base.dev->struct_mutex) == request)
 				i915_gem_object_retire__read(obj, i);
 			else
 				i915_gem_object_retire__write(obj);
 		}
 	} else {
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			request = i915_gem_active_peek(&obj->last_read[i]);
+			request = i915_gem_active_peek(&obj->last_read[i],
+						       &obj->base.dev->struct_mutex);
 			if (!request)
 				continue;
 
@@ -1400,9 +1403,11 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 {
 	int idx = req->engine->id;
 
-	if (i915_gem_active_peek(&obj->last_read[idx]) == req)
+	if (i915_gem_active_peek(&obj->last_read[idx],
+				 &obj->base.dev->struct_mutex) == req)
 		i915_gem_object_retire__read(obj, idx);
-	else if (i915_gem_active_peek(&obj->last_write) == req)
+	else if (i915_gem_active_peek(&obj->last_write,
+				      &obj->base.dev->struct_mutex) == req)
 		i915_gem_object_retire__write(obj);
 
 	if (!i915_reset_in_progress(&req->i915->gpu_error))
@@ -1431,7 +1436,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	if (readonly) {
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_active_get(&obj->last_write);
+		req = i915_gem_active_get(&obj->last_write,
+					  &obj->base.dev->struct_mutex);
 		if (req == NULL)
 			return 0;
 
@@ -1440,7 +1446,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			struct drm_i915_gem_request *req;
 
-			req = i915_gem_active_get(&obj->last_read[i]);
+			req = i915_gem_active_get(&obj->last_read[i],
+						  &obj->base.dev->struct_mutex);
 			if (req == NULL)
 				continue;
 
@@ -2387,7 +2394,9 @@ static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 {
 	GEM_BUG_ON(!__i915_gem_active_is_busy(&obj->last_write));
-	GEM_BUG_ON(!(obj->active & intel_engine_flag(i915_gem_active_get_engine(&obj->last_write))));
+	GEM_BUG_ON(!(obj->active &
+		     intel_engine_flag(i915_gem_active_get_engine(&obj->last_write,
+								  &obj->base.dev->struct_mutex))));
 
 	i915_gem_active_set(&obj->last_write, NULL);
 	intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2405,7 +2414,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
 	list_del_init(&obj->engine_list[idx]);
 	i915_gem_active_set(&obj->last_read[idx], NULL);
 
-	engine = i915_gem_active_get_engine(&obj->last_write);
+	engine = i915_gem_active_get_engine(&obj->last_write,
+					    &obj->base.dev->struct_mutex);
 	if (engine && engine->id == idx)
 		i915_gem_object_retire__write(obj);
 
@@ -2626,7 +2636,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 				       struct drm_i915_gem_object,
 				       engine_list[engine->id]);
 
-		if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id])->list))
+		if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id],
+						      &obj->base.dev->struct_mutex)->list))
 			break;
 
 		i915_gem_object_retire__read(obj, engine->id);
@@ -2759,7 +2770,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_active_peek(&obj->last_read[i]);
+		req = i915_gem_active_peek(&obj->last_read[i],
+					   &obj->base.dev->struct_mutex);
 		if (req == NULL)
 			continue;
 
@@ -2837,7 +2849,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_active_get(&obj->last_read[i]);
+		req = i915_gem_active_get(&obj->last_read[i],
+					  &obj->base.dev->struct_mutex);
 		if (req)
 			requests[n++] = req;
 	}
@@ -2932,14 +2945,16 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (readonly) {
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_active_peek(&obj->last_write);
+		req = i915_gem_active_peek(&obj->last_write,
+					   &obj->base.dev->struct_mutex);
 		if (req)
 			requests[n++] = req;
 	} else {
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
 			struct drm_i915_gem_request *req;
 
-			req = i915_gem_active_peek(&obj->last_read[i]);
+			req = i915_gem_active_peek(&obj->last_read[i],
+						   &obj->base.dev->struct_mutex);
 			if (req)
 				requests[n++] = req;
 		}
@@ -4038,11 +4053,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		int i;
 
 		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			req = i915_gem_active_peek(&obj->last_read[i]);
+			req = i915_gem_active_peek(&obj->last_read[i],
+						   &obj->base.dev->struct_mutex);
 			if (req)
 				args->busy |= 1 << (16 + req->engine->exec_id);
 		}
-		req = i915_gem_active_peek(&obj->last_write);
+		req = i915_gem_active_peek(&obj->last_write,
+					   &obj->base.dev->struct_mutex);
 		if (req)
 			args->busy |= req->engine->exec_id;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index 9fdbd66128a6..a4ec4fecbea0 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -263,7 +263,8 @@ i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
 	int ret;
 
-	ret = i915_gem_active_wait(&obj->last_fence);
+	ret = i915_gem_active_wait(&obj->last_fence,
+				   &obj->base.dev->struct_mutex);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 6280812e9c7d..9e56a2c63b0e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -296,6 +296,12 @@ i915_gem_active_set(struct i915_gem_active *active,
 	i915_gem_request_assign(&active->request, request);
 }
 
+static inline struct drm_i915_gem_request *
+__i915_gem_active_peek(const struct i915_gem_active *active)
+{
+	return active->request;
+}
+
 /**
  * i915_gem_active_peek - report the request being monitored
  * @active: the active tracker
@@ -305,7 +311,7 @@ i915_gem_active_set(struct i915_gem_active *active,
  * caller must hold struct_mutex.
  */
 static inline struct drm_i915_gem_request *
-i915_gem_active_peek(const struct i915_gem_active *active)
+i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
 {
 	return active->request;
 }
@@ -318,11 +324,11 @@ i915_gem_active_peek(const struct i915_gem_active *active)
  * if the active tracker is idle. The caller must hold struct_mutex.
  */
 static inline struct drm_i915_gem_request *
-i915_gem_active_get(const struct i915_gem_active *active)
+i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 {
 	struct drm_i915_gem_request *request;
 
-	request = i915_gem_active_peek(active);
+	request = i915_gem_active_peek(active, mutex);
 	if (!request || i915_gem_request_completed(request))
 		return NULL;
 
@@ -340,7 +346,7 @@ i915_gem_active_get(const struct i915_gem_active *active)
 static inline bool
 __i915_gem_active_is_busy(const struct i915_gem_active *active)
 {
-	return i915_gem_active_peek(active);
+	return __i915_gem_active_peek(active);
 }
 
 /**
@@ -352,11 +358,12 @@ __i915_gem_active_is_busy(const struct i915_gem_active *active)
  * the caller to hold struct_mutex (but that can be relaxed if desired).
  */
 static inline bool
-i915_gem_active_is_idle(const struct i915_gem_active *active)
+i915_gem_active_is_idle(const struct i915_gem_active *active,
+			struct mutex *mutex)
 {
 	struct drm_i915_gem_request *request;
 
-	request = i915_gem_active_peek(active);
+	request = i915_gem_active_peek(active, mutex);
 	if (!request || i915_gem_request_completed(request))
 		return true;
 
@@ -372,11 +379,11 @@ i915_gem_active_is_idle(const struct i915_gem_active *active)
  * retired first, see i915_gem_active_retire().
  */
 static inline int __must_check
-i915_gem_active_wait(const struct i915_gem_active *active)
+i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
 {
 	struct drm_i915_gem_request *request;
 
-	request = i915_gem_active_peek(active);
+	request = i915_gem_active_peek(active, mutex);
 	if (!request)
 		return 0;
 
@@ -393,9 +400,10 @@ i915_gem_active_wait(const struct i915_gem_active *active)
  * tracker is idle, the function returns immediately.
  */
 static inline int __must_check
-i915_gem_active_retire(const struct i915_gem_active *active)
+i915_gem_active_retire(const struct i915_gem_active *active,
+		       struct mutex *mutex)
 {
-	return i915_gem_active_wait(active);
+	return i915_gem_active_wait(active, mutex);
 }
 
 /* Convenience functions for peeking at state inside active's request whilst
@@ -403,15 +411,17 @@ i915_gem_active_retire(const struct i915_gem_active *active)
  */
 
 static inline uint32_t
-i915_gem_active_get_seqno(const struct i915_gem_active *active)
+i915_gem_active_get_seqno(const struct i915_gem_active *active,
+			  struct mutex *mutex)
 {
-	return i915_gem_request_get_seqno(i915_gem_active_peek(active));
+	return i915_gem_request_get_seqno(i915_gem_active_peek(active, mutex));
 }
 
 static inline struct intel_engine_cs *
-i915_gem_active_get_engine(const struct i915_gem_active *active)
+i915_gem_active_get_engine(const struct i915_gem_active *active,
+			   struct mutex *mutex)
 {
-	return i915_gem_request_get_engine(i915_gem_active_peek(active));
+	return i915_gem_request_get_engine(i915_gem_active_peek(active, mutex));
 }
 
 #define for_each_active(mask, idx) \
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 8cef2d6b291a..fa2eb4a4f212 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -242,7 +242,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 			}
 
 			obj->fence_dirty =
-				!i915_gem_active_is_idle(&obj->last_fence) ||
+				!i915_gem_active_is_idle(&obj->last_fence,
+							 &dev->struct_mutex) ||
 				obj->fence_reg != I915_FENCE_REG_NONE;
 
 			obj->tiling_mode = args->tiling_mode;
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 00ab5e9d2eb7..e57521dbddc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -74,7 +74,8 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;
 
-		req = i915_gem_active_get(&obj->last_read[i]);
+		req = i915_gem_active_get(&obj->last_read[i],
+					  &obj->base.dev->struct_mutex);
 		if (req)
 			requests[n++] = req;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 585fe2bdadd5..0d2882ae7c89 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -742,18 +742,38 @@ unwind:
 #define i915_error_ggtt_object_create(dev_priv, src) \
 	i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
 
+/* The error capture is special as it tries to run underneath the normal
+ * locking rules - so we use the raw version of the i915_gem_active lookup.
+ */
+static inline uint32_t
+__active_get_seqno(struct i915_gem_active *active)
+{
+	return i915_gem_request_get_seqno(__i915_gem_active_peek(active));
+}
+
+static inline int
+__active_get_engine_id(struct i915_gem_active *active)
+{
+	struct intel_engine_cs *engine;
+
+	engine = i915_gem_request_get_engine(__i915_gem_active_peek(active));
+	return engine ? engine->id : -1;
+}
+
 static void capture_bo(struct drm_i915_error_buffer *err,
 		       struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
-	struct intel_engine_cs *engine;
 	int i;
 
 	err->size = obj->base.size;
 	err->name = obj->base.name;
+
 	for (i = 0; i < I915_NUM_ENGINES; i++)
-		err->rseqno[i] = i915_gem_active_get_seqno(&obj->last_read[i]);
-	err->wseqno = i915_gem_active_get_seqno(&obj->last_write);
+		err->rseqno[i] = __active_get_seqno(&obj->last_read[i]);
+	err->wseqno = __active_get_seqno(&obj->last_write);
+	err->engine = __active_get_engine_id(&obj->last_write);
+
 	err->gtt_offset = vma->node.start;
 	err->read_domains = obj->base.read_domains;
 	err->write_domain = obj->base.write_domain;
@@ -766,9 +786,6 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->purgeable = obj->madv != I915_MADV_WILLNEED;
 	err->userptr = obj->userptr.mm != NULL;
 	err->cache_level = obj->cache_level;
-
-	engine = i915_gem_active_get_engine(&obj->last_write);
-	err->engine = engine ? engine->id : -1;
 }
 
 static u32 capture_active_bo(struct drm_i915_error_buffer *err,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 94259613dfa6..d0d42ca2d7e6 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11370,7 +11370,8 @@ static bool use_mmio_flip(struct intel_engine_cs *engine,
 	if (resv && !reservation_object_test_signaled_rcu(resv, false))
 		return true;
 
-	return engine != i915_gem_active_get_engine(&obj->last_write);
+	return engine != i915_gem_active_get_engine(&obj->last_write,
+						    &obj->base.dev->struct_mutex);
 }
 
 static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
@@ -11673,7 +11674,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
 		engine = &dev_priv->engine[BCS];
 	} else if (INTEL_INFO(dev)->gen >= 7) {
-		engine = i915_gem_active_get_engine(&obj->last_write);
+		engine = i915_gem_active_get_engine(&obj->last_write,
+						    &obj->base.dev->struct_mutex);
 		if (engine == NULL || engine->id != RCS)
 			engine = &dev_priv->engine[BCS];
 	} else {
@@ -11694,7 +11696,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 	if (mmio_flip) {
 		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
 
-		work->flip_queued_req = i915_gem_active_get(&obj->last_write);
+		work->flip_queued_req = i915_gem_active_get(&obj->last_write,
+							    &obj->base.dev->struct_mutex);
 		schedule_work(&work->mmio_work);
 	} else {
 		request = i915_gem_request_alloc(engine, engine->last_context);
@@ -14038,7 +14041,8 @@ intel_prepare_plane_fb(struct drm_plane *plane,
 
 	if (ret == 0) {
 		to_intel_plane_state(new_state)->wait_req =
-			i915_gem_active_get(&obj->last_write);
+			i915_gem_active_get(&obj->last_write,
+					    &obj->base.dev->struct_mutex);
 	}
 
 	return ret;
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 33/73] drm/i915: Refactor blocking waits
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (31 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 32/73] drm/i915: Mark up i915_gem_active for locking annotation Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 34/73] drm/i915: Rename request->list to link for consistency Chris Wilson
                   ` (40 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Tidy up the for loops that handle waiting for read/write vs read-only
access.
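
The common shape after this patch, condensed for illustration (this
mirrors the hunks below rather than adding any new API): pick the
tracker array and engine mask once, then walk only the engines with
outstanding work.

	if (!readonly) {
		active = obj->last_read;	/* one tracker per engine */
		active_mask = obj->active;	/* one bit per busy engine */
	} else {
		active = &obj->last_write;	/* single write tracker */
		active_mask = 1;
	}

	for_each_active(active_mask, idx) {
		request = i915_gem_active_peek(&active[idx],
					       &obj->base.dev->struct_mutex);
		if (!request)
			continue;
		/* wait upon (and, on the blocking path, retire) request */
	}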

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 158 +++++++++++++++++++---------------------
 1 file changed, 75 insertions(+), 83 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 63d974b22b58..73b368683fd7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1339,6 +1339,23 @@ put_rpm:
 	return ret;
 }
 
+static void
+i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
+			       struct drm_i915_gem_request *req)
+{
+	int idx = req->engine->id;
+
+	if (i915_gem_active_peek(&obj->last_read[idx],
+				 &obj->base.dev->struct_mutex) == req)
+		i915_gem_object_retire__read(obj, idx);
+	else if (i915_gem_active_peek(&obj->last_write,
+				      &obj->base.dev->struct_mutex) == req)
+		i915_gem_object_retire__write(obj);
+
+	if (!i915_reset_in_progress(&req->i915->gpu_error))
+		i915_gem_request_retire_upto(req);
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1349,39 +1366,34 @@ int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly)
 {
-	struct drm_i915_gem_request *request;
 	struct reservation_object *resv;
-	int ret, i;
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx, ret;
 
-	if (readonly) {
-		request = i915_gem_active_peek(&obj->last_write,
-					       &obj->base.dev->struct_mutex);
-		if (request) {
-			ret = i915_wait_request(request);
-			if (ret)
-				return ret;
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-			i = request->engine->id;
-			if (i915_gem_active_peek(&obj->last_read[i],
-						 &obj->base.dev->struct_mutex) == request)
-				i915_gem_object_retire__read(obj, i);
-			else
-				i915_gem_object_retire__write(obj);
-		}
+	if (!readonly) {
+		active = obj->last_read;
+		active_mask = obj->active;
 	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			request = i915_gem_active_peek(&obj->last_read[i],
-						       &obj->base.dev->struct_mutex);
-			if (!request)
-				continue;
+		active_mask = 1;
+		active = &obj->last_write;
+	}
 
-			ret = i915_wait_request(request);
-			if (ret)
-				return ret;
+	for_each_active(active_mask, idx) {
+		struct drm_i915_gem_request *request;
 
-			i915_gem_object_retire__read(obj, i);
-		}
-		GEM_BUG_ON(obj->active);
+		request = i915_gem_active_peek(&active[idx],
+					       &obj->base.dev->struct_mutex);
+		if (!request)
+			continue;
+
+		ret = i915_wait_request(request);
+		if (ret)
+			return ret;
+
+		i915_gem_object_retire_request(obj, request);
 	}
 
 	resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -1397,23 +1409,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
-static void
-i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
-			       struct drm_i915_gem_request *req)
-{
-	int idx = req->engine->id;
-
-	if (i915_gem_active_peek(&obj->last_read[idx],
-				 &obj->base.dev->struct_mutex) == req)
-		i915_gem_object_retire__read(obj, idx);
-	else if (i915_gem_active_peek(&obj->last_write,
-				      &obj->base.dev->struct_mutex) == req)
-		i915_gem_object_retire__write(obj);
-
-	if (!i915_reset_in_progress(&req->i915->gpu_error))
-		i915_gem_request_retire_upto(req);
-}
-
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
  * as the object state may change during this call.
  */
@@ -1425,34 +1420,31 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+	struct i915_gem_active *active;
+	unsigned long active_mask;
 	int ret, i, n = 0;
 
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!dev_priv->mm.interruptible);
 
-	if (!obj->active)
+	active_mask = obj->active;
+	if (!active_mask)
 		return 0;
 
-	if (readonly) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_active_get(&obj->last_write,
-					  &obj->base.dev->struct_mutex);
-		if (req == NULL)
-			return 0;
-
-		requests[n++] = req;
+	if (!readonly) {
+		active = obj->last_read;
 	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			struct drm_i915_gem_request *req;
+		active_mask = 1;
+		active = &obj->last_write;
+	}
 
-			req = i915_gem_active_get(&obj->last_read[i],
-						  &obj->base.dev->struct_mutex);
-			if (req == NULL)
-				continue;
+	for_each_active(active_mask, i) {
+		struct drm_i915_gem_request *req;
 
+		req = i915_gem_active_get(&active[i],
+					  &obj->base.dev->struct_mutex);
+		if (req)
 			requests[n++] = req;
-		}
 	}
 
 	mutex_unlock(&dev->struct_mutex);
@@ -2934,33 +2926,33 @@ int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		     struct drm_i915_gem_request *to)
 {
-	const bool readonly = obj->base.pending_write_domain == 0;
-	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-	int ret, i, n;
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx;
 
-	if (!obj->active)
-		return 0;
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	n = 0;
-	if (readonly) {
-		struct drm_i915_gem_request *req;
+	active_mask = obj->active;
+	if (!active_mask)
+		return 0;
 
-		req = i915_gem_active_peek(&obj->last_write,
-					   &obj->base.dev->struct_mutex);
-		if (req)
-			requests[n++] = req;
+	if (obj->base.pending_write_domain) {
+		active = obj->last_read;
 	} else {
-		for (i = 0; i < I915_NUM_ENGINES; i++) {
-			struct drm_i915_gem_request *req;
-
-			req = i915_gem_active_peek(&obj->last_read[i],
-						   &obj->base.dev->struct_mutex);
-			if (req)
-				requests[n++] = req;
-		}
+		active_mask = 1;
+		active = &obj->last_write;
 	}
-	for (i = 0; i < n; i++) {
-		ret = __i915_gem_object_sync(obj, to, requests[i]);
+
+	for_each_active(active_mask, idx) {
+		struct drm_i915_gem_request *request;
+		int ret;
+
+		request = i915_gem_active_peek(&active[idx],
+					       &obj->base.dev->struct_mutex);
+		if (!request)
+			continue;
+
+		ret = __i915_gem_object_sync(obj, to, request);
 		if (ret)
 			return ret;
 	}
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 34/73] drm/i915: Rename request->list to link for consistency
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (32 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 33/73] drm/i915: Refactor blocking waits Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 35/73] drm/i915: Remove obsolete i915_gem_object_flush_active() Chris Wilson
                   ` (39 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

We use "list" to denote the list and "link" to denote an element on that
list. Rename request->list to match this idiom.
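
A toy illustration of the idiom (hypothetical structs, purely to show
the naming convention):

	struct engine {
		struct list_head request_list;	/* the list itself */
	};

	struct request {
		struct list_head link;		/* entry on request_list */
	};

	list_add_tail(&rq->link, &engine->request_list);
	list_for_each_entry(rq, &engine->request_list, link)
		/* each rq is visited via its link node */;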

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c     |  4 ++--
 drivers/gpu/drm/i915/i915_gem.c         | 10 +++++-----
 drivers/gpu/drm/i915/i915_gem_request.c | 12 ++++++------
 drivers/gpu/drm/i915/i915_gem_request.h |  4 ++--
 drivers/gpu/drm/i915/i915_gpu_error.c   |  4 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c |  6 +++---
 6 files changed, 20 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a22df97d3ea7..2f5a16797659 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -746,13 +746,13 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 		int count;
 
 		count = 0;
-		list_for_each_entry(req, &engine->request_list, list)
+		list_for_each_entry(req, &engine->request_list, link)
 			count++;
 		if (count == 0)
 			continue;
 
 		seq_printf(m, "%s requests: %d\n", engine->name, count);
-		list_for_each_entry(req, &engine->request_list, list) {
+		list_for_each_entry(req, &engine->request_list, link) {
 			struct task_struct *task;
 
 			rcu_read_lock();
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 73b368683fd7..259f97ccbe46 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2475,7 +2475,7 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 	 * extra delay for a recent interrupt is pointless. Hence, we do
 	 * not need an engine->irq_seqno_barrier() before the seqno reads.
 	 */
-	list_for_each_entry(request, &engine->request_list, list) {
+	list_for_each_entry(request, &engine->request_list, link) {
 		if (i915_gem_request_completed(request))
 			continue;
 
@@ -2497,7 +2497,7 @@ static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
 	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
 	i915_set_reset_status(request->ctx, ring_hung);
-	list_for_each_entry_continue(request, &engine->request_list, list)
+	list_for_each_entry_continue(request, &engine->request_list, link)
 		i915_set_reset_status(request->ctx, false);
 }
 
@@ -2546,7 +2546,7 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 
 		request = list_last_entry(&engine->request_list,
 					  struct drm_i915_gem_request,
-					  list);
+					  link);
 
 		i915_gem_request_retire_upto(request);
 	}
@@ -2609,7 +2609,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 
 		request = list_first_entry(&engine->request_list,
 					   struct drm_i915_gem_request,
-					   list);
+					   link);
 
 		if (!i915_gem_request_completed(request))
 			break;
@@ -2629,7 +2629,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 				       engine_list[engine->id]);
 
 		if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id],
-						      &obj->base.dev->struct_mutex)->list))
+						      &obj->base.dev->struct_mutex)->link))
 			break;
 
 		i915_gem_object_retire__read(obj, engine->id);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 11c19e7f82fa..7802156d481a 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -160,7 +160,7 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 {
 	trace_i915_gem_request_retire(request);
-	list_del_init(&request->list);
+	list_del_init(&request->link);
 
 	/* We know the GPU must have read the request to have
 	 * sent us the seqno + interrupt, so use the position
@@ -191,12 +191,12 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 
 	lockdep_assert_held(&req->i915->drm.struct_mutex);
 
-	if (list_empty(&req->list))
+	if (list_empty(&req->link))
 		return;
 
 	do {
 		tmp = list_first_entry(&engine->request_list,
-				       typeof(*tmp), list);
+				       typeof(*tmp), link);
 
 		i915_gem_request_retire(tmp);
 	} while (tmp != req);
@@ -317,7 +317,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 
 	/* Move the oldest request to the slab-cache (if not in use!) */
 	req = list_first_entry_or_null(&engine->request_list,
-				       typeof(*req), list);
+				       typeof(*req), link);
 	if (req && i915_gem_request_completed(req))
 		i915_gem_request_retire(req);
 
@@ -450,7 +450,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	request->emitted_jiffies = jiffies;
 	request->previous_seqno = engine->last_submitted_seqno;
 	smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
-	list_add_tail(&request->list, &engine->request_list);
+	list_add_tail(&request->link, &engine->request_list);
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
@@ -570,7 +570,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
 	might_sleep();
 
-	if (list_empty(&req->list))
+	if (list_empty(&req->link))
 		return 0;
 
 	if (i915_gem_request_completed(req))
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 9e56a2c63b0e..cb3fa58d4eb5 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -105,8 +105,8 @@ struct drm_i915_gem_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
-	/** global list entry for this request */
-	struct list_head list;
+	/** engine->request_list entry for this request */
+	struct list_head link;
 
 	struct drm_i915_file_private *file_priv;
 	/** file_priv list entry for this request */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 0d2882ae7c89..c19f72e1bcf7 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1168,7 +1168,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		i915_gem_record_active_context(engine, error, ee);
 
 		count = 0;
-		list_for_each_entry(request, &engine->request_list, list)
+		list_for_each_entry(request, &engine->request_list, link)
 			count++;
 
 		ee->num_requests = count;
@@ -1180,7 +1180,7 @@ static void i915_gem_record_rings(struct drm_i915_private *dev_priv,
 		}
 
 		count = 0;
-		list_for_each_entry(request, &engine->request_list, list) {
+		list_for_each_entry(request, &engine->request_list, link) {
 			struct drm_i915_error_request *erq;
 
 			if (count >= ee->num_requests) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 645454ddafea..34f3b57c7db3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2235,7 +2235,7 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 
 	req = list_entry(engine->request_list.prev,
 			 struct drm_i915_gem_request,
-			 list);
+			 link);
 
 	/* Make sure we do not trigger any retires */
 	return __i915_wait_request(req,
@@ -2284,7 +2284,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	 */
 	GEM_BUG_ON(!req->reserved_space);
 
-	list_for_each_entry(target, &engine->request_list, list) {
+	list_for_each_entry(target, &engine->request_list, link) {
 		unsigned space;
 
 		/*
@@ -2302,7 +2302,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 			break;
 	}
 
-	if (WARN_ON(&target->list == &engine->request_list))
+	if (WARN_ON(&target->link == &engine->request_list))
 		return -ENOSPC;
 
 	return i915_wait_request(target);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 35/73] drm/i915: Remove obsolete i915_gem_object_flush_active()
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (33 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 34/73] drm/i915: Rename request->list to link for consistency Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 36/73] drm/i915: Refactor activity tracking for requests Chris Wilson
                   ` (38 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Since we track requests, and requests are always added to the GPU fully
formed, we never have to flush an incomplete request; we know that any
given request will eventually complete without further action on our
part. In particular, i915_gem_active_get() already treats a completed
request as idle, so the wait ioctl needs no explicit flush before
acquiring its references.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 59 +++--------------------------------------
 1 file changed, 3 insertions(+), 56 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 259f97ccbe46..320a8e40d21b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2746,35 +2746,6 @@ out_rearm:
 }
 
 /**
- * Ensures that an object will eventually get non-busy by flushing any required
- * write domains, emitting any outstanding lazy request and retiring and
- * completed requests.
- * @obj: object to flush
- */
-static int
-i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
-{
-	int i;
-
-	if (!obj->active)
-		return 0;
-
-	for (i = 0; i < I915_NUM_ENGINES; i++) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_active_peek(&obj->last_read[i],
-					   &obj->base.dev->struct_mutex);
-		if (req == NULL)
-			continue;
-
-		if (i915_gem_request_completed(req))
-			i915_gem_object_retire__read(obj, i);
-	}
-
-	return 0;
-}
-
-/**
  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  * @dev: drm device pointer
  * @data: ioctl data blob
@@ -2820,24 +2791,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		return -ENOENT;
 	}
 
-	/* Need to make sure the object gets inactive eventually. */
-	ret = i915_gem_object_flush_active(obj);
-	if (ret)
-		goto out;
-
 	if (!obj->active)
 		goto out;
 
-	/* Do this after OLR check to make sure we make forward progress polling
-	 * on this IOCTL with a timeout == 0 (like busy ioctl)
-	 */
-	if (args->timeout_ns == 0) {
-		ret = -ETIME;
-		goto out;
-	}
-
-	i915_gem_object_put(obj);
-
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
 		struct drm_i915_gem_request *req;
 
@@ -2847,6 +2803,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 			requests[n++] = req;
 	}
 
+out:
+	i915_gem_object_put(obj);
 	mutex_unlock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++) {
@@ -2857,11 +2815,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		i915_gem_request_put(requests[i]);
 	}
 	return ret;
-
-out:
-	i915_gem_object_put(obj);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
 }
 
 static int
@@ -4032,13 +3985,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 
 	/* Count all active objects as busy, even if they are currently not used
 	 * by the gpu. Users of this interface expect objects to eventually
-	 * become non-busy without any further actions, therefore emit any
-	 * necessary flushes here.
+	 * become non-busy without any further actions.
 	 */
-	ret = i915_gem_object_flush_active(obj);
-	if (ret)
-		goto unref;
-
 	args->busy = 0;
 	if (obj->active) {
 		struct drm_i915_gem_request *req;
@@ -4056,7 +4004,6 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			args->busy |= req->engine->exec_id;
 	}
 
-unref:
 	i915_gem_object_put(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 36/73] drm/i915: Refactor activity tracking for requests
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (34 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 35/73] drm/i915: Remove obsolete i915_gem_object_flush_active() Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01 12:52   ` Joonas Lahtinen
  2016-08-01  9:10 ` [PATCH 37/73] drm/i915: Track requests inside each intel_ring Chris Wilson
                   ` (37 subsequent siblings)
  73 siblings, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

With the introduction of requests, we amplified the number of atomic
refcounted objects we use and update on every execbuffer; from none to
several references, and a set of references that need to be changed. We
also introduced interesting side-effects in the order of retiring
requests and objects.

Instead of independently tracking the last request for an object, track
the active objects for each request. The object will reside in the
buffer list of its most recent active request and so we reduce the kref
interchange to a list_move. Now retirements are entirely driven by the
request, dramatically simplifying activity tracking on the object
themselves, and removing the ambiguity between retiring objects and
retiring requests.

Furthermore, with activity tracking consolidated in one place, we can
look forward to using RCU to enable lockless lookup of
the current active requests for an object. In the future, we will be
able to query the status or wait upon rendering to an object without
even touching the struct_mutex BKL.

All told, less code, simpler and faster, and more extensible.

v2: Add a typedef for the function pointer for convenience later.
v3: Make the noop retirement callback explicit. Allow passing NULL to
init_gem_active(), which is expanded to a common noop function.
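
For orientation, the tracker after this patch looks roughly like the
sketch below. This is an approximation of the header changes (which are
only summarised in the diffstat here), so field and helper names may
differ from the actual patch:

	typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
					   struct drm_i915_gem_request *);

	struct i915_gem_active {
		struct drm_i915_gem_request *request;
		struct list_head link;	/* on the request's active list */
		i915_gem_retire_fn retire;
	};

	static void i915_gem_retire_noop(struct i915_gem_active *active,
					 struct drm_i915_gem_request *request)
	{
		/* Space deliberately left blank */
	}

	static inline void init_gem_active(struct i915_gem_active *active,
					   i915_gem_retire_fn fn)
	{
		active->request = NULL;
		INIT_LIST_HEAD(&active->link);
		active->retire = fn ?: i915_gem_retire_noop;
	}

When a request retires it walks its list of active trackers and invokes
each one's retire callback (e.g. i915_gem_object_retire__read() in the
diff below), so objects no longer need per-engine retirement
bookkeeping of their own.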

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile           |   1 -
 drivers/gpu/drm/i915/i915_drv.h         |  10 ---
 drivers/gpu/drm/i915/i915_gem.c         | 129 ++++++--------------------------
 drivers/gpu/drm/i915/i915_gem_debug.c   |  70 -----------------
 drivers/gpu/drm/i915/i915_gem_fence.c   |  11 +--
 drivers/gpu/drm/i915/i915_gem_request.c |  50 +++++++++++--
 drivers/gpu/drm/i915/i915_gem_request.h |  97 +++++++++++++++++-------
 drivers/gpu/drm/i915/intel_engine_cs.c  |   1 -
 drivers/gpu/drm/i915/intel_ringbuffer.h |  12 ---
 9 files changed, 138 insertions(+), 243 deletions(-)
 delete mode 100644 drivers/gpu/drm/i915/i915_gem_debug.c

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 6092f0ea24df..dda724f04445 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -25,7 +25,6 @@ i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o
 i915-y += i915_cmd_parser.o \
 	  i915_gem_batch_pool.o \
 	  i915_gem_context.o \
-	  i915_gem_debug.o \
 	  i915_gem_dmabuf.o \
 	  i915_gem_evict.o \
 	  i915_gem_execbuffer.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 17a206f19fcb..6b57d8ff7218 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -432,8 +432,6 @@ void intel_link_compute_m_n(int bpp, int nlanes,
 #define DRIVER_MINOR		6
 #define DRIVER_PATCHLEVEL	0
 
-#define WATCH_LISTS	0
-
 struct opregion_header;
 struct opregion_acpi;
 struct opregion_swsci;
@@ -2153,7 +2151,6 @@ struct drm_i915_gem_object {
 	struct drm_mm_node *stolen;
 	struct list_head global_list;
 
-	struct list_head engine_list[I915_NUM_ENGINES];
 	/** Used in execbuf to temporarily hold a ref */
 	struct list_head obj_exec_link;
 
@@ -3463,13 +3460,6 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
-/* i915_gem_debug.c */
-#if WATCH_LISTS
-int i915_verify_lists(struct drm_device *dev);
-#else
-#define i915_verify_lists(dev) 0
-#endif
-
 /* i915_debugfs.c */
 #ifdef CONFIG_DEBUG_FS
 int i915_debugfs_register(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 320a8e40d21b..c47c9f95eafa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,10 +43,6 @@
 
 static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int engine);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
 				  enum i915_cache_level level)
@@ -141,7 +137,6 @@ int i915_mutex_lock_interruptible(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
@@ -1339,23 +1334,6 @@ put_rpm:
 	return ret;
 }
 
-static void
-i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
-			       struct drm_i915_gem_request *req)
-{
-	int idx = req->engine->id;
-
-	if (i915_gem_active_peek(&obj->last_read[idx],
-				 &obj->base.dev->struct_mutex) == req)
-		i915_gem_object_retire__read(obj, idx);
-	else if (i915_gem_active_peek(&obj->last_write,
-				      &obj->base.dev->struct_mutex) == req)
-		i915_gem_object_retire__write(obj);
-
-	if (!i915_reset_in_progress(&req->i915->gpu_error))
-		i915_gem_request_retire_upto(req);
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1382,18 +1360,10 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 	}
 
 	for_each_active(active_mask, idx) {
-		struct drm_i915_gem_request *request;
-
-		request = i915_gem_active_peek(&active[idx],
-					       &obj->base.dev->struct_mutex);
-		if (!request)
-			continue;
-
-		ret = i915_wait_request(request);
+		ret = i915_gem_active_wait(&active[idx],
+					   &obj->base.dev->struct_mutex);
 		if (ret)
 			return ret;
-
-		i915_gem_object_retire_request(obj, request);
 	}
 
 	resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -1453,11 +1423,8 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 		ret = __i915_wait_request(requests[i], true, NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
-	for (i = 0; i < n; i++) {
-		if (ret == 0)
-			i915_gem_object_retire_request(obj, requests[i]);
+	for (i = 0; i < n; i++)
 		i915_gem_request_put(requests[i]);
-	}
 
 	return ret;
 }
@@ -2376,40 +2343,31 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 		i915_gem_object_get(obj);
 	obj->active |= intel_engine_flag(engine);
 
-	list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
 	i915_gem_active_set(&obj->last_read[engine->id], req);
 
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
 static void
-i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
+i915_gem_object_retire__write(struct i915_gem_active *active,
+			      struct drm_i915_gem_request *request)
 {
-	GEM_BUG_ON(!__i915_gem_active_is_busy(&obj->last_write));
-	GEM_BUG_ON(!(obj->active &
-		     intel_engine_flag(i915_gem_active_get_engine(&obj->last_write,
-								  &obj->base.dev->struct_mutex))));
+	struct drm_i915_gem_object *obj =
+		container_of(active, struct drm_i915_gem_object, last_write);
 
-	i915_gem_active_set(&obj->last_write, NULL);
 	intel_fb_obj_flush(obj, true, ORIGIN_CS);
 }
 
 static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
+i915_gem_object_retire__read(struct i915_gem_active *active,
+			     struct drm_i915_gem_request *request)
 {
-	struct intel_engine_cs *engine;
+	int idx = request->engine->id;
+	struct drm_i915_gem_object *obj =
+		container_of(active, struct drm_i915_gem_object, last_read[idx]);
 	struct i915_vma *vma;
 
-	GEM_BUG_ON(!__i915_gem_active_is_busy(&obj->last_read[idx]));
-	GEM_BUG_ON(!(obj->active & (1 << idx)));
-
-	list_del_init(&obj->engine_list[idx]);
-	i915_gem_active_set(&obj->last_read[idx], NULL);
-
-	engine = i915_gem_active_get_engine(&obj->last_write,
-					    &obj->base.dev->struct_mutex);
-	if (engine && engine->id == idx)
-		i915_gem_object_retire__write(obj);
+	GEM_BUG_ON((obj->active & (1 << idx)) == 0);
 
 	obj->active &= ~(1 << idx);
 	if (obj->active)
@@ -2419,15 +2377,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
-	list_move_tail(&obj->global_list,
-		       &to_i915(obj->base.dev)->mm.bound_list);
+	list_move_tail(&obj->global_list, &request->i915->mm.bound_list);
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
 		if (!list_empty(&vma->vm_link))
 			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	}
 
-	i915_gem_active_set(&obj->last_fence, NULL);
 	i915_gem_object_put(obj);
 }
 
@@ -2505,16 +2461,6 @@ static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
 	struct intel_ring *ring;
 
-	while (!list_empty(&engine->active_list)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&engine->active_list,
-				       struct drm_i915_gem_object,
-				       engine_list[engine->id]);
-
-		i915_gem_object_retire__read(obj, engine->id);
-	}
-
 	/* Mark all pending requests as complete so that any concurrent
 	 * (lockless) lookup doesn't try and wait upon the request as we
 	 * reset it.
@@ -2586,8 +2532,6 @@ void i915_gem_reset(struct drm_device *dev)
 	i915_gem_context_reset(dev);
 
 	i915_gem_restore_fences(dev);
-
-	WARN_ON(i915_verify_lists(dev));
 }
 
 /**
@@ -2597,13 +2541,6 @@ void i915_gem_reset(struct drm_device *dev)
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 {
-	WARN_ON(i915_verify_lists(engine->dev));
-
-	/* Retire requests first as we use it above for the early return.
-	 * If we retire requests last, we may use a later seqno and so clear
-	 * the requests lists without clearing the active list, leading to
-	 * confusion.
-	 */
 	while (!list_empty(&engine->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2616,26 +2553,6 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
 
 		i915_gem_request_retire_upto(request);
 	}
-
-	/* Move any buffers on the active list that are no longer referenced
-	 * by the ringbuffer to the flushing/inactive lists as appropriate,
-	 * before we free the context associated with the requests.
-	 */
-	while (!list_empty(&engine->active_list)) {
-		struct drm_i915_gem_object *obj;
-
-		obj = list_first_entry(&engine->active_list,
-				       struct drm_i915_gem_object,
-				       engine_list[engine->id]);
-
-		if (!list_empty(&i915_gem_active_peek(&obj->last_read[engine->id],
-						      &obj->base.dev->struct_mutex)->link))
-			break;
-
-		i915_gem_object_retire__read(obj, engine->id);
-	}
-
-	WARN_ON(i915_verify_lists(engine->dev));
 }
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
@@ -2818,8 +2735,7 @@ out:
 }
 
 static int
-__i915_gem_object_sync(struct drm_i915_gem_object *obj,
-		       struct drm_i915_gem_request *to,
+__i915_gem_object_sync(struct drm_i915_gem_request *to,
 		       struct drm_i915_gem_request *from)
 {
 	int ret;
@@ -2827,9 +2743,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 	if (to->engine == from->engine)
 		return 0;
 
-	if (i915_gem_request_completed(from))
-		return 0;
-
 	if (!i915.semaphores) {
 		ret = __i915_wait_request(from,
 					  from->i915->mm.interruptible,
@@ -2837,8 +2750,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
 					  NO_WAITBOOST);
 		if (ret)
 			return ret;
-
-		i915_gem_object_retire_request(obj, from);
 	} else {
 		int idx = intel_engine_sync_index(from->engine, to->engine);
 		if (from->fence.seqno <= from->engine->semaphore.sync_seqno[idx])
@@ -2905,7 +2816,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 		if (!request)
 			continue;
 
-		ret = __i915_gem_object_sync(obj, to, request);
+		ret = __i915_gem_object_sync(to, request);
 		if (ret)
 			return ret;
 	}
@@ -3041,7 +2952,6 @@ int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 			return ret;
 	}
 
-	WARN_ON(i915_verify_lists(dev));
 	return 0;
 }
 
@@ -4081,7 +3991,11 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 
 	INIT_LIST_HEAD(&obj->global_list);
 	for (i = 0; i < I915_NUM_ENGINES; i++)
-		INIT_LIST_HEAD(&obj->engine_list[i]);
+		init_request_active(&obj->last_read[i],
+				    i915_gem_object_retire__read);
+	init_request_active(&obj->last_write,
+			    i915_gem_object_retire__write);
+	init_request_active(&obj->last_fence, NULL);
 	INIT_LIST_HEAD(&obj->obj_exec_link);
 	INIT_LIST_HEAD(&obj->vma_list);
 	INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4574,7 +4488,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
 static void
 init_engine_lists(struct intel_engine_cs *engine)
 {
-	INIT_LIST_HEAD(&engine->active_list);
 	INIT_LIST_HEAD(&engine->request_list);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
deleted file mode 100644
index a56516482394..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright © 2008 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Keith Packard <keithp@keithp.com>
- *
- */
-
-#include <drm/drmP.h>
-#include <drm/i915_drm.h>
-#include "i915_drv.h"
-
-#if WATCH_LISTS
-int
-i915_verify_lists(struct drm_device *dev)
-{
-	static int warned;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_object *obj;
-	struct intel_engine_cs *engine;
-	int err = 0;
-
-	if (warned)
-		return 0;
-
-	for_each_engine(engine, dev_priv) {
-		list_for_each_entry(obj, &engine->active_list,
-				    engine_list[engine->id]) {
-			if (obj->base.dev != dev ||
-			    !atomic_read(&obj->base.refcount.refcount)) {
-				DRM_ERROR("%s: freed active obj %p\n",
-					  engine->name, obj);
-				err++;
-				break;
-			} else if (!obj->active ||
-				   obj->last_read_req[engine->id] == NULL) {
-				DRM_ERROR("%s: invalid active obj %p\n",
-					  engine->name, obj);
-				err++;
-			} else if (obj->base.write_domain) {
-				DRM_ERROR("%s: invalid write obj %p (w %x)\n",
-					  engine->name,
-					  obj, obj->base.write_domain);
-				err++;
-			}
-		}
-	}
-
-	return warned = err;
-}
-#endif /* WATCH_LIST */
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index a4ec4fecbea0..dbaab9ce29c9 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -261,15 +261,8 @@ static inline void i915_gem_object_fence_lost(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
 {
-	int ret;
-
-	ret = i915_gem_active_wait(&obj->last_fence,
-				   &obj->base.dev->struct_mutex);
-	if (ret)
-		return ret;
-
-	i915_gem_active_set(&obj->last_fence, NULL);
-	return 0;
+	return i915_gem_active_retire(&obj->last_fence,
+				      &obj->base.dev->struct_mutex);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 7802156d481a..cdaaeb6dd913 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -22,6 +22,8 @@
  *
  */
 
+#include <linux/prefetch.h>
+
 #include "i915_drv.h"
 
 static const char *i915_fence_get_driver_name(struct fence *fence)
@@ -157,8 +159,16 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
 	request->pid = NULL;
 }
 
+void i915_gem_retire_noop(struct i915_gem_active *active,
+			  struct drm_i915_gem_request *request)
+{
+	/* Space left intentionally blank */
+}
+
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 {
+	struct i915_gem_active *active, *next;
+
 	trace_i915_gem_request_retire(request);
 	list_del_init(&request->link);
 
@@ -172,6 +182,33 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	 */
 	request->ring->last_retired_head = request->postfix;
 
+	/* Walk through the active list, calling retire on each. This allows
+	 * objects to track their GPU activity and mark themselves as idle
+	 * when their *last* active request is completed (updating state
+	 * tracking lists for eviction, active references for GEM, etc).
+	 *
+	 * As the ->retire() may free the node, we decouple it first and
+	 * pass along the auxiliary information (to avoid dereferencing
+	 * the node after the callback).
+	 */
+	list_for_each_entry_safe(active, next, &request->active_list, link) {
+		/* In microbenchmarks or focusing upon time inside the kernel,
+		 * we may spend an inordinate amount of time simply handling
+		 * the retirement of requests and processing their callbacks.
+		 * Of which, this loop itself is particularly hot due to the
+		 * cache misses when jumping around the list of i915_gem_active.
+		 * So we try to keep this loop as streamlined as possible and
+		 * also prefetch the next i915_gem_active to try and hide
+		 * the likely cache miss.
+		 */
+		prefetchw(next);
+
+		INIT_LIST_HEAD(&active->link);
+		active->request = NULL;
+
+		active->retire(active, request);
+	}
+
 	i915_gem_request_remove_from_client(request);
 
 	if (request->previous_context) {
@@ -200,8 +237,6 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
 
 		i915_gem_request_retire(tmp);
 	} while (tmp != req);
-
-	WARN_ON(i915_verify_lists(engine->dev));
 }
 
 static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
@@ -336,6 +371,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 		   engine->fence_context,
 		   seqno);
 
+	INIT_LIST_HEAD(&req->active_list);
 	req->i915 = dev_priv;
 	req->engine = engine;
 	req->ctx = i915_gem_context_get(ctx);
@@ -570,9 +606,6 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
 	might_sleep();
 
-	if (list_empty(&req->link))
-		return 0;
-
 	if (i915_gem_request_completed(req))
 		return 0;
 
@@ -705,10 +738,13 @@ int i915_wait_request(struct drm_i915_gem_request *req)
 {
 	int ret;
 
-	GEM_BUG_ON(!req);
 	lockdep_assert_held(&req->i915->drm.struct_mutex);
+	GEM_BUG_ON(list_empty(&req->link));
 
-	ret = __i915_wait_request(req, req->i915->mm.interruptible, NULL, NULL);
+	ret = __i915_wait_request(req,
+				  req->i915->mm.interruptible,
+				  NULL,
+				  NULL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index cb3fa58d4eb5..70eeae6b0e1a 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -101,6 +101,7 @@ struct drm_i915_gem_request {
 	 * error state dump only).
 	 */
 	struct drm_i915_gem_object *batch_obj;
+	struct list_head active_list;
 
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
@@ -213,8 +214,12 @@ struct intel_rps_client;
 int __i915_wait_request(struct drm_i915_gem_request *req,
 			bool interruptible,
 			s64 *timeout,
-			struct intel_rps_client *rps);
-int __must_check i915_wait_request(struct drm_i915_gem_request *req);
+			struct intel_rps_client *rps)
+	__attribute__((nonnull(1)));
+
+int __must_check
+i915_wait_request(struct drm_i915_gem_request *req)
+	__attribute__((nonnull));
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
@@ -276,10 +281,39 @@ static inline bool i915_spin_request(const struct drm_i915_gem_request *request,
  * can then perform any action, such as delayed freeing of an active
  * resource including itself.
  */
+struct i915_gem_active;
+
+typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
+				   struct drm_i915_gem_request *);
+
 struct i915_gem_active {
 	struct drm_i915_gem_request *request;
+	struct list_head link;
+	i915_gem_retire_fn retire;
 };
 
+void i915_gem_retire_noop(struct i915_gem_active *,
+			  struct drm_i915_gem_request *request);
+
+/**
+ * init_request_active - prepares the activity tracker for use
+ * @active - the active tracker
+ * @func - a callback when the tracker is retired (becomes idle),
+ *         can be NULL
+ *
+ * init_request_active() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active request
+ * associated with it. When the last request becomes idle, when it is retired
+ * after completion, the optional callback @func is invoked.
+ */
+static inline void
+init_request_active(struct i915_gem_active *active,
+		    i915_gem_retire_fn retire)
+{
+	INIT_LIST_HEAD(&active->link);
+	active->retire = retire ?: i915_gem_retire_noop;
+}
+
 /**
  * i915_gem_active_set - updates the tracker to watch the current request
  * @active - the active tracker
@@ -293,7 +327,8 @@ static inline void
 i915_gem_active_set(struct i915_gem_active *active,
 		    struct drm_i915_gem_request *request)
 {
-	i915_gem_request_assign(&active->request, request);
+	list_move(&active->link, &request->active_list);
+	active->request = request;
 }
 
 static inline struct drm_i915_gem_request *
@@ -303,17 +338,23 @@ __i915_gem_active_peek(const struct i915_gem_active *active)
 }
 
 /**
- * i915_gem_active_peek - report the request being monitored
+ * i915_gem_active_peek - report the active request being monitored
  * @active - the active tracker
  *
- * i915_gem_active_peek() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the
- * caller must hold struct_mutex.
+ * i915_gem_active_peek() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, so the caller must hold struct_mutex.
  */
 static inline struct drm_i915_gem_request *
 i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
 {
-	return active->request;
+	struct drm_i915_gem_request *request;
+
+	request = active->request;
+	if (!request || i915_gem_request_completed(request))
+		return NULL;
+
+	return request;
 }
 
 /**
@@ -326,13 +367,7 @@ i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
 static inline struct drm_i915_gem_request *
 i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 {
-	struct drm_i915_gem_request *request;
-
-	request = i915_gem_active_peek(active, mutex);
-	if (!request || i915_gem_request_completed(request))
-		return NULL;
-
-	return i915_gem_request_get(request);
+	return i915_gem_request_get(i915_gem_active_peek(active, mutex));
 }
 
 /**
@@ -361,13 +396,7 @@ static inline bool
 i915_gem_active_is_idle(const struct i915_gem_active *active,
 			struct mutex *mutex)
 {
-	struct drm_i915_gem_request *request;
-
-	request = i915_gem_active_peek(active, mutex);
-	if (!request || i915_gem_request_completed(request))
-		return true;
-
-	return false;
+	return !i915_gem_active_peek(active, mutex);
 }
 
 /**
@@ -377,6 +406,9 @@ i915_gem_active_is_idle(const struct i915_gem_active *active,
  * i915_gem_active_wait() waits until the request is completed before
  * returning. Note that it does not guarantee that the request is
  * retired first, see i915_gem_active_retire().
+ *
+ * i915_gem_active_wait() returns immediately if the active
+ * request is already complete.
  */
 static inline int __must_check
 i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
@@ -387,7 +419,7 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
 	if (!request)
 		return 0;
 
-	return i915_wait_request(request);
+	return __i915_wait_request(request, true, NULL, NULL);
 }
 
 /**
@@ -400,10 +432,25 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
  * tracker is idle, the function returns immediately.
  */
 static inline int __must_check
-i915_gem_active_retire(const struct i915_gem_active *active,
+i915_gem_active_retire(struct i915_gem_active *active,
 		       struct mutex *mutex)
 {
-	return i915_gem_active_wait(active, mutex);
+	struct drm_i915_gem_request *request;
+	int ret;
+
+	request = active->request;
+	if (!request)
+		return 0;
+
+	ret = __i915_wait_request(request, true, NULL, NULL);
+	if (ret)
+		return ret;
+
+	list_del_init(&active->link);
+	active->request = NULL;
+	active->retire(active, request);
+
+	return 0;
 }
 
 /* Convenience functions for peeking at state inside active's request whilst
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index a5f2128dd5a3..7dc214dfc2a8 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -177,7 +177,6 @@ void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
  */
 void intel_engine_setup_common(struct intel_engine_cs *engine)
 {
-	INIT_LIST_HEAD(&engine->active_list);
 	INIT_LIST_HEAD(&engine->request_list);
 	INIT_LIST_HEAD(&engine->buffers);
 	INIT_LIST_HEAD(&engine->execlist_queue);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 39d2e43a868c..541c51022d4a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -293,18 +293,6 @@ struct intel_engine_cs {
 	u32 ctx_desc_template;
 
 	/**
-	 * List of objects currently involved in rendering from the
-	 * ringbuffer.
-	 *
-	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives.  last_read_req
-	 * represents when the rendering involved will be completed.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head active_list;
-
-	/**
 	 * List of breadcrumbs associated with GPU requests currently
 	 * outstanding.
 	 */
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 37/73] drm/i915: Track requests inside each intel_ring
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (35 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 36/73] drm/i915: Refactor activity tracking for requests Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 38/73] drm/i915: Convert intel_overlay to request tracking Chris Wilson
                   ` (36 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

By tracking each request occupying space inside an individual
intel_ring, we can greatly simplify the logic of tracking available
space, without having to worry about other timelines. (Each ring is an
ordered timeline of committed requests.)
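
As a rough sketch of the idea (with simplified, illustrative types and
helper names, not the driver code itself), each ring keeps its own
FIFO of requests, so finding a request whose retirement frees enough
space becomes a plain in-order walk:

#include <linux/list.h>

/* Illustrative stand-ins for intel_ring and its requests; the names
 * below are assumptions for this sketch, not the driver's exact ones.
 */
struct ring {
	struct list_head request_list;	/* oldest request first */
	int size, tail;
};

struct request {
	struct list_head ring_link;	/* entry in ring->request_list */
	int postfix;			/* ring offset freed on retire */
};

/* Find the oldest request whose retirement yields @bytes of space. */
static struct request *find_space(struct ring *ring, int bytes)
{
	struct request *rq;

	list_for_each_entry(rq, &ring->request_list, ring_link) {
		int space = rq->postfix - ring->tail;

		if (space <= 0)
			space += ring->size;	/* tail has wrapped */
		if (space >= bytes)
			return rq;		/* wait upon this one */
	}

	return NULL;	/* everything in flight and still not enough */
}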

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_request.c |  2 ++
 drivers/gpu/drm/i915/i915_gem_request.h |  3 +++
 drivers/gpu/drm/i915/intel_ringbuffer.c | 15 ++++-----------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  2 ++
 4 files changed, 11 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index cdaaeb6dd913..a91e79fc5553 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -180,6 +180,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 	 * Note this requires that we are always called in request
 	 * completion order.
 	 */
+	list_del(&request->ring_link);
 	request->ring->last_retired_head = request->postfix;
 
 	/* Walk through the active list, calling retire on each. This allows
@@ -487,6 +488,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	request->previous_seqno = engine->last_submitted_seqno;
 	smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
 	list_add_tail(&request->link, &engine->request_list);
+	list_add_tail(&request->ring_link, &ring->request_list);
 
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 70eeae6b0e1a..98dedf9152fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -109,6 +109,9 @@ struct drm_i915_gem_request {
 	/** engine->request_list entry for this request */
 	struct list_head link;
 
+	/** ring->request_list entry for this request */
+	struct list_head ring_link;
+
 	struct drm_i915_file_private *file_priv;
 	/** file_priv list entry for this request */
 	struct list_head client_list;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 34f3b57c7db3..51a1651eda7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2044,6 +2044,8 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 	ring->engine = engine;
 	list_add(&ring->link, &engine->buffers);
 
+	INIT_LIST_HEAD(&ring->request_list);
+
 	ring->size = size;
 	/* Workaround an erratum on the i830 which causes a hang if
 	 * the TAIL pointer points to within the last 2 cachelines
@@ -2266,7 +2268,6 @@ int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
 static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
 	struct intel_ring *ring = req->ring;
-	struct intel_engine_cs *engine = req->engine;
 	struct drm_i915_gem_request *target;
 
 	intel_ring_update_space(ring);
@@ -2284,17 +2285,9 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	 */
 	GEM_BUG_ON(!req->reserved_space);
 
-	list_for_each_entry(target, &engine->request_list, link) {
+	list_for_each_entry(target, &ring->request_list, ring_link) {
 		unsigned space;
 
-		/*
-		 * The request queue is per-engine, so can contain requests
-		 * from multiple ringbuffers. Here, we must ignore any that
-		 * aren't from the ringbuffer we're considering.
-		 */
-		if (target->ring != ring)
-			continue;
-
 		/* Would completion of this request free enough space? */
 		space = __intel_ring_space(target->postfix, ring->tail,
 					   ring->size);
@@ -2302,7 +2295,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 			break;
 	}
 
-	if (WARN_ON(&target->link == &engine->request_list))
+	if (WARN_ON(&target->ring_link == &ring->request_list))
 		return -ENOSPC;
 
 	return i915_wait_request(target);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 541c51022d4a..560011f4327d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -90,6 +90,8 @@ struct intel_ring {
 	struct intel_engine_cs *engine;
 	struct list_head link;
 
+	struct list_head request_list;
+
 	u32 head;
 	u32 tail;
 	int space;
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 38/73] drm/i915: Convert intel_overlay to request tracking
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (36 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 37/73] drm/i915: Track requests inside each intel_ring Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 39/73] drm/i915: Move the special case wait-request handling to its one caller Chris Wilson
                   ` (35 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

intel_overlay already tracks its last flip request, along with the
action to take after its completion. Refactor intel_overlay to reuse the
common i915_gem_active tracker.

v2: Now using i915_gem_retire_fn typedef
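
The pattern, condensed from the patch below (queue_overlay_off() is an
illustrative helper name for this sketch, not one added by the patch),
is to embed the tracker in its owner and recover the owner with
container_of() once the request retires:

struct intel_overlay {
	/* ... */
	struct i915_gem_active last_flip;	/* replaces last_flip_req */
};

static void intel_overlay_off_tail(struct i915_gem_active *active,
				   struct drm_i915_gem_request *req)
{
	struct intel_overlay *overlay =
		container_of(active, typeof(*overlay), last_flip);

	/* deferred teardown runs only once the flip request retires */
}

static void queue_overlay_off(struct intel_overlay *overlay,
			      struct drm_i915_gem_request *req)
{
	/* arm the tracker before submitting; retirement calls the tail */
	overlay->last_flip.retire = intel_overlay_off_tail;
	i915_gem_active_set(&overlay->last_flip, req);
}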

References: https://bugs.freedesktop.org/show_bug.cgi?id=93730
References: https://bugs.freedesktop.org/show_bug.cgi?id=96851
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_overlay.c | 82 +++++++++++++++---------------------
 1 file changed, 34 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 651efe4e468e..2c598d63c794 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -183,8 +183,7 @@ struct intel_overlay {
 	u32 flip_addr;
 	struct drm_i915_gem_object *reg_bo;
 	/* flip handling */
-	struct drm_i915_gem_request *last_flip_req;
-	void (*flip_tail)(struct intel_overlay *);
+	struct i915_gem_active last_flip;
 };
 
 static struct overlay_registers __iomem *
@@ -210,23 +209,24 @@ static void intel_overlay_unmap_regs(struct intel_overlay *overlay,
 		io_mapping_unmap(regs);
 }
 
-static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+static void intel_overlay_submit_request(struct intel_overlay *overlay,
 					 struct drm_i915_gem_request *req,
-					 void (*tail)(struct intel_overlay *))
+					 i915_gem_retire_fn retire)
 {
-	int ret;
-
-	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req, req);
+	GEM_BUG_ON(i915_gem_active_peek(&overlay->last_flip,
+					&overlay->i915->drm.struct_mutex));
+	overlay->last_flip.retire = retire;
+	i915_gem_active_set(&overlay->last_flip, req);
 	i915_add_request(req);
+}
 
-	overlay->flip_tail = tail;
-	ret = i915_wait_request(overlay->last_flip_req);
-	if (ret)
-		return ret;
-
-	i915_gem_request_assign(&overlay->last_flip_req, NULL);
-	return 0;
+static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
+					 struct drm_i915_gem_request *req,
+					 i915_gem_retire_fn retire)
+{
+	intel_overlay_submit_request(overlay, req, retire);
+	return i915_gem_active_retire(&overlay->last_flip,
+				      &overlay->i915->drm.struct_mutex);
 }
 
 static struct drm_i915_gem_request *alloc_request(struct intel_overlay *overlay)
@@ -306,25 +306,32 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	intel_ring_emit(ring, flip_addr);
 	intel_ring_advance(ring);
 
-	WARN_ON(overlay->last_flip_req);
-	i915_gem_request_assign(&overlay->last_flip_req, req);
-	i915_add_request(req);
+	intel_overlay_submit_request(overlay, req, NULL);
 
 	return 0;
 }
 
-static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
+static void intel_overlay_release_old_vid_tail(struct i915_gem_active *active,
+					       struct drm_i915_gem_request *req)
 {
+	struct intel_overlay *overlay =
+		container_of(active, typeof(*overlay), last_flip);
 	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
+	i915_gem_track_fb(obj, NULL,
+			  INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
+
 	i915_gem_object_ggtt_unpin(obj);
 	i915_gem_object_put(obj);
 
 	overlay->old_vid_bo = NULL;
 }
 
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
+static void intel_overlay_off_tail(struct i915_gem_active *active,
+				   struct drm_i915_gem_request *req)
 {
+	struct intel_overlay *overlay =
+		container_of(active, typeof(*overlay), last_flip);
 	struct drm_i915_gem_object *obj = overlay->vid_bo;
 
 	/* never have the overlay hw on without showing a frame */
@@ -387,27 +394,16 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 	}
 	intel_ring_advance(ring);
 
-	return intel_overlay_do_wait_request(overlay, req, intel_overlay_off_tail);
+	return intel_overlay_do_wait_request(overlay, req,
+					     intel_overlay_off_tail);
 }
 
 /* recover from an interruption due to a signal
  * We have to be careful not to repeat work forever and make forward progress. */
 static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
 {
-	int ret;
-
-	if (overlay->last_flip_req == NULL)
-		return 0;
-
-	ret = i915_wait_request(overlay->last_flip_req);
-	if (ret)
-		return ret;
-
-	if (overlay->flip_tail)
-		overlay->flip_tail(overlay);
-
-	i915_gem_request_assign(&overlay->last_flip_req, NULL);
-	return 0;
+	return i915_gem_active_retire(&overlay->last_flip,
+				      &overlay->i915->drm.struct_mutex);
 }
 
 /* Wait for pending overlay flip and release old frame.
@@ -452,13 +448,9 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
 						    intel_overlay_release_old_vid_tail);
 		if (ret)
 			return ret;
-	}
+	} else
+		intel_overlay_release_old_vid_tail(&overlay->last_flip, NULL);
 
-	intel_overlay_release_old_vid_tail(overlay);
-
-
-	i915_gem_track_fb(overlay->old_vid_bo, NULL,
-			  INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
 	return 0;
 }
 
@@ -471,7 +463,6 @@ void intel_overlay_reset(struct drm_i915_private *dev_priv)
 
 	intel_overlay_release_old_vid(overlay);
 
-	overlay->last_flip_req = NULL;
 	overlay->old_xscale = 0;
 	overlay->old_yscale = 0;
 	overlay->crtc = NULL;
@@ -882,12 +873,7 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
 	iowrite32(0, &regs->OCMD);
 	intel_overlay_unmap_regs(overlay, regs);
 
-	ret = intel_overlay_off(overlay);
-	if (ret != 0)
-		return ret;
-
-	intel_overlay_off_tail(overlay);
-	return 0;
+	return intel_overlay_off(overlay);
 }
 
 static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 39/73] drm/i915: Move the special case wait-request handling to its one caller
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (37 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 38/73] drm/i915: Convert intel_overlay to request tracking Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 40/73] drm/i915: Disable waitboosting for a saturated engine Chris Wilson
                   ` (34 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_request.c | 25 -------------------------
 drivers/gpu/drm/i915/i915_gem_request.h |  4 ----
 drivers/gpu/drm/i915/intel_ringbuffer.c | 18 +++++++++++++-----
 3 files changed, 13 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index a91e79fc5553..85ec5ca5c36b 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -731,28 +731,3 @@ complete:
 
 	return ret;
 }
-
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int i915_wait_request(struct drm_i915_gem_request *req)
-{
-	int ret;
-
-	lockdep_assert_held(&req->i915->drm.struct_mutex);
-	GEM_BUG_ON(list_empty(&req->link));
-
-	ret = __i915_wait_request(req,
-				  req->i915->mm.interruptible,
-				  NULL,
-				  NULL);
-	if (ret)
-		return ret;
-
-	/* If the GPU hung, we want to keep the requests to find the guilty. */
-	if (!i915_reset_in_progress(&req->i915->gpu_error))
-		i915_gem_request_retire_upto(req);
-
-	return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 98dedf9152fa..2d8d36976c41 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -220,10 +220,6 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 			struct intel_rps_client *rps)
 	__attribute__((nonnull(1)));
 
-int __must_check
-i915_wait_request(struct drm_i915_gem_request *req)
-	__attribute__((nonnull));
-
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 51a1651eda7f..5e29b14b878a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2269,6 +2269,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 {
 	struct intel_ring *ring = req->ring;
 	struct drm_i915_gem_request *target;
+	int ret;
 
 	intel_ring_update_space(ring);
 	if (ring->space >= bytes)
@@ -2298,7 +2299,18 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	if (WARN_ON(&target->ring_link == &ring->request_list))
 		return -ENOSPC;
 
-	return i915_wait_request(target);
+	ret = __i915_wait_request(target, true, NULL, NULL);
+	if (ret)
+		return ret;
+
+	if (i915_reset_in_progress(&target->i915->gpu_error))
+		return -EAGAIN;
+
+	i915_gem_request_retire_upto(target);
+
+	intel_ring_update_space(ring);
+	GEM_BUG_ON(ring->space < bytes);
+	return 0;
 }
 
 int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
@@ -2336,10 +2348,6 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 		int ret = wait_for_space(req, wait_bytes);
 		if (unlikely(ret))
 			return ret;
-
-		intel_ring_update_space(ring);
-		if (unlikely(ring->space < wait_bytes))
-			return -EAGAIN;
 	}
 
 	if (unlikely(need_wrap)) {
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 40/73] drm/i915: Disable waitboosting for a saturated engine
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (38 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 39/73] drm/i915: Move the special case wait-request handling to its one caller Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 41/73] drm/i915: s/__i915_wait_request/i915_wait_request/ Chris Wilson
                   ` (33 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

If the user floods the GPU with so many requests that the engine stalls
waiting for free space, don't automatically promote the GPU to maximum
frequencies. If the GPU really is saturated with work, it will migrate
to high clocks by itself; otherwise, it is merely a user flooding us
with busy-work.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 5e29b14b878a..04ce1252c136 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2299,7 +2299,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	if (WARN_ON(&target->ring_link == &ring->request_list))
 		return -ENOSPC;
 
-	ret = __i915_wait_request(target, true, NULL, NULL);
+	ret = __i915_wait_request(target, true, NULL, NO_WAITBOOST);
 	if (ret)
 		return ret;
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 41/73] drm/i915: s/__i915_wait_request/i915_wait_request/
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (39 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 40/73] drm/i915: Disable waitboosting for a saturated engine Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 42/73] drm/i915: Double check activity before relocations Chris Wilson
                   ` (32 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

There is only one wait-on-request function now, so drop the "expert"
indication of the leading __ prefix.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c         | 18 +++++++++---------
 drivers/gpu/drm/i915/i915_gem_request.c | 16 ++++++++--------
 drivers/gpu/drm/i915/i915_gem_request.h | 12 ++++++------
 drivers/gpu/drm/i915/i915_gem_userptr.c |  2 +-
 drivers/gpu/drm/i915/intel_display.c    | 14 +++++++-------
 drivers/gpu/drm/i915/intel_ringbuffer.c |  8 ++++----
 6 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c47c9f95eafa..d320d3711fa5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1420,7 +1420,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	mutex_unlock(&dev->struct_mutex);
 	ret = 0;
 	for (i = 0; ret == 0 && i < n; i++)
-		ret = __i915_wait_request(requests[i], true, NULL, rps);
+		ret = i915_wait_request(requests[i], true, NULL, rps);
 	mutex_lock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++)
@@ -2726,9 +2726,9 @@ out:
 
 	for (i = 0; i < n; i++) {
 		if (ret == 0)
-			ret = __i915_wait_request(requests[i], true,
-						  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
-						  to_rps_client(file));
+			ret = i915_wait_request(requests[i], true,
+						args->timeout_ns > 0 ? &args->timeout_ns : NULL,
+						to_rps_client(file));
 		i915_gem_request_put(requests[i]);
 	}
 	return ret;
@@ -2744,10 +2744,10 @@ __i915_gem_object_sync(struct drm_i915_gem_request *to,
 		return 0;
 
 	if (!i915.semaphores) {
-		ret = __i915_wait_request(from,
-					  from->i915->mm.interruptible,
-					  NULL,
-					  NO_WAITBOOST);
+		ret = i915_wait_request(from,
+					from->i915->mm.interruptible,
+					NULL,
+					NO_WAITBOOST);
 		if (ret)
 			return ret;
 	} else {
@@ -3712,7 +3712,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 	if (target == NULL)
 		return 0;
 
-	ret = __i915_wait_request(target, true, NULL, NULL);
+	ret = i915_wait_request(target, true, NULL, NULL);
 	i915_gem_request_put(target);
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 85ec5ca5c36b..8549375aa75e 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -70,9 +70,9 @@ static signed long i915_fence_wait(struct fence *fence,
 		timeout = NULL;
 	}
 
-	ret = __i915_wait_request(to_request(fence),
-				  interruptible, timeout,
-				  NO_WAITBOOST);
+	ret = i915_wait_request(to_request(fence),
+				interruptible, timeout,
+				NO_WAITBOOST);
 	if (ret == -ETIME)
 		return 0;
 
@@ -579,7 +579,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 }
 
 /**
- * __i915_wait_request - wait until execution of request has finished
+ * i915_wait_request - wait until execution of request has finished
  * @req: duh!
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
@@ -595,10 +595,10 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
  * Returns 0 if the request was found within the allotted time. Else returns the
  * errno with remaining time filled in timeout argument.
  */
-int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
-			s64 *timeout,
-			struct intel_rps_client *rps)
+int i915_wait_request(struct drm_i915_gem_request *req,
+		      bool interruptible,
+		      s64 *timeout,
+		      struct intel_rps_client *rps)
 {
 	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
 	DEFINE_WAIT(reset);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 2d8d36976c41..4241d49a024c 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -214,10 +214,10 @@ struct intel_rps_client;
 #define IS_RPS_CLIENT(p) (!IS_ERR(p))
 #define IS_RPS_USER(p) (!IS_ERR_OR_NULL(p))
 
-int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
-			s64 *timeout,
-			struct intel_rps_client *rps)
+int i915_wait_request(struct drm_i915_gem_request *req,
+		      bool interruptible,
+		      s64 *timeout,
+		      struct intel_rps_client *rps)
 	__attribute__((nonnull(1)));
 
 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine);
@@ -418,7 +418,7 @@ i915_gem_active_wait(const struct i915_gem_active *active, struct mutex *mutex)
 	if (!request)
 		return 0;
 
-	return __i915_wait_request(request, true, NULL, NULL);
+	return i915_wait_request(request, true, NULL, NULL);
 }
 
 /**
@@ -441,7 +441,7 @@ i915_gem_active_retire(struct i915_gem_active *active,
 	if (!request)
 		return 0;
 
-	ret = __i915_wait_request(request, true, NULL, NULL);
+	ret = i915_wait_request(request, true, NULL, NULL);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index e57521dbddc6..651a84ba840c 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -83,7 +83,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 	mutex_unlock(&dev->struct_mutex);
 
 	for (i = 0; i < n; i++)
-		__i915_wait_request(requests[i], false, NULL, NULL);
+		i915_wait_request(requests[i], false, NULL, NULL);
 
 	mutex_lock(&dev->struct_mutex);
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d0d42ca2d7e6..f9e85146194e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11463,9 +11463,9 @@ static void intel_mmio_flip_work_func(struct work_struct *w)
 	struct reservation_object *resv;
 
 	if (work->flip_queued_req)
-		WARN_ON(__i915_wait_request(work->flip_queued_req,
-					    false, NULL,
-					    NO_WAITBOOST));
+		WARN_ON(i915_wait_request(work->flip_queued_req,
+					  false, NULL,
+					  NO_WAITBOOST));
 
 	/* For framebuffer backed by dmabuf, wait for fence */
 	resv = i915_gem_object_get_dmabuf_resv(obj);
@@ -13508,8 +13508,8 @@ static int intel_atomic_prepare_commit(struct drm_device *dev,
 			if (!intel_plane_state->wait_req)
 				continue;
 
-			ret = __i915_wait_request(intel_plane_state->wait_req,
-						  true, NULL, NULL);
+			ret = i915_wait_request(intel_plane_state->wait_req,
+						true, NULL, NULL);
 			if (ret) {
 				/* Any hang should be swallowed by the wait */
 				WARN_ON(ret == -EIO);
@@ -13621,8 +13621,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		if (!intel_plane_state->wait_req)
 			continue;
 
-		ret = __i915_wait_request(intel_plane_state->wait_req,
-					  true, NULL, NULL);
+		ret = i915_wait_request(intel_plane_state->wait_req,
+					true, NULL, NULL);
 		/* EIO should be eaten, and we can't get interrupted in the
 		 * worker, and blocking commits have waited already. */
 		WARN_ON(ret);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 04ce1252c136..1659b34143c4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2240,9 +2240,9 @@ int intel_engine_idle(struct intel_engine_cs *engine)
 			 link);
 
 	/* Make sure we do not trigger any retires */
-	return __i915_wait_request(req,
-				   req->i915->mm.interruptible,
-				   NULL, NULL);
+	return i915_wait_request(req,
+				 req->i915->mm.interruptible,
+				 NULL, NULL);
 }
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -2299,7 +2299,7 @@ static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
 	if (WARN_ON(&target->ring_link == &ring->request_list))
 		return -ENOSPC;
 
-	ret = __i915_wait_request(target, true, NULL, NO_WAITBOOST);
+	ret = i915_wait_request(target, true, NULL, NO_WAITBOOST);
 	if (ret)
 		return ret;
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 42/73] drm/i915: Double check activity before relocations
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (40 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 41/73] drm/i915: s/__i915_wait_request/i915_wait_request/ Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 43/73] drm/i915: Move request list retirement to i915_gem_request.c Chris Wilson
                   ` (31 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

If the object is active and we need to perform a relocation upon it, we
need to take the slow relocation path. Before we do, double-check the
active requests to see if they have completed.
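
For reference, a plausible expansion of the for_each_active() iterator
used by the new object_is_idle() below (an assumption for illustration;
the driver's actual macro may differ in detail): walk the set bits of
the active engine mask, clearing each bit as it is visited.

#include <linux/bitops.h>

/* illustrative definition only, not necessarily the driver's */
#define for_each_active(mask, idx) \
	for (; (mask) ? ((idx) = __ffs(mask)), 1 : 0; (mask) &= ~BIT(idx))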

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 16 +++++++++++++++-
 1 file changed, 15 insertions(+), 1 deletion(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5e1fb85b708b..2f7173d7a7b0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -441,6 +441,20 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
 	return 0;
 }
 
+static bool object_is_idle(struct drm_i915_gem_object *obj)
+{
+	unsigned long active = obj->active;
+	int idx;
+
+	for_each_active(active, idx) {
+		if (!i915_gem_active_is_idle(&obj->last_read[idx],
+					     &obj->base.dev->struct_mutex))
+			return false;
+	}
+
+	return true;
+}
+
 static int
 i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 				   struct eb_vmas *eb,
@@ -524,7 +538,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
 	}
 
 	/* We can't wait for rendering with pagefaults disabled */
-	if (obj->active && pagefault_disabled())
+	if (pagefault_disabled() && !object_is_idle(obj))
 		return -EFAULT;
 
 	if (use_cpu_reloc(obj))
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 43/73] drm/i915: Move request list retirement to i915_gem_request.c
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (41 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 42/73] drm/i915: Double check activity before relocations Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 44/73] drm/i915: i915_vma_move_to_active prep patch Chris Wilson
                   ` (30 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

As the request list retirement is now free of implementation details,
we can move it closer to the rest of the request management.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c         | 44 ---------------------------------
 drivers/gpu/drm/i915/i915_gem_request.c | 35 ++++++++++++++++++++++++++
 2 files changed, 35 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d320d3711fa5..ac89c4c2008a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2534,50 +2534,6 @@ void i915_gem_reset(struct drm_device *dev)
 	i915_gem_restore_fences(dev);
 }
 
-/**
- * This function clears the request list as sequence numbers are passed.
- * @engine: engine to retire requests on
- */
-void
-i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
-{
-	while (!list_empty(&engine->request_list)) {
-		struct drm_i915_gem_request *request;
-
-		request = list_first_entry(&engine->request_list,
-					   struct drm_i915_gem_request,
-					   link);
-
-		if (!i915_gem_request_completed(request))
-			break;
-
-		i915_gem_request_retire_upto(request);
-	}
-}
-
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
-{
-	struct intel_engine_cs *engine;
-
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
-	if (dev_priv->gt.active_engines == 0)
-		return;
-
-	GEM_BUG_ON(!dev_priv->gt.awake);
-
-	for_each_engine(engine, dev_priv) {
-		i915_gem_retire_requests_ring(engine);
-		if (list_empty(&engine->request_list))
-			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
-	}
-
-	if (dev_priv->gt.active_engines == 0)
-		queue_delayed_work(dev_priv->wq,
-				   &dev_priv->gt.idle_work,
-				   msecs_to_jiffies(100));
-}
-
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8549375aa75e..6faa84832ade 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -731,3 +731,38 @@ complete:
 
 	return ret;
 }
+
+void i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
+{
+	struct drm_i915_gem_request *request, *next;
+
+	list_for_each_entry_safe(request, next, &engine->request_list, link) {
+		if (!i915_gem_request_completed(request))
+			break;
+
+		i915_gem_request_retire(request);
+	}
+}
+
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+	if (dev_priv->gt.active_engines == 0)
+		return;
+
+	GEM_BUG_ON(!dev_priv->gt.awake);
+
+	for_each_engine(engine, dev_priv) {
+		i915_gem_retire_requests_ring(engine);
+		if (list_empty(&engine->request_list))
+			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
+	}
+
+	if (dev_priv->gt.active_engines == 0)
+		queue_delayed_work(dev_priv->wq,
+				   &dev_priv->gt.idle_work,
+				   msecs_to_jiffies(100));
+}
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 44/73] drm/i915: i915_vma_move_to_active prep patch
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (42 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 43/73] drm/i915: Move request list retirement to i915_gem_request.c Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 45/73] drm/i915: Track active vma requests Chris Wilson
                   ` (29 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

This patch is broken out of the next one just to remove the code motion
from that patch and make it more readable. What we do here is move
i915_vma_move_to_active() to i915_gem_execbuffer.c and put the three
stages (read, write, fenced) together so that future modifications to
active handling are all located in the same spot. The importance of this
is that we can more simply control the order in which the requests are
placed on the retirement list (i.e. control the order in which we retire
them and so control the lifetimes to avoid having to hold onto
references).
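
After the consolidation, a call site only has to declare how the
request uses the vma; a minimal usage sketch (flag values taken from
the existing execbuffer flags):

	/* one helper updates read, write and fence tracking together */
	i915_vma_move_to_active(vma, req,
				EXEC_OBJECT_WRITE | EXEC_OBJECT_NEEDS_FENCE);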

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h              |  3 +-
 drivers/gpu/drm/i915/i915_gem.c              | 18 --------
 drivers/gpu/drm/i915/i915_gem_context.c      |  9 ++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   | 65 ++++++++++++++++++----------
 drivers/gpu/drm/i915/i915_gem_render_state.c |  2 +-
 5 files changed, 51 insertions(+), 46 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6b57d8ff7218..a1c4c768c0c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3153,7 +3153,8 @@ int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
 			 struct drm_i915_gem_request *to);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct drm_i915_gem_request *req);
+			     struct drm_i915_gem_request *req,
+			     unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ac89c4c2008a..da0e4c01fd25 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2330,24 +2330,6 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 	return obj->mapping;
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct drm_i915_gem_request *req)
-{
-	struct drm_i915_gem_object *obj = vma->obj;
-	struct intel_engine_cs *engine;
-
-	engine = i915_gem_request_get_engine(req);
-
-	/* Add a reference if we're newly entering the active list. */
-	if (obj->active == 0)
-		i915_gem_object_get(obj);
-	obj->active |= intel_engine_flag(engine);
-
-	i915_gem_active_set(&obj->last_read[engine->id], req);
-
-	list_move_tail(&vma->vm_link, &vma->vm->active_list);
-}
-
 static void
 i915_gem_object_retire__write(struct i915_gem_active *active,
 			      struct drm_i915_gem_request *request)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 24383f0ea3a4..823e74c86eaa 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -816,8 +816,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	 * MI_SET_CONTEXT instead of when the next seqno has completed.
 	 */
 	if (from != NULL) {
-		from->engine[RCS].state->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
-		i915_vma_move_to_active(i915_gem_obj_to_ggtt(from->engine[RCS].state), req);
+		struct drm_i915_gem_object *obj = from->engine[RCS].state;
+
 		/* As long as MI_SET_CONTEXT is serializing, ie. it flushes the
 		 * whole damn pipeline, we don't need to explicitly mark the
 		 * object dirty. The only exception is that the context must be
@@ -825,10 +825,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * able to defer doing this until we know the object would be
 		 * swapped, but there is no way to do that yet.
 		 */
-		from->engine[RCS].state->dirty = 1;
+		obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION;
+		i915_vma_move_to_active(i915_gem_obj_to_ggtt(obj), req, 0);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_ggtt_unpin(from->engine[RCS].state);
+		i915_gem_object_ggtt_unpin(obj);
 		i915_gem_context_put(from);
 	}
 	engine->last_context = i915_gem_context_get(to);
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2f7173d7a7b0..8bf20f52b79f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1143,43 +1143,64 @@ i915_gem_validate_context(struct drm_device *dev, struct drm_file *file,
 	return ctx;
 }
 
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct drm_i915_gem_request *req,
+			     unsigned int flags)
+{
+	struct drm_i915_gem_object *obj = vma->obj;
+	const unsigned int idx = req->engine->id;
+
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+
+	obj->dirty = 1; /* be paranoid  */
+
+	/* Add a reference if we're newly entering the active list. */
+	if (obj->active == 0)
+		i915_gem_object_get(obj);
+	obj->active |= 1 << idx;
+	i915_gem_active_set(&obj->last_read[idx], req);
+
+	if (flags & EXEC_OBJECT_WRITE) {
+		i915_gem_active_set(&obj->last_write, req);
+
+		intel_fb_obj_invalidate(obj, ORIGIN_CS);
+
+		/* update for the implicit flush after a batch */
+		obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
+	}
+
+	if (flags & EXEC_OBJECT_NEEDS_FENCE) {
+		i915_gem_active_set(&obj->last_fence, req);
+		if (flags & __EXEC_OBJECT_HAS_FENCE) {
+			struct drm_i915_private *dev_priv = req->i915;
+
+			list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
+				       &dev_priv->mm.fence_list);
+		}
+	}
+
+	list_move_tail(&vma->vm_link, &vma->vm->active_list);
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
 {
-	struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, vmas, exec_list) {
-		struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 		struct drm_i915_gem_object *obj = vma->obj;
 		u32 old_read = obj->base.read_domains;
 		u32 old_write = obj->base.write_domain;
 
-		obj->dirty = 1; /* be paranoid  */
 		obj->base.write_domain = obj->base.pending_write_domain;
-		if (obj->base.write_domain == 0)
+		if (obj->base.write_domain)
+			vma->exec_entry->flags |= EXEC_OBJECT_WRITE;
+		else
 			obj->base.pending_read_domains |= obj->base.read_domains;
 		obj->base.read_domains = obj->base.pending_read_domains;
 
-		i915_vma_move_to_active(vma, req);
-		if (obj->base.write_domain) {
-			i915_gem_active_set(&obj->last_write, req);
-
-			intel_fb_obj_invalidate(obj, ORIGIN_CS);
-
-			/* update for the implicit flush after a batch */
-			obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS;
-		}
-		if (entry->flags & EXEC_OBJECT_NEEDS_FENCE) {
-			i915_gem_active_set(&obj->last_fence, req);
-			if (entry->flags & __EXEC_OBJECT_HAS_FENCE) {
-				struct drm_i915_private *dev_priv = engine->i915;
-				list_move_tail(&dev_priv->fence_regs[obj->fence_reg].lru_list,
-					       &dev_priv->mm.fence_list);
-			}
-		}
-
+		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index f85c5505bce2..90236672ac1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -217,7 +217,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 			goto err_unpin;
 	}
 
-	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req);
+	i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), req, 0);
 err_unpin:
 	i915_gem_object_ggtt_unpin(so.obj);
 err_obj:
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 45/73] drm/i915: Track active vma requests
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (43 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 44/73] drm/i915: i915_vma_move_to_active prep patch Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 46/73] drm/i915: Release vma when the handle is closed Chris Wilson
                   ` (28 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Hook the vma itself into the i915_gem_request_retire() so that we can
accurately track when a solitary vma is inactive (as opposed to having
to wait for the entire object to be idle). This improves the interaction
when using multiple contexts (with full-ppgtt) and eliminates some
frequent list walking when retiring objects after a completed request.

A side-effect is that we get an active vma reference for free. The
consequence of this is shown in the next patch...
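
As a rough sketch (using the helpers introduced below), the per-vma
bookkeeping amounts to a per-engine bitmask plus a retirement callback:

	const unsigned int idx = req->engine->id;

	/* on submission (in i915_vma_move_to_active) */
	i915_vma_set_active(vma, idx);
	i915_gem_active_set(&vma->last_read[idx], req);

	/* on retirement (in i915_vma_retire) */
	i915_vma_clear_active(vma, idx);
	if (!i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);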

v2: Update inline names to be consistent with
i915_gem_object_get_active()

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_gem.c            | 50 +++++++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 10 +++++-
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 20 ++++++++++++
 drivers/gpu/drm/i915/i915_gem_gtt.h        | 33 ++++++++++++++++++++
 5 files changed, 91 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2f5a16797659..6285d50e6876 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -365,7 +365,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 				continue;
 		}
 
-		if (obj->active) /* XXX per-vma statistic */
+		if (i915_vma_is_active(vma))
 			stats->active += vma->node.size;
 		else
 			stats->inactive += vma->node.size;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index da0e4c01fd25..3ebbeb4e0a24 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2347,7 +2347,6 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
 	int idx = request->engine->id;
 	struct drm_i915_gem_object *obj =
 		container_of(active, struct drm_i915_gem_object, last_read[idx]);
-	struct i915_vma *vma;
 
 	GEM_BUG_ON((obj->active & (1 << idx)) == 0);
 
@@ -2359,12 +2358,9 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
 	 * so that we don't steal from recently used but inactive objects
 	 * (unless we are forced to ofc!)
 	 */
-	list_move_tail(&obj->global_list, &request->i915->mm.bound_list);
-
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (!list_empty(&vma->vm_link))
-			list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	}
+	if (obj->bind_count)
+		list_move_tail(&obj->global_list,
+			       &request->i915->mm.bound_list);
 
 	i915_gem_object_put(obj);
 }
@@ -2797,8 +2793,29 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	unsigned long active;
 	int ret;
 
+	/* First wait upon any activity as retiring the request may
+	 * have side-effects such as unpinning or even unbinding this vma.
+	 */
+	active = i915_vma_get_active(vma);
+	if (active && wait) {
+		int idx;
+
+		for_each_active(active, idx) {
+			ret = i915_gem_active_retire(&vma->last_read[idx],
+						   &vma->vm->dev->struct_mutex);
+			if (ret)
+				return ret;
+		}
+
+		GEM_BUG_ON(i915_vma_is_active(vma));
+	}
+
+	if (vma->pin_count)
+		return -EBUSY;
+
 	if (list_empty(&vma->obj_link))
 		return 0;
 
@@ -2807,18 +2824,9 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 		return 0;
 	}
 
-	if (vma->pin_count)
-		return -EBUSY;
-
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!obj->pages);
 
-	if (wait) {
-		ret = i915_gem_object_wait_rendering(obj, false);
-		if (ret)
-			return ret;
-	}
-
 	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 		i915_gem_object_finish_gtt(obj);
 
@@ -3201,9 +3209,6 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	uint32_t old_write_domain, old_read_domains;
 	struct i915_vma *vma;
 	int ret;
@@ -3256,9 +3261,10 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 	/* And bump the LRU for this access */
 	vma = i915_gem_obj_to_ggtt(obj);
-	if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
-		list_move_tail(&vma->vm_link,
-			       &ggtt->base.inactive_list);
+	if (vma &&
+	    drm_mm_node_allocated(&vma->node) &&
+	    !i915_vma_is_active(vma))
+		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 8bf20f52b79f..5e3b5054f72d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1154,7 +1154,13 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
 	obj->dirty = 1; /* be paranoid  */
 
-	/* Add a reference if we're newly entering the active list. */
+	/* Add a reference if we're newly entering the active list.
+	 * The order in which we add operations to the retirement queue is
+	 * vital here: mark_active adds to the start of the callback list,
+	 * such that subsequent callbacks are called first. Therefore we
+	 * add the active reference first and queue for it to be dropped
+	 * *last*.
+	 */
 	if (obj->active == 0)
 		i915_gem_object_get(obj);
 	obj->active |= 1 << idx;
@@ -1179,6 +1185,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 		}
 	}
 
+	i915_vma_set_active(vma, idx);
+	i915_gem_active_set(&vma->last_read[idx], req);
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index edae5c9862a7..f5f563d0a1ce 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3320,12 +3320,30 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	i915_ggtt_flush(dev_priv);
 }
 
+static void
+i915_vma_retire(struct i915_gem_active *active,
+		struct drm_i915_gem_request *rq)
+{
+	const unsigned int idx = rq->engine->id;
+	struct i915_vma *vma =
+		container_of(active, struct i915_vma, last_read[idx]);
+
+	GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
+
+	i915_vma_clear_active(vma, idx);
+	if (i915_vma_is_active(vma))
+		return;
+
+	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+}
+
 static struct i915_vma *
 __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 		      struct i915_address_space *vm,
 		      const struct i915_ggtt_view *ggtt_view)
 {
 	struct i915_vma *vma;
+	int i;
 
 	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
 		return ERR_PTR(-EINVAL);
@@ -3337,6 +3355,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	INIT_LIST_HEAD(&vma->vm_link);
 	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->exec_list);
+	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
+		init_request_active(&vma->last_read[i], i915_vma_retire);
 	vma->vm = vm;
 	vma->obj = obj;
 	vma->is_ggtt = i915_is_ggtt(vm);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 529fb483afc8..674e118e3686 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -36,6 +36,8 @@
 
 #include <linux/io-mapping.h>
 
+#include "i915_gem_request.h"
+
 struct drm_i915_file_private;
 
 typedef uint32_t gen6_pte_t;
@@ -179,6 +181,9 @@ struct i915_vma {
 	struct i915_address_space *vm;
 	void __iomem *iomap;
 
+	unsigned int active;
+	struct i915_gem_active last_read[I915_NUM_ENGINES];
+
 	/** Flags and address space this VMA is bound to */
 #define GLOBAL_BIND	(1<<0)
 #define LOCAL_BIND	(1<<1)
@@ -222,6 +227,34 @@ struct i915_vma {
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
+static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
+{
+	return vma->active;
+}
+
+static inline bool i915_vma_is_active(const struct i915_vma *vma)
+{
+	return i915_vma_get_active(vma);
+}
+
+static inline void i915_vma_set_active(struct i915_vma *vma,
+				       unsigned int engine)
+{
+	vma->active |= BIT(engine);
+}
+
+static inline void i915_vma_clear_active(struct i915_vma *vma,
+					 unsigned int engine)
+{
+	vma->active &= ~BIT(engine);
+}
+
+static inline bool i915_vma_has_active_engine(const struct i915_vma *vma,
+					      unsigned int engine)
+{
+	return vma->active & BIT(engine);
+}
+
 struct i915_page_dma {
 	struct page *page;
 	union {
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 46/73] drm/i915: Release vma when the handle is closed
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (44 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 45/73] drm/i915: Track active vma requests Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01 11:26   ` Joonas Lahtinen
  2016-08-01  9:10 ` [PATCH 47/73] drm/i915: Mark the context and address space as closed Chris Wilson
                   ` (27 subsequent siblings)
  73 siblings, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

In order to prevent a leak of the vma on shared objects, we need to
hook into the object_close callback to destroy the vma on the object for
this file. However, if we destroyed that vma immediately we might cause
unexpected application stalls as we try to unbind a busy vma - hence we
defer the unbind until the vma is retired.

v2: Keep vma allocated until closed. This is useful for a later
optimisation, but it is required now in order to handle potential
recursion of i915_vma_unbind() by retiring itself.
v3: Comments are important.
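
Condensed, the lifecycle looks like this (illustrative only; the diff
below adds extra pin_count handling to guard against recursive unbinds):

	i915_vma_close(vma);	/* mark closed, unlink from the object */
	/* idle and unpinned: unbound (and destroyed) immediately */
	/* still busy: i915_vma_retire() unbinds it once the last
	 * request completes, and unbinding a closed vma destroys it */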

Testcase: igt/gem_ppgtt/flink-and-close-vma-leak
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c       |  1 +
 drivers/gpu/drm/i915/i915_drv.h       |  4 +-
 drivers/gpu/drm/i915/i915_gem.c       | 88 ++++++++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_evict.c |  8 +---
 drivers/gpu/drm/i915/i915_gem_gtt.c   | 25 ++++++++++
 drivers/gpu/drm/i915/i915_gem_gtt.h   |  1 +
 6 files changed, 77 insertions(+), 50 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 478e8168ad94..869baa6a5196 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2574,6 +2574,7 @@ static struct drm_driver driver = {
 	.postclose = i915_driver_postclose,
 	.set_busid = drm_pci_set_busid,
 
+	.gem_close_object = i915_gem_close_object,
 	.gem_free_object = i915_gem_free_object,
 	.gem_vm_ops = &i915_gem_vm_ops,
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a1c4c768c0c3..f470ea195253 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3014,8 +3014,8 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
 						  size_t size);
 struct drm_i915_gem_object *i915_gem_object_create_from_data(
 		struct drm_device *dev, const void *data, size_t size);
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
-void i915_gem_vma_destroy(struct i915_vma *vma);
 
 /* Flags used by pin/bind&friends. */
 #define PIN_MAPPABLE	(1<<0)
@@ -3048,6 +3048,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
  * _guarantee_ VMA in question is _not in use_ anywhere.
  */
 int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
+void i915_vma_close(struct i915_vma *vma);
+void i915_vma_destroy(struct i915_vma *vma);
 
 int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3ebbeb4e0a24..74d24d7e3ca2 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2596,6 +2596,19 @@ out_rearm:
 	}
 }
 
+void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
+{
+	struct drm_i915_gem_object *obj = to_intel_bo(gem);
+	struct drm_i915_file_private *fpriv = file->driver_priv;
+	struct i915_vma *vma, *vn;
+
+	mutex_lock(&obj->base.dev->struct_mutex);
+	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
+		if (vma->vm->file == fpriv)
+			i915_vma_close(vma);
+	mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
 /**
  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
  * @dev: drm device pointer
@@ -2803,26 +2816,32 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	if (active && wait) {
 		int idx;
 
+		/* When a closed VMA is retired, it is unbound - eek.
+		 * In order to prevent it from being recursively closed,
+		 * take a pin on the vma so that the second unbind is
+		 * aborted.
+		 */
+		vma->pin_count++;
+
 		for_each_active(active, idx) {
 			ret = i915_gem_active_retire(&vma->last_read[idx],
 						   &vma->vm->dev->struct_mutex);
 			if (ret)
-				return ret;
+				break;
 		}
 
+		vma->pin_count--;
+		if (ret)
+			return ret;
+
 		GEM_BUG_ON(i915_vma_is_active(vma));
 	}
 
 	if (vma->pin_count)
 		return -EBUSY;
 
-	if (list_empty(&vma->obj_link))
-		return 0;
-
-	if (!drm_mm_node_allocated(&vma->node)) {
-		i915_gem_vma_destroy(vma);
-		return 0;
-	}
+	if (!drm_mm_node_allocated(&vma->node))
+		goto destroy;
 
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!obj->pages);
@@ -2855,7 +2874,6 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	}
 
 	drm_mm_remove_node(&vma->node);
-	i915_gem_vma_destroy(vma);
 
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
@@ -2869,6 +2887,10 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	 */
 	i915_gem_object_unpin_pages(obj);
 
+destroy:
+	if (unlikely(vma->closed))
+		i915_vma_destroy(vma);
+
 	return 0;
 }
 
@@ -3043,7 +3065,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 
 		if (offset & (alignment - 1) || offset + size > end) {
 			ret = -EINVAL;
-			goto err_free_vma;
+			goto err_vma;
 		}
 		vma->node.start = offset;
 		vma->node.size = size;
@@ -3055,7 +3077,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
 		}
 		if (ret)
-			goto err_free_vma;
+			goto err_vma;
 	} else {
 		if (flags & PIN_HIGH) {
 			search_flag = DRM_MM_SEARCH_BELOW;
@@ -3080,7 +3102,7 @@ search_free:
 			if (ret == 0)
 				goto search_free;
 
-			goto err_free_vma;
+			goto err_vma;
 		}
 	}
 	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
@@ -3101,8 +3123,7 @@ search_free:
 
 err_remove_node:
 	drm_mm_remove_node(&vma->node);
-err_free_vma:
-	i915_gem_vma_destroy(vma);
+err_vma:
 	vma = ERR_PTR(ret);
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
@@ -4051,21 +4072,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 
 	trace_i915_gem_object_destroy(obj);
 
+	/* All file-owned VMA should have been released by this point through
+	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
+	 * However, the object may also be bound into the global GTT (e.g.
+	 * older GPUs without per-process support, or for direct access through
+	 * the GTT either for the user or for scanout). Those VMA still need
+	 * to be unbound now.
+	 */
 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
-		int ret;
-
+		GEM_BUG_ON(!vma->is_ggtt);
+		GEM_BUG_ON(i915_vma_is_active(vma));
 		vma->pin_count = 0;
-		ret = __i915_vma_unbind_no_wait(vma);
-		if (WARN_ON(ret == -ERESTARTSYS)) {
-			bool was_interruptible;
-
-			was_interruptible = dev_priv->mm.interruptible;
-			dev_priv->mm.interruptible = false;
-
-			WARN_ON(i915_vma_unbind(vma));
-
-			dev_priv->mm.interruptible = was_interruptible;
-		}
+		i915_vma_close(vma);
 	}
 	GEM_BUG_ON(obj->bind_count);
 
@@ -4129,22 +4147,6 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 	return NULL;
 }
 
-void i915_gem_vma_destroy(struct i915_vma *vma)
-{
-	WARN_ON(vma->node.allocated);
-
-	/* Keep the vma as a placeholder in the execbuffer reservation lists */
-	if (!list_empty(&vma->exec_list))
-		return;
-
-	if (!vma->is_ggtt)
-		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
-
-	list_del(&vma->obj_link);
-
-	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
-}
-
 static void
 i915_gem_stop_engines(struct drm_device *dev)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 81f7b4383d5e..3437ced76cb6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -182,8 +182,8 @@ found:
 				       struct i915_vma,
 				       exec_list);
 		if (drm_mm_scan_remove_block(&vma->node)) {
+			vma->pin_count++;
 			list_move(&vma->exec_list, &eviction_list);
-			i915_gem_object_get(vma->obj);
 			continue;
 		}
 		list_del_init(&vma->exec_list);
@@ -191,18 +191,14 @@ found:
 
 	/* Unbinding will emit any required flushes */
 	while (!list_empty(&eviction_list)) {
-		struct drm_i915_gem_object *obj;
-
 		vma = list_first_entry(&eviction_list,
 				       struct i915_vma,
 				       exec_list);
 
-		obj =  vma->obj;
 		list_del_init(&vma->exec_list);
+		vma->pin_count--;
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
-
-		i915_gem_object_put(obj);
 	}
 
 	return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f5f563d0a1ce..e0a864ae9e0b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3335,6 +3335,31 @@ i915_vma_retire(struct i915_gem_active *active,
 		return;
 
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
+	if (unlikely(vma->closed && !vma->pin_count))
+		WARN_ON(i915_vma_unbind(vma));
+}
+
+void i915_vma_destroy(struct i915_vma *vma)
+{
+	GEM_BUG_ON(vma->node.allocated);
+	GEM_BUG_ON(i915_vma_is_active(vma));
+	GEM_BUG_ON(!vma->closed);
+
+	list_del(&vma->vm_link);
+	if (!vma->is_ggtt)
+		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
+
+	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
+}
+
+void i915_vma_close(struct i915_vma *vma)
+{
+	GEM_BUG_ON(vma->closed);
+	vma->closed = true;
+
+	list_del_init(&vma->obj_link);
+	if (!i915_vma_is_active(vma) && !vma->pin_count)
+		WARN_ON(__i915_vma_unbind_no_wait(vma));
 }
 
 static struct i915_vma *
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 674e118e3686..a3ea1bda2b1b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -189,6 +189,7 @@ struct i915_vma {
 #define LOCAL_BIND	(1<<1)
 	unsigned int bound : 4;
 	bool is_ggtt : 1;
+	bool closed : 1;
 
 	/**
 	 * Support different GGTT views into the same object.
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 47/73] drm/i915: Mark the context and address space as closed
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (45 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 46/73] drm/i915: Release vma when the handle is closed Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 48/73] Revert "drm/i915: Clean up associated VMAs on context destruction" Chris Wilson
                   ` (26 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

When the user closes the context, mark it and the dependent address
space as closed. As we use an asynchronous destruct method, this has two
purposes. First, it allows us to flag the closed context and detect
internal errors if we try to create any new objects for it (as it is
removed from the user's namespace, these should be internal bugs only).
And secondly, it allows us to immediately reap stale vma.
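
In outline (condensed from the diff below):

	static void context_close(struct i915_gem_context *ctx)
	{
		ctx->closed = true;
		if (ctx->ppgtt)
			i915_ppgtt_close(&ctx->ppgtt->base); /* close every vma */
		ctx->file_priv = ERR_PTR(-EBADF); /* detach from the user */
		i915_gem_context_put(ctx);
	}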

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  1 +
 drivers/gpu/drm/i915/i915_gem.c         | 15 ++++++------
 drivers/gpu/drm/i915/i915_gem_context.c | 43 ++++++++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  9 +++++--
 drivers/gpu/drm/i915/i915_gem_gtt.h     |  9 +++++++
 drivers/gpu/drm/i915/i915_gem_stolen.c  |  2 +-
 6 files changed, 63 insertions(+), 16 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f470ea195253..ce472c9bd7f9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -907,6 +907,7 @@ struct i915_gem_context {
 	struct list_head link;
 
 	u8 remap_slice;
+	bool closed:1;
 };
 
 enum fb_op_origin {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 74d24d7e3ca2..32a7bbdc0b7f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2857,12 +2857,15 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 		__i915_vma_iounmap(vma);
 	}
 
-	trace_i915_vma_unbind(vma);
-
-	vma->vm->unbind_vma(vma);
+	if (likely(!vma->vm->closed)) {
+		trace_i915_vma_unbind(vma);
+		vma->vm->unbind_vma(vma);
+	}
 	vma->bound = 0;
 
-	list_del_init(&vma->vm_link);
+	drm_mm_remove_node(&vma->node);
+	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
+
 	if (vma->is_ggtt) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 			obj->map_and_fenceable = false;
@@ -2873,8 +2876,6 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 		vma->ggtt_view.pages = NULL;
 	}
 
-	drm_mm_remove_node(&vma->node);
-
 	/* Since the unbound list is global, only move to that list if
 	 * no more VMAs exist. */
 	if (--obj->bind_count == 0)
@@ -3116,7 +3117,7 @@ search_free:
 		goto err_remove_node;
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&vma->vm_link, &vm->inactive_list);
+	list_move_tail(&vma->vm_link, &vm->inactive_list);
 	obj->bind_count++;
 
 	return vma;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 823e74c86eaa..a4ee62335e75 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -156,6 +156,7 @@ void i915_gem_context_free(struct kref *ctx_ref)
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	trace_i915_context_free(ctx);
+	GEM_BUG_ON(!ctx->closed);
 
 	/*
 	 * This context is going away and we need to remove all VMAs still
@@ -224,6 +225,37 @@ i915_gem_alloc_context_obj(struct drm_device *dev, size_t size)
 	return obj;
 }
 
+static void i915_ppgtt_close(struct i915_address_space *vm)
+{
+	struct list_head *phases[] = {
+		&vm->active_list,
+		&vm->inactive_list,
+		&vm->unbound_list,
+		NULL,
+	}, **phase;
+
+	GEM_BUG_ON(vm->closed);
+	vm->closed = true;
+
+	for (phase = phases; *phase; phase++) {
+		struct i915_vma *vma, *vn;
+
+		list_for_each_entry_safe(vma, vn, *phase, vm_link)
+			if (!vma->closed)
+				i915_vma_close(vma);
+	}
+}
+
+static void context_close(struct i915_gem_context *ctx)
+{
+	GEM_BUG_ON(ctx->closed);
+	ctx->closed = true;
+	if (ctx->ppgtt)
+		i915_ppgtt_close(&ctx->ppgtt->base);
+	ctx->file_priv = ERR_PTR(-EBADF);
+	i915_gem_context_put(ctx);
+}
+
 static int assign_hw_id(struct drm_i915_private *dev_priv, unsigned *out)
 {
 	int ret;
@@ -305,7 +337,7 @@ __create_hw_context(struct drm_device *dev,
 	return ctx;
 
 err_out:
-	i915_gem_context_put(ctx);
+	context_close(ctx);
 	return ERR_PTR(ret);
 }
 
@@ -334,7 +366,7 @@ i915_gem_create_context(struct drm_device *dev,
 			DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
 					 PTR_ERR(ppgtt));
 			idr_remove(&file_priv->context_idr, ctx->user_handle);
-			i915_gem_context_put(ctx);
+			context_close(ctx);
 			return ERR_CAST(ppgtt);
 		}
 
@@ -505,7 +537,7 @@ void i915_gem_context_fini(struct drm_device *dev)
 
 	lockdep_assert_held(&dev->struct_mutex);
 
-	i915_gem_context_put(dctx);
+	context_close(dctx);
 	dev_priv->kernel_context = NULL;
 
 	ida_destroy(&dev_priv->context_hw_ida);
@@ -515,8 +547,7 @@ static int context_idr_cleanup(int id, void *p, void *data)
 {
 	struct i915_gem_context *ctx = p;
 
-	ctx->file_priv = ERR_PTR(-EBADF);
-	i915_gem_context_put(ctx);
+	context_close(ctx);
 	return 0;
 }
 
@@ -1014,7 +1045,7 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 	}
 
 	idr_remove(&file_priv->context_idr, ctx->user_handle);
-	i915_gem_context_put(ctx);
+	context_close(ctx);
 	mutex_unlock(&dev->struct_mutex);
 
 	DRM_DEBUG_DRIVER("HW context %d destroyed\n", args->ctx_id);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e0a864ae9e0b..409be1df70fa 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2122,6 +2122,7 @@ static void i915_address_space_init(struct i915_address_space *vm,
 	vm->dev = &dev_priv->drm;
 	INIT_LIST_HEAD(&vm->active_list);
 	INIT_LIST_HEAD(&vm->inactive_list);
+	INIT_LIST_HEAD(&vm->unbound_list);
 	list_add_tail(&vm->global_link, &dev_priv->vm_list);
 }
 
@@ -2214,9 +2215,10 @@ void  i915_ppgtt_release(struct kref *kref)
 
 	trace_i915_ppgtt_release(&ppgtt->base);
 
-	/* vmas should already be unbound */
+	/* vmas should already be unbound and destroyed */
 	WARN_ON(!list_empty(&ppgtt->base.active_list));
 	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
+	WARN_ON(!list_empty(&ppgtt->base.unbound_list));
 
 	list_del(&ppgtt->base.global_link);
 	drm_mm_takedown(&ppgtt->base.mm);
@@ -3370,6 +3372,8 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int i;
 
+	GEM_BUG_ON(vm->closed);
+
 	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
 		return ERR_PTR(-EINVAL);
 
@@ -3377,11 +3381,11 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	if (vma == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	INIT_LIST_HEAD(&vma->vm_link);
 	INIT_LIST_HEAD(&vma->obj_link);
 	INIT_LIST_HEAD(&vma->exec_list);
 	for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
 		init_request_active(&vma->last_read[i], i915_vma_retire);
+	list_add(&vma->vm_link, &vm->unbound_list);
 	vma->vm = vm;
 	vma->obj = obj;
 	vma->is_ggtt = i915_is_ggtt(vm);
@@ -3422,6 +3426,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, &ggtt->base, view);
 
+	GEM_BUG_ON(vma->closed);
 	return vma;
 
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index a3ea1bda2b1b..2ebf07defa4e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -319,6 +319,8 @@ struct i915_address_space {
 	u64 start;		/* Start offset always 0 for dri2 */
 	u64 total;		/* size addr space maps (ex. 2GB for ggtt) */
 
+	bool closed;
+
 	struct i915_page_scratch *scratch_page;
 	struct i915_page_table *scratch_pt;
 	struct i915_page_directory *scratch_pd;
@@ -347,6 +349,13 @@ struct i915_address_space {
 	 */
 	struct list_head inactive_list;
 
+	/**
+	 * List of vma that have been unbound.
+	 *
+	 * A reference is not held on the buffer while on this list.
+	 */
+	struct list_head unbound_list;
+
 	/* FIXME: Need a more generic return type */
 	gen6_pte_t (*pte_encode)(dma_addr_t addr,
 				 enum i915_cache_level level,
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 2c321c8df7c3..bc91ffe614e2 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -707,7 +707,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 
 	vma->bound |= GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
-	list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
+	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
 	obj->bind_count++;
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 48/73] Revert "drm/i915: Clean up associated VMAs on context destruction"
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (46 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 47/73] drm/i915: Mark the context and address space as closed Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 49/73] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (25 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

This reverts commit e9f24d5fb7cf3628b195b18ff3ac4e37937ceeae.

The patch was only a stop-gap measure that fixed half the problem - the
leak of the fbcon when restarting X. A complete solution required
releasing the VMA when the object itself was closed rather than relying
on file/process exit. The previous patches add the VMA tracking
necessary to close them along with the object, context or file, and so
the time has come to remove the partial fix.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  5 -----
 drivers/gpu/drm/i915/i915_gem.c         | 14 ++------------
 drivers/gpu/drm/i915/i915_gem_context.c | 22 ----------------------
 drivers/gpu/drm/i915/i915_gem_gtt.c     |  2 +-
 4 files changed, 3 insertions(+), 40 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ce472c9bd7f9..66b98fa4715a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3044,11 +3044,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
-/*
- * BEWARE: Do not use the function below unless you can _absolutely_
- * _guarantee_ VMA in question is _not in use_ anywhere.
- */
-int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
 void i915_vma_close(struct i915_vma *vma);
 void i915_vma_destroy(struct i915_vma *vma);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 32a7bbdc0b7f..8e23e7eb28ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2803,7 +2803,7 @@ static void __i915_vma_iounmap(struct i915_vma *vma)
 	vma->iomap = NULL;
 }
 
-static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
+int i915_vma_unbind(struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 	unsigned long active;
@@ -2813,7 +2813,7 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 	 * have side-effects such as unpinning or even unbinding this vma.
 	 */
 	active = i915_vma_get_active(vma);
-	if (active && wait) {
+	if (active) {
 		int idx;
 
 		/* When a closed VMA is retired, it is unbound - eek.
@@ -2895,16 +2895,6 @@ destroy:
 	return 0;
 }
 
-int i915_vma_unbind(struct i915_vma *vma)
-{
-	return __i915_vma_unbind(vma, true);
-}
-
-int __i915_vma_unbind_no_wait(struct i915_vma *vma)
-{
-	return __i915_vma_unbind(vma, false);
-}
-
 int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 {
 	struct intel_engine_cs *engine;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index a4ee62335e75..eff6d3953ecd 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -134,21 +134,6 @@ static int get_context_size(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-static void i915_gem_context_clean(struct i915_gem_context *ctx)
-{
-	struct i915_hw_ppgtt *ppgtt = ctx->ppgtt;
-	struct i915_vma *vma, *next;
-
-	if (!ppgtt)
-		return;
-
-	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
-				 vm_link) {
-		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))
-			break;
-	}
-}
-
 void i915_gem_context_free(struct kref *ctx_ref)
 {
 	struct i915_gem_context *ctx = container_of(ctx_ref, typeof(*ctx), ref);
@@ -158,13 +143,6 @@ void i915_gem_context_free(struct kref *ctx_ref)
 	trace_i915_context_free(ctx);
 	GEM_BUG_ON(!ctx->closed);
 
-	/*
-	 * This context is going away and we need to remove all VMAs still
-	 * around. This is to handle imported shared objects for which
-	 * destructor did not run when their handles were closed.
-	 */
-	i915_gem_context_clean(ctx);
-
 	i915_ppgtt_put(ctx->ppgtt);
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 409be1df70fa..bfa211e9dbaa 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3361,7 +3361,7 @@ void i915_vma_close(struct i915_vma *vma)
 
 	list_del_init(&vma->obj_link);
 	if (!i915_vma_is_active(vma) && !vma->pin_count)
-		WARN_ON(__i915_vma_unbind_no_wait(vma));
+		WARN_ON(i915_vma_unbind(vma));
 }
 
 static struct i915_vma *
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 49/73] drm/i915: Combine loops within i915_gem_evict_something
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (47 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 48/73] Revert "drm/i915: Clean up associated VMAs on context destruction" Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 50/73] drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something() Chris Wilson
                   ` (24 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

A slight micro-optimisation: combine the two scan loops so that gcc is
able to optimise the inner loops concisely. Since we are reviewing the
loops, we can update the comments to describe the current state of
affairs, in particular the distinction between evicting from the global
GTT (which may contain untracked items and transient global pins) and
the per-process GTT.
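
The combined loop below uses a small NULL-terminated array of list heads
so that both passes share a single body - a sketch of the idiom:

	struct list_head *phases[] = {
		&vm->inactive_list,	/* cheap candidates first */
		&vm->active_list,	/* will stall on unbinding */
		NULL,
	}, **phase = phases;

	do {
		list_for_each_entry(vma, *phase, vm_link)
			if (mark_free(vma, &eviction_list))
				goto found;
	} while (*++phase);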

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_evict.c | 143 +++++++++++++++++-----------------
 1 file changed, 70 insertions(+), 73 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3437ced76cb6..016be7316676 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -34,6 +34,19 @@
 #include "i915_trace.h"
 
 static bool
+gpu_is_idle(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, dev_priv) {
+		if (!list_empty(&engine->request_list))
+			return false;
+	}
+
+	return true;
+}
+
+static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
 	if (vma->pin_count)
@@ -76,37 +89,31 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 			 unsigned long start, unsigned long end,
 			 unsigned flags)
 {
-	struct list_head eviction_list, unwind_list;
-	struct i915_vma *vma;
-	int ret = 0;
-	int pass = 0;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct list_head eviction_list;
+	struct list_head *phases[] = {
+		&vm->inactive_list,
+		&vm->active_list,
+		NULL,
+	}, **phase;
+	struct i915_vma *vma, *next;
+	int ret;
 
 	trace_i915_gem_evict(dev, min_size, alignment, flags);
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
 	 * The oldest idle objects reside on the inactive list, which is in
-	 * retirement order. The next objects to retire are those on the (per
-	 * ring) active list that do not have an outstanding flush. Once the
-	 * hardware reports completion (the seqno is updated after the
-	 * batchbuffer has been finished) the clean buffer objects would
-	 * be retired to the inactive list. Any dirty objects would be added
-	 * to the tail of the flushing list. So after processing the clean
-	 * active objects we need to emit a MI_FLUSH to retire the flushing
-	 * list, hence the retirement order of the flushing list is in
-	 * advance of the dirty objects on the active lists.
+	 * retirement order. The next objects to retire are those in flight,
+	 * on the active list, again in retirement order.
 	 *
 	 * The retirement sequence is thus:
 	 *   1. Inactive objects (already retired)
-	 *   2. Clean active objects
-	 *   3. Flushing list
-	 *   4. Dirty active objects.
+	 *   2. Active objects (will stall on unbinding)
 	 *
 	 * On each list, the oldest objects lie at the HEAD with the freshest
 	 * object on the TAIL.
 	 */
-
-	INIT_LIST_HEAD(&unwind_list);
 	if (start != 0 || end != vm->total) {
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
 					    alignment, cache_level,
@@ -114,79 +121,71 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
-search_again:
-	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
-		if (mark_free(vma, &unwind_list))
-			goto found;
-	}
-
 	if (flags & PIN_NONBLOCK)
-		goto none;
+		phases[1] = NULL;
 
-	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(vma, &vm->active_list, vm_link) {
-		if (mark_free(vma, &unwind_list))
-			goto found;
-	}
+search_again:
+	INIT_LIST_HEAD(&eviction_list);
+	phase = phases;
+	do {
+		list_for_each_entry(vma, *phase, vm_link)
+			if (mark_free(vma, &eviction_list))
+				goto found;
+	} while (*++phase);
 
-none:
 	/* Nothing found, clean up and bail out! */
-	while (!list_empty(&unwind_list)) {
-		vma = list_first_entry(&unwind_list,
-				       struct i915_vma,
-				       exec_list);
+	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
 		ret = drm_mm_scan_remove_block(&vma->node);
 		BUG_ON(ret);
 
-		list_del_init(&vma->exec_list);
+		INIT_LIST_HEAD(&vma->exec_list);
 	}
 
 	/* Can we unpin some objects such as idle hw contents,
-	 * or pending flips?
+	 * or pending flips? But since only the GGTT has global entries
+	 * such as scanouts, ringbuffers and contexts, we can skip the
+	 * purge when inspecting per-process local address spaces.
 	 */
-	if (flags & PIN_NONBLOCK)
+	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
-	/* Only idle the GPU and repeat the search once */
-	if (pass++ == 0) {
-		struct drm_i915_private *dev_priv = to_i915(dev);
-
-		if (i915_is_ggtt(vm)) {
-			ret = i915_gem_switch_to_kernel_context(dev_priv);
-			if (ret)
-				return ret;
-		}
-
-		ret = i915_gem_wait_for_idle(dev_priv);
-		if (ret)
-			return ret;
-
-		i915_gem_retire_requests(dev_priv);
-		goto search_again;
+	if (gpu_is_idle(dev_priv)) {
+		/* If we still have pending pageflip completions, drop
+		 * back to userspace to give our workqueues time to
+		 * acquire our locks and unpin the old scanouts.
+		 */
+		return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
 	}
 
-	/* If we still have pending pageflip completions, drop
-	 * back to userspace to give our workqueues time to
-	 * acquire our locks and unpin the old scanouts.
+	/* Not everything in the GGTT is tracked via vma (otherwise we
+	 * could evict as required with minimal stalling) so we are forced
+	 * to idle the GPU and explicitly retire outstanding requests in
+	 * the hopes that we can then remove contexts and the like only
+	 * bound by their active reference.
 	 */
-	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
+	ret = i915_gem_switch_to_kernel_context(dev_priv);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_wait_for_idle(dev_priv);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(dev_priv);
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other other operations while
-	 * scanning, therefore store to be evicted objects on a
-	 * temporary list. */
-	INIT_LIST_HEAD(&eviction_list);
-	while (!list_empty(&unwind_list)) {
-		vma = list_first_entry(&unwind_list,
-				       struct i915_vma,
-				       exec_list);
-		if (drm_mm_scan_remove_block(&vma->node)) {
+	 * scanning, therefore store to-be-evicted objects on a
+	 * temporary list and take a reference for all before
+	 * calling unbind (which may remove the active reference
+	 * of any of our objects, thus corrupting the list).
+	 */
+	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+		if (drm_mm_scan_remove_block(&vma->node))
 			vma->pin_count++;
-			list_move(&vma->exec_list, &eviction_list);
-			continue;
-		}
-		list_del_init(&vma->exec_list);
+		else
+			list_del_init(&vma->exec_list);
 	}
 
 	/* Unbinding will emit any required flushes */
@@ -200,7 +199,6 @@ found:
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
 	}
-
 	return ret;
 }
 
@@ -279,7 +277,6 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 			return ret;
 
 		i915_gem_retire_requests(dev_priv);
-
 		WARN_ON(!list_empty(&vm->active_list));
 	}
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 50/73] drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something()
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (48 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 49/73] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:10 ` [PATCH 51/73] drm/i915: Double check the active status on the batch pool Chris Wilson
                   ` (23 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

Eviction is VM local, so we can ignore the significance of the
drm_device in the caller, and leave it to i915_gem_evict_something() to
manage itself.
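
The address space already knows its device, so the callers' drm_device
parameter is redundant - a sketch of the replacement:

	struct drm_i915_private *dev_priv = to_i915(vm->dev);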

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h       |  3 +--
 drivers/gpu/drm/i915/i915_gem.c       |  2 +-
 drivers/gpu/drm/i915/i915_gem_evict.c |  9 ++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.c   |  2 +-
 drivers/gpu/drm/i915/i915_trace.h     | 14 ++++++++------
 5 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 66b98fa4715a..fbda38f25c6b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3398,8 +3398,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
 				       struct drm_file *file);
 
 /* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
-					  struct i915_address_space *vm,
+int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 					  int min_size,
 					  unsigned alignment,
 					  unsigned cache_level,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8e23e7eb28ae..3402fec5e33c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3086,7 +3086,7 @@ search_free:
 							  search_flag,
 							  alloc_flag);
 		if (ret) {
-			ret = i915_gem_evict_something(dev, vm, size, alignment,
+			ret = i915_gem_evict_something(vm, size, alignment,
 						       obj->cache_level,
 						       start, end,
 						       flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 016be7316676..4bce72fa14c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -61,7 +61,6 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 
 /**
  * i915_gem_evict_something - Evict vmas to make room for binding a new one
- * @dev: drm_device
  * @vm: address space to evict from
  * @min_size: size of the desired free space
  * @alignment: alignment constraint of the desired free space
@@ -84,12 +83,12 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
  * memory in e.g. the shrinker.
  */
 int
-i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+i915_gem_evict_something(struct i915_address_space *vm,
 			 int min_size, unsigned alignment, unsigned cache_level,
 			 unsigned long start, unsigned long end,
 			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	struct list_head eviction_list;
 	struct list_head *phases[] = {
 		&vm->inactive_list,
@@ -99,7 +98,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	struct i915_vma *vma, *next;
 	int ret;
 
-	trace_i915_gem_evict(dev, min_size, alignment, flags);
+	trace_i915_gem_evict(vm, min_size, alignment, flags);
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
@@ -154,7 +153,7 @@ search_again:
 		 * back to userspace to give our workqueues time to
 		 * acquire our locks and unpin the old scanouts.
 		 */
-		return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
+		return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
 	}
 
 	/* Not everything in the GGTT is tracked via vma (otherwise we
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index bfa211e9dbaa..31d13aea5461 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2012,7 +2012,7 @@ alloc:
 						  0, ggtt->base.total,
 						  DRM_MM_TOPDOWN);
 	if (ret == -ENOSPC && !retried) {
-		ret = i915_gem_evict_something(dev, &ggtt->base,
+		ret = i915_gem_evict_something(&ggtt->base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
 					       I915_CACHE_NONE,
 					       0, ggtt->base.total,
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 9e43c0aa6e3b..178798002a73 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -394,25 +394,27 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );
 
 TRACE_EVENT(i915_gem_evict,
-	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
-	    TP_ARGS(dev, size, align, flags),
+	    TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
+	    TP_ARGS(vm, size, align, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(struct i915_address_space *, vm)
 			     __field(u32, size)
 			     __field(u32, align)
-			     __field(unsigned, flags)
+			     __field(unsigned int, flags)
 			    ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = vm->dev->primary->index;
+			   __entry->vm = vm;
 			   __entry->size = size;
 			   __entry->align = align;
 			   __entry->flags = flags;
 			  ),
 
-	    TP_printk("dev=%d, size=%d, align=%d %s",
-		      __entry->dev, __entry->size, __entry->align,
+	    TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
+		      __entry->dev, __entry->vm, __entry->size, __entry->align,
 		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 51/73] drm/i915: Double check the active status on the batch pool
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (49 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 50/73] drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something() Chris Wilson
@ 2016-08-01  9:10 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 52/73] drm/i915: Remove request retirement before each batch Chris Wilson
                   ` (22 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:10 UTC (permalink / raw)
  To: intel-gfx

We should not rely on obj->active being uptodate unless we manually
flush it. Instead, we can verify that the next available batch object is
idle by looking at its last active request (and checking it for
completion).
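
Condensed from the diff below - the pool is now per-engine, so only that
engine's last read request needs to be inspected:

	if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
				     &tmp->base.dev->struct_mutex))
		break; /* strictly LRU: everything after this is busy too */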

v2: remove the struct drm_device forward declaration added in the
process of making it unnecessary

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_batch_pool.c | 15 ++++++++-------
 drivers/gpu/drm/i915/i915_gem_batch_pool.h |  6 ++++--
 drivers/gpu/drm/i915/intel_engine_cs.c     |  2 +-
 3 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 825981b5aa40..ed989596d9a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -41,15 +41,15 @@
 
 /**
  * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @dev: the drm device
+ * @engine: the associated request submission engine
  * @pool: the batch buffer pool
  */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
 			      struct i915_gem_batch_pool *pool)
 {
 	int n;
 
-	pool->dev = dev;
+	pool->engine = engine;
 
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
 		INIT_LIST_HEAD(&pool->cache_list[n]);
@@ -65,7 +65,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 {
 	int n;
 
-	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
 		struct drm_i915_gem_object *obj, *next;
@@ -101,7 +101,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	struct list_head *list;
 	int n;
 
-	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
 	/* Compute a power-of-two bucket, but throw everything greater than
 	 * 16KiB into the same bucket: i.e. the the buckets hold objects of
@@ -114,7 +114,8 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 
 	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
-		if (tmp->active)
+		if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
+					     &tmp->base.dev->struct_mutex))
 			break;
 
 		/* While we're looping, do some clean up */
@@ -133,7 +134,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	if (obj == NULL) {
 		int ret;
 
-		obj = i915_gem_object_create(pool->dev, size);
+		obj = i915_gem_object_create(&pool->engine->i915->drm, size);
 		if (IS_ERR(obj))
 			return obj;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
index 848e90703eed..10d5ac4c00d3 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -27,13 +27,15 @@
 
 #include "i915_drv.h"
 
+struct intel_engine_cs;
+
 struct i915_gem_batch_pool {
-	struct drm_device *dev;
+	struct intel_engine_cs *engine;
 	struct list_head cache_list[4];
 };
 
 /* i915_gem_batch_pool.c */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
 			      struct i915_gem_batch_pool *pool);
 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
 struct drm_i915_gem_object*
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 7dc214dfc2a8..6665068e583c 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -185,7 +185,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 	engine->fence_context = fence_context_alloc(1);
 
 	intel_engine_init_hangcheck(engine);
-	i915_gem_batch_pool_init(&engine->i915->drm, &engine->batch_pool);
+	i915_gem_batch_pool_init(engine, &engine->batch_pool);
 }
 
 /**
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 52/73] drm/i915: Remove request retirement before each batch
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (50 preceding siblings ...)
  2016-08-01  9:10 ` [PATCH 51/73] drm/i915: Double check the active status on the batch pool Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 53/73] drm/i915: Remove i915_gem_execbuffer_retire_commands() Chris Wilson
                   ` (21 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

This reimplements the denial-of-service protection against igt from
commit 227f782e4667 ("drm/i915: Retire requests before creating a new
one") and transfers the stall from before each batch into get_pages().
The issue is that the stall increases latency between batches, which in
some cases (especially coupled with execlists) is detrimental to keeping
the GPU well fed. We have also observed that retiring requests can
itself free objects (and requests), and therefore makes a good first
step when shrinking.

v2: Recycle objects prior to i915_gem_object_get_pages()
v3: Remove the reference to the ring from i915_gem_retire_requests_ring()
as it now operates on an intel_engine_cs.
v4: Since commit 9b5f4e5ed6fd ("drm/i915: Retire oldest completed request
before allocating next") we no longer need the safeguard to retire
requests before get_pages(). We no longer see the huge latencies when
hitting the shrinker between allocations.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            | 1 -
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 --
 drivers/gpu/drm/i915/i915_gem_request.c    | 4 ++--
 3 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbda38f25c6b..2de3d16f7b80 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3169,7 +3169,6 @@ struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5e3b5054f72d..0593ea3ba211 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -781,8 +781,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
 	int retry;
 
-	i915_gem_retire_requests_ring(engine);
-
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
 	INIT_LIST_HEAD(&ordered_vmas);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 6faa84832ade..773b942a3a5f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -732,7 +732,7 @@ complete:
 	return ret;
 }
 
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
+static void engine_retire_requests(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request, *next;
 
@@ -756,7 +756,7 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(!dev_priv->gt.awake);
 
 	for_each_engine(engine, dev_priv) {
-		i915_gem_retire_requests_ring(engine);
+		engine_retire_requests(engine);
 		if (list_empty(&engine->request_list))
 			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
 	}
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 53/73] drm/i915: Remove i915_gem_execbuffer_retire_commands()
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (51 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 52/73] drm/i915: Remove request retirement before each batch Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 54/73] drm/i915: Fix up vma alignment to be u64 Chris Wilson
                   ` (20 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

Move the single line to its callsite, as the name is now misleading and
the function's sole purpose is to add the request to the execution
queue. At the callsite we can also see that if we failed to dispatch the
batch, we can forgo flushing the GPU when closing the request.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0593ea3ba211..63984c4d8e5a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1211,13 +1211,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 	}
 }
 
-static void
-i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
-{
-	/* Add a breadcrumb for the completion of the batch buffer */
-	__i915_add_request(params->request, params->batch_obj, true);
-}
-
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
@@ -1692,7 +1685,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	ret = execbuf_submit(params, args, &eb->vmas);
 err_request:
-	i915_gem_execbuffer_retire_commands(params);
+	__i915_add_request(params->request, params->batch_obj, ret == 0);
 
 err_batch_unpin:
 	/*
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 54/73] drm/i915: Fix up vma alignment to be u64
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (52 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 53/73] drm/i915: Remove i915_gem_execbuffer_retire_commands() Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01 12:21   ` Joonas Lahtinen
  2016-08-01  9:11 ` [PATCH 55/73] drm/i915: Pad GTT views of exec objects up to user specified size Chris Wilson
                   ` (19 subsequent siblings)
  73 siblings, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

This is not the full fix, as we still need to percolate the u64 type
down through the drm_mm stack, but it is required now to prevent
explosions due to a mismatch between execbuf (eb_vma_misplaced) and vma
binding (i915_vma_misplaced) - and it reduces the risk of spurious
changes as we adjust the vma interface in the next patches.
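
A tiny standalone demonstration of the hazard being fixed (illustrative
only, not kernel code): passing a 64-bit alignment through a u32
parameter silently drops the high bits, so eb_vma_misplaced() and
i915_vma_misplaced() could be testing different values.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t requested = 1ull << 32;		/* 4GiB alignment */
	uint32_t truncated = (uint32_t)requested;	/* old u32 parameter */

	/* One check sees 4294967296, the other sees 0. */
	printf("u64 view: %llu, u32 view: %u\n",
	       (unsigned long long)requested, (unsigned)truncated);
	return 0;
}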

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h       | 14 ++++++--------
 drivers/gpu/drm/i915/i915_gem.c       | 26 +++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_evict.c |  5 +++--
 3 files changed, 22 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2de3d16f7b80..74a31358fd87 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3032,13 +3032,13 @@ void i915_gem_free_object(struct drm_gem_object *obj);
 int __must_check
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
-		    uint32_t alignment,
-		    uint64_t flags);
+		    u64 alignment,
+		    u64 flags);
 int __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
-			 uint32_t alignment,
-			 uint64_t flags);
+			 u64 alignment,
+			 u64 flags);
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
@@ -3398,11 +3398,9 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
-					  int min_size,
-					  unsigned alignment,
+					  u64 min_size, u64 alignment,
 					  unsigned cache_level,
-					  unsigned long start,
-					  unsigned long end,
+					  u64 start, u64 end,
 					  unsigned flags);
 int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3402fec5e33c..6039e46af69a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2960,8 +2960,8 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   const struct i915_ggtt_view *ggtt_view,
-			   unsigned alignment,
-			   uint64_t flags)
+			   u64 alignment,
+			   u64 flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3020,9 +3020,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 		alignment = flags & PIN_MAPPABLE ? fence_alignment :
 						unfenced_alignment;
 	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
-		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
+		DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
 			  ggtt_view ? ggtt_view->type : 0,
-			  alignment);
+			  (long long)alignment);
 		return ERR_PTR(-EINVAL);
 	}
 
@@ -3675,7 +3675,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 }
 
 static bool
-i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+i915_vma_misplaced(struct i915_vma *vma, u64 alignment, u64 flags)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 
@@ -3724,8 +3724,8 @@ static int
 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		       struct i915_address_space *vm,
 		       const struct i915_ggtt_view *ggtt_view,
-		       uint32_t alignment,
-		       uint64_t flags)
+		       u64 alignment,
+		       u64 flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
@@ -3754,12 +3754,12 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		if (i915_vma_misplaced(vma, alignment, flags)) {
 			WARN(vma->pin_count,
 			     "bo is already pinned in %s with incorrect alignment:"
-			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
 			     ggtt_view ? "ggtt" : "ppgtt",
 			     upper_32_bits(vma->node.start),
 			     lower_32_bits(vma->node.start),
-			     alignment,
+			     (long long)alignment,
 			     !!(flags & PIN_MAPPABLE),
 			     obj->map_and_fenceable);
 			ret = i915_vma_unbind(vma);
@@ -3795,8 +3795,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
-		    uint32_t alignment,
-		    uint64_t flags)
+		    u64 alignment,
+		    u64 flags)
 {
 	return i915_gem_object_do_pin(obj, vm,
 				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
@@ -3806,8 +3806,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
-			 uint32_t alignment,
-			 uint64_t flags)
+			 u64 alignment,
+			 u64 flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4bce72fa14c4..ef12ecd2b182 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -84,8 +84,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
  */
 int
 i915_gem_evict_something(struct i915_address_space *vm,
-			 int min_size, unsigned alignment, unsigned cache_level,
-			 unsigned long start, unsigned long end,
+			 u64 min_size, u64 alignment,
+			 unsigned cache_level,
+			 u64 start, u64 end,
 			 unsigned flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(vm->dev);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 55/73] drm/i915: Pad GTT views of exec objects up to user specified size
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (53 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 54/73] drm/i915: Fix up vma alignment to be u64 Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 56/73] drm/i915: Reduce WARN(i915_gem_valid_gtt_space) to a debug-only check Chris Wilson
                   ` (18 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

Our GPUs impose certain requirements upon buffers that depend upon how
exactly they are used. Typically this is expressed as a requirement for
a larger surface than would be naively computed by pitch * height.
Normally such requirements are hidden away in the userspace driver, but
when we accept pointers from strangers and later impose extra conditions
on them, the original client allocator has no idea about the
monstrosities in the GPU, so we require the userspace driver to inform
the kernel how many padding pages are required beyond the client
allocation.
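
A hedged sketch of the intended userspace usage (EXEC_OBJECT_PAD_TO_SIZE
and pad_to_size come from the uapi change below; the helper and the 2MiB
figure are illustrative assumptions):

#include <stdint.h>
#include <drm/i915_drm.h>

static struct drm_i915_gem_exec_object2 pad_entry(uint32_t bo_handle)
{
	struct drm_i915_gem_exec_object2 entry = {
		.handle = bo_handle,
		.flags = EXEC_OBJECT_PAD_TO_SIZE,
		/* Must be page-aligned: validate_exec_list() rejects
		 * anything with offset_in_page() set.
		 */
		.pad_to_size = 2 * 1024 * 1024,
	};
	return entry;
}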

v2: Long time, no see
v3: Try an anonymous union for uapi struct compatibility

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            |  6 ++-
 drivers/gpu/drm/i915/i915_gem.c            | 79 ++++++++++++++----------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++-
 include/uapi/drm/i915_drm.h                |  8 ++-
 4 files changed, 63 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 74a31358fd87..1e1369319326 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3032,11 +3032,13 @@ void i915_gem_free_object(struct drm_gem_object *obj);
 int __must_check
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
+		    u64 size,
 		    u64 alignment,
 		    u64 flags);
 int __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
+			 u64 size,
 			 u64 alignment,
 			 u64 flags);
 
@@ -3313,8 +3315,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-	return i915_gem_object_pin(obj, &ggtt->base,
-				   alignment, flags | PIN_GLOBAL);
+	return i915_gem_object_pin(obj, &ggtt->base, 0, alignment,
+				   flags | PIN_GLOBAL);
 }
 
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6039e46af69a..518dadd46feb 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1689,7 +1689,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* Now pin it into the GTT if needed */
-	ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
+	ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 	if (ret)
 		goto unlock;
 
@@ -2960,21 +2960,20 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   const struct i915_ggtt_view *ggtt_view,
+			   u64 size,
 			   u64 alignment,
 			   u64 flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	u32 fence_alignment, unfenced_alignment;
-	u32 search_flag, alloc_flag;
 	u64 start, end;
-	u64 size, fence_size;
+	u32 search_flag, alloc_flag;
 	struct i915_vma *vma;
 	int ret;
 
 	if (i915_is_ggtt(vm)) {
-		u32 view_size;
+		u32 fence_size, fence_alignment, unfenced_alignment;
+		u64 view_size;
 
 		if (WARN_ON(!ggtt_view))
 			return ERR_PTR(-EINVAL);
@@ -2992,48 +2991,39 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 								view_size,
 								obj->tiling_mode,
 								false);
-		size = flags & PIN_MAPPABLE ? fence_size : view_size;
+		size = max(size, view_size);
+		if (flags & PIN_MAPPABLE)
+			size = max_t(u64, size, fence_size);
+
+		if (alignment == 0)
+			alignment = flags & PIN_MAPPABLE ? fence_alignment :
+				unfenced_alignment;
+		if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
+			DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
+				  ggtt_view ? ggtt_view->type : 0,
+				  (long long)alignment);
+			return ERR_PTR(-EINVAL);
+		}
 	} else {
-		fence_size = i915_gem_get_gtt_size(dev,
-						   obj->base.size,
-						   obj->tiling_mode);
-		fence_alignment = i915_gem_get_gtt_alignment(dev,
-							     obj->base.size,
-							     obj->tiling_mode,
-							     true);
-		unfenced_alignment =
-			i915_gem_get_gtt_alignment(dev,
-						   obj->base.size,
-						   obj->tiling_mode,
-						   false);
-		size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
+		size = max_t(u64, size, obj->base.size);
+		alignment = 4096;
 	}
 
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
 	end = vm->total;
 	if (flags & PIN_MAPPABLE)
-		end = min_t(u64, end, ggtt->mappable_end);
+		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
 		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
-	if (alignment == 0)
-		alignment = flags & PIN_MAPPABLE ? fence_alignment :
-						unfenced_alignment;
-	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
-		DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
-			  ggtt_view ? ggtt_view->type : 0,
-			  (long long)alignment);
-		return ERR_PTR(-EINVAL);
-	}
-
 	/* If binding the object/GGTT view requires more space than the entire
 	 * aperture has, reject it early before evicting everything in a vain
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
+		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
 			  ggtt_view ? ggtt_view->type : 0,
-			  size,
+			  size, obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
 		return ERR_PTR(-E2BIG);
@@ -3527,7 +3517,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+	ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
 				       view->type == I915_GGTT_VIEW_NORMAL ?
 				       PIN_MAPPABLE : 0);
 	if (ret)
@@ -3675,12 +3665,14 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 }
 
 static bool
-i915_vma_misplaced(struct i915_vma *vma, u64 alignment, u64 flags)
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	if (alignment &&
-	    vma->node.start & (alignment - 1))
+	if (vma->node.size < size)
+		return true;
+
+	if (alignment && vma->node.start & (alignment - 1))
 		return true;
 
 	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
@@ -3724,6 +3716,7 @@ static int
 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		       struct i915_address_space *vm,
 		       const struct i915_ggtt_view *ggtt_view,
+		       u64 size,
 		       u64 alignment,
 		       u64 flags)
 {
@@ -3751,7 +3744,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
-		if (i915_vma_misplaced(vma, alignment, flags)) {
+		if (i915_vma_misplaced(vma, size, alignment, flags)) {
 			WARN(vma->pin_count,
 			     "bo is already pinned in %s with incorrect alignment:"
 			     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
@@ -3772,8 +3765,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 
 	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
-						 flags);
+		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view,
+						 size, alignment, flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 	} else {
@@ -3795,17 +3788,19 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
+		    u64 size,
 		    u64 alignment,
 		    u64 flags)
 {
 	return i915_gem_object_do_pin(obj, vm,
 				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
-				      alignment, flags);
+				      size, alignment, flags);
 }
 
 int
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
+			 u64 size,
 			 u64 alignment,
 			 u64 flags)
 {
@@ -3816,7 +3811,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	BUG_ON(!view);
 
 	return i915_gem_object_do_pin(obj, &ggtt->base, view,
-				      alignment, flags | PIN_GLOBAL);
+				      size, alignment, flags | PIN_GLOBAL);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 63984c4d8e5a..d2e27e730ecb 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -682,10 +682,14 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 			flags |= PIN_HIGH;
 	}
 
-	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
+	ret = i915_gem_object_pin(obj, vma->vm,
+				  entry->pad_to_size,
+				  entry->alignment,
+				  flags);
 	if ((ret == -ENOSPC  || ret == -E2BIG) &&
 	    only_mappable_for_reloc(entry->flags))
 		ret = i915_gem_object_pin(obj, vma->vm,
+					  entry->pad_to_size,
 					  entry->alignment,
 					  flags & ~PIN_MAPPABLE);
 	if (ret)
@@ -748,6 +752,9 @@ eb_vma_misplaced(struct i915_vma *vma)
 	    vma->node.start & (entry->alignment - 1))
 		return true;
 
+	if (vma->node.size < entry->pad_to_size)
+		return true;
+
 	if (entry->flags & EXEC_OBJECT_PINNED &&
 	    vma->node.start != entry->offset)
 		return true;
@@ -1091,6 +1098,14 @@ validate_exec_list(struct drm_device *dev,
 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
 			return -EINVAL;
 
+		/* pad_to_size was once a reserved field, so sanitize it */
+		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
+			if (offset_in_page(exec[i].pad_to_size))
+				return -EINVAL;
+		} else {
+			exec[i].pad_to_size = 0;
+		}
+
 		/* First check for malicious input causing overflow in
 		 * the worst case where we need to allocate the entire
 		 * relocation tree as a single array.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 33ce5ff9556a..0f292733cffc 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -727,11 +727,15 @@ struct drm_i915_gem_exec_object2 {
 #define EXEC_OBJECT_WRITE		 (1<<2)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
 #define EXEC_OBJECT_PINNED		 (1<<4)
+#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
 /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
-#define __EXEC_OBJECT_UNKNOWN_FLAGS	(-(EXEC_OBJECT_PINNED<<1))
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PAD_TO_SIZE<<1)
 	__u64 flags;
 
-	__u64 rsvd1;
+	union {
+		__u64 rsvd1;
+		__u64 pad_to_size;
+	};
 	__u64 rsvd2;
 };
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 56/73] drm/i915: Reduce WARN(i915_gem_valid_gtt_space) to a debug-only check
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (54 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 55/73] drm/i915: Pad GTT views of exec objects up to user specified size Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 57/73] drm/i915: Split insertion/binding of an object into the VM Chris Wilson
                   ` (17 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

i915_gem_valid_gtt_space() is used after inserting the VMA to
double-check the list - the location should already have been chosen to
pass all the restrictions, so a failure here is a programming error and
a debug-only assertion suffices.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 518dadd46feb..ba89d98e2aed 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3086,10 +3086,7 @@ search_free:
 			goto err_vma;
 		}
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
-		ret = -EINVAL;
-		goto err_remove_node;
-	}
+	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
 	trace_i915_vma_bind(vma, flags);
 	ret = i915_vma_bind(vma, obj->cache_level, flags);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 57/73] drm/i915: Split insertion/binding of an object into the VM
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (55 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 56/73] drm/i915: Reduce WARN(i915_gem_valid_gtt_space) to a debug-only check Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 58/73] drm/i915: Convert 4096 alignment request to 0 for drm_mm allocations Chris Wilson
                   ` (16 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

Split the insertion into the address space's range manager and binding
of that object into the GTT to simplify the code flow when pinning a
VMA.
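
Condensed from the hunks below (sketch only, error handling trimmed),
the pinning flow becomes two distinct steps - reserve a node if needed,
then always bind in one place:

	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
		vma = i915_gem_object_insert_into_vm(obj, vm, ggtt_view,
						     size, alignment, flags);
		if (IS_ERR(vma))
			return PTR_ERR(vma);
	}

	/* Binding now happens here for fresh and reused nodes alike. */
	ret = i915_vma_bind(vma, obj->cache_level, flags);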

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 35 +++++++++++++++--------------------
 1 file changed, 15 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ba89d98e2aed..a6a30c7514e0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2957,12 +2957,12 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
  * @flags: mask of PIN_* flags to use
  */
 static struct i915_vma *
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-			   struct i915_address_space *vm,
-			   const struct i915_ggtt_view *ggtt_view,
-			   u64 size,
-			   u64 alignment,
-			   u64 flags)
+i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
+			       struct i915_address_space *vm,
+			       const struct i915_ggtt_view *ggtt_view,
+			       u64 size,
+			       u64 alignment,
+			       u64 flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3088,19 +3088,12 @@ search_free:
 	}
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
-	trace_i915_vma_bind(vma, flags);
-	ret = i915_vma_bind(vma, obj->cache_level, flags);
-	if (ret)
-		goto err_remove_node;
-
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_move_tail(&vma->vm_link, &vm->inactive_list);
 	obj->bind_count++;
 
 	return vma;
 
-err_remove_node:
-	drm_mm_remove_node(&vma->node);
 err_vma:
 	vma = ERR_PTR(ret);
 err_unpin:
@@ -3760,24 +3753,26 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		}
 	}
 
-	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view,
-						 size, alignment, flags);
+		vma = i915_gem_object_insert_into_vm(obj, vm, ggtt_view,
+						     size, alignment, flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
-	} else {
-		ret = i915_vma_bind(vma, obj->cache_level, flags);
-		if (ret)
-			return ret;
 	}
 
+	bound = vma->bound;
+	ret = i915_vma_bind(vma, obj->cache_level, flags);
+	if (ret)
+		return ret;
+
 	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
 	    (bound ^ vma->bound) & GLOBAL_BIND) {
 		__i915_vma_set_map_and_fenceable(vma);
 		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 	}
 
+	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+
 	vma->pin_count++;
 	return 0;
 }
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 58/73] drm/i915: Convert 4096 alignment request to 0 for drm_mm allocations
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (56 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 57/73] drm/i915: Split insertion/binding of an object into the VM Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 59/73] drm/i915: Update the GGTT size/alignment query functions Chris Wilson
                   ` (15 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

As we always allocate in chunks of 4096 (that being both the PAGE_SIZE
and our own GTT_PAGE_SIZE), we know that all results from the drm_mm are
aligned to at least 4096. The drm_mm allocator itself is optimised for
alignment == 0, and so by converting alignment requests of 4096 (or
less) to 0 we can satisfy our own requirements and still hit the faster
path.
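
The conversion itself is a one-liner; as a minimal sketch (the helper
name is hypothetical, the patch open-codes the check):

static inline u64 normalize_alignment(u64 alignment)
{
	/* Anything <= 4096 is already implied by our 4096-byte
	 * allocation granularity, and drm_mm has a faster path for
	 * alignment == 0.
	 */
	return alignment <= 4096 ? 0 : alignment;
}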

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a6a30c7514e0..9cb57e5bfdf6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3068,6 +3068,15 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 			alloc_flag = DRM_MM_CREATE_DEFAULT;
 		}
 
+		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+		 * so we know that we always have a minimum alignment of 4096.
+		 * The drm_mm range manager is optimised to return results
+		 * with zero alignment, so where possible use the optimal
+		 * path.
+		 */
+		if (alignment <= 4096)
+			alignment = 0;
+
 search_free:
 		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 							  size, alignment,
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 59/73] drm/i915: Update the GGTT size/alignment query functions
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (57 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 58/73] drm/i915: Convert 4096 alignment request to 0 for drm_mm allocations Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01 12:27   ` Joonas Lahtinen
  2016-08-01  9:11 ` [PATCH 60/73] drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private Chris Wilson
                   ` (14 subsequent siblings)
  73 siblings, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

In order to be consistent with other address space functions, we want
to pass around 64-bit sizes, even though all known global GTTs are
limited to 4GiB. Similarly, we try to be consistent in using the _ggtt_
nomenclature when referring to the special global GTT.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h        |  8 ++--
 drivers/gpu/drm/i915/i915_gem.c        | 77 +++++++++++++++++++---------------
 drivers/gpu/drm/i915/i915_gem_tiling.c |  3 +-
 3 files changed, 49 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1e1369319326..b6e56ecb8637 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3241,11 +3241,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-			    int tiling_mode, bool fenced);
+u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode);
+u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+				int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9cb57e5bfdf6..432868eafa60 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1844,29 +1844,39 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
 		i915_gem_release_mmap(obj);
 }
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+/**
+ * i915_gem_get_ggtt_size - return required global GTT size for an object
+ * @dev: drm device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ *
+ * Return the required GTT size for an object, taking into account
+ * potential fence register mapping.
+ */
+u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
 {
-	uint32_t gtt_size;
+	u64 ggtt_size;
 
-	if (INTEL_INFO(dev)->gen >= 4 ||
+	GEM_BUG_ON(size == 0);
+
+	if (INTEL_GEN(dev) >= 4 ||
 	    tiling_mode == I915_TILING_NONE)
 		return size;
 
 	/* Previous chips need a power-of-two fence region when tiling */
 	if (IS_GEN3(dev))
-		gtt_size = 1024*1024;
+		ggtt_size = 1024*1024;
 	else
-		gtt_size = 512*1024;
+		ggtt_size = 512*1024;
 
-	while (gtt_size < size)
-		gtt_size <<= 1;
+	while (ggtt_size < size)
+		ggtt_size <<= 1;
 
-	return gtt_size;
+	return ggtt_size;
 }
 
 /**
- * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * i915_gem_get_ggtt_alignment - return required GTT alignment for an object
  * @dev: drm device
  * @size: object size
  * @tiling_mode: tiling mode
@@ -1875,15 +1885,16 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
  * Return the required GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-			   int tiling_mode, bool fenced)
+u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+				int tiling_mode, bool fenced)
 {
+	GEM_BUG_ON(size == 0);
+
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+	if (INTEL_GEN(dev) >= 4 || (!fenced && IS_G33(dev)) ||
 	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
@@ -1891,7 +1902,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	return i915_gem_get_gtt_size(dev, size, tiling_mode);
+	return i915_gem_get_ggtt_size(dev, size, tiling_mode);
 }
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2980,17 +2991,17 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 
 		view_size = i915_ggtt_view_size(obj, ggtt_view);
 
-		fence_size = i915_gem_get_gtt_size(dev,
-						   view_size,
-						   obj->tiling_mode);
-		fence_alignment = i915_gem_get_gtt_alignment(dev,
-							     view_size,
-							     obj->tiling_mode,
-							     true);
-		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
-								view_size,
-								obj->tiling_mode,
-								false);
+		fence_size = i915_gem_get_ggtt_size(dev,
+						    view_size,
+						    obj->tiling_mode);
+		fence_alignment = i915_gem_get_ggtt_alignment(dev,
+							      view_size,
+							      obj->tiling_mode,
+							      true);
+		unfenced_alignment = i915_gem_get_ggtt_alignment(dev,
+								 view_size,
+								 obj->tiling_mode,
+								 false);
 		size = max(size, view_size);
 		if (flags & PIN_MAPPABLE)
 			size = max_t(u64, size, fence_size);
@@ -3694,13 +3705,13 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	bool mappable, fenceable;
 	u32 fence_size, fence_alignment;
 
-	fence_size = i915_gem_get_gtt_size(obj->base.dev,
-					   obj->base.size,
-					   obj->tiling_mode);
-	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
-						     obj->base.size,
-						     obj->tiling_mode,
-						     true);
+	fence_size = i915_gem_get_ggtt_size(obj->base.dev,
+					    obj->base.size,
+					    obj->tiling_mode);
+	fence_alignment = i915_gem_get_ggtt_alignment(obj->base.dev,
+						      obj->base.size,
+						      obj->tiling_mode,
+						      true);
 
 	fenceable = (vma->node.size == fence_size &&
 		     (vma->node.start & (fence_alignment - 1)) == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index fa2eb4a4f212..4e42da691e4e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -133,7 +133,8 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 			return false;
 	}
 
-	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+	size = i915_gem_get_ggtt_size(obj->base.dev,
+				      obj->base.size, tiling_mode);
 	if (i915_gem_obj_ggtt_size(obj) != size)
 		return false;
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 60/73] drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (58 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 59/73] drm/i915: Update the GGTT size/alignment query functions Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01 12:30   ` Joonas Lahtinen
  2016-08-01  9:11 ` [PATCH 61/73] drm/i915: Record allocated vma size Chris Wilson
                   ` (13 subsequent siblings)
  73 siblings, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

For consistency, internal functions should take drm_i915_private rather
than drm_device. Now that we are subclassing drm_device, there are no
more size wins, but being consistent is its own blessing.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h        |  5 +++--
 drivers/gpu/drm/i915/i915_gem.c        | 30 ++++++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_tiling.c |  8 ++++----
 3 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b6e56ecb8637..3d73394b52d7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3241,8 +3241,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode);
-u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
+			   int tiling_mode);
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
 				int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 432868eafa60..f11bf6c4f86a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1846,25 +1846,26 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
 
 /**
  * i915_gem_get_ggtt_size - return required global GTT size for an object
- * @dev: drm device
+ * @dev_priv: i915 device
  * @size: object size
  * @tiling_mode: tiling mode
  *
  * Return the required GTT size for an object, taking into account
  * potential fence register mapping.
  */
-u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
+			   u64 size, int tiling_mode)
 {
 	u64 ggtt_size;
 
 	GEM_BUG_ON(size == 0);
 
-	if (INTEL_GEN(dev) >= 4 ||
+	if (INTEL_GEN(dev_priv) >= 4 ||
 	    tiling_mode == I915_TILING_NONE)
 		return size;
 
 	/* Previous chips need a power-of-two fence region when tiling */
-	if (IS_GEN3(dev))
+	if (IS_GEN3(dev_priv))
 		ggtt_size = 1024*1024;
 	else
 		ggtt_size = 512*1024;
@@ -1877,7 +1878,7 @@ u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
 
 /**
  * i915_gem_get_ggtt_alignment - return required GTT alignment for an object
- * @dev: drm device
+ * @dev_priv: i915 device
  * @size: object size
  * @tiling_mode: tiling mode
  * @fenced: is fenced alignment required or not
@@ -1885,7 +1886,7 @@ u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
  * Return the required GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
 				int tiling_mode, bool fenced)
 {
 	GEM_BUG_ON(size == 0);
@@ -1894,7 +1895,7 @@ u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_GEN(dev) >= 4 || (!fenced && IS_G33(dev)) ||
+	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
 	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
@@ -1902,7 +1903,7 @@ u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	return i915_gem_get_ggtt_size(dev, size, tiling_mode);
+	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
 }
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2991,14 +2992,14 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 
 		view_size = i915_ggtt_view_size(obj, ggtt_view);
 
-		fence_size = i915_gem_get_ggtt_size(dev,
+		fence_size = i915_gem_get_ggtt_size(dev_priv,
 						    view_size,
 						    obj->tiling_mode);
-		fence_alignment = i915_gem_get_ggtt_alignment(dev,
+		fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
 							      view_size,
 							      obj->tiling_mode,
 							      true);
-		unfenced_alignment = i915_gem_get_ggtt_alignment(dev,
+		unfenced_alignment = i915_gem_get_ggtt_alignment(dev_priv,
 								 view_size,
 								 obj->tiling_mode,
 								 false);
@@ -3702,13 +3703,14 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	bool mappable, fenceable;
 	u32 fence_size, fence_alignment;
 
-	fence_size = i915_gem_get_ggtt_size(obj->base.dev,
+	fence_size = i915_gem_get_ggtt_size(dev_priv,
 					    obj->base.size,
 					    obj->tiling_mode);
-	fence_alignment = i915_gem_get_ggtt_alignment(obj->base.dev,
+	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
 						      obj->base.size,
 						      obj->tiling_mode,
 						      true);
@@ -3717,7 +3719,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 		     (vma->node.start & (fence_alignment - 1)) == 0);
 
 	mappable = (vma->node.start + fence_size <=
-		    to_i915(obj->base.dev)->ggtt.mappable_end);
+		    dev_priv->ggtt.mappable_end);
 
 	obj->map_and_fenceable = mappable && fenceable;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4e42da691e4e..b7f9875f69b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -117,15 +117,16 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 static bool
 i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 {
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	u32 size;
 
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (INTEL_INFO(obj->base.dev)->gen >= 4)
+	if (INTEL_GEN(dev_priv) >= 4)
 		return true;
 
-	if (IS_GEN3(obj->base.dev)) {
+	if (IS_GEN3(dev_priv)) {
 		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
@@ -133,8 +134,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 			return false;
 	}
 
-	size = i915_gem_get_ggtt_size(obj->base.dev,
-				      obj->base.size, tiling_mode);
+	size = i915_gem_get_ggtt_size(dev_priv, obj->base.size, tiling_mode);
 	if (i915_gem_obj_ggtt_size(obj) != size)
 		return false;
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 61/73] drm/i915: Record allocated vma size
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (59 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 60/73] drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01 12:36   ` Joonas Lahtinen
  2016-08-01  9:11 ` [PATCH 62/73] drm/i915: Wrap vma->pin_count accessors with small inline helpers Chris Wilson
                   ` (12 subsequent siblings)
  73 siblings, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

Tracking the size of the VMA as allocated allows us to dramatically
reduce the complexity of later functions (like inserting the VMA into
the drm_mm range manager).
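
Condensed from the hunk below (sketch, not the literal diff), the size
is now fixed once at VMA creation according to the GGTT view type:

	vma->size = obj->base.size;
	if (view->type == I915_GGTT_VIEW_PARTIAL)
		vma->size = (u64)view->params.partial.size << PAGE_SHIFT;
	else if (view->type == I915_GGTT_VIEW_ROTATED)
		vma->size = (u64)intel_rotation_info_size(&view->params.rotated)
			<< PAGE_SHIFT;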

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c     | 103 ++++++++++++++----------------------
 drivers/gpu/drm/i915/i915_gem_gtt.c |  67 ++++++++---------------
 drivers/gpu/drm/i915/i915_gem_gtt.h |   5 +-
 3 files changed, 63 insertions(+), 112 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f11bf6c4f86a..d89fbee5c48a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2976,53 +2976,36 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 			       u64 alignment,
 			       u64 flags)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	u64 start, end;
-	u32 search_flag, alloc_flag;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
+	u64 start, end;
+	u64 min_alignment;
 	int ret;
 
-	if (i915_is_ggtt(vm)) {
-		u32 fence_size, fence_alignment, unfenced_alignment;
-		u64 view_size;
-
-		if (WARN_ON(!ggtt_view))
-			return ERR_PTR(-EINVAL);
-
-		view_size = i915_ggtt_view_size(obj, ggtt_view);
-
-		fence_size = i915_gem_get_ggtt_size(dev_priv,
-						    view_size,
-						    obj->tiling_mode);
-		fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
-							      view_size,
-							      obj->tiling_mode,
-							      true);
-		unfenced_alignment = i915_gem_get_ggtt_alignment(dev_priv,
-								 view_size,
-								 obj->tiling_mode,
-								 false);
-		size = max(size, view_size);
-		if (flags & PIN_MAPPABLE)
-			size = max_t(u64, size, fence_size);
-
-		if (alignment == 0)
-			alignment = flags & PIN_MAPPABLE ? fence_alignment :
-				unfenced_alignment;
-		if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
-			DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
-				  ggtt_view ? ggtt_view->type : 0,
-				  (long long)alignment);
-			return ERR_PTR(-EINVAL);
-		}
-	} else {
-		size = max_t(u64, size, obj->base.size);
-		alignment = 4096;
+	vma = ggtt_view ?
+		i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+		i915_gem_obj_lookup_or_create_vma(obj, vm);
+	if (IS_ERR(vma))
+		return vma;
+
+	size = max(size, vma->size);
+	if (flags & PIN_MAPPABLE)
+		size = i915_gem_get_ggtt_size(dev_priv, size, obj->tiling_mode);
+
+	min_alignment =
+		i915_gem_get_ggtt_alignment(dev_priv, size, obj->tiling_mode,
+					    flags & PIN_MAPPABLE);
+	if (alignment == 0)
+		alignment = min_alignment;
+	if (alignment & (min_alignment - 1)) {
+		DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
+			  alignment, min_alignment);
+		return ERR_PTR(-EINVAL);
 	}
 
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-	end = vm->total;
+
+	end = vma->vm->total;
 	if (flags & PIN_MAPPABLE)
 		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
@@ -3033,8 +3016,7 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
-			  ggtt_view ? ggtt_view->type : 0,
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
 			  size, obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
@@ -3047,31 +3029,27 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
-	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
-			  i915_gem_obj_lookup_or_create_vma(obj, vm);
-
-	if (IS_ERR(vma))
-		goto err_unpin;
-
 	if (flags & PIN_OFFSET_FIXED) {
 		uint64_t offset = flags & PIN_OFFSET_MASK;
-
-		if (offset & (alignment - 1) || offset + size > end) {
+		if (offset & (alignment - 1) || offset > end - size) {
 			ret = -EINVAL;
-			goto err_vma;
+			goto err_unpin;
 		}
+
 		vma->node.start = offset;
 		vma->node.size = size;
 		vma->node.color = obj->cache_level;
-		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
 		if (ret) {
 			ret = i915_gem_evict_for_vma(vma);
 			if (ret == 0)
-				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+			if (ret)
+				goto err_unpin;
 		}
-		if (ret)
-			goto err_vma;
 	} else {
+		u32 search_flag, alloc_flag;
+
 		if (flags & PIN_HIGH) {
 			search_flag = DRM_MM_SEARCH_BELOW;
 			alloc_flag = DRM_MM_CREATE_TOP;
@@ -3090,36 +3068,35 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 			alignment = 0;
 
 search_free:
-		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
+		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+							  &vma->node,
 							  size, alignment,
 							  obj->cache_level,
 							  start, end,
 							  search_flag,
 							  alloc_flag);
 		if (ret) {
-			ret = i915_gem_evict_something(vm, size, alignment,
+			ret = i915_gem_evict_something(vma->vm, size, alignment,
 						       obj->cache_level,
 						       start, end,
 						       flags);
 			if (ret == 0)
 				goto search_free;
 
-			goto err_vma;
+			goto err_unpin;
 		}
 	}
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_move_tail(&vma->vm_link, &vm->inactive_list);
+	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	obj->bind_count++;
 
 	return vma;
 
-err_vma:
-	vma = ERR_PTR(ret);
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-	return vma;
+	return ERR_PTR(ret);
 }
 
 bool
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 31d13aea5461..ae6426e83ee0 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -184,7 +184,7 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
 	vma->vm->clear_range(vma->vm,
 			     vma->node.start,
-			     vma->obj->base.size,
+			     vma->size,
 			     true);
 }
 
@@ -2697,28 +2697,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-	struct drm_device *dev = vma->vm->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_object *obj = vma->obj;
-	const uint64_t size = min_t(uint64_t,
-				    obj->base.size,
-				    vma->node.size);
+	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+	const u64 size = min(vma->size, vma->node.size);
 
-	if (vma->bound & GLOBAL_BIND) {
+	if (vma->bound & GLOBAL_BIND)
 		vma->vm->clear_range(vma->vm,
-				     vma->node.start,
-				     size,
+				     vma->node.start, size,
 				     true);
-	}
-
-	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
-		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 
+	if (vma->bound & LOCAL_BIND && appgtt)
 		appgtt->base.clear_range(&appgtt->base,
-					 vma->node.start,
-					 size,
+					 vma->node.start, size,
 					 true);
-	}
 }
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@@ -3367,14 +3357,14 @@ void i915_vma_close(struct i915_vma *vma)
 static struct i915_vma *
 __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 		      struct i915_address_space *vm,
-		      const struct i915_ggtt_view *ggtt_view)
+		      const struct i915_ggtt_view *view)
 {
 	struct i915_vma *vma;
 	int i;
 
 	GEM_BUG_ON(vm->closed);
 
-	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+	if (WARN_ON(i915_is_ggtt(vm) != !!view))
 		return ERR_PTR(-EINVAL);
 
 	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
@@ -3388,12 +3378,22 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	list_add(&vma->vm_link, &vm->unbound_list);
 	vma->vm = vm;
 	vma->obj = obj;
+	vma->size = obj->base.size;
 	vma->is_ggtt = i915_is_ggtt(vm);
 
-	if (i915_is_ggtt(vm))
-		vma->ggtt_view = *ggtt_view;
-	else
+	if (i915_is_ggtt(vm)) {
+		vma->ggtt_view = *view;
+		if (view->type == I915_GGTT_VIEW_PARTIAL) {
+			vma->size = view->params.partial.size;
+			vma->size <<= PAGE_SHIFT;
+		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
+			vma->size =
+				intel_rotation_info_size(&view->params.rotated);
+			vma->size <<= PAGE_SHIFT;
+		}
+	} else {
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+	}
 
 	list_add_tail(&vma->obj_link, &obj->vma_list);
 
@@ -3678,29 +3678,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	return 0;
 }
 
-/**
- * i915_ggtt_view_size - Get the size of a GGTT view.
- * @obj: Object the view is of.
- * @view: The view in question.
- *
- * @return The size of the GGTT view in bytes.
- */
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
-		    const struct i915_ggtt_view *view)
-{
-	if (view->type == I915_GGTT_VIEW_NORMAL) {
-		return obj->base.size;
-	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
-	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
-		return view->params.partial.size << PAGE_SHIFT;
-	} else {
-		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
-		return obj->base.size;
-	}
-}
-
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 {
 	void __iomem *ptr;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 2ebf07defa4e..2de6f613ac1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -180,6 +180,7 @@ struct i915_vma {
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
 	void __iomem *iomap;
+	u64 size;
 
 	unsigned int active;
 	struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -610,10 +611,6 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 	return true;
 }
 
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
-		    const struct i915_ggtt_view *view);
-
 /**
  * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
  * @vma: VMA to iomap
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 62/73] drm/i915: Wrap vma->pin_count accessors with small inline helpers
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (60 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 61/73] drm/i915: Record allocated vma size Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 63/73] drm/i915: Start passing around i915_vma from execbuffer Chris Wilson
                   ` (11 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

In the next few patches, the VMA pinning API is overhauled; to reduce
the churn, we pull the update to the accessors out into this prep patch.
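
For illustration, callers convert along these lines (a minimal sketch of
the before/after, not taken verbatim from any single hunk):

	/* Before: open-coded access to the bitfield */
	if (vma->pin_count)
		return -EBUSY;
	vma->pin_count++;

	/* After: the inline helpers hide the representation */
	if (i915_vma_is_pinned(vma))
		return -EBUSY;
	__i915_vma_pin(vma);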

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_gem.c            | 26 ++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_evict.c      | 12 ++++++------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_fence.c      |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 10 +++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.h        | 31 ++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gpu_error.c      |  4 ++--
 8 files changed, 58 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 6285d50e6876..e6428feffc56 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -168,7 +168,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->pin_count > 0)
+		if (i915_vma_is_pinned(vma))
 			pin_count++;
 	}
 	seq_printf(m, " (pinned x %d)", pin_count);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d89fbee5c48a..fddd989873f0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -153,10 +153,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
-		if (vma->pin_count)
+		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
 	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
-		if (vma->pin_count)
+		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2806,7 +2806,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 
 static void __i915_vma_iounmap(struct i915_vma *vma)
 {
-	GEM_BUG_ON(vma->pin_count);
+	GEM_BUG_ON(i915_vma_is_pinned(vma));
 
 	if (vma->iomap == NULL)
 		return;
@@ -2833,7 +2833,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		 * take a pin on the vma so that the second unbind is
 		 * aborted.
 		 */
-		vma->pin_count++;
+		__i915_vma_pin(vma);
 
 		for_each_active(active, idx) {
 			ret = i915_gem_active_retire(&vma->last_read[idx],
@@ -2842,14 +2842,14 @@ int i915_vma_unbind(struct i915_vma *vma)
 				break;
 		}
 
-		vma->pin_count--;
+		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
 
 		GEM_BUG_ON(i915_vma_is_active(vma));
 	}
 
-	if (vma->pin_count)
+	if (i915_vma_is_pinned(vma))
 		return -EBUSY;
 
 	if (!drm_mm_node_allocated(&vma->node))
@@ -3293,7 +3293,7 @@ restart:
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		if (vma->pin_count) {
+		if (i915_vma_is_pinned(vma)) {
 			DRM_DEBUG("can not change the cache level of pinned objects\n");
 			return -EBUSY;
 		}
@@ -3730,11 +3730,11 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 			  i915_gem_obj_to_vma(obj, vm);
 
 	if (vma) {
-		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+		if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
 		if (i915_vma_misplaced(vma, size, alignment, flags)) {
-			WARN(vma->pin_count,
+			WARN(i915_vma_is_pinned(vma),
 			     "bo is already pinned in %s with incorrect alignment:"
 			     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
@@ -3772,7 +3772,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 
-	vma->pin_count++;
+	__i915_vma_pin(vma);
 	return 0;
 }
 
@@ -3811,10 +3811,10 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
-	WARN_ON(vma->pin_count == 0);
+	WARN_ON(!i915_vma_is_pinned(vma));
 	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
-	--vma->pin_count;
+	__i915_vma_unpin(vma);
 }
 
 int
@@ -4682,7 +4682,7 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->pin_count > 0)
+		if (i915_vma_is_pinned(vma))
 			return true;
 
 	return false;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ef12ecd2b182..7be425826539 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -49,7 +49,7 @@ gpu_is_idle(struct drm_i915_private *dev_priv)
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
-	if (vma->pin_count)
+	if (i915_vma_is_pinned(vma))
 		return false;
 
 	if (WARN_ON(!list_empty(&vma->exec_list)))
@@ -183,7 +183,7 @@ found:
 	 */
 	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
 		if (drm_mm_scan_remove_block(&vma->node))
-			vma->pin_count++;
+			__i915_vma_pin(vma);
 		else
 			list_del_init(&vma->exec_list);
 	}
@@ -195,7 +195,7 @@ found:
 				       exec_list);
 
 		list_del_init(&vma->exec_list);
-		vma->pin_count--;
+		__i915_vma_unpin(vma);
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
 	}
@@ -220,8 +220,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
 
 		vma = container_of(node, typeof(*vma), node);
 
-		if (vma->pin_count) {
-			if (!vma->exec_entry || (vma->pin_count > 1))
+		if (i915_vma_is_pinned(vma)) {
+			if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
 				/* Object is pinned for some other use */
 				return -EBUSY;
 
@@ -281,7 +281,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 	}
 
 	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
-		if (vma->pin_count == 0)
+		if (!i915_vma_is_pinned(vma))
 			WARN_ON(i915_vma_unbind(vma));
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2e27e730ecb..82ed80f68103 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -261,7 +261,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 		i915_gem_object_unpin_fence(obj);
 
 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		vma->pin_count--;
+		__i915_vma_unpin(vma);
 
 	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index dbaab9ce29c9..3b462da612ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -431,7 +431,7 @@ i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 
 		WARN_ON(!ggtt_vma ||
 			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			ggtt_vma->pin_count);
+			i915_vma_pin_count(ggtt_vma));
 		dev_priv->fence_regs[obj->fence_reg].pin_count++;
 		return true;
 	} else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ae6426e83ee0..2052f05c4e12 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3327,7 +3327,7 @@ i915_vma_retire(struct i915_gem_active *active,
 		return;
 
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	if (unlikely(vma->closed && !vma->pin_count))
+	if (unlikely(vma->closed && !i915_vma_is_pinned(vma)))
 		WARN_ON(i915_vma_unbind(vma));
 }
 
@@ -3350,7 +3350,7 @@ void i915_vma_close(struct i915_vma *vma)
 	vma->closed = true;
 
 	list_del_init(&vma->obj_link);
-	if (!i915_vma_is_active(vma) && !vma->pin_count)
+	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
 		WARN_ON(i915_vma_unbind(vma));
 }
 
@@ -3659,12 +3659,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 
 	if (vma->bound == 0 && vma->vm->allocate_va_range) {
 		/* XXX: i915_vma_pin() will fix this +- hack */
-		vma->pin_count++;
+		__i915_vma_pin(vma);
 		trace_i915_va_alloc(vma);
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
 						 vma->node.size);
-		vma->pin_count--;
+		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
 	}
@@ -3700,6 +3700,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 		vma->iomap = ptr;
 	}
 
-	vma->pin_count++;
+	__i915_vma_pin(vma);
 	return ptr;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 2de6f613ac1e..d822734b5bc0 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -611,6 +611,34 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 	return true;
 }
 
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+	return vma->pin_count;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+	return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+	vma->pin_count++;
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+	vma->pin_count--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	__i915_vma_unpin(vma);
+}
+
 /**
  * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
  * @vma: VMA to iomap
@@ -639,9 +667,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
 static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
 	lockdep_assert_held(&vma->vm->dev->struct_mutex);
-	GEM_BUG_ON(vma->pin_count == 0);
 	GEM_BUG_ON(vma->iomap == NULL);
-	vma->pin_count--;
+	i915_vma_unpin(vma);
 }
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index c19f72e1bcf7..d94eb907a23a 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -818,7 +818,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
 			break;
 
 		list_for_each_entry(vma, &obj->vma_list, obj_link)
-			if (vma->vm == vm && vma->pin_count > 0)
+			if (vma->vm == vm && i915_vma_is_pinned(vma))
 				capture_bo(err++, vma);
 	}
 
@@ -1230,7 +1230,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		list_for_each_entry(vma, &obj->vma_list, obj_link)
-			if (vma->vm == vm && vma->pin_count > 0)
+			if (vma->vm == vm && i915_vma_is_pinned(vma))
 				i++;
 	}
 	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 63/73] drm/i915: Start passing around i915_vma from execbuffer
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (61 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 62/73] drm/i915: Wrap vma->pin_count accessors with small inline helpers Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 64/73] drm/i915: Combine all i915_vma bitfields into a single set of flags Chris Wilson
                   ` (10 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx; +Cc: Mika Kuoppala

During execbuffer we look up the i915_vma for each object in order to
reserve it in the VM. However, we then perform a second lookup of the
vma in order to pin it, all because we lack the necessary interfaces to
operate directly on the i915_vma - so introduce i915_vma_pin()!

v2: Tidy parameter lists to remove one level of redirection in the hot
path.
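
As a rough sketch of the new call pattern (illustrative only, following
the reservation path in this patch), the vma returned by the execbuffer
lookup is now pinned directly rather than re-looked-up via its object:

	/* eb lookup already gave us the vma; pin it in place */
	ret = i915_vma_pin(vma,
			   entry->pad_to_size,
			   entry->alignment,
			   flags);
	if (ret)
		return ret;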

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            |  24 +----
 drivers/gpu/drm/i915/i915_gem.c            | 157 ++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 142 +++++++++++---------------
 drivers/gpu/drm/i915/i915_gem_gtt.c        |   3 -
 drivers/gpu/drm/i915/i915_gem_gtt.h        |  14 +++
 5 files changed, 139 insertions(+), 201 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3d73394b52d7..cda8238c952f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3018,23 +3018,6 @@ struct drm_i915_gem_object *i915_gem_object_create_from_data(
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
 
-/* Flags used by pin/bind&friends. */
-#define PIN_MAPPABLE	(1<<0)
-#define PIN_NONBLOCK	(1<<1)
-#define PIN_GLOBAL	(1<<2)
-#define PIN_OFFSET_BIAS	(1<<3)
-#define PIN_USER	(1<<4)
-#define PIN_UPDATE	(1<<5)
-#define PIN_ZONE_4G	(1<<6)
-#define PIN_HIGH	(1<<7)
-#define PIN_OFFSET_FIXED	(1<<8)
-#define PIN_OFFSET_MASK (~4095)
-int __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags);
 int __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
@@ -3311,11 +3294,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
 		      unsigned flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-	return i915_gem_object_pin(obj, &ggtt->base, 0, alignment,
-				   flags | PIN_GLOBAL);
+	return i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
+					0, alignment, flags);
 }
 
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fddd989873f0..3158b5dd14aa 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2968,25 +2968,17 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
  * @alignment: requested alignment
  * @flags: mask of PIN_* flags to use
  */
-static struct i915_vma *
-i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
-			       struct i915_address_space *vm,
-			       const struct i915_ggtt_view *ggtt_view,
-			       u64 size,
-			       u64 alignment,
-			       u64 flags)
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_vma *vma;
+	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+	struct drm_i915_gem_object *obj = vma->obj;
 	u64 start, end;
 	u64 min_alignment;
 	int ret;
 
-	vma = ggtt_view ?
-		i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
-		i915_gem_obj_lookup_or_create_vma(obj, vm);
-	if (IS_ERR(vma))
-		return vma;
+	GEM_BUG_ON(vma->bound);
+	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
 	size = max(size, vma->size);
 	if (flags & PIN_MAPPABLE)
@@ -3000,7 +2992,7 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 	if (alignment & (min_alignment - 1)) {
 		DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
 			  alignment, min_alignment);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
@@ -3020,17 +3012,17 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 			  size, obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
 	}
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
-		return ERR_PTR(ret);
+		return ret;
 
 	i915_gem_object_pin_pages(obj);
 
 	if (flags & PIN_OFFSET_FIXED) {
-		uint64_t offset = flags & PIN_OFFSET_MASK;
+		u64 offset = flags & PIN_OFFSET_MASK;
 		if (offset & (alignment - 1) || offset > end - size) {
 			ret = -EINVAL;
 			goto err_unpin;
@@ -3092,11 +3084,11 @@ search_free:
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	obj->bind_count++;
 
-	return vma;
+	return 0;
 
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 bool
@@ -3657,6 +3649,9 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 
+	if (!drm_mm_node_allocated(&vma->node))
+		return false;
+
 	if (vma->node.size < size)
 		return true;
 
@@ -3701,91 +3696,42 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	obj->map_and_fenceable = mappable && fenceable;
 }
 
-static int
-i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
-		       struct i915_address_space *vm,
-		       const struct i915_ggtt_view *ggtt_view,
-		       u64 size,
-		       u64 alignment,
-		       u64 flags)
+int
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_vma *vma;
-	unsigned bound;
+	unsigned int bound = vma->bound;
 	int ret;
 
-	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
-		return -ENODEV;
-
-	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
-		return -EINVAL;
-
-	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
-		return -EINVAL;
-
-	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
-		return -EINVAL;
-
-	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
-			  i915_gem_obj_to_vma(obj, vm);
+	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+	GEM_BUG_ON((flags & PIN_GLOBAL) && !vma->is_ggtt);
 
-	if (vma) {
-		if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
-			return -EBUSY;
-
-		if (i915_vma_misplaced(vma, size, alignment, flags)) {
-			WARN(i915_vma_is_pinned(vma),
-			     "bo is already pinned in %s with incorrect alignment:"
-			     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
-			     " obj->map_and_fenceable=%d\n",
-			     ggtt_view ? "ggtt" : "ppgtt",
-			     upper_32_bits(vma->node.start),
-			     lower_32_bits(vma->node.start),
-			     (long long)alignment,
-			     !!(flags & PIN_MAPPABLE),
-			     obj->map_and_fenceable);
-			ret = i915_vma_unbind(vma);
-			if (ret)
-				return ret;
+	if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+		return -EBUSY;
 
-			vma = NULL;
-		}
-	}
+	/* Pin early to prevent the shrinker/eviction logic from destroying
+	 * our vma as we insert and bind.
+	 */
+	__i915_vma_pin(vma);
 
-	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_insert_into_vm(obj, vm, ggtt_view,
-						     size, alignment, flags);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+	if (!bound) {
+		ret = i915_vma_insert(vma, size, alignment, flags);
+		if (ret)
+			goto err;
 	}
 
-	bound = vma->bound;
-	ret = i915_vma_bind(vma, obj->cache_level, flags);
+	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
 	if (ret)
-		return ret;
+		goto err;
 
-	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
-	    (bound ^ vma->bound) & GLOBAL_BIND) {
+	if ((bound ^ vma->bound) & GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
-		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-	}
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
-
-	__i915_vma_pin(vma);
 	return 0;
-}
 
-int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags)
-{
-	return i915_gem_object_do_pin(obj, vm,
-				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
-				      size, alignment, flags);
+err:
+	__i915_vma_unpin(vma);
+	return ret;
 }
 
 int
@@ -3795,14 +3741,35 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 u64 alignment,
 			 u64 flags)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_vma *vma;
+	int ret;
 
 	BUG_ON(!view);
 
-	return i915_gem_object_do_pin(obj, &ggtt->base, view,
-				      size, alignment, flags | PIN_GLOBAL);
+	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	if (i915_vma_misplaced(vma, size, alignment, flags)) {
+		if (flags & PIN_NONBLOCK &&
+		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
+			return -ENOSPC;
+
+		WARN(i915_vma_is_pinned(vma),
+		     "bo is already pinned in ggtt with incorrect alignment:"
+		     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
+		     " obj->map_and_fenceable=%d\n",
+		     upper_32_bits(vma->node.start),
+		     lower_32_bits(vma->node.start),
+		     (long long)alignment,
+		     !!(flags & PIN_MAPPABLE),
+		     obj->map_and_fenceable);
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ret;
+	}
+
+	return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 82ed80f68103..db87d30b86ac 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -45,11 +45,10 @@
 struct i915_execbuffer_params {
 	struct drm_device               *dev;
 	struct drm_file                 *file;
-	u32				 dispatch_flags;
-	u32				 args_batch_start_offset;
-	u32				 batch_obj_vm_offset;
+	struct i915_vma			*batch;
+	u32				dispatch_flags;
+	u32				args_batch_start_offset;
 	struct intel_engine_cs          *engine;
-	struct drm_i915_gem_object      *batch_obj;
 	struct i915_gem_context         *ctx;
 	struct drm_i915_gem_request     *request;
 };
@@ -102,6 +101,26 @@ eb_reset(struct eb_vmas *eb)
 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
+static struct i915_vma *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
+		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma;
+}
+
 static int
 eb_lookup_vmas(struct eb_vmas *eb,
 	       struct drm_i915_gem_exec_object2 *exec,
@@ -198,35 +217,6 @@ err:
 	return ret;
 }
 
-static inline struct i915_vma *
-eb_get_batch_vma(struct eb_vmas *eb)
-{
-	/* The batch is always the LAST item in the VMA list */
-	struct i915_vma *vma = list_last_entry(&eb->vmas, typeof(*vma), exec_list);
-
-	return vma;
-}
-
-static struct drm_i915_gem_object *
-eb_get_batch(struct eb_vmas *eb)
-{
-	struct i915_vma *vma = eb_get_batch_vma(eb);
-
-	/*
-	 * SNA is doing fancy tricks with compressing batch buffers, which leads
-	 * to negative relocation deltas. Usually that works out ok since the
-	 * relocate address is still positive, except when the batch is placed
-	 * very low in the GTT. Ensure this doesn't happen.
-	 *
-	 * Note that actual hangs have only been observed on gen7, but for
-	 * paranoia do it everywhere.
-	 */
-	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
-		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
-
-	return vma->obj;
-}
-
 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
 	if (eb->and < 0) {
@@ -682,16 +672,16 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 			flags |= PIN_HIGH;
 	}
 
-	ret = i915_gem_object_pin(obj, vma->vm,
-				  entry->pad_to_size,
-				  entry->alignment,
-				  flags);
-	if ((ret == -ENOSPC  || ret == -E2BIG) &&
+	ret = i915_vma_pin(vma,
+			   entry->pad_to_size,
+			   entry->alignment,
+			   flags);
+	if ((ret == -ENOSPC || ret == -E2BIG) &&
 	    only_mappable_for_reloc(entry->flags))
-		ret = i915_gem_object_pin(obj, vma->vm,
-					  entry->pad_to_size,
-					  entry->alignment,
-					  flags & ~PIN_MAPPABLE);
+		ret = i915_vma_pin(vma,
+				   entry->pad_to_size,
+				   entry->alignment,
+				   flags & ~PIN_MAPPABLE);
 	if (ret)
 		return ret;
 
@@ -1252,11 +1242,11 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static struct drm_i915_gem_object*
+static struct i915_vma*
 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
-			  struct eb_vmas *eb,
 			  struct drm_i915_gem_object *batch_obj,
+			  struct eb_vmas *eb,
 			  u32 batch_start_offset,
 			  u32 batch_len,
 			  bool is_master)
@@ -1268,7 +1258,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
 						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
-		return shadow_batch_obj;
+		return ERR_CAST(shadow_batch_obj);
 
 	ret = intel_engine_cmd_parser(engine,
 				      batch_obj,
@@ -1293,14 +1283,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 	i915_gem_object_get(shadow_batch_obj);
 	list_add_tail(&vma->exec_list, &eb->vmas);
 
-	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
-
-	return shadow_batch_obj;
+	return vma;
 
 err:
 	i915_gem_object_unpin_pages(shadow_batch_obj);
 	if (ret == -EACCES) /* unhandled chained batch */
-		return batch_obj;
+		return NULL;
 	else
 		return ERR_PTR(ret);
 }
@@ -1381,11 +1369,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	}
 
 	exec_len   = args->batch_len;
-	exec_start = params->batch_obj_vm_offset +
+	exec_start = params->batch->node.start +
 		     params->args_batch_start_offset;
 
 	if (exec_len == 0)
-		exec_len = params->batch_obj->base.size;
+		exec_len = params->batch->size;
 
 	ret = params->engine->emit_bb_start(params->request,
 					    exec_start, exec_len,
@@ -1489,7 +1477,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct eb_vmas *eb;
-	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
@@ -1583,7 +1570,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = eb_get_batch(eb);
+	params->batch = eb_get_batch(eb);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1607,7 +1594,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
-	if (batch_obj->base.pending_write_domain) {
+	if (params->batch->obj->base.pending_write_domain) {
 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
 		ret = -EINVAL;
 		goto err;
@@ -1615,26 +1602,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	params->args_batch_start_offset = args->batch_start_offset;
 	if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
-		struct drm_i915_gem_object *parsed_batch_obj;
-
-		parsed_batch_obj = i915_gem_execbuffer_parse(engine,
-							     &shadow_exec_entry,
-							     eb,
-							     batch_obj,
-							     args->batch_start_offset,
-							     args->batch_len,
-							     drm_is_current_master(file));
-		if (IS_ERR(parsed_batch_obj)) {
-			ret = PTR_ERR(parsed_batch_obj);
+		struct i915_vma *vma;
+
+		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
+						params->batch->obj,
+						eb,
+						args->batch_start_offset,
+						args->batch_len,
+						drm_is_current_master(file));
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
 			goto err;
 		}
 
-		/*
-		 * parsed_batch_obj == batch_obj means batch not fully parsed:
-		 * Accept, but don't promote to secure.
-		 */
-
-		if (parsed_batch_obj != batch_obj) {
+		if (vma) {
 			/*
 			 * Batch parsed and accepted:
 			 *
@@ -1646,16 +1627,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			 */
 			dispatch_flags |= I915_DISPATCH_SECURE;
 			params->args_batch_start_offset = 0;
-			batch_obj = parsed_batch_obj;
+			params->batch = vma;
 		}
 	}
 
-	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (dispatch_flags & I915_DISPATCH_SECURE) {
+		struct drm_i915_gem_object *obj = params->batch->obj;
+
 		/*
 		 * So on first glance it looks freaky that we pin the batch here
 		 * outside of the reservation loop. But:
@@ -1666,13 +1649,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 *   fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
+		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 		if (ret)
 			goto err;
 
-		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
-	} else
-		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
+		params->batch = i915_gem_obj_to_ggtt(obj);
+	}
 
 	/* Allocate a request for this batch buffer nice and early. */
 	params->request = i915_gem_request_alloc(engine, ctx);
@@ -1695,12 +1677,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->file                    = file;
 	params->engine                    = engine;
 	params->dispatch_flags          = dispatch_flags;
-	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
 
 	ret = execbuf_submit(params, args, &eb->vmas);
 err_request:
-	__i915_add_request(params->request, params->batch_obj, ret == 0);
+	__i915_add_request(params->request, params->batch->obj, ret == 0);
 
 err_batch_unpin:
 	/*
@@ -1710,8 +1691,7 @@ err_batch_unpin:
 	 * active.
 	 */
 	if (dispatch_flags & I915_DISPATCH_SECURE)
-		i915_gem_object_ggtt_unpin(batch_obj);
-
+		i915_vma_unpin(params->batch);
 err:
 	/* the request owns the ref now */
 	i915_gem_context_put(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 2052f05c4e12..824a8b5c6b2c 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3658,13 +3658,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		return 0;
 
 	if (vma->bound == 0 && vma->vm->allocate_va_range) {
-		/* XXX: i915_vma_pin() will fix this +- hack */
-		__i915_vma_pin(vma);
 		trace_i915_va_alloc(vma);
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
 						 vma->node.size);
-		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index d822734b5bc0..cee553e89c19 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -611,6 +611,20 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 	return true;
 }
 
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+/* Flags used by pin/bind&friends. */
+#define PIN_MAPPABLE		BIT(0)
+#define PIN_NONBLOCK		BIT(1)
+#define PIN_GLOBAL		BIT(2)
+#define PIN_OFFSET_BIAS		BIT(3)
+#define PIN_USER		BIT(4)
+#define PIN_UPDATE		BIT(5)
+#define PIN_ZONE_4G		BIT(6)
+#define PIN_HIGH		BIT(7)
+#define PIN_OFFSET_FIXED	BIT(8)
+#define PIN_OFFSET_MASK		(~4095)
+
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
 	return vma->pin_count;
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 64/73] drm/i915: Combine all i915_vma bitfields into a single set of flags
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (62 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 63/73] drm/i915: Start passing around i915_vma from execbuffer Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 65/73] drm/i915: Make i915_vma_pin() small and inline Chris Wilson
                   ` (9 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

In preparation for performing some magic to speed up i915_vma_pin(),
which is among the hottest of hot paths in execbuf, refactor all the
bitfields accessed by i915_vma_pin() into a single unified set of flags.
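
A sketch of the resulting layout of the unified flags word, with the bit
positions as assigned in this patch:

	unsigned int flags;
	/* bits 0-3: pin count (I915_VMA_PIN_MASK)
	 * bit 5:    I915_VMA_GLOBAL_BIND
	 * bit 6:    I915_VMA_LOCAL_BIND
	 * bit 7:    I915_VMA_GGTT
	 * bit 8:    I915_VMA_CLOSED
	 */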

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  8 ++---
 drivers/gpu/drm/i915/i915_gem.c            | 40 ++++++++++++----------
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  5 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 46 ++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_gtt.h        | 55 ++++++++++++++++++------------
 drivers/gpu/drm/i915/i915_gem_shrinker.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c     |  2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      |  4 +--
 9 files changed, 90 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index e6428feffc56..fcfa9ca6b50a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -125,7 +125,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
+		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
 			size += vma->node.size;
 	}
 
@@ -181,9 +181,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 			continue;
 
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
-			   vma->is_ggtt ? "g" : "pp",
+			   i915_vma_is_ggtt(vma) ? "g" : "pp",
 			   vma->node.start, vma->node.size);
-		if (vma->is_ggtt)
+		if (i915_vma_is_ggtt(vma))
 			seq_printf(m, ", type: %u", vma->ggtt_view.type);
 		seq_puts(m, ")");
 	}
@@ -356,7 +356,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		if (vma->is_ggtt) {
+		if (i915_vma_is_ggtt(vma)) {
 			stats->global += vma->node.size;
 		} else {
 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3158b5dd14aa..f6e2b38cf66f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2858,7 +2858,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!obj->pages);
 
-	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+	if (i915_vma_is_ggtt(vma) &&
+	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 		i915_gem_object_finish_gtt(obj);
 
 		/* release the fence reg _after_ flushing */
@@ -2873,12 +2874,12 @@ int i915_vma_unbind(struct i915_vma *vma)
 		trace_i915_vma_unbind(vma);
 		vma->vm->unbind_vma(vma);
 	}
-	vma->bound = 0;
+	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-	if (vma->is_ggtt) {
+	if (i915_vma_is_ggtt(vma)) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 			obj->map_and_fenceable = false;
 		} else if (vma->ggtt_view.pages) {
@@ -2901,7 +2902,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	i915_gem_object_unpin_pages(obj);
 
 destroy:
-	if (unlikely(vma->closed))
+	if (unlikely(i915_vma_is_closed(vma)))
 		i915_vma_destroy(vma);
 
 	return 0;
@@ -2977,7 +2978,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	u64 min_alignment;
 	int ret;
 
-	GEM_BUG_ON(vma->bound);
+	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
 	size = max(size, vma->size);
@@ -3699,13 +3700,14 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 int
 i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	unsigned int bound = vma->bound;
+	unsigned int bound;
 	int ret;
 
 	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
-	GEM_BUG_ON((flags & PIN_GLOBAL) && !vma->is_ggtt);
+	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
-	if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+	bound = vma->flags;
+	if (WARN_ON((bound & I915_VMA_PIN_MASK) == I915_VMA_PIN_MASK))
 		return -EBUSY;
 
 	/* Pin early to prevent the shrinker/eviction logic from destroying
@@ -3713,7 +3715,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	 */
 	__i915_vma_pin(vma);
 
-	if (!bound) {
+	if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
 			goto err;
@@ -3723,7 +3725,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	if (ret)
 		goto err;
 
-	if ((bound ^ vma->bound) & GLOBAL_BIND)
+	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
@@ -4024,9 +4026,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 * unbound now.
 	 */
 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
-		GEM_BUG_ON(!vma->is_ggtt);
+		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 		GEM_BUG_ON(i915_vma_is_active(vma));
-		vma->pin_count = 0;
+		vma->flags &= ~I915_VMA_PIN_MASK;
 		i915_vma_close(vma);
 	}
 	GEM_BUG_ON(obj->bind_count);
@@ -4086,7 +4088,8 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(!view);
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
+		if (i915_vma_is_ggtt(vma) &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
 	return NULL;
 }
@@ -4575,7 +4578,7 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm)
@@ -4593,7 +4596,8 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
+		if (i915_vma_is_ggtt(vma) &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
 
 	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -4606,7 +4610,7 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@@ -4622,7 +4626,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
 			return true;
@@ -4637,7 +4641,7 @@ unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 	GEM_BUG_ON(list_empty(&o->vma_list));
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
 			return vma->node.size;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index eff6d3953ecd..dc7c0ae73b62 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -219,7 +219,7 @@ static void i915_ppgtt_close(struct i915_address_space *vm)
 		struct i915_vma *vma, *vn;
 
 		list_for_each_entry_safe(vma, vn, *phase, vm_link)
-			if (!vma->closed)
+			if (!i915_vma_is_closed(vma))
 				i915_vma_close(vma);
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index db87d30b86ac..888c63368fa0 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -717,7 +717,7 @@ need_reloc_mappable(struct i915_vma *vma)
 	if (entry->relocation_count == 0)
 		return false;
 
-	if (!vma->is_ggtt)
+	if (!i915_vma_is_ggtt(vma))
 		return false;
 
 	/* See also use_cpu_reloc() */
@@ -736,7 +736,8 @@ eb_vma_misplaced(struct i915_vma *vma)
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
+	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+		!i915_vma_is_ggtt(vma));
 
 	if (entry->alignment &&
 	    vma->node.start & (entry->alignment - 1))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 824a8b5c6b2c..660aa9919f72 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2654,7 +2654,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
 	 * upgrade to both bound if we bind either to avoid double-binding.
 	 */
-	vma->bound |= GLOBAL_BIND | LOCAL_BIND;
+	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 
 	return 0;
 }
@@ -2676,14 +2676,14 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 		pte_flags |= PTE_READ_ONLY;
 
 
-	if (flags & GLOBAL_BIND) {
+	if (flags & I915_VMA_GLOBAL_BIND) {
 		vma->vm->insert_entries(vma->vm,
 					vma->ggtt_view.pages,
 					vma->node.start,
 					cache_level, pte_flags);
 	}
 
-	if (flags & LOCAL_BIND) {
+	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_hw_ppgtt *appgtt =
 			to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
@@ -2700,12 +2700,12 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
 	const u64 size = min(vma->size, vma->node.size);
 
-	if (vma->bound & GLOBAL_BIND)
+	if (vma->flags & I915_VMA_GLOBAL_BIND)
 		vma->vm->clear_range(vma->vm,
 				     vma->node.start, size,
 				     true);
 
-	if (vma->bound & LOCAL_BIND && appgtt)
+	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
 		appgtt->base.clear_range(&appgtt->base,
 					 vma->node.start, size,
 					 true);
@@ -3327,7 +3327,7 @@ i915_vma_retire(struct i915_gem_active *active,
 		return;
 
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	if (unlikely(vma->closed && !i915_vma_is_pinned(vma)))
+	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
 		WARN_ON(i915_vma_unbind(vma));
 }
 
@@ -3335,10 +3335,10 @@ void i915_vma_destroy(struct i915_vma *vma)
 {
 	GEM_BUG_ON(vma->node.allocated);
 	GEM_BUG_ON(i915_vma_is_active(vma));
-	GEM_BUG_ON(!vma->closed);
+	GEM_BUG_ON(!i915_vma_is_closed(vma));
 
 	list_del(&vma->vm_link);
-	if (!vma->is_ggtt)
+	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
@@ -3346,8 +3346,8 @@ void i915_vma_destroy(struct i915_vma *vma)
 
 void i915_vma_close(struct i915_vma *vma)
 {
-	GEM_BUG_ON(vma->closed);
-	vma->closed = true;
+	GEM_BUG_ON(i915_vma_is_closed(vma));
+	vma->flags |= I915_VMA_CLOSED;
 
 	list_del_init(&vma->obj_link);
 	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
@@ -3379,9 +3379,9 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	vma->vm = vm;
 	vma->obj = obj;
 	vma->size = obj->base.size;
-	vma->is_ggtt = i915_is_ggtt(vm);
 
 	if (i915_is_ggtt(vm)) {
+		vma->flags |= I915_VMA_GGTT;
 		vma->ggtt_view = *view;
 		if (view->type == I915_GGTT_VIEW_PARTIAL) {
 			vma->size = view->params.partial.size;
@@ -3426,7 +3426,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, &ggtt->base, view);
 
-	GEM_BUG_ON(vma->closed);
+	GEM_BUG_ON(i915_vma_is_closed(vma));
 	return vma;
 
 }
@@ -3637,27 +3637,28 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags)
 {
-	int ret;
 	u32 bind_flags;
+	u32 vma_flags;
+	int ret;
 
 	if (WARN_ON(flags == 0))
 		return -EINVAL;
 
 	bind_flags = 0;
 	if (flags & PIN_GLOBAL)
-		bind_flags |= GLOBAL_BIND;
+		bind_flags |= I915_VMA_GLOBAL_BIND;
 	if (flags & PIN_USER)
-		bind_flags |= LOCAL_BIND;
+		bind_flags |= I915_VMA_LOCAL_BIND;
 
+	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 	if (flags & PIN_UPDATE)
-		bind_flags |= vma->bound;
+		bind_flags |= vma_flags;
 	else
-		bind_flags &= ~vma->bound;
-
+		bind_flags &= ~vma_flags;
 	if (bind_flags == 0)
 		return 0;
 
-	if (vma->bound == 0 && vma->vm->allocate_va_range) {
+	if (vma_flags == 0 && vma->vm->allocate_va_range) {
 		trace_i915_va_alloc(vma);
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
@@ -3670,8 +3671,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (ret)
 		return ret;
 
-	vma->bound |= bind_flags;
-
+	vma->flags |= bind_flags;
 	return 0;
 }
 
@@ -3683,8 +3683,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	if (WARN_ON(!vma->obj->map_and_fenceable))
 		return IO_ERR_PTR(-ENODEV);
 
-	GEM_BUG_ON(!vma->is_ggtt);
-	GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
 
 	ptr = vma->iomap;
 	if (ptr == NULL) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index cee553e89c19..5839091efaa6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -182,15 +182,28 @@ struct i915_vma {
 	void __iomem *iomap;
 	u64 size;
 
-	unsigned int active;
-	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	unsigned int flags;
+	/**
+	 * How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, execbuffer
+	 * (objects are not allowed multiple times for the same batchbuffer),
+	 * and the framebuffer code. When switching/pageflipping, the
+	 * framebuffer code has at most two buffers pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits.
+	 */
+#define I915_VMA_PIN_MASK 0xf
 
 	/** Flags and address space this VMA is bound to */
-#define GLOBAL_BIND	(1<<0)
-#define LOCAL_BIND	(1<<1)
-	unsigned int bound : 4;
-	bool is_ggtt : 1;
-	bool closed : 1;
+#define I915_VMA_GLOBAL_BIND	BIT(5)
+#define I915_VMA_LOCAL_BIND	BIT(6)
+
+#define I915_VMA_GGTT	BIT(7)
+#define I915_VMA_CLOSED BIT(8)
+
+	unsigned int active;
+	struct i915_gem_active last_read[I915_NUM_ENGINES];
 
 	/**
 	 * Support different GGTT views into the same object.
@@ -215,20 +228,18 @@ struct i915_vma {
 	struct hlist_node exec_node;
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
-
-	/**
-	 * How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, execbuffer
-	 * (objects are not allowed multiple times for the same batchbuffer),
-	 * and the framebuffer code. When switching/pageflipping, the
-	 * framebuffer code has at most two buffers pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_CLOSED;
+}
+
 static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
 {
 	return vma->active;
@@ -627,7 +638,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
 
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
-	return vma->pin_count;
+	return vma->flags & I915_VMA_PIN_MASK;
 }
 
 static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -637,14 +648,14 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
 
 static inline void __i915_vma_pin(struct i915_vma *vma)
 {
-	vma->pin_count++;
+	vma->flags++;
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
-	vma->pin_count--;
+	vma->flags--;
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 64d179d4f684..b5776358c05e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,7 +53,7 @@ static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->pin_count)
+		if (i915_vma_is_pinned(vma))
 			return true;
 
 	return false;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index bc91ffe614e2..13279610eeec 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -705,7 +705,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 		goto err;
 	}
 
-	vma->bound |= GLOBAL_BIND;
+	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
 	obj->bind_count++;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d94eb907a23a..cc28ad429dd8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -669,14 +669,14 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 	if (i915_is_ggtt(vm))
 		vma = i915_gem_obj_to_ggtt(src);
 	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-		   vma && (vma->bound & GLOBAL_BIND) &&
+		   vma && (vma->flags & I915_VMA_GLOBAL_BIND) &&
 		   reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
 
 	/* Cannot access stolen address directly, try to use the aperture */
 	if (src->stolen) {
 		use_ggtt = true;
 
-		if (!(vma && vma->bound & GLOBAL_BIND))
+		if (!(vma && vma->flags & I915_VMA_GLOBAL_BIND))
 			goto unwind;
 
 		reloc_offset = i915_gem_obj_ggtt_offset(src);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 65/73] drm/i915: Make i915_vma_pin() small and inline
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (63 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 64/73] drm/i915: Combine all i915_vma bitfields into a single set of flags Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 66/73] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin() Chris Wilson
                   ` (8 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

Not only is i915_vma_pin() called for every single object on every single
execbuf, it is usually a simple increment as the VMA is already bound for
execution by the GPU. Rearrange the tests for unbound and pin_count
overflow so that we can do the increment and test very cheaply, and
compactly enough to inline the operation into execbuf. The trick is to
note that we can check for an overflow bit (keeping space available for
it inside the flags) at the same time as checking the binding bits.
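
A minimal sketch of the resulting fast path (lifted from the inline
helper below): the pin count lives in the low bits with the overflow bit
just above it, and PIN_GLOBAL/PIN_USER are defined to the same bit
positions as I915_VMA_GLOBAL_BIND/I915_VMA_LOCAL_BIND, so a single
increment plus XOR-and-mask verifies "already bound as requested" and
"pin count did not overflow" in one test:

	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
		return 0;	/* bound and pinned; fast path done */

	return __i915_vma_do_pin(vma, size, alignment, flags);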

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c     | 18 +++++--------
 drivers/gpu/drm/i915/i915_gem_gtt.h | 53 ++++++++++++++++++++++++++-----------
 2 files changed, 44 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f6e2b38cf66f..6ee4a00537a3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3697,23 +3697,19 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	obj->map_and_fenceable = mappable && fenceable;
 }
 
-int
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+int __i915_vma_do_pin(struct i915_vma *vma,
+		      u64 size, u64 alignment, u64 flags)
 {
-	unsigned int bound;
+	unsigned int bound = vma->flags;
 	int ret;
 
 	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
 	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
-	bound = vma->flags;
-	if (WARN_ON((bound & I915_VMA_PIN_MASK) == I915_VMA_PIN_MASK))
-		return -EBUSY;
-
-	/* Pin early to prevent the shrinker/eviction logic from destroying
-	 * our vma as we insert and bind.
-	 */
-	__i915_vma_pin(vma);
+	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+		ret = -EBUSY;
+		goto err;
+	}
 
 	if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 5839091efaa6..2ba00fd7b662 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -194,13 +194,15 @@ struct i915_vma {
 	 * bits with absolutely no headroom. So use 4 bits.
 	 */
 #define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW	BIT(5)
 
 	/** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND	BIT(5)
-#define I915_VMA_LOCAL_BIND	BIT(6)
+#define I915_VMA_GLOBAL_BIND	BIT(6)
+#define I915_VMA_LOCAL_BIND	BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
 
-#define I915_VMA_GGTT	BIT(7)
-#define I915_VMA_CLOSED BIT(8)
+#define I915_VMA_GGTT	BIT(8)
+#define I915_VMA_CLOSED BIT(9)
 
 	unsigned int active;
 	struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -622,20 +624,39 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 	return true;
 }
 
-int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
 /* Flags used by pin/bind&friends. */
-#define PIN_MAPPABLE		BIT(0)
-#define PIN_NONBLOCK		BIT(1)
-#define PIN_GLOBAL		BIT(2)
-#define PIN_OFFSET_BIAS		BIT(3)
-#define PIN_USER		BIT(4)
-#define PIN_UPDATE		BIT(5)
-#define PIN_ZONE_4G		BIT(6)
-#define PIN_HIGH		BIT(7)
-#define PIN_OFFSET_FIXED	BIT(8)
+#define PIN_NONBLOCK		BIT(0)
+#define PIN_MAPPABLE		BIT(1)
+#define PIN_ZONE_4G		BIT(2)
+
+#define PIN_MBZ			BIT(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL		BIT(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER		BIT(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE		BIT(8)
+
+#define PIN_HIGH		BIT(9)
+#define PIN_OFFSET_BIAS		BIT(10)
+#define PIN_OFFSET_FIXED	BIT(11)
 #define PIN_OFFSET_MASK		(~4095)
 
+int __i915_vma_do_pin(struct i915_vma *vma,
+		      u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+	/* Pin early to prevent the shrinker/eviction logic from destroying
+	 * our vma as we insert and bind.
+	 */
+	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+		return 0;
+
+	return __i915_vma_do_pin(vma, size, alignment, flags);
+}
+
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
 	return vma->flags & I915_VMA_PIN_MASK;
@@ -649,7 +670,7 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
 static inline void __i915_vma_pin(struct i915_vma *vma)
 {
 	vma->flags++;
-	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 66/73] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin()
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (64 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 65/73] drm/i915: Make i915_vma_pin() small and inline Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 67/73] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
                   ` (7 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

Since i915_gem_obj_ggtt_pin() is an idiom-breaking curry function for
i915_gem_object_ggtt_pin(), spare us the confusion and remove it.
Removing it now simplifies later patches to change the i915_vma_pin()
(and friends) interface.

v2: Add a redundant GEM_BUG_ON(!view) to
i915_gem_obj_lookup_or_create_ggtt_vma()
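
As a hypothetical sketch of why the curry confuses (the names and the
printf body are illustrative only, not the driver's interface): the
wrapper silently fixes two of the five parameters, so the call sites no
longer spell out what they pin.

#include <stdio.h>

static int object_ggtt_pin(const char *obj, const char *view,
			   unsigned long size, unsigned long alignment,
			   unsigned int flags)
{
	printf("pin %s view=%s size=%lu align=%lu flags=%#x\n",
	       obj, view ? view : "normal", size, alignment, flags);
	return 0;
}

/* The removed idiom: a curry that hides the view and size arguments. */
static int obj_ggtt_pin(const char *obj, unsigned long alignment,
			unsigned int flags)
{
	return object_ggtt_pin(obj, NULL, 0, alignment, flags);
}

int main(void)
{
	obj_ggtt_pin("ring", 4096, 0);		   /* before this patch */
	object_ggtt_pin("ring", NULL, 0, 4096, 0); /* after: explicit */
	return 0;
}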

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h              |  9 ---------
 drivers/gpu/drm/i915/i915_gem.c              | 17 +++++++----------
 drivers/gpu/drm/i915/i915_gem_context.c      |  5 ++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c          |  2 ++
 drivers/gpu/drm/i915/i915_gem_render_state.c |  2 +-
 drivers/gpu/drm/i915/i915_guc_submission.c   |  4 ++--
 drivers/gpu/drm/i915/intel_guc_loader.c      |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c             |  8 +++++---
 drivers/gpu/drm/i915/intel_overlay.c         |  3 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 15 ++++++++-------
 11 files changed, 31 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cda8238c952f..7ba99057c317 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3289,15 +3289,6 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
 
-static inline int __must_check
-i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
-		      uint32_t alignment,
-		      unsigned flags)
-{
-	return i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
-					0, alignment, flags);
-}
-
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 				     const struct i915_ggtt_view *view);
 static inline void
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6ee4a00537a3..72935f5c6ed1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -652,7 +652,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 	uint64_t offset;
 	int ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 	if (ret) {
 		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
 		if (ret)
@@ -946,7 +946,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 	if (obj->tiling_mode != I915_TILING_NONE)
 		return -EFAULT;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
+	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+				       PIN_MAPPABLE | PIN_NONBLOCK);
 	if (ret) {
 		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
 		if (ret)
@@ -3711,7 +3712,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 		goto err;
 	}
 
-	if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0) {
+	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
 			goto err;
@@ -3742,7 +3743,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	BUG_ON(!view);
+	if (!view)
+		view = &i915_ggtt_view_normal;
 
 	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
 	if (IS_ERR(vma))
@@ -3774,12 +3776,7 @@ void
 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 				const struct i915_ggtt_view *view)
 {
-	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
-
-	WARN_ON(!i915_vma_is_pinned(vma));
-	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
-
-	__i915_vma_unpin(vma);
+	i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
 }
 
 int
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index dc7c0ae73b62..bb72af5320b0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -763,9 +763,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
-				    to->ggtt_alignment,
-				    0);
+	ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0,
+				       to->ggtt_alignment, 0);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 888c63368fa0..109bb9d8df4a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1270,7 +1270,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 	if (ret)
 		goto err;
 
-	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+	ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
 	if (ret)
 		goto err;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 660aa9919f72..2504ff978f3a 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3423,6 +3423,8 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
+	GEM_BUG_ON(!view);
+
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, &ggtt->base, view);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 90236672ac1e..57fd767a2d79 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -191,7 +191,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	if (IS_ERR(so.obj))
 		return PTR_ERR(so.obj);
 
-	ret = i915_gem_obj_ggtt_pin(so.obj, 4096, 0);
+	ret = i915_gem_object_ggtt_pin(so.obj, NULL, 0, 0, 0);
 	if (ret)
 		goto err_obj;
 
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 23bbd0ff86c7..4a1c9749d8d7 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -635,8 +635,8 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
 		return NULL;
 	}
 
-	if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-			PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
+	if (i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
+				     PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
 		i915_gem_object_put(obj);
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index b883efd35e3f..3763e30cc165 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -323,7 +323,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 		return ret;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
+	ret = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
 	if (ret) {
 		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 236801d9c79c..e3596ee41dbc 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -799,8 +799,9 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	if (ce->pin_count++)
 		return 0;
 
-	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
-				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	ret = i915_gem_object_ggtt_pin(ce->state, NULL,
+				       0, GEN8_LR_CONTEXT_ALIGN,
+				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
 		goto err;
 
@@ -1205,7 +1206,8 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 		return ret;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
+	ret = i915_gem_object_ggtt_pin(engine->wa_ctx.obj, NULL,
+				       0, PAGE_SIZE, 0);
 	if (ret) {
 		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
 				 ret);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2c598d63c794..217fefc49bf9 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1401,7 +1401,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 		}
 		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
-		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
+		ret = i915_gem_object_ggtt_pin(reg_bo, NULL,
+					       0, PAGE_SIZE, PIN_MAPPABLE);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1659b34143c4..693d8642de08 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -639,7 +639,7 @@ int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
 		goto err;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
+	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, PIN_HIGH);
 	if (ret)
 		goto err_unref;
 
@@ -1896,7 +1896,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 			 * actually map it).
 			 */
 			flags |= PIN_MAPPABLE;
-		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
+		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, flags);
 		if (ret) {
 err_unref:
 			i915_gem_object_put(obj);
@@ -1943,7 +1943,7 @@ int intel_ring_pin(struct intel_ring *ring)
 	int ret;
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
+		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, flags);
 		if (ret)
 			return ret;
 
@@ -1957,8 +1957,8 @@ int intel_ring_pin(struct intel_ring *ring)
 			goto err_unpin;
 		}
 	} else {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-					    flags | PIN_MAPPABLE);
+		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
+					       flags | PIN_MAPPABLE);
 		if (ret)
 			return ret;
 
@@ -2090,7 +2090,8 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
 		return 0;
 
 	if (ce->state) {
-		ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+		ret = i915_gem_object_ggtt_pin(ce->state, NULL, 0,
+					       ctx->ggtt_alignment, 0);
 		if (ret)
 			goto error;
 	}
@@ -2649,7 +2650,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 			i915.semaphores = 0;
 		} else {
 			i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-			ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+			ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 			if (ret != 0) {
 				i915_gem_object_put(obj);
 				DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 67/73] drm/i915: Make fb_tracking.lock a spinlock
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (65 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 66/73] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin() Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 68/73] drm/i915: Use atomics to manipulate obj->frontbuffer_bits Chris Wilson
                   ` (6 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx; +Cc: Daniel Vetter

We need only a very lightweight mechanism here, as the locking is just
used for co-ordinating a bitfield.

v2: Move the cheap unlikely tests into the caller
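
A minimal userspace model of the v2 shape, with pthread spinlocks as a
stand-in for the kernel's spinlock_t (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_spinlock_t tracking_lock;
static unsigned int busy_bits;

/* Out-of-line body: only reached when there is real work to do. */
static void __fb_invalidate(unsigned int bits)
{
	pthread_spin_lock(&tracking_lock);
	busy_bits |= bits;	/* short, bounded RMW critical section */
	pthread_spin_unlock(&tracking_lock);
}

/* Inline wrapper: the cheap unlikely test lives in the caller, so the
 * common no-frontbuffer case never touches the lock at all.
 */
static inline void fb_invalidate(unsigned int bits)
{
	if (!bits)
		return;
	__fb_invalidate(bits);
}

int main(void)
{
	pthread_spin_init(&tracking_lock, PTHREAD_PROCESS_PRIVATE);
	fb_invalidate(0);	/* no lock taken */
	fb_invalidate(0x3);	/* lock held only for the bit update */
	printf("busy_bits = %#x\n", busy_bits);
	pthread_spin_destroy(&tracking_lock);
	return 0;
}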

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
[After some kerneldoc fixes, which I hope Daniel explains better,
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>]
---
 drivers/gpu/drm/i915/i915_drv.h          |  2 +-
 drivers/gpu/drm/i915/i915_gem.c          |  2 +-
 drivers/gpu/drm/i915/intel_drv.h         | 29 +++++++++++++++---
 drivers/gpu/drm/i915/intel_frontbuffer.c | 52 ++++++++++++++------------------
 4 files changed, 50 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7ba99057c317..b26f5b1f466b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1669,7 +1669,7 @@ struct intel_pipe_crc {
 };
 
 struct i915_frontbuffer_tracking {
-	struct mutex lock;
+	spinlock_t lock;
 
 	/*
 	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 72935f5c6ed1..5d57258d96cd 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4446,7 +4446,7 @@ i915_gem_load_init(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	mutex_init(&dev_priv->fb_tracking.lock);
+	spin_lock_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_load_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index e74d851868c5..01056ce8d461 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1135,8 +1135,6 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 
 /* intel_frontbuffer.c */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
@@ -1147,8 +1145,31 @@ unsigned int intel_fb_align_height(struct drm_device *dev,
 				   unsigned int height,
 				   uint32_t pixel_format,
 				   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
-			enum fb_op_origin origin);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin);
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+					   enum fb_op_origin origin)
+{
+	if (!obj->frontbuffer_bits)
+		return;
+
+	__intel_fb_obj_invalidate(obj, origin);
+}
+
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin);
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+				      bool retire,
+				      enum fb_op_origin origin)
+{
+	if (!obj->frontbuffer_bits)
+		return;
+
+	__intel_fb_obj_flush(obj, retire, origin);
+}
+
 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
 			      uint64_t fb_modifier, uint32_t pixel_format);
 
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index ac85357010b4..939d0e8cf63d 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -76,24 +76,19 @@
  * until the rendering completes or a flip on this frontbuffer plane is
  * scheduled.
  */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	if (origin == ORIGIN_CS) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits
-			|= obj->frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits
-			&= ~obj->frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
+		dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
 	intel_psr_invalidate(dev, obj->frontbuffer_bits);
@@ -120,9 +115,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Delay flushing when rings are still busy.*/
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	if (!frontbuffer_bits)
 		return;
@@ -142,8 +137,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
  * completed and frontbuffer caching can be started again. If @retire is true
  * then any delayed flushes will be unblocked.
  */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -151,21 +147,18 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	frontbuffer_bits = obj->frontbuffer_bits;
 
 	if (retire) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
 		/* Filter out new bits since rendering started. */
 		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
 		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -185,11 +178,11 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
@@ -209,13 +202,14 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
 	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
@@ -234,10 +228,10 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 68/73] drm/i915: Use atomics to manipulate obj->frontbuffer_bits
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (66 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 67/73] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 69/73] drm/i915: Use dev_priv consistently through the intel_frontbuffer interface Chris Wilson
                   ` (5 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx; +Cc: Daniel Vetter

The individual bits inside obj->frontbuffer_bits are protected by each
plane->mutex, but the whole bitfield may be accessed by multiple KMS
operations simultaneously, and so the RMW updates need to be atomic.
However, for updating the single field we do not need to mandate that it
be under the struct_mutex, one more step towards its removal as the de
facto BKL.
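
A userspace sketch of the same RMW discipline, using C11 atomics in
place of the kernel's atomic_or()/atomic_andnot() (names illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint frontbuffer_bits;

/* Each bit has a single owner (its plane), but owners update the same
 * word concurrently, so set/clear must be atomic OR / AND-NOT rather
 * than a plain bitfield assignment.
 */
static void track_fb(unsigned int old_bit, unsigned int new_bit)
{
	if (old_bit)
		atomic_fetch_and(&frontbuffer_bits, ~old_bit);
	if (new_bit)
		atomic_fetch_or(&frontbuffer_bits, new_bit);
}

int main(void)
{
	track_fb(0, 1u << 0);	/* plane A claims its bit */
	track_fb(0, 1u << 8);	/* plane B, safely concurrent */
	track_fb(1u << 0, 0);	/* plane A releases */
	printf("bits = %#x\n", atomic_load(&frontbuffer_bits));
	return 0;
}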

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c      |  6 ++++--
 drivers/gpu/drm/i915/i915_drv.h          |  4 +---
 drivers/gpu/drm/i915/i915_gem.c          | 21 ++++++++++++++-------
 drivers/gpu/drm/i915/intel_display.c     | 18 ++++++------------
 drivers/gpu/drm/i915/intel_drv.h         | 20 ++++++++++++++------
 drivers/gpu/drm/i915/intel_frontbuffer.c | 23 +++++++++--------------
 6 files changed, 48 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index fcfa9ca6b50a..10a346237795 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -138,6 +138,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
+	unsigned int frontbuffer_bits;
 	int pin_count = 0;
 	enum intel_engine_id id;
 
@@ -204,8 +205,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (engine)
 		seq_printf(m, " (%s)", engine->name);
 
-	if (obj->frontbuffer_bits)
-		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
+	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (frontbuffer_bits)
+		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
 }
 
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b26f5b1f466b..3de75e82ca76 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2127,8 +2127,6 @@ struct drm_i915_gem_object_ops {
  */
 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER_BITS \
-	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
 	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
@@ -2216,7 +2214,7 @@ struct drm_i915_gem_object {
 	unsigned int cache_level:3;
 	unsigned int cache_dirty:1;
 
-	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+	atomic_t frontbuffer_bits;
 
 	unsigned int has_wc_mmap;
 	/** Count of VMA actually bound by this object */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5d57258d96cd..c4e5c7c1891b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4031,7 +4031,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	if (obj->stolen)
 		i915_gem_object_unpin_pages(obj);
 
-	WARN_ON(obj->frontbuffer_bits);
+	WARN_ON(atomic_read(&obj->frontbuffer_bits));
 
 	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
@@ -4548,16 +4548,23 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits)
 {
+	/* Control of individual bits within the mask is guarded by
+	 * the owning plane->mutex, i.e. we can never see concurrent
+	 * manipulation of individual bits. But since the bitfield as a whole
+	 * is updated using RMW, we need to use atomics in order to update
+	 * the bits.
+	 */
+	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+		     sizeof(atomic_t) * BITS_PER_BYTE);
+
 	if (old) {
-		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
-		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
-		old->frontbuffer_bits &= ~frontbuffer_bits;
+		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
+		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
 	}
 
 	if (new) {
-		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
-		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
-		new->frontbuffer_bits |= frontbuffer_bits;
+		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
+		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f9e85146194e..ea048f6b1a1b 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2600,7 +2600,8 @@ valid_fb:
 	primary->fb = primary->state->fb = fb;
 	primary->crtc = primary->state->crtc = &intel_crtc->base;
 	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
-	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
+	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
+		  &obj->frontbuffer_bits);
 }
 
 static void i9xx_update_primary_plane(struct drm_plane *primary,
@@ -13799,19 +13800,12 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
 {
 	struct drm_plane_state *old_plane_state;
 	struct drm_plane *plane;
-	struct drm_i915_gem_object *obj, *old_obj;
-	struct intel_plane *intel_plane;
 	int i;
 
-	mutex_lock(&state->dev->struct_mutex);
-	for_each_plane_in_state(state, plane, old_plane_state, i) {
-		obj = intel_fb_obj(plane->state->fb);
-		old_obj = intel_fb_obj(old_plane_state->fb);
-		intel_plane = to_intel_plane(plane);
-
-		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
-	}
-	mutex_unlock(&state->dev->struct_mutex);
+	for_each_plane_in_state(state, plane, old_plane_state, i)
+		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
+				  intel_fb_obj(plane->state->fb),
+				  to_intel_plane(plane)->frontbuffer_bit);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 01056ce8d461..5294039cf238 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1147,27 +1147,35 @@ unsigned int intel_fb_align_height(struct drm_device *dev,
 				   uint64_t fb_format_modifier);
 
 void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			       enum fb_op_origin origin);
+			       enum fb_op_origin origin,
+			       unsigned int frontbuffer_bits);
 static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 					   enum fb_op_origin origin)
 {
-	if (!obj->frontbuffer_bits)
+	unsigned int frontbuffer_bits;
+
+	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (!frontbuffer_bits)
 		return;
 
-	__intel_fb_obj_invalidate(obj, origin);
+	__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
 }
 
 void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 			  bool retire,
-			  enum fb_op_origin origin);
+			  enum fb_op_origin origin,
+			  unsigned int frontbuffer_bits);
 static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 				      bool retire,
 				      enum fb_op_origin origin)
 {
-	if (!obj->frontbuffer_bits)
+	unsigned int frontbuffer_bits;
+
+	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (!frontbuffer_bits)
 		return;
 
-	__intel_fb_obj_flush(obj, retire, origin);
+	__intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
 }
 
 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 939d0e8cf63d..cb7f5a29b48e 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -77,23 +77,22 @@
  * scheduled.
  */
 void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			       enum fb_op_origin origin)
+			       enum fb_op_origin origin,
+			       unsigned int frontbuffer_bits)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	if (origin == ORIGIN_CS) {
 		spin_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
+		dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
 		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_psr_invalidate(dev, obj->frontbuffer_bits);
-	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
-	intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
+	intel_psr_invalidate(dev, frontbuffer_bits);
+	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
+	intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
@@ -139,15 +138,11 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
  */
 void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 			  bool retire,
-			  enum fb_op_origin origin)
+			  enum fb_op_origin origin,
+			  unsigned int frontbuffer_bits)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned frontbuffer_bits;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	frontbuffer_bits = obj->frontbuffer_bits;
 
 	if (retire) {
 		spin_lock(&dev_priv->fb_tracking.lock);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 69/73] drm/i915: Use dev_priv consistently through the intel_frontbuffer interface
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (67 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 68/73] drm/i915: Use atomics to manipulate obj->frontbuffer_bits Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 70/73] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
                   ` (4 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

Rather than a mishmash of struct drm_device *dev and struct
drm_i915_private *dev_priv being used freely within a function, be
consistent and only pass along dev_priv.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_display.c     | 10 ++++----
 drivers/gpu/drm/i915/intel_dp.c          | 14 +++++-------
 drivers/gpu/drm/i915/intel_drv.h         | 21 +++++++++--------
 drivers/gpu/drm/i915/intel_frontbuffer.c | 39 ++++++++++++--------------------
 drivers/gpu/drm/i915/intel_overlay.c     |  3 +--
 drivers/gpu/drm/i915/intel_psr.c         | 26 +++++++++------------
 6 files changed, 49 insertions(+), 64 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ea048f6b1a1b..33e815c20e55 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4565,12 +4565,11 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc->base.state);
-	struct drm_device *dev = crtc->base.dev;
 	struct drm_plane *primary = crtc->base.primary;
 	struct drm_plane_state *old_pri_state =
 		drm_atomic_get_existing_plane_state(old_state, primary);
 
-	intel_frontbuffer_flip(dev, pipe_config->fb_bits);
+	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
 
 	crtc->wm.cxsr_allowed = true;
 
@@ -4693,7 +4692,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
 	 * to compute the mask of flip planes precisely. For the time being
 	 * consider this a flip to a NULL plane.
 	 */
-	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -10951,7 +10950,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 
 	i915_gem_request_put(work->flip_queued_req);
 
-	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+	intel_frontbuffer_flip_complete(to_i915(dev),
+					to_intel_plane(primary)->frontbuffer_bit);
 	intel_fbc_post_update(crtc);
 	drm_framebuffer_unreference(work->old_fb);
 
@@ -11726,7 +11726,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_frontbuffer_flip_prepare(dev,
+	intel_frontbuffer_flip_prepare(to_i915(dev),
 				       to_intel_plane(primary)->frontbuffer_bit);
 
 	trace_i915_flip_request(intel_crtc->plane, obj);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 21b04c3eda41..c761e717ab9d 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -5186,7 +5186,7 @@ unlock:
 
 /**
  * intel_edp_drrs_invalidate - Disable Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called every time rendering on the given planes starts.
@@ -5194,10 +5194,9 @@ unlock:
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
-void intel_edp_drrs_invalidate(struct drm_device *dev,
-		unsigned frontbuffer_bits)
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+			       unsigned int frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -5229,7 +5228,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 
 /**
  * intel_edp_drrs_flush - Restart Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called every time rendering on the given planes has
@@ -5239,10 +5238,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
-void intel_edp_drrs_flush(struct drm_device *dev,
-		unsigned frontbuffer_bits)
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+			  unsigned int frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 5294039cf238..e9ecf6b2cfe8 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1135,11 +1135,11 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 
 /* intel_frontbuffer.c */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
 				    unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
 				     unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_device *dev,
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
 			    unsigned frontbuffer_bits);
 unsigned int intel_fb_align_height(struct drm_device *dev,
 				   unsigned int height,
@@ -1413,11 +1413,12 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
 void intel_plane_destroy(struct drm_plane *plane);
 void intel_edp_drrs_enable(struct intel_dp *intel_dp);
 void intel_edp_drrs_disable(struct intel_dp *intel_dp);
-void intel_edp_drrs_invalidate(struct drm_device *dev,
-		unsigned frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+			       unsigned int frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+			  unsigned int frontbuffer_bits);
 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
-					 struct intel_digital_port *port);
+				  struct intel_digital_port *port);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1590,13 +1591,13 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
 /* intel_psr.c */
 void intel_psr_enable(struct intel_dp *intel_dp);
 void intel_psr_disable(struct intel_dp *intel_dp);
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits);
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits,
 		     enum fb_op_origin origin);
 void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 				   unsigned frontbuffer_bits);
 
 /* intel_runtime_pm.c */
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index cb7f5a29b48e..d4053137e734 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -80,8 +80,7 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 			       enum fb_op_origin origin,
 			       unsigned int frontbuffer_bits)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	if (origin == ORIGIN_CS) {
 		spin_lock(&dev_priv->fb_tracking.lock);
@@ -90,8 +89,8 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_psr_invalidate(dev, frontbuffer_bits);
-	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
+	intel_psr_invalidate(dev_priv, frontbuffer_bits);
+	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 	intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
 }
 
@@ -107,12 +106,10 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-static void intel_frontbuffer_flush(struct drm_device *dev,
+static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
 				    unsigned frontbuffer_bits,
 				    enum fb_op_origin origin)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/* Delay flushing when rings are still busy.*/
 	spin_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
@@ -121,8 +118,8 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	if (!frontbuffer_bits)
 		return;
 
-	intel_edp_drrs_flush(dev, frontbuffer_bits);
-	intel_psr_flush(dev, frontbuffer_bits, origin);
+	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
+	intel_psr_flush(dev_priv, frontbuffer_bits, origin);
 	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
@@ -141,8 +138,7 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 			  enum fb_op_origin origin,
 			  unsigned int frontbuffer_bits)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	if (retire) {
 		spin_lock(&dev_priv->fb_tracking.lock);
@@ -153,7 +149,7 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 	}
 
 	if (frontbuffer_bits)
-		intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+		intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
@@ -168,18 +164,16 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
 				    unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	spin_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
 	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_psr_single_frame_update(dev, frontbuffer_bits);
+	intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
 }
 
 /**
@@ -192,11 +186,9 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
 				     unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
 	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
@@ -204,7 +196,8 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	if (frontbuffer_bits)
-		intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+		intel_frontbuffer_flush(dev_priv,
+					frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
@@ -218,15 +211,13 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip(struct drm_device *dev,
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
 			    unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
 	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+	intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
 }
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 217fefc49bf9..ad08df49ed48 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -839,8 +839,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	overlay->old_vid_bo = overlay->vid_bo;
 	overlay->vid_bo = new_bo;
 
-	intel_frontbuffer_flip(&dev_priv->drm,
-			       INTEL_FRONTBUFFER_OVERLAY(pipe));
+	intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 68bd0bb34817..adf2ce0f38c0 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -628,9 +628,8 @@ unlock:
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_device *dev)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_dp *intel_dp = dev_priv->psr.enabled;
 	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -639,7 +638,7 @@ static void intel_psr_exit(struct drm_device *dev)
 	if (!dev_priv->psr.active)
 		return;
 
-	if (HAS_DDI(dev)) {
+	if (HAS_DDI(dev_priv)) {
 		val = I915_READ(EDP_PSR_CTL);
 
 		WARN_ON(!(val & EDP_PSR_ENABLE));
@@ -674,7 +673,7 @@ static void intel_psr_exit(struct drm_device *dev)
 
 /**
  * intel_psr_single_frame_update - Single Frame Update
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Some platforms support a single frame update feature that is used to
@@ -682,10 +681,9 @@ static void intel_psr_exit(struct drm_device *dev)
  * So far it is only implemented for Valleyview and Cherryview because
  * hardware requires this to be done before a page flip.
  */
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 				   unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 	u32 val;
@@ -694,7 +692,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
 	 * Single frame update is already supported on BDW+ but it requires
 	 * many W/A and it isn't really needed.
 	 */
-	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
@@ -720,7 +718,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
 
 /**
  * intel_psr_invalidate - Invalidate PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
@@ -730,10 +728,9 @@ void intel_psr_single_frame_update(struct drm_device *dev,
  *
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -750,14 +747,14 @@ void intel_psr_invalidate(struct drm_device *dev,
 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
 
 	if (frontbuffer_bits)
-		intel_psr_exit(dev);
+		intel_psr_exit(dev_priv);
 
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
 /**
  * intel_psr_flush - Flush PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
@@ -768,10 +765,9 @@ void intel_psr_invalidate(struct drm_device *dev,
  *
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -789,7 +785,7 @@ void intel_psr_flush(struct drm_device *dev,
 
 	/* By definition flush = invalidate + flush */
 	if (frontbuffer_bits)
-		intel_psr_exit(dev);
+		intel_psr_exit(dev_priv);
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		if (!work_busy(&dev_priv->psr.work.work))
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 70/73] drm/i915: Move obj->active:5 to obj->flags
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (68 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 69/73] drm/i915: Use dev_priv consistently through the intel_frontbuffer interface Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01 12:46   ` Joonas Lahtinen
  2016-08-01  9:11 ` [PATCH 71/73] drm/i915: Move i915_gem_object_wait_rendering() Chris Wilson
                   ` (3 subsequent siblings)
  73 siblings, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

We are motivated to avoid using a bitfield for obj->active for a couple
of reasons. Firstly, we wish to document our lockless read of obj->active
using READ_ONCE inside i915_gem_busy_ioctl() and that requires an
integral type (i.e. not a bitfield). Secondly, gcc produces abysmal code
when presented with a bitfield and that shows up high on the profiles of
request tracking (mainly due to excess memory traffic as it converts
the bitfield to a register and back and generates frequent AGI in the
process).

v2: Use BIT(), break up a long line when computing the other engines,
new paint for i915_gem_object_is_active (now i915_gem_object_get_active).
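
A rough userspace sketch of the resulting accessors, modelling
READ_ONCE with a volatile cast (gcc typeof; the engine count and bit
layout follow the patch, everything else is hypothetical):

#include <stdio.h>

#define NUM_ENGINES	5
#define BO_ACTIVE_SHIFT	0
#define BO_ACTIVE_MASK	((1ul << NUM_ENGINES) - 1)
#define READ_ONCE(x)	(*(volatile typeof(x) *)&(x))

struct gem_object { unsigned long flags; };

/* The flags word is integral, so a lockless reader can snapshot the
 * whole thing in one load; a C bitfield offers no such guarantee.
 */
static unsigned long get_active(const struct gem_object *obj)
{
	return (obj->flags >> BO_ACTIVE_SHIFT) & BO_ACTIVE_MASK;
}

static void set_active(struct gem_object *obj, int engine)
{
	obj->flags |= 1ul << (engine + BO_ACTIVE_SHIFT);
}

int main(void)
{
	struct gem_object obj = { 0 };
	unsigned long snap;

	set_active(&obj, 2);

	/* lockless snapshot, as wanted for i915_gem_busy_ioctl() */
	snap = (READ_ONCE(obj.flags) >> BO_ACTIVE_SHIFT) & BO_ACTIVE_MASK;

	printf("active mask %#lx (helper sees %#lx)\n",
	       snap, get_active(&obj));
	return 0;
}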

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_drv.h            | 37 +++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c            | 16 ++++++-------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 20 ++++++++++++----
 drivers/gpu/drm/i915/i915_gem_shrinker.c   |  5 ++--
 drivers/gpu/drm/i915/i915_gem_userptr.c    |  2 +-
 6 files changed, 64 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 10a346237795..920a2de95cd5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -91,7 +91,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 
 static char get_active_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->active ? '*' : ' ';
+	return i915_gem_object_is_active(obj) ? '*' : ' ';
 }
 
 static char get_pin_flag(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3de75e82ca76..db5dc5bd78d8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2155,12 +2155,16 @@ struct drm_i915_gem_object {
 
 	struct list_head batch_pool_link;
 
+	unsigned long flags;
 	/**
 	 * This is set if the object is on the active lists (has pending
 	 * rendering and so a non-zero seqno), and is not set if it is on
 	 * inactive (ready to be unbound) list.
 	 */
-	unsigned int active:I915_NUM_ENGINES;
+#define I915_BO_ACTIVE_SHIFT 0
+#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
+#define __I915_BO_ACTIVE(bo) \
+	((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
 
 	/**
 	 * This is set if the object has been written to since last bound
@@ -2325,6 +2329,37 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
 }
 
+static inline unsigned long
+i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
+{
+	return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+	return i915_gem_object_get_active(obj);
+}
+
+static inline void
+i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
+{
+	obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline void
+i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
+{
+	obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline bool
+i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
+				  int engine)
+{
+	return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
 /*
  * Optimised SGL iterator for GEM objects
  */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c4e5c7c1891b..b44a8e036373 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1354,7 +1354,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 
 	if (!readonly) {
 		active = obj->last_read;
-		active_mask = obj->active;
+		active_mask = i915_gem_object_get_active(obj);
 	} else {
 		active_mask = 1;
 		active = &obj->last_write;
@@ -1398,7 +1398,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!dev_priv->mm.interruptible);
 
-	active_mask = obj->active;
+	active_mask = i915_gem_object_get_active(obj);
 	if (!active_mask)
 		return 0;
 
@@ -2361,10 +2361,10 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
 	struct drm_i915_gem_object *obj =
 		container_of(active, struct drm_i915_gem_object, last_read[idx]);
 
-	GEM_BUG_ON((obj->active & (1 << idx)) == 0);
+	GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
 
-	obj->active &= ~(1 << idx);
-	if (obj->active)
+	i915_gem_object_clear_active(obj, idx);
+	if (i915_gem_object_is_active(obj))
 		return;
 
 	/* Bump our place on the bound list to keep it roughly in LRU order
@@ -2668,7 +2668,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		return -ENOENT;
 	}
 
-	if (!obj->active)
+	if (!i915_gem_object_is_active(obj))
 		goto out;
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -2756,7 +2756,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	active_mask = obj->active;
+	active_mask = i915_gem_object_get_active(obj);
 	if (!active_mask)
 		return 0;
 
@@ -3802,7 +3802,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * become non-busy without any further actions.
 	 */
 	args->busy = 0;
-	if (obj->active) {
+	if (i915_gem_object_is_active(obj)) {
 		struct drm_i915_gem_request *req;
 		int i;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 109bb9d8df4a..3a68ffaa55e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -433,7 +433,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
 
 static bool object_is_idle(struct drm_i915_gem_object *obj)
 {
-	unsigned long active = obj->active;
+	unsigned long active = i915_gem_object_get_active(obj);
 	int idx;
 
 	for_each_active(active, idx) {
@@ -989,11 +989,21 @@ err:
 	return ret;
 }
 
+static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
+{
+	unsigned int mask;
+
+	mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
+	mask <<= I915_BO_ACTIVE_SHIFT;
+
+	return mask;
+}
+
 static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_engine_flag(req->engine);
+	const unsigned int other_rings = eb_other_engines(req);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -1002,7 +1012,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
-		if (obj->active & other_rings) {
+		if (obj->flags & other_rings) {
 			ret = i915_gem_object_sync(obj, req);
 			if (ret)
 				return ret;
@@ -1165,9 +1175,9 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	if (obj->active == 0)
+	if (!i915_gem_object_is_active(obj))
 		i915_gem_object_get(obj);
-	obj->active |= 1 << idx;
+	i915_gem_object_set_active(obj, idx);
 	i915_gem_active_set(&obj->last_read[idx], req);
 
 	if (flags & EXEC_OBJECT_WRITE) {
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index b5776358c05e..bcd85bdbc25f 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -182,7 +182,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			    !is_vmalloc_addr(obj->mapping))
 				continue;
 
-			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+			if ((flags & I915_SHRINK_ACTIVE) == 0 &&
+			    i915_gem_object_is_active(obj))
 				continue;
 
 			if (!can_release_pages(obj))
@@ -267,7 +268,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->active && can_release_pages(obj))
+		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 651a84ba840c..53f64fcc89ef 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -67,7 +67,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
 	int i, n;
 
-	if (!obj->active)
+	if (!i915_gem_object_is_active(obj))
 		return;
 
 	n = 0;
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 71/73] drm/i915: Move i915_gem_object_wait_rendering()
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (69 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 70/73] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 72/73] drm/i915: Enable lockless lookup of request tracking via RCU Chris Wilson
                   ` (2 subsequent siblings)
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

Just move it earlier so that we can use the companion nonblocking
version in a couple more callsites without having to add a forward
declaration.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 202 ++++++++++++++++++++--------------------
 1 file changed, 101 insertions(+), 101 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b44a8e036373..d9b303252648 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -301,6 +301,107 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly)
+{
+	struct reservation_object *resv;
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	if (!readonly) {
+		active = obj->last_read;
+		active_mask = i915_gem_object_get_active(obj);
+	} else {
+		active_mask = 1;
+		active = &obj->last_write;
+	}
+
+	for_each_active(active_mask, idx) {
+		int ret;
+
+		ret = i915_gem_active_wait(&active[idx],
+					   &obj->base.dev->struct_mutex);
+		if (ret)
+			return ret;
+	}
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (resv) {
+		long err;
+
+		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
+							  MAX_SCHEDULE_TIMEOUT);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    struct intel_rps_client *rps,
+					    bool readonly)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int ret, i, n = 0;
+
+	lockdep_assert_held(&dev->struct_mutex);
+	GEM_BUG_ON(!to_i915(dev)->mm.interruptible);
+
+	active_mask = i915_gem_object_get_active(obj);
+	if (!active_mask)
+		return 0;
+
+	if (!readonly) {
+		active = obj->last_read;
+	} else {
+		active_mask = 1;
+		active = &obj->last_write;
+	}
+
+	for_each_active(active_mask, i) {
+		struct drm_i915_gem_request *req;
+
+		req = i915_gem_active_get(&active[i],
+					  &obj->base.dev->struct_mutex);
+		if (req)
+			requests[n++] = req;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	ret = 0;
+	for (i = 0; ret == 0 && i < n; i++)
+		ret = i915_wait_request(requests[i], true, NULL, rps);
+	mutex_lock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++)
+		i915_gem_request_put(requests[i]);
+
+	return ret;
+}
+
+static struct intel_rps_client *to_rps_client(struct drm_file *file)
+{
+	struct drm_i915_file_private *fpriv = file->driver_priv;
+
+	return &fpriv->rps;
+}
+
 int
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 			    int align)
@@ -1335,107 +1436,6 @@ put_rpm:
 	return ret;
 }
 
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for read access or write
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-			       bool readonly)
-{
-	struct reservation_object *resv;
-	struct i915_gem_active *active;
-	unsigned long active_mask;
-	int idx, ret;
-
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-	if (!readonly) {
-		active = obj->last_read;
-		active_mask = i915_gem_object_get_active(obj);
-	} else {
-		active_mask = 1;
-		active = &obj->last_write;
-	}
-
-	for_each_active(active_mask, idx) {
-		ret = i915_gem_active_wait(&active[idx],
-					   &obj->base.dev->struct_mutex);
-		if (ret)
-			return ret;
-	}
-
-	resv = i915_gem_object_get_dmabuf_resv(obj);
-	if (resv) {
-		long err;
-
-		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
-							  MAX_SCHEDULE_TIMEOUT);
-		if (err < 0)
-			return err;
-	}
-
-	return 0;
-}
-
-/* A nonblocking variant of the above wait. This is a highly dangerous routine
- * as the object state may change during this call.
- */
-static __must_check int
-i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
-					    struct intel_rps_client *rps,
-					    bool readonly)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-	struct i915_gem_active *active;
-	unsigned long active_mask;
-	int ret, i, n = 0;
-
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-	BUG_ON(!dev_priv->mm.interruptible);
-
-	active_mask = i915_gem_object_get_active(obj);
-	if (!active_mask)
-		return 0;
-
-	if (!readonly) {
-		active = obj->last_read;
-	} else {
-		active_mask = 1;
-		active = &obj->last_write;
-	}
-
-	for_each_active(active_mask, i) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_active_get(&active[i],
-					  &obj->base.dev->struct_mutex);
-		if (req)
-			requests[n++] = req;
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-	ret = 0;
-	for (i = 0; ret == 0 && i < n; i++)
-		ret = i915_wait_request(requests[i], true, NULL, rps);
-	mutex_lock(&dev->struct_mutex);
-
-	for (i = 0; i < n; i++)
-		i915_gem_request_put(requests[i]);
-
-	return ret;
-}
-
-static struct intel_rps_client *to_rps_client(struct drm_file *file)
-{
-	struct drm_i915_file_private *fpriv = file->driver_priv;
-	return &fpriv->rps;
-}
-
 static enum fb_op_origin
 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
 {
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 72/73] drm/i915: Enable lockless lookup of request tracking via RCU
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (70 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 71/73] drm/i915: Move i915_gem_object_wait_rendering() Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01  9:11 ` [PATCH 73/73] drm/i915: Export our request as a dma-buf fence on the reservation object Chris Wilson
  2016-08-01 11:45 ` ✗ Ro.CI.BAT: failure for series starting with [01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Patchwork
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx; +Cc: Daniel Vetter, Josh Triplett, Goel, Akash

If we enable RCU for the requests (providing a grace period where we can
inspect a "dead" request before it is freed), we can allow callers to
carefully perform lockless lookup of an active request.
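
As an illustrative sketch (a hypothetical caller, but using only the
helpers this patch adds), the lookup takes its reference entirely under
the RCU read lock:

	struct drm_i915_gem_request *req;

	rcu_read_lock();
	req = i915_gem_active_get_rcu(&obj->last_write);
	rcu_read_unlock();

	if (req) {
		/* We now hold a full reference, valid outside the RCU
		 * read lock; inspect or wait on the request, then drop it.
		 */
		i915_gem_request_put(req);
	}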

However, by enabling deferred freeing of requests, we can potentially
hog a lot of memory when dealing with tens of thousands of requests per
second - with a quick insertion of a synchronize_rcu() inside our
shrinker callback, that issue disappears.

v2: Currently, it is our responsibility to handle reclaim i.e. to avoid
hogging memory with the delayed slab frees. At the moment, we wait for a
grace period in the shrinker, and block for all RCU callbacks on oom.
Suggested alternatives focus on flushing our RCU callback when we have a
certain number of outstanding request frees, and blocking on that flush
after a second high watermark. (So rather than wait for the system to
run out of memory, we stop issuing requests - both are nondeterministic.)

Paul E. McKenney wrote:

Another approach is synchronize_rcu() after some largish number of
requests.  The advantage of this approach is that it throttles the
production of callbacks at the source.  The corresponding disadvantage
is that it slows things up.

Another approach is to use call_rcu(), but if the previous call_rcu()
is still in flight, block waiting for it.  Yet another approach is
the get_state_synchronize_rcu() / cond_synchronize_rcu() pair.  The
idea is to do something like this:

        cond_synchronize_rcu(cookie);
        cookie = get_state_synchronize_rcu();

You would of course do an initial get_state_synchronize_rcu() to
get things going.  This would not block unless there was less than
one grace period's worth of time between invocations.  But this
assumes a busy system, where there is almost always a grace period
in flight.  But you can make that happen as follows:

        cond_synchronize_rcu(cookie);
        cookie = get_state_synchronize_rcu();
        call_rcu(&my_rcu_head, noop_function);

Note that you need additional code to make sure that the old callback
has completed before doing a new one.  Setting and clearing a flag
with appropriate memory ordering control suffices (e.g., smp_load_acquire()
and smp_store_release()).
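
A rough sketch of that flag pattern (illustrative names, not code from
this series):

	static struct rcu_head my_rcu_head;
	static int cb_pending; /* set while a callback is in flight */

	static void noop_function(struct rcu_head *head)
	{
		/* the previous grace period has elapsed */
		smp_store_release(&cb_pending, 0);
	}

	static void throttled_call_rcu(void)
	{
		/* ensure the old callback completed before reusing the head */
		while (smp_load_acquire(&cb_pending))
			cpu_relax();

		cb_pending = 1;
		call_rcu(&my_rcu_head, noop_function);
	}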

v3: More comments on compiler and processor ordering of operations within
the RCU lookup; also discovered we can use rcu_access_pointer() here instead.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: "Goel, Akash" <akash.goel@intel.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem.c          |   7 +-
 drivers/gpu/drm/i915/i915_gem_request.c  |   2 +-
 drivers/gpu/drm/i915/i915_gem_request.h  | 140 +++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  15 +++-
 4 files changed, 151 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d9b303252648..32ab099152cc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4420,7 +4420,9 @@ i915_gem_load_init(struct drm_device *dev)
 	dev_priv->requests =
 		kmem_cache_create("i915_gem_request",
 				  sizeof(struct drm_i915_gem_request), 0,
-				  SLAB_HWCACHE_ALIGN,
+				  SLAB_HWCACHE_ALIGN |
+				  SLAB_RECLAIM_ACCOUNT |
+				  SLAB_DESTROY_BY_RCU,
 				  NULL);
 
 	INIT_LIST_HEAD(&dev_priv->context_list);
@@ -4456,6 +4458,9 @@ void i915_gem_load_cleanup(struct drm_device *dev)
 	kmem_cache_destroy(dev_priv->requests);
 	kmem_cache_destroy(dev_priv->vmas);
 	kmem_cache_destroy(dev_priv->objects);
+
+	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+	rcu_barrier();
 }
 
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 773b942a3a5f..3fecb8f0e041 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -205,7 +205,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 		prefetchw(next);
 
 		INIT_LIST_HEAD(&active->link);
-		active->request = NULL;
+		RCU_INIT_POINTER(active->request, NULL);
 
 		active->retire(active, request);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 4241d49a024c..48382ac401fd 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -183,6 +183,12 @@ i915_gem_request_get(struct drm_i915_gem_request *req)
 	return to_request(fence_get(&req->fence));
 }
 
+static inline struct drm_i915_gem_request *
+i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+{
+	return to_request(fence_get_rcu(&req->fence));
+}
+
 static inline void
 i915_gem_request_put(struct drm_i915_gem_request *req)
 {
@@ -286,7 +292,7 @@ typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
 				   struct drm_i915_gem_request *);
 
 struct i915_gem_active {
-	struct drm_i915_gem_request *request;
+	struct drm_i915_gem_request __rcu *request;
 	struct list_head link;
 	i915_gem_retire_fn retire;
 };
@@ -327,13 +333,19 @@ i915_gem_active_set(struct i915_gem_active *active,
 		    struct drm_i915_gem_request *request)
 {
 	list_move(&active->link, &request->active_list);
-	active->request = request;
+	rcu_assign_pointer(active->request, request);
 }
 
 static inline struct drm_i915_gem_request *
 __i915_gem_active_peek(const struct i915_gem_active *active)
 {
-	return active->request;
+	/* Inside the error capture (running with the driver in an unknown
+	 * state), we want to bend the rules slightly (a lot).
+	 *
+	 * Work is in progress to make it safer; in the meantime this keeps
+	 * the known issue from spamming the logs.
+	 */
+	return rcu_dereference_protected(active->request, 1);
 }
 
 /**
@@ -349,7 +361,29 @@ i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
 {
 	struct drm_i915_gem_request *request;
 
-	request = active->request;
+	request = rcu_dereference_protected(active->request,
+					    lockdep_is_held(mutex));
+	if (!request || i915_gem_request_completed(request))
+		return NULL;
+
+	return request;
+}
+
+/**
+ * i915_gem_active_peek_rcu - report the active request being monitored
+ * @active - the active tracker
+ *
+ * i915_gem_active_peek_rcu() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, and inspection of the request is only valid under
+ * the RCU lock.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_peek_rcu(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	request = rcu_dereference(active->request);
 	if (!request || i915_gem_request_completed(request))
 		return NULL;
 
@@ -370,6 +404,96 @@ i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 }
 
 /**
+ * i915_gem_active_get_rcu - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_get_rcu() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold the RCU read lock.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get_rcu(const struct i915_gem_active *active)
+{
+	/* Performing a lockless retrieval of the active request is super
+	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+	 * slab of request objects will not be freed whilst we hold the
+	 * RCU read lock. It does not guarantee that the request itself
+	 * will not be freed and then *reused*. Viz,
+	 *
+	 * Thread A			Thread B
+	 *
+	 * req = active.request
+	 *				retire(req) -> free(req);
+	 *				(req is now first on the slab freelist)
+	 *				active.request = NULL
+	 *
+	 *				req = new submission on a new object
+	 * ref(req)
+	 *
+	 * To prevent the request from being reused whilst the caller
+	 * uses it, we take a reference like normal. Whilst acquiring
+	 * the reference we check that it is not in a destroyed state
+	 * (refcnt == 0). That prevents the request being reallocated
+	 * whilst the caller holds on to it. To check that the request
+	 * was not reallocated as we acquired the reference we have to
+	 * check that our request remains the active request across
+	 * the lookup, in the same manner as a seqlock. The visibility
+	 * of the pointer versus the reference counting is controlled
+	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
+	 *
+	 * In the middle of all that, we inspect whether the request is
+	 * complete. Retiring is lazy so the request may be completed long
+	 * before the active tracker is updated. Querying whether the
+	 * request is complete is far cheaper (as it involves no locked
+	 * instructions setting cachelines to exclusive) than acquiring
+	 * the reference, so we do it first. The RCU read lock ensures the
+	 * pointer dereference is valid, but does not ensure that the
+	 * seqno nor HWS is the right one! However, if the request was
+	 * reallocated, that means the active tracker's request was complete.
+	 * If the new request is also complete, then both are and we can
+	 * just report the active tracker is idle. If the new request is
+	 * incomplete, then we acquire a reference on it and check that
+	 * it remained the active request.
+	 */
+	do {
+		struct drm_i915_gem_request *request;
+
+		request = rcu_dereference(active->request);
+		if (!request || i915_gem_request_completed(request))
+			return NULL;
+
+		request = i915_gem_request_get_rcu(request);
+
+		/* What stops the following rcu_access_pointer() from occurring
+		 * before the above i915_gem_request_get_rcu()? If we were
+		 * to read the value before pausing to get the reference to
+		 * the request, we may not notice a change in the active
+		 * tracker.
+		 *
+		 * The rcu_access_pointer() is a mere compiler barrier, which
+		 * means both the CPU and compiler are free to perform the
+		 * memory read without constraint. The compiler only has to
+		 * ensure that any operations after the rcu_access_pointer()
+		 * occur afterwards in program order. This means the read may
+		 * be performed earlier by an out-of-order CPU, or adventurous
+		 * compiler.
+		 *
+		 * The atomic operation at the heart of
+		 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+		 * atomic_inc_not_zero() which is only a full memory barrier
+		 * when successful. That is, if i915_gem_request_get_rcu()
+		 * returns the request (and so with the reference counted
+		 * incremented) then the following read for rcu_access_pointer()
+		 * must occur after the atomic operation and so confirm
+		 * that this request is the one currently being tracked.
+		 */
+		if (!request || request == rcu_access_pointer(active->request))
+			return rcu_pointer_handoff(request);
+
+		i915_gem_request_put(request);
+	} while (1);
+}
+
+/**
  * __i915_gem_active_is_busy - report whether the active tracker is assigned
  * @active - the active tracker
  *
@@ -380,7 +504,7 @@ i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 static inline bool
 __i915_gem_active_is_busy(const struct i915_gem_active *active)
 {
-	return __i915_gem_active_peek(active);
+	return rcu_access_pointer(active->request);
 }
 
 /**
@@ -437,7 +561,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
 	struct drm_i915_gem_request *request;
 	int ret;
 
-	request = active->request;
+	request = rcu_dereference_protected(active->request,
+					    lockdep_is_held(mutex));
 	if (!request)
 		return 0;
 
@@ -446,7 +571,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
 		return ret;
 
 	list_del_init(&active->link);
-	active->request = NULL;
+	RCU_INIT_POINTER(active->request, NULL);
+
 	active->retire(active, request);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index bcd85bdbc25f..1341cb55b6f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -205,6 +205,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		intel_runtime_pm_put(dev_priv);
 
 	i915_gem_retire_requests(dev_priv);
+	/* expedite the RCU grace period to free some request slabs */
+	synchronize_rcu_expedited();
 
 	return count;
 }
@@ -225,10 +227,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
  */
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
-	return i915_gem_shrink(dev_priv, -1UL,
-			       I915_SHRINK_BOUND |
-			       I915_SHRINK_UNBOUND |
-			       I915_SHRINK_ACTIVE);
+	unsigned long freed;
+
+	freed = i915_gem_shrink(dev_priv, -1UL,
+				I915_SHRINK_BOUND |
+				I915_SHRINK_UNBOUND |
+				I915_SHRINK_ACTIVE);
+	rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+
+	return freed;
 }
 
 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* [PATCH 73/73] drm/i915: Export our request as a dma-buf fence on the reservation object
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (71 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 72/73] drm/i915: Enable lockless lookup of request tracking via RCU Chris Wilson
@ 2016-08-01  9:11 ` Chris Wilson
  2016-08-01 11:45 ` ✗ Ro.CI.BAT: failure for series starting with [01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Patchwork
  73 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01  9:11 UTC (permalink / raw)
  To: intel-gfx

If the GEM objects being rendered within this request have been
exported via dma-buf to a third party, hook ourselves into the dma-buf
reservation object so that the third party can serialise with our
rendering via the dma-buf fences.
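
For illustration only (not part of this patch): once our request is
attached as a fence, an importer can serialise against the rendering
with just the core reservation API, e.g.

	struct reservation_object *resv = dma_buf->resv;
	long ret;

	/* wait on all fences, interruptibly, for up to 100ms */
	ret = reservation_object_wait_timeout_rcu(resv, true, true,
						  msecs_to_jiffies(100));
	/* ret > 0: signalled, ret == 0: timed out, ret < 0: error */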

Testcase: igt/prime_busy
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c     | 58 ++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 31 ++++++++++++++--
 2 files changed, 84 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 3a00ab3ad06e..c60a8d5bbad0 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -23,9 +23,13 @@
  * Authors:
  *	Dave Airlie <airlied@redhat.com>
  */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
 #include <drm/drmP.h>
+
 #include "i915_drv.h"
-#include <linux/dma-buf.h>
 
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
+static void export_fences(struct drm_i915_gem_object *obj,
+			  struct dma_buf *dma_buf)
+{
+	struct reservation_object *resv = dma_buf->resv;
+	struct drm_i915_gem_request *req;
+	unsigned long active;
+	int idx;
+
+	active = __I915_BO_ACTIVE(obj);
+	if (!active)
+		return;
+
+	/* Serialise with execbuf to prevent concurrent fence-loops */
+	mutex_lock(&obj->base.dev->struct_mutex);
+
+	/* Mark the object for future fences before racily adding old fences */
+	obj->base.dma_buf = dma_buf;
+
+	ww_mutex_lock(&resv->lock, NULL);
+
+	for_each_active(active, idx) {
+		req = i915_gem_active_get(&obj->last_read[idx],
+					  &obj->base.dev->struct_mutex);
+		if (!req)
+			continue;
+
+		if (reservation_object_reserve_shared(resv) == 0)
+			reservation_object_add_shared_fence(resv, &req->fence);
+
+		i915_gem_request_put(req);
+	}
+
+	req = i915_gem_active_get(&obj->last_write,
+				  &obj->base.dev->struct_mutex);
+	if (req) {
+		reservation_object_add_excl_fence(resv, &req->fence);
+		i915_gem_request_put(req);
+	}
+
+	ww_mutex_unlock(&resv->lock);
+	mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dma_buf;
 
 	exp_info.ops = &i915_dmabuf_ops;
 	exp_info.size = gem_obj->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem_obj;
 
-
 	if (obj->ops->dmabuf_export) {
 		int ret = obj->ops->dmabuf_export(obj);
 		if (ret)
 			return ERR_PTR(ret);
 	}
 
-	return dma_buf_export(&exp_info);
+	dma_buf = dma_buf_export(&exp_info);
+	if (IS_ERR(dma_buf))
+		return dma_buf;
+
+	export_fences(obj, dma_buf);
+	return dma_buf;
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3a68ffaa55e6..f92bfafd1f49 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,13 +26,17 @@
  *
  */
 
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
 
 #define  __EXEC_OBJECT_HAS_PIN		(1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
@@ -1204,6 +1208,28 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+			    struct drm_i915_gem_request *req,
+			    unsigned int flags)
+{
+	struct reservation_object *resv;
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (!resv)
+		return;
+
+	/* Ignore errors from failing to allocate the new fence; we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	ww_mutex_lock(&resv->lock, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &req->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &req->fence);
+	ww_mutex_unlock(&resv->lock);
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
@@ -1223,6 +1249,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.read_domains = obj->base.pending_read_domains;
 
 		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+		eb_export_fence(obj, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* Re: [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-01  9:10 ` [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write Chris Wilson
@ 2016-08-01 10:07   ` Joonas Lahtinen
  2016-08-01 10:15     ` Chris Wilson
  2016-08-01 10:21     ` Chris Wilson
  0 siblings, 2 replies; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 10:07 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> Space reservation is already safe with respect to the ring->size
> modulus, but hardware only expects to see values in the range
> 0...ring->size-1 (inclusive) and so requires the modulus to prevent us
> writing the value ring->size instead of 0. As this is only required for
> the register itself, we can defer the modulus to the register update and
> not perform it after every command packet. We keep the
> intel_ring_advance() around in the code to provide demarcation for the
> end-of-packet (which then can be compared against intel_ring_begin() as
> the number of dwords emitted must match the reserved space).
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Dave Gordon <david.s.gordon@intel.com>
> ---
>  drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
>  drivers/gpu/drm/i915/intel_ringbuffer.c |  6 ++++--
>  drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++++++++----
>  3 files changed, 18 insertions(+), 7 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index bf42a66d6624..824f7efe4e64 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
>  	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
>  	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
>  
> -	reg_state[CTX_RING_TAIL+1] = rq->tail;
> +	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
>  
>  	/* True 32b PPGTT with dynamic page allocation: update PDP
>  	 * registers and point the unallocated PDPs to scratch page.
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 3142085b5cc0..21d5e8209400 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1718,7 +1718,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
>  {
>  	struct drm_i915_private *dev_priv = request->i915;
>  
> -	I915_WRITE_TAIL(request->engine, request->tail);
> +	I915_WRITE_TAIL(request->engine,
> +			intel_ring_offset(request->ring, request->tail));
>  }
>  
>  static void
> @@ -2505,7 +2506,8 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
>  		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
>  
>  	/* Now that the ring is fully powered up, update the tail */
> -	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
> +	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
> +		      intel_ring_offset(request->ring, request->tail));
>  	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
>  
>  	/* Let the ring send IDLE messages to the GT again,
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 14d2ea36fb88..9ac96ddb01ee 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -460,14 +460,23 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
>  
>  static inline void intel_ring_advance(struct intel_ring *ring)
>  {
> +	/* Dummy function.
> +	 *
> +	 * This serves as a placeholder in the code so that the reader
> +	 * can compare against the preceding intel_ring_begin() and
> +	 * check that the number of dwords emitted matches the space
> +	 * reserved for the command packet (i.e. the value passed to
> +	 * intel_ring_begin()).
> +	 */
> +}
> +
> +static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
> +{
>  	/* The modulus is required so that we avoid writing
>  	 * request->tail == ring->size, rather than the expected 0,
>  	 * into the RING_TAIL register as that can cause a GPU hang.
> -	 * As this is only strictly required for the request->tail,
> -	 * and only then as we write the value into hardware, we can
> -	 * one day remove the modulus after every command packet.
>  	 */
> -	ring->tail &= ring->size - 1;
> +	return value & (ring->size - 1);
>  }

The comment seems outdated-ish as it speaks of modulus which is nowhere
to be seen. I'd speak of 'masking'. With that,

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas

>  
>  int __intel_ring_space(int head, int tail, int size);
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-01 10:07   ` Joonas Lahtinen
@ 2016-08-01 10:15     ` Chris Wilson
  2016-08-01 14:28       ` Joonas Lahtinen
  2016-08-01 10:21     ` Chris Wilson
  1 sibling, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01 10:15 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Mon, Aug 01, 2016 at 01:07:55PM +0300, Joonas Lahtinen wrote:
> On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> > Space reservation is already safe with respect to the ring->size
> > modulus, but hardware only expects to see values in the range
> > 0...ring->size-1 (inclusive) and so requires the modulus to prevent us
> > writing the value ring->size instead of 0. As this is only required for
> > the register itself, we can defer the modulus to the register update and
> > not perform it after every command packet. We keep the
> > intel_ring_advance() around in the code to provide demarcation for the
> > end-of-packet (which then can be compared against intel_ring_begin() as
> > the number of dwords emitted must match the reserved space).
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> > Cc: Dave Gordon <david.s.gordon@intel.com>
> > ---
> >  drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
> >  drivers/gpu/drm/i915/intel_ringbuffer.c |  6 ++++--
> >  drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++++++++----
> >  3 files changed, 18 insertions(+), 7 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > index bf42a66d6624..824f7efe4e64 100644
> > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > @@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
> >  	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
> >  	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
> >  
> > -	reg_state[CTX_RING_TAIL+1] = rq->tail;
> > +	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
> >  
> >  	/* True 32b PPGTT with dynamic page allocation: update PDP
> >  	 * registers and point the unallocated PDPs to scratch page.
> > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > index 3142085b5cc0..21d5e8209400 100644
> > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > @@ -1718,7 +1718,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
> >  {
> >  	struct drm_i915_private *dev_priv = request->i915;
> >  
> > -	I915_WRITE_TAIL(request->engine, request->tail);
> > +	I915_WRITE_TAIL(request->engine,
> > +			intel_ring_offset(request->ring, request->tail));
> >  }
> >  
> >  static void
> > @@ -2505,7 +2506,8 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
> >  		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
> >  
> >  	/* Now that the ring is fully powered up, update the tail */
> > -	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
> > +	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
> > +		      intel_ring_offset(request->ring, request->tail));
> >  	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
> >  
> >  	/* Let the ring send IDLE messages to the GT again,
> > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > index 14d2ea36fb88..9ac96ddb01ee 100644
> > --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > @@ -460,14 +460,23 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
> >  
> >  static inline void intel_ring_advance(struct intel_ring *ring)
> >  {
> > +	/* Dummy function.
> > +	 *
> > +	 * This serves as a placeholder in the code so that the reader
> > +	 * can compare against the preceding intel_ring_begin() and
> > +	 * check that the number of dwords emitted matches the space
> > +	 * reserved for the command packet (i.e. the value passed to
> > +	 * intel_ring_begin()).
> > +	 */
> > +}
> > +
> > +static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
> > +{
> >  	/* The modulus is required so that we avoid writing
> >  	 * request->tail == ring->size, rather than the expected 0,
> >  	 * into the RING_TAIL register as that can cause a GPU hang.
> > -	 * As this is only strictly required for the request->tail,
> > -	 * and only then as we write the value into hardware, we can
> > -	 * one day remove the modulus after every command packet.
> >  	 */
> > -	ring->tail &= ring->size - 1;
> > +	return value & (ring->size - 1);
> >  }
> 
> The comment seems outdated-ish as it speaks of modulus which is nowhere
> to be seen. I'd speak of 'masking'. With that,

The operation is a modulus. The common optimisation for moduli with
a power-of-two divisor being applied.
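
E.g. for a 4096 byte ring (any power-of-two size behaves the same):

	value % 4096 == (value & (4096 - 1)) == (value & 0xfff)

so the mask is exactly the modulus, just computed without a division.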
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-01 10:07   ` Joonas Lahtinen
  2016-08-01 10:15     ` Chris Wilson
@ 2016-08-01 10:21     ` Chris Wilson
  1 sibling, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01 10:21 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Mon, Aug 01, 2016 at 01:07:55PM +0300, Joonas Lahtinen wrote:
> On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> > Space reservation is already safe with respect to the ring->size
> > modulus, but hardware only expects to see values in the range
> > 0...ring->size-1 (inclusive) and so requires the modulus to prevent us
> > writing the value ring->size instead of 0. As this is only required for
> > the register itself, we can defer the modulus to the register update and
> > not perform it after every command packet. We keep the
> > intel_ring_advance() around in the code to provide demarcation for the
> > end-of-packet (which then can be compared against intel_ring_begin() as
> > the number of dwords emitted must match the reserved space).
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> > Cc: Dave Gordon <david.s.gordon@intel.com>
> > ---
> >  drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
> >  drivers/gpu/drm/i915/intel_ringbuffer.c |  6 ++++--
> >  drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++++++++----
> >  3 files changed, 18 insertions(+), 7 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > index bf42a66d6624..824f7efe4e64 100644
> > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > @@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
> >  	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
> >  	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
> >  
> > -	reg_state[CTX_RING_TAIL+1] = rq->tail;
> > +	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
> >  
> >  	/* True 32b PPGTT with dynamic page allocation: update PDP
> >  	 * registers and point the unallocated PDPs to scratch page.
> > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > index 3142085b5cc0..21d5e8209400 100644
> > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > @@ -1718,7 +1718,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
> >  {
> >  	struct drm_i915_private *dev_priv = request->i915;
> >  
> > -	I915_WRITE_TAIL(request->engine, request->tail);
> > +	I915_WRITE_TAIL(request->engine,
> > +			intel_ring_offset(request->ring, request->tail));
> >  }
> >  
> >  static void
> > @@ -2505,7 +2506,8 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
> >  		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
> >  
> >  	/* Now that the ring is fully powered up, update the tail */
> > -	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
> > +	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
> > +		      intel_ring_offset(request->ring, request->tail));
> >  	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
> >  
> >  	/* Let the ring send IDLE messages to the GT again,
> > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > index 14d2ea36fb88..9ac96ddb01ee 100644
> > --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > @@ -460,14 +460,23 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
> >  
> >  static inline void intel_ring_advance(struct intel_ring *ring)
> >  {
> > +	/* Dummy function.
> > +	 *
> > +	 * This serves as a placeholder in the code so that the reader
> > +	 * can compare against the preceding intel_ring_begin() and
> > +	 * check that the number of dwords emitted matches the space
> > +	 * reserved for the command packet (i.e. the value passed to
> > +	 * intel_ring_begin()).
> > +	 */
> > +}
> > +
> > +static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
> > +{
> >  	/* The modulus is required so that we avoid writing
> >  	 * request->tail == ring->size, rather than the expected 0,
> >  	 * into the RING_TAIL register as that can cause a GPU hang.
> > -	 * As this is only strictly required for the request->tail,
> > -	 * and only then as we write the value into hardware, we can
> > -	 * one day remove the modulus after every command packet.
> >  	 */
> > -	ring->tail &= ring->size - 1;
> > +	return value & (ring->size - 1);
> >  }
> 
> The comment seems outdated-ish as it speaks of modulus which is nowhere
> to be seen. I'd speak of 'masking'. With that,

Instead of that,

@@ -2081,6 +2082,8 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
        struct intel_ring *ring;
        int ret;
 
+       GEM_BUG_ON(!is_power_of_2(size));
+
        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (ring == NULL) {
                DRM_DEBUG_DRIVER("Failed to allocate ringbuffer %s\n",

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 46/73] drm/i915: Release vma when the handle is closed
  2016-08-01  9:10 ` [PATCH 46/73] drm/i915: Release vma when the handle is closed Chris Wilson
@ 2016-08-01 11:26   ` Joonas Lahtinen
  0 siblings, 0 replies; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 11:26 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> In order to prevent a leak of the vma on shared objects, we need to
> hook into the object_close callback to destroy the vma on the object for
> this file. However, if we destroyed that vma immediately we may cause
> unexpected application stalls as we try to unbind a busy vma - hence we
> defer the unbind to when we retire the vma.
> 
> v2: Keep vma allocated until closed. This is useful for a later
> optimisation, but it is required now in order to handle potential
> recursion of i915_vma_unbind() by retiring itself.
> v3: Comments are important.
> 
> Testcase: igt/gem_ppggtt/flink-and-close-vma-leak
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>

Already added to previous series;

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/i915_drv.c       |  1 +
>  drivers/gpu/drm/i915/i915_drv.h       |  4 +-
>  drivers/gpu/drm/i915/i915_gem.c       | 88 ++++++++++++++++++-----------------
>  drivers/gpu/drm/i915/i915_gem_evict.c |  8 +---
>  drivers/gpu/drm/i915/i915_gem_gtt.c   | 25 ++++++++++
>  drivers/gpu/drm/i915/i915_gem_gtt.h   |  1 +
>  6 files changed, 77 insertions(+), 50 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index 478e8168ad94..869baa6a5196 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -2574,6 +2574,7 @@ static struct drm_driver driver = {
>  	.postclose = i915_driver_postclose,
>  	.set_busid = drm_pci_set_busid,
>  
> +	.gem_close_object = i915_gem_close_object,
>  	.gem_free_object = i915_gem_free_object,
>  	.gem_vm_ops = &i915_gem_vm_ops,
>  
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index a1c4c768c0c3..f470ea195253 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3014,8 +3014,8 @@ struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
>  						  size_t size);
>  struct drm_i915_gem_object *i915_gem_object_create_from_data(
>  		struct drm_device *dev, const void *data, size_t size);
> +void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
>  void i915_gem_free_object(struct drm_gem_object *obj);
> -void i915_gem_vma_destroy(struct i915_vma *vma);
>  
>  /* Flags used by pin/bind&friends. */
>  #define PIN_MAPPABLE	(1<<0)
> @@ -3048,6 +3048,8 @@ int __must_check i915_vma_unbind(struct i915_vma *vma);
>   * _guarantee_ VMA in question is _not in use_ anywhere.
>   */
>  int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma);
> +void i915_vma_close(struct i915_vma *vma);
> +void i915_vma_destroy(struct i915_vma *vma);
>  
>  int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
>  int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 3ebbeb4e0a24..74d24d7e3ca2 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2596,6 +2596,19 @@ out_rearm:
>  	}
>  }
>  
> +void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
> +{
> +	struct drm_i915_gem_object *obj = to_intel_bo(gem);
> +	struct drm_i915_file_private *fpriv = file->driver_priv;
> +	struct i915_vma *vma, *vn;
> +
> +	mutex_lock(&obj->base.dev->struct_mutex);
> +	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
> +		if (vma->vm->file == fpriv)
> +			i915_vma_close(vma);
> +	mutex_unlock(&obj->base.dev->struct_mutex);
> +}
> +
>  /**
>   * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
>   * @dev: drm device pointer
> @@ -2803,26 +2816,32 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>  	if (active && wait) {
>  		int idx;
>  
> +		/* When a closed VMA is retired, it is unbound - eek.
> +		 * In order to prevent it from being recursively closed,
> +		 * take a pin on the vma so that the second unbind is
> +		 * aborted.
> +		 */
> +		vma->pin_count++;
> +
>  		for_each_active(active, idx) {
>  			ret = i915_gem_active_retire(&vma->last_read[idx],
>  						   &vma->vm->dev->struct_mutex);
>  			if (ret)
> -				return ret;
> +				break;
>  		}
>  
> +		vma->pin_count--;
> +		if (ret)
> +			return ret;
> +
>  		GEM_BUG_ON(i915_vma_is_active(vma));
>  	}
>  
>  	if (vma->pin_count)
>  		return -EBUSY;
>  
> -	if (list_empty(&vma->obj_link))
> -		return 0;
> -
> -	if (!drm_mm_node_allocated(&vma->node)) {
> -		i915_gem_vma_destroy(vma);
> -		return 0;
> -	}
> +	if (!drm_mm_node_allocated(&vma->node))
> +		goto destroy;
>  
>  	GEM_BUG_ON(obj->bind_count == 0);
>  	GEM_BUG_ON(!obj->pages);
> @@ -2855,7 +2874,6 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>  	}
>  
>  	drm_mm_remove_node(&vma->node);
> -	i915_gem_vma_destroy(vma);
>  
>  	/* Since the unbound list is global, only move to that list if
>  	 * no more VMAs exist. */
> @@ -2869,6 +2887,10 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
>  	 */
>  	i915_gem_object_unpin_pages(obj);
>  
> +destroy:
> +	if (unlikely(vma->closed))
> +		i915_vma_destroy(vma);
> +
>  	return 0;
>  }
>  
> @@ -3043,7 +3065,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
>  
>  		if (offset & (alignment - 1) || offset + size > end) {
>  			ret = -EINVAL;
> -			goto err_free_vma;
> +			goto err_vma;
>  		}
>  		vma->node.start = offset;
>  		vma->node.size = size;
> @@ -3055,7 +3077,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
>  				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
>  		}
>  		if (ret)
> -			goto err_free_vma;
> +			goto err_vma;
>  	} else {
>  		if (flags & PIN_HIGH) {
>  			search_flag = DRM_MM_SEARCH_BELOW;
> @@ -3080,7 +3102,7 @@ search_free:
>  			if (ret == 0)
>  				goto search_free;
>  
> -			goto err_free_vma;
> +			goto err_vma;
>  		}
>  	}
>  	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
> @@ -3101,8 +3123,7 @@ search_free:
>  
>  err_remove_node:
>  	drm_mm_remove_node(&vma->node);
> -err_free_vma:
> -	i915_gem_vma_destroy(vma);
> +err_vma:
>  	vma = ERR_PTR(ret);
>  err_unpin:
>  	i915_gem_object_unpin_pages(obj);
> @@ -4051,21 +4072,18 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
>  
>  	trace_i915_gem_object_destroy(obj);
>  
> +	/* All file-owned VMA should have been released by this point through
> +	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
> +	 * However, the object may also be bound into the global GTT (e.g.
> +	 * older GPUs without per-process support, or for direct access through
> +	 * the GTT either for the user or for scanout). Those VMA still need to
> > +	 * be unbound now.
> +	 */
>  	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
> -		int ret;
> -
> +		GEM_BUG_ON(!vma->is_ggtt);
> +		GEM_BUG_ON(i915_vma_is_active(vma));
>  		vma->pin_count = 0;
> -		ret = __i915_vma_unbind_no_wait(vma);
> -		if (WARN_ON(ret == -ERESTARTSYS)) {
> -			bool was_interruptible;
> -
> -			was_interruptible = dev_priv->mm.interruptible;
> -			dev_priv->mm.interruptible = false;
> -
> -			WARN_ON(i915_vma_unbind(vma));
> -
> -			dev_priv->mm.interruptible = was_interruptible;
> -		}
> +		i915_vma_close(vma);
>  	}
>  	GEM_BUG_ON(obj->bind_count);
>  
> @@ -4129,22 +4147,6 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
>  	return NULL;
>  }
>  
> -void i915_gem_vma_destroy(struct i915_vma *vma)
> -{
> -	WARN_ON(vma->node.allocated);
> -
> -	/* Keep the vma as a placeholder in the execbuffer reservation lists */
> -	if (!list_empty(&vma->exec_list))
> -		return;
> -
> -	if (!vma->is_ggtt)
> -		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
> -
> -	list_del(&vma->obj_link);
> -
> -	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
> -}
> -
>  static void
>  i915_gem_stop_engines(struct drm_device *dev)
>  {
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 81f7b4383d5e..3437ced76cb6 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -182,8 +182,8 @@ found:
>  				       struct i915_vma,
>  				       exec_list);
>  		if (drm_mm_scan_remove_block(&vma->node)) {
> +			vma->pin_count++;
>  			list_move(&vma->exec_list, &eviction_list);
> -			i915_gem_object_get(vma->obj);
>  			continue;
>  		}
>  		list_del_init(&vma->exec_list);
> @@ -191,18 +191,14 @@ found:
>  
>  	/* Unbinding will emit any required flushes */
>  	while (!list_empty(&eviction_list)) {
> -		struct drm_i915_gem_object *obj;
> -
>  		vma = list_first_entry(&eviction_list,
>  				       struct i915_vma,
>  				       exec_list);
>  
> -		obj =  vma->obj;
>  		list_del_init(&vma->exec_list);
> +		vma->pin_count--;
>  		if (ret == 0)
>  			ret = i915_vma_unbind(vma);
> -
> -		i915_gem_object_put(obj);
>  	}
>  
>  	return ret;
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index f5f563d0a1ce..e0a864ae9e0b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -3335,6 +3335,31 @@ i915_vma_retire(struct i915_gem_active *active,
>  		return;
>  
>  	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
> +	if (unlikely(vma->closed && !vma->pin_count))
> +		WARN_ON(i915_vma_unbind(vma));
> +}
> +
> +void i915_vma_destroy(struct i915_vma *vma)
> +{
> +	GEM_BUG_ON(vma->node.allocated);
> +	GEM_BUG_ON(i915_vma_is_active(vma));
> +	GEM_BUG_ON(!vma->closed);
> +
> +	list_del(&vma->vm_link);
> +	if (!vma->is_ggtt)
> +		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
> +
> +	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
> +}
> +
> +void i915_vma_close(struct i915_vma *vma)
> +{
> +	GEM_BUG_ON(vma->closed);
> +	vma->closed = true;
> +
> +	list_del_init(&vma->obj_link);
> +	if (!i915_vma_is_active(vma) && !vma->pin_count)
> +		WARN_ON(__i915_vma_unbind_no_wait(vma));
>  }
>  
>  static struct i915_vma *
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index 674e118e3686..a3ea1bda2b1b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -189,6 +189,7 @@ struct i915_vma {
>  #define LOCAL_BIND	(1<<1)
>  	unsigned int bound : 4;
>  	bool is_ggtt : 1;
> +	bool closed : 1;
>  
>  	/**
>  	 * Support different GGTT views into the same object.
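
As an aside for anyone following the vma lifecycle here: a minimal
sketch of the close/retire split that the i915_vma_close() and
i915_vma_retire() hunks above introduce. All names below are
illustrative stand-ins, not the driver's actual structures; closing
marks the vma dead and unbinds it immediately only when it is idle and
unpinned, otherwise retirement performs the deferred unbind.

struct sketch_vma {
        bool closed;
        bool active;
        int pin_count;
};

static void sketch_vma_unbind(struct sketch_vma *vma);

static void sketch_vma_close(struct sketch_vma *vma)
{
        vma->closed = true;
        if (!vma->active && !vma->pin_count)
                sketch_vma_unbind(vma);         /* idle: unbind now */
        /* otherwise the unbind is deferred until retirement */
}

static void sketch_vma_retire(struct sketch_vma *vma)
{
        vma->active = false;
        if (vma->closed && !vma->pin_count)
                sketch_vma_unbind(vma);         /* deferred unbind */
}
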
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* ✗ Ro.CI.BAT: failure for series starting with [01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
  2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
                   ` (72 preceding siblings ...)
  2016-08-01  9:11 ` [PATCH 73/73] drm/i915: Export our request as a dma-buf fence on the reservation object Chris Wilson
@ 2016-08-01 11:45 ` Patchwork
  73 siblings, 0 replies; 94+ messages in thread
From: Patchwork @ 2016-08-01 11:45 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit
URL   : https://patchwork.freedesktop.org/series/10444/
State : failure

== Summary ==

Series 10444v1 Series without cover letter
http://patchwork.freedesktop.org/api/1.0/series/10444/revisions/1/mbox

Test kms_cursor_legacy:
        Subgroup basic-flip-vs-cursor-legacy:
                pass       -> FAIL       (ro-bdw-i5-5250u)
                pass       -> FAIL       (fi-skl-i7-6700k)
        Subgroup basic-flip-vs-cursor-varying-size:
                fail       -> PASS       (ro-bdw-i5-5250u)

fi-kbl-qkkr      total:239  pass:180  dwarn:28  dfail:0   fail:3   skip:28 
fi-skl-i5-6260u  total:239  pass:223  dwarn:0   dfail:0   fail:2   skip:14 
fi-skl-i7-6700k  total:239  pass:207  dwarn:0   dfail:0   fail:4   skip:28 
ro-bdw-i5-5250u  total:239  pass:218  dwarn:4   dfail:0   fail:1   skip:16 
ro-bdw-i7-5557U  total:239  pass:222  dwarn:1   dfail:0   fail:0   skip:16 
ro-bsw-n3050     total:239  pass:193  dwarn:0   dfail:0   fail:4   skip:42 
ro-skl3-i5-6260u total:239  pass:221  dwarn:0   dfail:0   fail:4   skip:14 
fi-hsw-i7-4770k failed to connect after reboot
fi-snb-i7-2600 failed to connect after reboot
ro-bdw-i7-5600u failed to connect after reboot
ro-byt-n2820 failed to connect after reboot
ro-hsw-i3-4010u failed to connect after reboot
ro-hsw-i7-4770r failed to connect after reboot
ro-ivb2-i7-3770 failed to connect after reboot
ro-ivb-i7-3770 failed to connect after reboot
ro-snb-i7-2620M failed to connect after reboot

Results at /archive/results/CI_IGT_test/RO_Patchwork_1649/

f1b78d2 drm-intel-nightly: 2016y-07m-29d-19h-16m-57s UTC integration manifest
df1c140 drm/i915: Export our request as a dma-buf fence on the reservation object
263d4e8 drm/i915: Enable lockless lookup of request tracking via RCU
8174b2f drm/i915: Move i915_gem_object_wait_rendering()
6f6370b drm/i915: Move obj->active:5 to obj->flags
36251b6 drm/i915: Use dev_priv consistently through the intel_frontbuffer interface
b7ae095 drm/i915: Use atomics to manipulate obj->frontbuffer_bits
4925855 drm/i915: Make fb_tracking.lock a spinlock
27c4661 drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin()
88651c8 drm/i915: Make i915_vma_pin() small and inline
5e6ac34 drm/i915: Combine all i915_vma bitfields into a single set of flags
fbf6f72 drm/i915: Start passing around i915_vma from execbuffer
01bc26b drm/i915: Wrap vma->pin_count accessors with small inline helpers
3b44fa5 drm/i915: Record allocated vma size
50855fb drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private
72b5ff8 drm/i915: Update the GGTT size/alignment query functions
70c2139 drm/i915: Convert 4096 alignment request to 0 for drm_mm allocations
45bb3f4 drm/i915: Split insertion/binding of an object into the VM
80e55ab drm/i915: Reduce WARN(i915_gem_valid_gtt_space) to a debug-only check
32ac54e drm/i915: Pad GTT views of exec objects up to user specified size
bded2eb drm/i915: Fix up vma alignment to be u64
8147236 drm/i915: Remove i915_gem_execbuffer_retire_commands()
94be894 drm/i915: Remove request retirement before each batch
3558502 drm/i915: Double check the active status on the batch pool
70c107e drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something()
0610eca drm/i915: Combine loops within i915_gem_evict_something
67f61eb Revert "drm/i915: Clean up associated VMAs on context destruction"
91a7255 drm/i915: Mark the context and address space as closed
7199dbd drm/i915: Release vma when the handle is closed
6c0a97e drm/i915: Track active vma requests
77ae8f4 drm/i915: i915_vma_move_to_active prep patch
c1876f1 drm/i915: Move request list retirement to i915_gem_request.c
9ebfe3f drm/i915: Double check activity before relocations
405589d drm/i915: s/__i915_wait_request/i915_wait_request/
7ec3b4d drm/i915: Disable waitboosting for a saturated engine
3cce71d drm/i915: Move the special case wait-request handling to its one caller
45d740d3 drm/i915: Convert intel_overlay to request tracking
9865233 drm/i915: Track requests inside each intel_ring
0c3ecda drm/i915: Refactor activity tracking for requests
a9b8ad0 drm/i915: Remove obsolete i915_gem_object_flush_active()
be0e9ed drm/i915: Rename request->list to link for consistency
a90256d drm/i915: Refactor blocking waits
842fd46 drm/i915: Mark up i915_gem_active for locking annotation
f989dd8 drm/i915: Prepare i915_gem_active for annotations
c662f3e drm/i915: Introduce i915_gem_active for request tracking
5dabc19 drm/i915: Kill drop_pages()
12919f3 drm/i915: Be more careful when unbinding vma
db6b7cd drm/i915: Count how many VMA are bound for an object
1a519bb drm/i915: Store owning file on the i915_address_space
8f90320 drm/i915: Split early global GTT initialisation
8038cb4 drm/i915: Amalgamate GGTT/ppGTT vma debug list walkers
660476d drm/i915: Rename engine->semaphore.sync_to, engine->sempahore.signal locals
af5b9c6 drm/i915: Simplify calling engine->sync_to
405062b drm/i915: Unify legacy/execlists submit_execbuf callbacks
bc7ac32 drm/i915: Refactor golden render state emission to unconfuse gcc
baa0886 drm/i915: Remove duplicate golden render state init from execlists
02016cc drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores
20eb752 drm/i915: Reuse legacy breadcrumbs + tail emission
8aab65b drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal()
13ff399 drm/i915/lrc: Update function names to match request flow
0cc88f5 drm/i915: Unify request submission
321b485 drm/i915: Move the modulus for ring emission to the register write
ac55bb1 drm/i915: Convert engine->write_tail to operate on a request
5ae9ea0 drm/i915: Remove intel_ring_get_tail()
9a192ed drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START
2e5b9de drm/i915: Simplify request_alloc by returning the allocated request
6010028 drm/i915: Reduce engine->emit_flush() to a single mode parameter
c61846e drm/i915: Remove obsolete engine->gpu_caches_dirty
610eaf6 drm/i915: Rename intel_pin_and_map_ring()
e2e6655 drm/i915: Rename residual ringbuf parameters
9edf5d1 drm/i915: Rename struct intel_ringbuffer to struct intel_ring
33b5475 drm/i915: Rename intel_context[engine].ringbuf
6622992 drm/i915: Rename request->ringbuf to request->ring
a4f21b8 drm/i915: Unify intel_logical_ring_emit and intel_ring_emit

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 54/73] drm/i915: Fix up vma alignment to be u64
  2016-08-01  9:11 ` [PATCH 54/73] drm/i915: Fix up vma alignment to be u64 Chris Wilson
@ 2016-08-01 12:21   ` Joonas Lahtinen
  0 siblings, 0 replies; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 12:21 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2016-08-01 at 10:11 +0100, Chris Wilson wrote:
> This is not the full fix, as we are required to percolate the u64 nature
> down through the drm_mm stack, but this is required now to prevent
> explosions due to mismatch between execbuf (eb_vma_misplaced) and vma
> binding (i915_vma_misplaced) - and reduces the risk of spurious changes
> as we adjust the vma interface in the next patches.
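
A sketch of the kind of disagreement described above, using
illustrative helpers rather than the driver's eb_vma_misplaced() and
i915_vma_misplaced() (u32/u64 are the usual kernel typedefs):

static bool sketch_misplaced_u64(u64 offset, u64 alignment)
{
        return alignment && (offset & (alignment - 1));
}

static bool sketch_misplaced_u32(u64 offset, u32 alignment) /* truncates */
{
        return alignment && (offset & (alignment - 1));
}

With alignment = 1ull << 32, the u64 check can report the vma as
misplaced while the truncating u32 path sees alignment == 0 and reports
it as fine, so the two callers never agree on whether a rebind is
needed.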
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_drv.h       | 14 ++++++--------
>  drivers/gpu/drm/i915/i915_gem.c       | 26 +++++++++++++-------------
>  drivers/gpu/drm/i915/i915_gem_evict.c |  5 +++--
>  3 files changed, 22 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index 2de3d16f7b80..74a31358fd87 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3032,13 +3032,13 @@ void i915_gem_free_object(struct drm_gem_object *obj);
>  int __must_check
>  i915_gem_object_pin(struct drm_i915_gem_object *obj,
>  		    struct i915_address_space *vm,
> -		    uint32_t alignment,
> -		    uint64_t flags);
> +		    u64 alignment,
> +		    u64 flags);
>  int __must_check
>  i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
>  			 const struct i915_ggtt_view *view,
> -			 uint32_t alignment,
> -			 uint64_t flags);
> +			 u64 alignment,
> +			 u64 flags);
>  
>  int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
>  		  u32 flags);
> @@ -3398,11 +3398,9 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
>  
>  /* i915_gem_evict.c */
>  int __must_check i915_gem_evict_something(struct i915_address_space *vm,
> -					  int min_size,
> -					  unsigned alignment,
> +					  u64 min_size, u64 alignment,
>  					  unsigned cache_level,
> -					  unsigned long start,
> -					  unsigned long end,
> +					  u64 start, u64 end,
>  					  unsigned flags);
>  int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
>  int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 3402fec5e33c..6039e46af69a 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2960,8 +2960,8 @@ static struct i915_vma *
>  i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
>  			   struct i915_address_space *vm,
>  			   const struct i915_ggtt_view *ggtt_view,
> -			   unsigned alignment,
> -			   uint64_t flags)
> +			   u64 alignment,
> +			   u64 flags)
>  {
>  	struct drm_device *dev = obj->base.dev;
>  	struct drm_i915_private *dev_priv = to_i915(dev);
> @@ -3020,9 +3020,9 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
>  		alignment = flags & PIN_MAPPABLE ? fence_alignment :
>  						unfenced_alignment;
>  	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
> -		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
> +		DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
>  			  ggtt_view ? ggtt_view->type : 0,
> -			  alignment);
> +			  (long long)alignment);
>  		return ERR_PTR(-EINVAL);
>  	}
>  
> @@ -3675,7 +3675,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
>  }
>  
>  static bool
> -i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
> +i915_vma_misplaced(struct i915_vma *vma, u64 alignment, u64 flags)
>  {
>  	struct drm_i915_gem_object *obj = vma->obj;
>  
> @@ -3724,8 +3724,8 @@ static int
>  i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
>  		       struct i915_address_space *vm,
>  		       const struct i915_ggtt_view *ggtt_view,
> -		       uint32_t alignment,
> -		       uint64_t flags)
> +		       u64 alignment,
> +		       u64 flags)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>  	struct i915_vma *vma;
> @@ -3754,12 +3754,12 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
>  		if (i915_vma_misplaced(vma, alignment, flags)) {
>  			WARN(vma->pin_count,
>  			     "bo is already pinned in %s with incorrect alignment:"
> -			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
> +			     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
>  			     " obj->map_and_fenceable=%d\n",
>  			     ggtt_view ? "ggtt" : "ppgtt",
>  			     upper_32_bits(vma->node.start),
>  			     lower_32_bits(vma->node.start),
> -			     alignment,
> +			     (long long)alignment,

Casting should still not be needed according to printk-formats.txt

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

>  			     !!(flags & PIN_MAPPABLE),
>  			     obj->map_and_fenceable);
>  			ret = i915_vma_unbind(vma);
> @@ -3795,8 +3795,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
>  int
>  i915_gem_object_pin(struct drm_i915_gem_object *obj,
>  		    struct i915_address_space *vm,
> -		    uint32_t alignment,
> -		    uint64_t flags)
> +		    u64 alignment,
> +		    u64 flags)
>  {
>  	return i915_gem_object_do_pin(obj, vm,
>  				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
> @@ -3806,8 +3806,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
>  int
>  i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
>  			 const struct i915_ggtt_view *view,
> -			 uint32_t alignment,
> -			 uint64_t flags)
> +			 u64 alignment,
> +			 u64 flags)
>  {
>  	struct drm_device *dev = obj->base.dev;
>  	struct drm_i915_private *dev_priv = to_i915(dev);
> diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
> index 4bce72fa14c4..ef12ecd2b182 100644
> --- a/drivers/gpu/drm/i915/i915_gem_evict.c
> +++ b/drivers/gpu/drm/i915/i915_gem_evict.c
> @@ -84,8 +84,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
>   */
>  int
>  i915_gem_evict_something(struct i915_address_space *vm,
> -			 int min_size, unsigned alignment, unsigned cache_level,
> -			 unsigned long start, unsigned long end,
> +			 u64 min_size, u64 alignment,
> +			 unsigned cache_level,
> +			 u64 start, u64 end,
>  			 unsigned flags)
>  {
>  	struct drm_i915_private *dev_priv = to_i915(vm->dev);
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 59/73] drm/i915: Update the GGTT size/alignment query functions
  2016-08-01  9:11 ` [PATCH 59/73] drm/i915: Update the GGTT size/alignment query functions Chris Wilson
@ 2016-08-01 12:27   ` Joonas Lahtinen
  0 siblings, 0 replies; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 12:27 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2016-08-01 at 10:11 +0100, Chris Wilson wrote:
>  
> -uint32_t
> -i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
> +/**
> + * i915_gem_get_ggtt_size - return required global GTT size for an object
> + * @dev: drm device
> + * @size: object size
> + * @tiling_mode: tiling mode
> + *
> + * Return the required GTT size for an object, taking into account
         + global -------^

> + * potential fence register mapping.
> + */
> +u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
>  {
> -	uint32_t gtt_size;
> +	u64 ggtt_size;
>  

<snip>

> -		gtt_size <<= 1;
> +	while (ggtt_size < size)
> +		ggtt_size <<= 1;
>  
> -	return gtt_size;
> +	return ggtt_size;
>  }
>  
>  /**
> - * i915_gem_get_gtt_alignment - return required GTT alignment for an object
> + * i915_gem_get_ggtt_alignment - return required GTT alignment for an object
                                     + global -----^

>   * @dev: drm device
>   * @size: object size
>   * @tiling_mode: tiling mode
> @@ -1875,15 +1885,16 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
>   * Return the required GTT alignment for an object, taking into account
       + global ---------^

With those,

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
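
For reference, a minimal sketch of the power-of-two fence sizing the
kerneldoc above is describing (an illustrative helper, not the driver's
code; the base granularity is 1 MiB on gen3 and 512 KiB earlier):

static u64 sketch_fence_size(u64 size, u64 base)
{
        u64 ggtt_size = base;           /* e.g. 512*1024 or 1024*1024 */

        while (ggtt_size < size)
                ggtt_size <<= 1;        /* round up to a power of two */

        return ggtt_size;
}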

-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 60/73] drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private
  2016-08-01  9:11 ` [PATCH 60/73] drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private Chris Wilson
@ 2016-08-01 12:30   ` Joonas Lahtinen
  0 siblings, 0 replies; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 12:30 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2016-08-01 at 10:11 +0100, Chris Wilson wrote:
> For consistency, internal functions should take drm_i915_private rather
> than drm_device. Now that we are subclassing drm_device, there are no
> more size wins, but being consistent is its own blessing.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

> ---
>  drivers/gpu/drm/i915/i915_drv.h        |  5 +++--
>  drivers/gpu/drm/i915/i915_gem.c        | 30 ++++++++++++++++--------------
>  drivers/gpu/drm/i915/i915_gem_tiling.c |  8 ++++----
>  3 files changed, 23 insertions(+), 20 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index b6e56ecb8637..3d73394b52d7 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -3241,8 +3241,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
>  int i915_gem_open(struct drm_device *dev, struct drm_file *file);
>  void i915_gem_release(struct drm_device *dev, struct drm_file *file);
>  
> -u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode);
> -u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
> +u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
> +			   int tiling_mode);
> +u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
>  				int tiling_mode, bool fenced);
>  
>  int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index 432868eafa60..f11bf6c4f86a 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -1846,25 +1846,26 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
>  
>  /**
>   * i915_gem_get_ggtt_size - return required global GTT size for an object
> - * @dev: drm device
> + * @dev_priv: i915 device
>   * @size: object size
>   * @tiling_mode: tiling mode
>   *
>   * Return the required GTT size for an object, taking into account
>   * potential fence register mapping.
>   */
> -u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
> +u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
> +			   u64 size, int tiling_mode)
>  {
>  	u64 ggtt_size;
>  
>  	GEM_BUG_ON(size == 0);
>  
> -	if (INTEL_GEN(dev) >= 4 ||
> +	if (INTEL_GEN(dev_priv) >= 4 ||
>  	    tiling_mode == I915_TILING_NONE)
>  		return size;
>  
>  	/* Previous chips need a power-of-two fence region when tiling */
> -	if (IS_GEN3(dev))
> +	if (IS_GEN3(dev_priv))
>  		ggtt_size = 1024*1024;
>  	else
>  		ggtt_size = 512*1024;
> @@ -1877,7 +1878,7 @@ u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
>  
>  /**
>   * i915_gem_get_ggtt_alignment - return required GTT alignment for an object
> - * @dev: drm device
> + * @dev_priv: i915 device
>   * @size: object size
>   * @tiling_mode: tiling mode
>   * @fenced: is fenced alignment required or not
> @@ -1885,7 +1886,7 @@ u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
>   * Return the required GTT alignment for an object, taking into account
>   * potential fence register mapping.
>   */
> -u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
> +u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
>  				int tiling_mode, bool fenced)
>  {
>  	GEM_BUG_ON(size == 0);
> @@ -1894,7 +1895,7 @@ u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
>  	 * Minimum alignment is 4k (GTT page size), but might be greater
>  	 * if a fence register is needed for the object.
>  	 */
> -	if (INTEL_GEN(dev) >= 4 || (!fenced && IS_G33(dev)) ||
> +	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
>  	    tiling_mode == I915_TILING_NONE)
>  		return 4096;
>  
> @@ -1902,7 +1903,7 @@ u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
>  	 * Previous chips need to be aligned to the size of the smallest
>  	 * fence register that can contain the object.
>  	 */
> -	return i915_gem_get_ggtt_size(dev, size, tiling_mode);
> +	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
>  }
>  
>  static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
> @@ -2991,14 +2992,14 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
>  
>  		view_size = i915_ggtt_view_size(obj, ggtt_view);
>  
> -		fence_size = i915_gem_get_ggtt_size(dev,
> +		fence_size = i915_gem_get_ggtt_size(dev_priv,
>  						    view_size,
>  						    obj->tiling_mode);
> -		fence_alignment = i915_gem_get_ggtt_alignment(dev,
> +		fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
>  							      view_size,
>  							      obj->tiling_mode,
>  							      true);
> -		unfenced_alignment = i915_gem_get_ggtt_alignment(dev,
> +		unfenced_alignment = i915_gem_get_ggtt_alignment(dev_priv,
>  								 view_size,
>  								 obj->tiling_mode,
>  								 false);
> @@ -3702,13 +3703,14 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
>  void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
>  {
>  	struct drm_i915_gem_object *obj = vma->obj;
> +	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>  	bool mappable, fenceable;
>  	u32 fence_size, fence_alignment;
>  
> -	fence_size = i915_gem_get_ggtt_size(obj->base.dev,
> +	fence_size = i915_gem_get_ggtt_size(dev_priv,
>  					    obj->base.size,
>  					    obj->tiling_mode);
> -	fence_alignment = i915_gem_get_ggtt_alignment(obj->base.dev,
> +	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
>  						      obj->base.size,
>  						      obj->tiling_mode,
>  						      true);
> @@ -3717,7 +3719,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
>  		     (vma->node.start & (fence_alignment - 1)) == 0);
>  
>  	mappable = (vma->node.start + fence_size <=
> -		    to_i915(obj->base.dev)->ggtt.mappable_end);
> +		    dev_priv->ggtt.mappable_end);
>  
>  	obj->map_and_fenceable = mappable && fenceable;
>  }
> diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
> index 4e42da691e4e..b7f9875f69b4 100644
> --- a/drivers/gpu/drm/i915/i915_gem_tiling.c
> +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
> @@ -117,15 +117,16 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
>  static bool
>  i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
>  {
> +	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>  	u32 size;
>  
>  	if (tiling_mode == I915_TILING_NONE)
>  		return true;
>  
> -	if (INTEL_INFO(obj->base.dev)->gen >= 4)
> +	if (INTEL_GEN(dev_priv) >= 4)
>  		return true;
>  
> -	if (IS_GEN3(obj->base.dev)) {
> +	if (IS_GEN3(dev_priv)) {
>  		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
>  			return false;
>  	} else {
> @@ -133,8 +134,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
>  			return false;
>  	}
>  
> -	size = i915_gem_get_ggtt_size(obj->base.dev,
> -				      obj->base.size, tiling_mode);
> +	size = i915_gem_get_ggtt_size(dev_priv, obj->base.size, tiling_mode);
>  	if (i915_gem_obj_ggtt_size(obj) != size)
>  		return false;
>  
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 61/73] drm/i915: Record allocated vma size
  2016-08-01  9:11 ` [PATCH 61/73] drm/i915: Record allocated vma size Chris Wilson
@ 2016-08-01 12:36   ` Joonas Lahtinen
  2016-08-01 12:44     ` Chris Wilson
  0 siblings, 1 reply; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 12:36 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2016-08-01 at 10:11 +0100, Chris Wilson wrote:
> Tracking the size of the VMA as allocated allows us to dramatically
> reduce the complexity of later functions (like inserting the VMA into
> the drm_mm range manager).
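
A sketch of what "size as allocated" means per view type, mirroring the
__i915_gem_vma_create() hunk further down (illustrative names;
PAGE_SHIFT is the real kernel constant):

static u64 sketch_vma_size(const struct sketch_object *obj,
                           const struct sketch_view *view)
{
        switch (view->type) {
        case SKETCH_VIEW_PARTIAL:
                return (u64)view->partial_pages << PAGE_SHIFT;
        case SKETCH_VIEW_ROTATED:
                return (u64)view->rotated_pages << PAGE_SHIFT;
        default:                        /* a normal view spans the object */
                return obj->size;
        }
}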
> 

I assume this new revision only removed the extra hunk of code, right?

If so,

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/i915_gem.c     | 103 ++++++++++++++----------------------
>  drivers/gpu/drm/i915/i915_gem_gtt.c |  67 ++++++++---------------
>  drivers/gpu/drm/i915/i915_gem_gtt.h |   5 +-
>  3 files changed, 63 insertions(+), 112 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
> index f11bf6c4f86a..d89fbee5c48a 100644
> --- a/drivers/gpu/drm/i915/i915_gem.c
> +++ b/drivers/gpu/drm/i915/i915_gem.c
> @@ -2976,53 +2976,36 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
>  			       u64 alignment,
>  			       u64 flags)
>  {
> -	struct drm_device *dev = obj->base.dev;
> -	struct drm_i915_private *dev_priv = to_i915(dev);
> -	u64 start, end;
> -	u32 search_flag, alloc_flag;
> +	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
>  	struct i915_vma *vma;
> +	u64 start, end;
> +	u64 min_alignment;
>  	int ret;
>  
> -	if (i915_is_ggtt(vm)) {
> -		u32 fence_size, fence_alignment, unfenced_alignment;
> -		u64 view_size;
> -
> -		if (WARN_ON(!ggtt_view))
> -			return ERR_PTR(-EINVAL);
> -
> -		view_size = i915_ggtt_view_size(obj, ggtt_view);
> -
> -		fence_size = i915_gem_get_ggtt_size(dev_priv,
> -						    view_size,
> -						    obj->tiling_mode);
> -		fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
> -							      view_size,
> -							      obj->tiling_mode,
> -							      true);
> -		unfenced_alignment = i915_gem_get_ggtt_alignment(dev_priv,
> -								 view_size,
> -								 obj->tiling_mode,
> -								 false);
> -		size = max(size, view_size);
> -		if (flags & PIN_MAPPABLE)
> -			size = max_t(u64, size, fence_size);
> -
> -		if (alignment == 0)
> -			alignment = flags & PIN_MAPPABLE ? fence_alignment :
> -				unfenced_alignment;
> -		if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
> -			DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
> -				  ggtt_view ? ggtt_view->type : 0,
> -				  (long long)alignment);
> -			return ERR_PTR(-EINVAL);
> -		}
> -	} else {
> -		size = max_t(u64, size, obj->base.size);
> -		alignment = 4096;
> +	vma = ggtt_view ?
> +		i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
> +		i915_gem_obj_lookup_or_create_vma(obj, vm);
> +	if (IS_ERR(vma))
> +		return vma;
> +
> +	size = max(size, vma->size);
> +	if (flags & PIN_MAPPABLE)
> +		size = i915_gem_get_ggtt_size(dev_priv, size, obj->tiling_mode);
> +
> +	min_alignment =
> +		i915_gem_get_ggtt_alignment(dev_priv, size, obj->tiling_mode,
> +					    flags & PIN_MAPPABLE);
> +	if (alignment == 0)
> +		alignment = min_alignment;
> +	if (alignment & (min_alignment - 1)) {
> +		DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
> +			  alignment, min_alignment);
> +		return ERR_PTR(-EINVAL);
>  	}
>  
>  	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
> -	end = vm->total;
> +
> +	end = vma->vm->total;
>  	if (flags & PIN_MAPPABLE)
>  		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
>  	if (flags & PIN_ZONE_4G)
> @@ -3033,8 +3016,7 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
>  	 * attempt to find space.
>  	 */
>  	if (size > end) {
> -		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
> -			  ggtt_view ? ggtt_view->type : 0,
> +		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
>  			  size, obj->base.size,
>  			  flags & PIN_MAPPABLE ? "mappable" : "total",
>  			  end);
> @@ -3047,31 +3029,27 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
>  
>  	i915_gem_object_pin_pages(obj);
>  
> -	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
> -			  i915_gem_obj_lookup_or_create_vma(obj, vm);
> -
> -	if (IS_ERR(vma))
> -		goto err_unpin;
> -
>  	if (flags & PIN_OFFSET_FIXED) {
>  		uint64_t offset = flags & PIN_OFFSET_MASK;
> -
> -		if (offset & (alignment - 1) || offset + size > end) {
> +		if (offset & (alignment - 1) || offset > end - size) {
>  			ret = -EINVAL;
> -			goto err_vma;
> +			goto err_unpin;
>  		}
> +
>  		vma->node.start = offset;
>  		vma->node.size = size;
>  		vma->node.color = obj->cache_level;
> -		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
> +		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
>  		if (ret) {
>  			ret = i915_gem_evict_for_vma(vma);
>  			if (ret == 0)
> -				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
> +				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
> +			if (ret)
> +				goto err_unpin;
>  		}
> -		if (ret)
> -			goto err_vma;
>  	} else {
> +		u32 search_flag, alloc_flag;
> +
>  		if (flags & PIN_HIGH) {
>  			search_flag = DRM_MM_SEARCH_BELOW;
>  			alloc_flag = DRM_MM_CREATE_TOP;
> @@ -3090,36 +3068,35 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
>  			alignment = 0;
>  
>  search_free:
> -		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
> +		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
> +							  &vma->node,
>  							  size, alignment,
>  							  obj->cache_level,
>  							  start, end,
>  							  search_flag,
>  							  alloc_flag);
>  		if (ret) {
> -			ret = i915_gem_evict_something(vm, size, alignment,
> +			ret = i915_gem_evict_something(vma->vm, size, alignment,
>  						       obj->cache_level,
>  						       start, end,
>  						       flags);
>  			if (ret == 0)
>  				goto search_free;
>  
> -			goto err_vma;
> +			goto err_unpin;
>  		}
>  	}
>  	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
>  
>  	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
> -	list_move_tail(&vma->vm_link, &vm->inactive_list);
> +	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
>  	obj->bind_count++;
>  
>  	return vma;
>  
> -err_vma:
> -	vma = ERR_PTR(ret);
>  err_unpin:
>  	i915_gem_object_unpin_pages(obj);
> -	return vma;
> +	return ERR_PTR(ret);
>  }
>  
>  bool
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
> index 31d13aea5461..ae6426e83ee0 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.c
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
> @@ -184,7 +184,7 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
>  {
>  	vma->vm->clear_range(vma->vm,
>  			     vma->node.start,
> -			     vma->obj->base.size,
> +			     vma->size,
>  			     true);
>  }
>  
> @@ -2697,28 +2697,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
>  
>  static void ggtt_unbind_vma(struct i915_vma *vma)
>  {
> -	struct drm_device *dev = vma->vm->dev;
> -	struct drm_i915_private *dev_priv = to_i915(dev);
> -	struct drm_i915_gem_object *obj = vma->obj;
> -	const uint64_t size = min_t(uint64_t,
> -				    obj->base.size,
> -				    vma->node.size);
> +	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
> +	const u64 size = min(vma->size, vma->node.size);
>  
> -	if (vma->bound & GLOBAL_BIND) {
> +	if (vma->bound & GLOBAL_BIND)
>  		vma->vm->clear_range(vma->vm,
> -				     vma->node.start,
> -				     size,
> +				     vma->node.start, size,
>  				     true);
> -	}
> -
> -	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
> -		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
>  
> +	if (vma->bound & LOCAL_BIND && appgtt)
>  		appgtt->base.clear_range(&appgtt->base,
> -					 vma->node.start,
> -					 size,
> +					 vma->node.start, size,
>  					 true);
> -	}
>  }
>  
>  void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
> @@ -3367,14 +3357,14 @@ void i915_vma_close(struct i915_vma *vma)
>  static struct i915_vma *
>  __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>  		      struct i915_address_space *vm,
> -		      const struct i915_ggtt_view *ggtt_view)
> +		      const struct i915_ggtt_view *view)
>  {
>  	struct i915_vma *vma;
>  	int i;
>  
>  	GEM_BUG_ON(vm->closed);
>  
> -	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
> +	if (WARN_ON(i915_is_ggtt(vm) != !!view))
>  		return ERR_PTR(-EINVAL);
>  
>  	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
> @@ -3388,12 +3378,22 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
>  	list_add(&vma->vm_link, &vm->unbound_list);
>  	vma->vm = vm;
>  	vma->obj = obj;
> +	vma->size = obj->base.size;
>  	vma->is_ggtt = i915_is_ggtt(vm);
>  
> -	if (i915_is_ggtt(vm))
> -		vma->ggtt_view = *ggtt_view;
> -	else
> +	if (i915_is_ggtt(vm)) {
> +		vma->ggtt_view = *view;
> +		if (view->type == I915_GGTT_VIEW_PARTIAL) {
> +			vma->size = view->params.partial.size;
> +			vma->size <<= PAGE_SHIFT;
> +		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
> +			vma->size =
> +				intel_rotation_info_size(&view->params.rotated);
> +			vma->size <<= PAGE_SHIFT;
> +		}
> +	} else {
>  		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
> +	}
>  
>  	list_add_tail(&vma->obj_link, &obj->vma_list);
>  
> @@ -3678,29 +3678,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
>  	return 0;
>  }
>  
> -/**
> - * i915_ggtt_view_size - Get the size of a GGTT view.
> - * @obj: Object the view is of.
> - * @view: The view in question.
> - *
> - * @return The size of the GGTT view in bytes.
> - */
> -size_t
> -i915_ggtt_view_size(struct drm_i915_gem_object *obj,
> -		    const struct i915_ggtt_view *view)
> -{
> -	if (view->type == I915_GGTT_VIEW_NORMAL) {
> -		return obj->base.size;
> -	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
> -		return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
> -	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
> -		return view->params.partial.size << PAGE_SHIFT;
> -	} else {
> -		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
> -		return obj->base.size;
> -	}
> -}
> -
>  void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
>  {
>  	void __iomem *ptr;
> diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
> index 2ebf07defa4e..2de6f613ac1e 100644
> --- a/drivers/gpu/drm/i915/i915_gem_gtt.h
> +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
> @@ -180,6 +180,7 @@ struct i915_vma {
>  	struct drm_i915_gem_object *obj;
>  	struct i915_address_space *vm;
>  	void __iomem *iomap;
> +	u64 size;
>  
>  	unsigned int active;
>  	struct i915_gem_active last_read[I915_NUM_ENGINES];
> @@ -610,10 +611,6 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
>  	return true;
>  }
>  
> -size_t
> -i915_ggtt_view_size(struct drm_i915_gem_object *obj,
> -		    const struct i915_ggtt_view *view);
> -
>  /**
>   * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
>   * @vma: VMA to iomap
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 61/73] drm/i915: Record allocated vma size
  2016-08-01 12:36   ` Joonas Lahtinen
@ 2016-08-01 12:44     ` Chris Wilson
  0 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01 12:44 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Mon, Aug 01, 2016 at 03:36:27PM +0300, Joonas Lahtinen wrote:
> On ma, 2016-08-01 at 10:11 +0100, Chris Wilson wrote:
> > Tracking the size of the VMA as allocated allows us to dramatically
> > reduce the complexity of later functions (like inserting the VMA in to
> > the drm_mm range manager).
> > 
> 
> I assume this new revision only removed the extra hunk of code, right?

Yeah, it was just rebased after the hunk was moved out. I've been lax in
not mentioning every single time the patches have been touched after a
rebase.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 70/73] drm/i915: Move obj->active:5 to obj->flags
  2016-08-01  9:11 ` [PATCH 70/73] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
@ 2016-08-01 12:46   ` Joonas Lahtinen
  0 siblings, 0 replies; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 12:46 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2016-08-01 at 10:11 +0100, Chris Wilson wrote:
> We are motivated to avoid using a bitfield for obj->active for a couple
> of reasons. Firstly, we wish to document our lockless read of obj->active
> using READ_ONCE inside i915_gem_busy_ioctl() and that requires an
> integral type (i.e. not a bitfield). Secondly, gcc produces abysmal code
> when presented with a bitfield and that shows up high on the profiles of
> request tracking (mainly due to excess memory traffic as it converts
> the bitfield to a register and back and generates frequent AGI in the
> process).
> 
> v2: BIT, break up a long line in compute the other engines, new paint
> for i915_gem_object_is_active (now i915_gem_object_get_active).
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
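
A minimal sketch of the move described above, with illustrative names
(BIT() and READ_ONCE() are the real kernel macros, everything else is a
stand-in): per-engine activity becomes a run of bits in an integral
flags word, which can then be read locklessly in a single load.

#define SKETCH_NUM_ENGINES      5
#define SKETCH_ACTIVE_MASK      (BIT(SKETCH_NUM_ENGINES) - 1)

struct sketch_object {
        unsigned long flags;            /* low bits: per-engine active */
};

static void sketch_set_active(struct sketch_object *obj, int engine)
{
        obj->flags |= BIT(engine);      /* under struct_mutex */
}

static unsigned long sketch_get_active(const struct sketch_object *obj)
{
        /* one integral load, safe for the lockless busy ioctl */
        return READ_ONCE(obj->flags) & SKETCH_ACTIVE_MASK;
}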

Patch reads much better now,

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 36/73] drm/i915: Refactor activity tracking for requests
  2016-08-01  9:10 ` [PATCH 36/73] drm/i915: Refactor activity tracking for requests Chris Wilson
@ 2016-08-01 12:52   ` Joonas Lahtinen
  0 siblings, 0 replies; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 12:52 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> With the introduction of requests, we amplified the number of atomic
> refcounted objects we use and update every execbuffer; from none to
> several references, and a set of references that need to be changed. We
> also introduced interesting side-effects in the order of retiring
> requests and objects.
> 
> Instead of independently tracking the last request for an object, track
> the active objects for each request. The object will reside in the
> buffer list of its most recent active request and so we reduce the kref
> interchange to a list_move. Now retirements are entirely driven by the
> request, dramatically simplifying activity tracking on the objects
> themselves, and removing the ambiguity between retiring objects and
> retiring requests.
> 
> Furthermore with the consolidation of managing the activity tracking
> centrally, we can look forward to using RCU to enable lockless lookup of
> the current active requests for an object. In the future, we will be
> able to query the status or wait upon rendering to an object without
> even touching the struct_mutex BKL.
> 
> All told, less code, simpler and faster, and more extensible.
> 
> v2: Add a typedef for the function pointer for convenience later.
> v3: Make the noop retirement callback explicit. Allow passing NULL to
> the init_gem_active() which is expanded to a common noop function.

s/init_gem_active/init_request_active/

Assuming you kept the changes to those,

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>

Regards, Joonas
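
A sketch of the request-centric tracking the commit message describes,
with illustrative types (the list helpers are the real <linux/list.h>
API): activity moves between requests as a bare list_move, and
retirement walks one list per request.

struct sketch_request {
        struct list_head active_list;   /* objects busy in this request */
};

struct sketch_object {
        struct list_head active_link;
};

static void sketch_move_to_active(struct sketch_object *obj,
                                  struct sketch_request *rq)
{
        /* the most recent request owns the object's activity node */
        list_move_tail(&obj->active_link, &rq->active_list);
}

static void sketch_retire(struct sketch_request *rq)
{
        struct sketch_object *obj, *next;

        list_for_each_entry_safe(obj, next, &rq->active_list, active_link)
                list_del_init(&obj->active_link);       /* now idle */
}
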
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-01 10:15     ` Chris Wilson
@ 2016-08-01 14:28       ` Joonas Lahtinen
  2016-08-01 16:17         ` Chris Wilson
  2016-08-01 16:32         ` Chris Wilson
  0 siblings, 2 replies; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 14:28 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

On ma, 2016-08-01 at 11:15 +0100, Chris Wilson wrote:
> On Mon, Aug 01, 2016 at 01:07:55PM +0300, Joonas Lahtinen wrote:
> > 
> > On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> > > 
> > > Space reservation is already safe with respect to the ring->size
> > > modulus, but hardware only expects to see values in the range
> > > 0...ring->size-1 (inclusive) and so requires the modulus to prevent us
> > > writing the value ring->size instead of 0. As this is only required for
> > > the register itself, we can defer the modulus to the register update and
> > > not perform it after every command packet. We keep the
> > > intel_ring_advance() around in the code to provide demarcation for the
> > > end-of-packet (which then can be compared against intel_ring_begin() as
> > > the number of dwords emitted must match the reserved space).
> > > 
> > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> > > Cc: Dave Gordon <david.s.gordon@intel.com>
> > > ---
> > >  drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
> > >  drivers/gpu/drm/i915/intel_ringbuffer.c |  6 ++++--
> > >  drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++++++++----
> > >  3 files changed, 18 insertions(+), 7 deletions(-)
> > > 
> > > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > > index bf42a66d6624..824f7efe4e64 100644
> > > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > > @@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
> > >  	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
> > >  	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
> > >  
> > > -	reg_state[CTX_RING_TAIL+1] = rq->tail;
> > > +	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
> > >  
> > >  	/* True 32b PPGTT with dynamic page allocation: update PDP
> > >  	 * registers and point the unallocated PDPs to scratch page.
> > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > index 3142085b5cc0..21d5e8209400 100644
> > > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > @@ -1718,7 +1718,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
> > >  {
> > >  	struct drm_i915_private *dev_priv = request->i915;
> > >  
> > > -	I915_WRITE_TAIL(request->engine, request->tail);
> > > +	I915_WRITE_TAIL(request->engine,
> > > +			intel_ring_offset(request->ring, request->tail));
> > >  }
> > >  
> > >  static void
> > > @@ -2505,7 +2506,8 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
> > >  		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
> > >  
> > >  	/* Now that the ring is fully powered up, update the tail */
> > > -	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
> > > +	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
> > > +		      intel_ring_offset(request->ring, request->tail));
> > >  	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
> > >  
> > >  	/* Let the ring send IDLE messages to the GT again,
> > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > > index 14d2ea36fb88..9ac96ddb01ee 100644
> > > --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> > > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > > @@ -460,14 +460,23 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
> > >  
> > >  static inline void intel_ring_advance(struct intel_ring *ring)
> > >  {
> > > +	/* Dummy function.
> > > +	 *
> > > +	 * This serves as a placeholder in the code so that the reader
> > > +	 * can compare against the preceding intel_ring_begin() and
> > > +	 * check that the number of dwords emitted matches the space
> > > +	 * reserved for the command packet (i.e. the value passed to
> > > +	 * intel_ring_begin()).
> > > +	 */
> > > +}
> > > +
> > > +static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
> > > +{
> > >  	/* The modulus is required so that we avoid writing
> > >  	 * request->tail == ring->size, rather than the expected 0,
> > >  	 * into the RING_TAIL register as that can cause a GPU hang.
> > > -	 * As this is only strictly required for the request->tail,
> > > -	 * and only then as we write the value into hardware, we can
> > > -	 * one day remove the modulus after every command packet.
> > >  	 */
> > > -	ring->tail &= ring->size - 1;
> > > +	return value & (ring->size - 1);
> > >  }
> > The comment seems outdated-ish as it speaks of modulus which is nowhere
> > to be seen. I'd speak of 'masking'. With that,
> The operation is a modulus, with the common optimisation for moduli
> with a power-of-two divisor applied.

Yes, I'm perfectly aware of that, but as discussed on IRC, that
information would be good to have locally. The modulus in this case is
a trick to achieve value == ring->size ? 0 : value; which would again
be more informative: we do not expect a wrap, but we do have one
special value.
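
To spell the special case out, a sketch of the helper under discussion
(mirroring intel_ring_offset(), with the divisor assumed to be a power
of two):

static u32 sketch_ring_offset(u32 value, u32 size)
{
        /* value lies in 0..size inclusive; hardware wants 0..size-1,
         * and for a power-of-two size the mask folds size back to 0
         * while leaving every in-range value untouched.
         */
        return value & (size - 1);
}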

Regards, Joonas

> -Chris
> 
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 14/73] drm/i915: Unify request submission
  2016-08-01  9:10 ` [PATCH 14/73] drm/i915: Unify request submission Chris Wilson
@ 2016-08-01 14:30   ` Joonas Lahtinen
  2016-08-01 17:17   ` [PATCH v2] " Chris Wilson
  1 sibling, 0 replies; 94+ messages in thread
From: Joonas Lahtinen @ 2016-08-01 14:30 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx

On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> Move request submission out of emit_request and into its own common
> vfunc, invoked from i915_add_request().
> 
> v2: Convert I915_DISPATCH_flags to BIT(x) whilst passing
> v3: Rename a few functions to match.
> v4: Reenable execlists submission after disabling guc.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-23-git-send-email-chris@chris-wilson.co.uk
> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> #v3

I'll keep mine, Dave might want to give a look too (CC'd).

Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
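
For readers skimming the diff below, a sketch of the resulting split
(illustrative types, not the driver's): emitting writes the commands
and breadcrumb into the ring, submission kicks the hardware, and the
backends differ only in the submit_request hook.

struct sketch_engine;

struct sketch_request {
        struct sketch_engine *engine;
};

struct sketch_engine {
        int (*emit_request)(struct sketch_request *rq);
        void (*submit_request)(struct sketch_request *rq);
};

static void sketch_add_request(struct sketch_request *rq)
{
        struct sketch_engine *engine = rq->engine;

        engine->emit_request(rq);       /* breadcrumb into the ring */
        engine->submit_request(rq);     /* TAIL write, ELSP or doorbell */
}

Switching between execlists and GuC submission is then just a rewrite
of engine->submit_request, as the guc hunks below do.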

> ---
>  drivers/gpu/drm/i915/i915_gem_request.c    |  8 +++-----
>  drivers/gpu/drm/i915/i915_guc_submission.c | 12 +++++++++---
>  drivers/gpu/drm/i915/intel_guc.h           |  1 -
>  drivers/gpu/drm/i915/intel_lrc.c           | 26 +++++++++++++++-----------
>  drivers/gpu/drm/i915/intel_lrc.h           |  2 ++
>  drivers/gpu/drm/i915/intel_ringbuffer.c    | 23 +++++++++--------------
>  drivers/gpu/drm/i915/intel_ringbuffer.h    | 27 +++++++++++++--------------
>  7 files changed, 51 insertions(+), 48 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index a885905df3bb..e378eb61979b 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -466,12 +466,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  	 */
>  	request->postfix = ring->tail;
>  
> -	if (i915.enable_execlists)
> -		ret = engine->emit_request(request);
> -	else
> -		ret = engine->add_request(request);
>  	/* Not allowed to fail! */
> -	WARN(ret, "emit|add_request failed: %d!\n", ret);
> +	ret = engine->emit_request(request);
> +	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
>  
>  	/* Sanity check that the reserved size was large enough. */
>  	ret = ring->tail - request_start;
> @@ -483,6 +480,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
>  		  reserved_tail, ret);
>  
>  	i915_gem_mark_busy(engine);
> +	engine->submit_request(request);
>  }
>  
>  static unsigned long local_clock_us(unsigned int *cpu)
> diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
> index eccd34832fe6..23bbd0ff86c7 100644
> --- a/drivers/gpu/drm/i915/i915_guc_submission.c
> +++ b/drivers/gpu/drm/i915/i915_guc_submission.c
> @@ -585,7 +585,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
>   * The only error here arises if the doorbell hardware isn't functioning
>   * as expected, which really shouldn't happen.
>   */
> -int i915_guc_submit(struct drm_i915_gem_request *rq)
> +static void i915_guc_submit(struct drm_i915_gem_request *rq)
>  {
>  	unsigned int engine_id = rq->engine->id;
>  	struct intel_guc *guc = &rq->i915->guc;
> @@ -602,8 +602,6 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
>  
>  	guc->submissions[engine_id] += 1;
>  	guc->last_seqno[engine_id] = rq->fence.seqno;
> -
> -	return b_ret;
>  }
>  
>  /*
> @@ -992,6 +990,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
>  {
>  	struct intel_guc *guc = &dev_priv->guc;
>  	struct i915_guc_client *client;
> +	struct intel_engine_cs *engine;
>  
>  	/* client for execbuf submission */
>  	client = guc_client_alloc(dev_priv,
> @@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
>  	host2guc_sample_forcewake(guc, client);
>  	guc_init_doorbell_hw(guc);
>  
> +	/* Take over from manual control of ELSP (execlists) */
> +	for_each_engine(engine, dev_priv)
> +		engine->submit_request = i915_guc_submit;
> +
>  	return 0;
>  }
>  
> @@ -1015,6 +1018,9 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
>  
>  	guc_client_free(dev_priv, guc->execbuf_client);
>  	guc->execbuf_client = NULL;
> +
> +	/* Revert back to manual ELSP submission */
> +	intel_execlists_enable_submission(dev_priv);
>  }
>  
>  void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
> diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
> index 3e3e743740c0..623cf26cd784 100644
> --- a/drivers/gpu/drm/i915/intel_guc.h
> +++ b/drivers/gpu/drm/i915/intel_guc.h
> @@ -160,7 +160,6 @@ extern int intel_guc_resume(struct drm_device *dev);
>  int i915_guc_submission_init(struct drm_i915_private *dev_priv);
>  int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
>  int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
> -int i915_guc_submit(struct drm_i915_gem_request *rq);
>  void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
>  void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
>  
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 824f7efe4e64..8a72ee86e825 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -738,7 +738,7 @@ err_unpin:
>  }
>  
>  /*
> - * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
> + * intel_logical_ring_advance() - advance the tail and prepare for submission
>   * @request: Request to advance the logical ringbuffer of.
>   *
>   * The tail is updated in our logical ringbuffer struct, not in the actual context. What
> @@ -747,7 +747,7 @@ err_unpin:
>   * point, the tail *inside* the context is updated and the ELSP written to.
>   */
>  static int
> -intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
> +intel_logical_ring_advance(struct drm_i915_gem_request *request)
>  {
>  	struct intel_ring *ring = request->ring;
>  	struct intel_engine_cs *engine = request->engine;
> @@ -773,12 +773,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
>  	 */
>  	request->previous_context = engine->last_context;
>  	engine->last_context = request->ctx;
> -
> -	if (i915.enable_guc_submission)
> -		i915_guc_submit(request);
> -	else
> -		execlists_context_queue(request);
> -
>  	return 0;
>  }
>  
> @@ -1770,7 +1764,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
>  	intel_ring_emit(ring, request->fence.seqno);
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
>  	intel_ring_emit(ring, MI_NOOP);
> -	return intel_logical_ring_advance_and_submit(request);
> +	return intel_logical_ring_advance(request);
>  }
>  
>  static int gen8_emit_request_render(struct drm_i915_gem_request *request)
> @@ -1801,7 +1795,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
>  	intel_ring_emit(ring, 0);
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
>  	intel_ring_emit(ring, MI_NOOP);
> -	return intel_logical_ring_advance_and_submit(request);
> +	return intel_logical_ring_advance(request);
>  }
>  
>  static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
> @@ -1902,13 +1896,23 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
>  	engine->i915 = NULL;
>  }
>  
> +void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
> +{
> +	struct intel_engine_cs *engine;
> +
> +	for_each_engine(engine, dev_priv)
> +		engine->submit_request = execlists_context_queue;
> +}
> +
>  static void
>  logical_ring_default_vfuncs(struct intel_engine_cs *engine)
>  {
>  	/* Default vfuncs which can be overriden by each engine. */
>  	engine->init_hw = gen8_init_common_ring;
> -	engine->emit_request = gen8_emit_request;
>  	engine->emit_flush = gen8_emit_flush;
> +	engine->emit_request = gen8_emit_request;
> +	engine->submit_request = execlists_context_queue;
> +
>  	engine->irq_enable = gen8_logical_ring_enable_irq;
>  	engine->irq_disable = gen8_logical_ring_disable_irq;
>  	engine->emit_bb_start = gen8_emit_bb_start;
> diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
> index 33e0193e5451..bdd764af6d06 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.h
> +++ b/drivers/gpu/drm/i915/intel_lrc.h
> @@ -95,6 +95,8 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
>  /* Execlists */
>  int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
>  				    int enable_execlists);
> +void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
> +
>  struct i915_execbuffer_params;
>  int intel_execlists_submission(struct i915_execbuffer_params *params,
>  			       struct drm_i915_gem_execbuffer2 *args,
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 21d5e8209400..abea42408b5a 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1428,15 +1428,14 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
>  }
>  
>  /**
> - * gen6_add_request - Update the semaphore mailbox registers
> + * gen6_emit_request - Update the semaphore mailbox registers
>   *
>   * @request - request to write to the ring
>   *
>   * Update the mailbox registers in the *other* rings with the current seqno.
>   * This acts like a signal in the canonical semaphore.
>   */
> -static int
> -gen6_add_request(struct drm_i915_gem_request *req)
> +static int gen6_emit_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_engine_cs *engine = req->engine;
>  	struct intel_ring *ring = req->ring;
> @@ -1457,13 +1456,11 @@ gen6_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_advance(ring);
>  
>  	req->tail = ring->tail;
> -	engine->submit_request(req);
>  
>  	return 0;
>  }
>  
> -static int
> -gen8_render_add_request(struct drm_i915_gem_request *req)
> +static int gen8_render_emit_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_engine_cs *engine = req->engine;
>  	struct intel_ring *ring = req->ring;
> @@ -1487,9 +1484,9 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_emit(ring, 0);
>  	intel_ring_emit(ring, MI_USER_INTERRUPT);
>  	intel_ring_emit(ring, MI_NOOP);
> +	intel_ring_advance(ring);
>  
>  	req->tail = ring->tail;
> -	engine->submit_request(req);
>  
>  	return 0;
>  }
> @@ -1692,8 +1689,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
>  	return 0;
>  }
>  
> -static int
> -i9xx_add_request(struct drm_i915_gem_request *req)
> +static int i9xx_emit_request(struct drm_i915_gem_request *req)
>  {
>  	struct intel_ring *ring = req->ring;
>  	int ret;
> @@ -1709,7 +1705,6 @@ i9xx_add_request(struct drm_i915_gem_request *req)
>  	intel_ring_advance(ring);
>  
>  	req->tail = ring->tail;
> -	req->engine->submit_request(req);
>  
>  	return 0;
>  }
> @@ -2812,11 +2807,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
>  				      struct intel_engine_cs *engine)
>  {
>  	engine->init_hw = init_ring_common;
> -	engine->submit_request = i9xx_submit_request;
>  
> -	engine->add_request = i9xx_add_request;
> +	engine->emit_request = i9xx_emit_request;
>  	if (INTEL_GEN(dev_priv) >= 6)
> -		engine->add_request = gen6_add_request;
> +		engine->emit_request = gen6_emit_request;
> +	engine->submit_request = i9xx_submit_request;
>  
>  	if (INTEL_GEN(dev_priv) >= 8)
>  		engine->emit_bb_start = gen8_emit_bb_start;
> @@ -2845,7 +2840,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
>  
>  	if (INTEL_GEN(dev_priv) >= 8) {
>  		engine->init_context = intel_rcs_ctx_init;
> -		engine->add_request = gen8_render_add_request;
> +		engine->emit_request = gen8_render_emit_request;
>  		engine->emit_flush = gen8_render_ring_flush;
>  		if (i915.semaphores)
>  			engine->semaphore.signal = gen8_rcs_signal;
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 9ac96ddb01ee..5e06d6fbd506 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -204,7 +204,19 @@ struct intel_engine_cs {
>  
>  	int		(*init_context)(struct drm_i915_gem_request *req);
>  
> -	int		(*add_request)(struct drm_i915_gem_request *req);
> +	int		(*emit_flush)(struct drm_i915_gem_request *request,
> +				      u32 mode);
> +#define EMIT_INVALIDATE	BIT(0)
> +#define EMIT_FLUSH	BIT(1)
> +#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
> +	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
> +					 u64 offset, u32 length,
> +					 unsigned int dispatch_flags);
> +#define I915_DISPATCH_SECURE BIT(0)
> +#define I915_DISPATCH_PINNED BIT(1)
> +#define I915_DISPATCH_RS     BIT(2)
> +	int		(*emit_request)(struct drm_i915_gem_request *req);
> +	void		(*submit_request)(struct drm_i915_gem_request *req);
>  	/* Some chipsets are not quite as coherent as advertised and need
>  	 * an expensive kick to force a true read of the up-to-date seqno.
>  	 * However, the up-to-date seqno is not always required and the last
> @@ -282,19 +294,6 @@ struct intel_engine_cs {
>  	unsigned int idle_lite_restore_wa;
>  	bool disable_lite_restore_wa;
>  	u32 ctx_desc_template;
> -	int		(*emit_request)(struct drm_i915_gem_request *request);
> -	int		(*emit_flush)(struct drm_i915_gem_request *request,
> -				      u32 mode);
> -#define EMIT_INVALIDATE	BIT(0)
> -#define EMIT_FLUSH	BIT(1)
> -#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
> -	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
> -					 u64 offset, u32 length,
> -					 unsigned int dispatch_flags);
> -#define I915_DISPATCH_SECURE 0x1
> -#define I915_DISPATCH_PINNED 0x2
> -#define I915_DISPATCH_RS     0x4
> -	void		(*submit_request)(struct drm_i915_gem_request *req);
>  
>  	/**
>  	 * List of objects currently involved in rendering from the
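
The final hunks above also convert the I915_DISPATCH_* flags from
literal hex to BIT(); a minimal standalone check of that equivalence,
assuming the kernel's usual BIT() definition:

#include <assert.h>

#define BIT(nr) (1UL << (nr))	/* as defined in the kernel headers */

int main(void)
{
	assert(BIT(0) == 0x1);	/* I915_DISPATCH_SECURE */
	assert(BIT(1) == 0x2);	/* I915_DISPATCH_PINNED */
	assert(BIT(2) == 0x4);	/* I915_DISPATCH_RS */
	return 0;
}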
-- 
Joonas Lahtinen
Open Source Technology Center
Intel Corporation
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-01 14:28       ` Joonas Lahtinen
@ 2016-08-01 16:17         ` Chris Wilson
  2016-08-01 16:23           ` Chris Wilson
  2016-08-01 16:32         ` Chris Wilson
  1 sibling, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01 16:17 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Mon, Aug 01, 2016 at 05:28:34PM +0300, Joonas Lahtinen wrote:
> On ma, 2016-08-01 at 11:15 +0100, Chris Wilson wrote:
> > On Mon, Aug 01, 2016 at 01:07:55PM +0300, Joonas Lahtinen wrote:
> > > 
> > > On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> > > > 
> > > > Space reservation is already safe with respect to the ring->size
> > > > modulus, but hardware only expects to see values in the range
> > > > 0...ring->size-1 (inclusive) and so requires the modulus to prevent us
> > > > writing the value ring->size instead of 0. As this is only required for
> > > > the register itself, we can defer the modulus to the register update and
> > > > not perform it after every command packet. We keep the
> > > > intel_ring_advance() around in the code to provide demarcation for the
> > > > end-of-packet (which then can be compared against intel_ring_begin() as
> > > > the number of dwords emitted must match the reserved space).
> > > > 
> > > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > > Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> > > > Cc: Dave Gordon <david.s.gordon@intel.com>
> > > > ---
> > > >  drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
> > > >  drivers/gpu/drm/i915/intel_ringbuffer.c |  6 ++++--
> > > >  drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++++++++----
> > > >  3 files changed, 18 insertions(+), 7 deletions(-)
> > > > 
> > > > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > > > index bf42a66d6624..824f7efe4e64 100644
> > > > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > > > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > > > @@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
> > > >  	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
> > > >  	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
> > > >  
> > > > -	reg_state[CTX_RING_TAIL+1] = rq->tail;
> > > > +	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
> > > >  
> > > >  	/* True 32b PPGTT with dynamic page allocation: update PDP
> > > >  	 * registers and point the unallocated PDPs to scratch page.
> > > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > > index 3142085b5cc0..21d5e8209400 100644
> > > > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > > @@ -1718,7 +1718,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
> > > >  {
> > > >  	struct drm_i915_private *dev_priv = request->i915;
> > > >  
> > > > -	I915_WRITE_TAIL(request->engine, request->tail);
> > > > +	I915_WRITE_TAIL(request->engine,
> > > > +			intel_ring_offset(request->ring, request->tail));
> > > >  }
> > > >  
> > > >  static void
> > > > @@ -2505,7 +2506,8 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
> > > >  		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
> > > >  
> > > >  	/* Now that the ring is fully powered up, update the tail */
> > > > -	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
> > > > +	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
> > > > +		      intel_ring_offset(request->ring, request->tail));
> > > >  	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
> > > >  
> > > >  	/* Let the ring send IDLE messages to the GT again,
> > > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > > > index 14d2ea36fb88..9ac96ddb01ee 100644
> > > > --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> > > > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > > > @@ -460,14 +460,23 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
> > > >  
> > > >  static inline void intel_ring_advance(struct intel_ring *ring)
> > > >  {
> > > > +	/* Dummy function.
> > > > +	 *
> > > > +	 * This serves as a placeholder in the code so that the reader
> > > > +	 * can compare against the preceding intel_ring_begin() and
> > > > +	 * check that the number of dwords emitted matches the space
> > > > +	 * reserved for the command packet (i.e. the value passed to
> > > > +	 * intel_ring_begin()).
> > > > +	 */
> > > > +}
> > > > +
> > > > +static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
> > > > +{
> > > >  	/* The modulus is required so that we avoid writing
> > > >  	 * request->tail == ring->size, rather than the expected 0,
> > > >  	 * into the RING_TAIL register as that can cause a GPU hang.
> > > > -	 * As this is only strictly required for the request->tail,
> > > > -	 * and only then as we write the value into hardware, we can
> > > > -	 * one day remove the modulus after every command packet.
> > > >  	 */
> > > > -	ring->tail &= ring->size - 1;
> > > > +	return value & (ring->size - 1);
> > > >  }
> > > The comment seems outdated-ish as it speaks of modulus which is nowhere
> > > to be seen. I'd speak of 'masking'. With that,
> > The operation is a modulus. The common optimisation for moduli with
> > a power-of-two divisor is being applied.
> 
> Yes, I'm perfectly aware of that, but as discussed on IRC, that
> information would be good to have locally. The modulus in this case is
> a trick to achieve value == ring->size ? 0 : value; which would again
> be more informative: we do not expect a wrap, but we have one special
> value.

I don't view it as a trick. The ring is formed by the modulus. You
might argue that because we haven't strictly applied the modulus before
this point, the trick is that we've optimised away the modulus for the
normal wrap case, leaving only this single value. Following that
reasoning, if we just:

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 15acaf6..d54c210 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2370,7 +2370,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
 
        total_bytes = bytes + req->reserved_space;
 
-       if (unlikely(bytes > remain_usable)) {
+       if (unlikely(bytes >= remain_usable)) {

we can make this late modulus vanish.
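
For reference, the identity behind the masking itself: a minimal
standalone sketch, assuming (as the driver does) that the ring size is
a power of two. The names here are illustrative, not the driver's:

#include <assert.h>
#include <stdint.h>

/* For a power-of-two size, value % size reduces to a mask, which also
 * maps the one out-of-range tail value (size itself) back to 0.
 */
static uint32_t ring_offset(uint32_t value, uint32_t size)
{
	return value & (size - 1);
}

int main(void)
{
	const uint32_t size = 4096;	/* hypothetical ring size */

	assert(ring_offset(size - 4, size) == size - 4); /* in range: unchanged */
	assert(ring_offset(size, size) == 0);            /* the special value */
	return 0;
}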
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* Re: [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-01 16:17         ` Chris Wilson
@ 2016-08-01 16:23           ` Chris Wilson
  0 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01 16:23 UTC (permalink / raw)
  To: Joonas Lahtinen, intel-gfx, Dave Gordon

On Mon, Aug 01, 2016 at 05:17:51PM +0100, Chris Wilson wrote:
> Following that reasoning, if we just:
> 
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 15acaf6..d54c210 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -2370,7 +2370,7 @@ int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
>  
>         total_bytes = bytes + req->reserved_space;
>  
> -       if (unlikely(bytes > remain_usable)) {
> +       if (unlikely(bytes >= remain_usable)) {
> 
> we can make this late modulus vanish.

Sorry, that idea is wrong at present, as we need to fix up the tail
afterwards. It's not until later in the series that we talk about moving
the tail advancement into the equivalent of intel_ring_begin().
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-01 14:28       ` Joonas Lahtinen
  2016-08-01 16:17         ` Chris Wilson
@ 2016-08-01 16:32         ` Chris Wilson
  2016-08-02  9:42           ` Dave Gordon
  1 sibling, 1 reply; 94+ messages in thread
From: Chris Wilson @ 2016-08-01 16:32 UTC (permalink / raw)
  To: Joonas Lahtinen; +Cc: intel-gfx

On Mon, Aug 01, 2016 at 05:28:34PM +0300, Joonas Lahtinen wrote:
> On ma, 2016-08-01 at 11:15 +0100, Chris Wilson wrote:
> > On Mon, Aug 01, 2016 at 01:07:55PM +0300, Joonas Lahtinen wrote:
> > > 
> > > On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> > > > 
> > > > Space reservation is already safe with respect to the ring->size
> > > > modulus, but hardware only expects to see values in the range
> > > > 0...ring->size-1 (inclusive) and so requires the modulus to prevent us
> > > > writing the value ring->size instead of 0. As this is only required for
> > > > the register itself, we can defer the modulus to the register update and
> > > > not perform it after every command packet. We keep the
> > > > intel_ring_advance() around in the code to provide demarcation for the
> > > > end-of-packet (which then can be compared against intel_ring_begin() as
> > > > the number of dwords emitted must match the reserved space).
> > > > 
> > > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > > > Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> > > > Cc: Dave Gordon <david.s.gordon@intel.com>
> > > > ---
> > > >  drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
> > > >  drivers/gpu/drm/i915/intel_ringbuffer.c |  6 ++++--
> > > >  drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++++++++----
> > > >  3 files changed, 18 insertions(+), 7 deletions(-)
> > > > 
> > > > diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> > > > index bf42a66d6624..824f7efe4e64 100644
> > > > --- a/drivers/gpu/drm/i915/intel_lrc.c
> > > > +++ b/drivers/gpu/drm/i915/intel_lrc.c
> > > > @@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
> > > >  	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
> > > >  	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
> > > >  
> > > > -	reg_state[CTX_RING_TAIL+1] = rq->tail;
> > > > +	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
> > > >  
> > > >  	/* True 32b PPGTT with dynamic page allocation: update PDP
> > > >  	 * registers and point the unallocated PDPs to scratch page.
> > > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > > index 3142085b5cc0..21d5e8209400 100644
> > > > --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> > > > @@ -1718,7 +1718,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
> > > >  {
> > > >  	struct drm_i915_private *dev_priv = request->i915;
> > > >  
> > > > -	I915_WRITE_TAIL(request->engine, request->tail);
> > > > +	I915_WRITE_TAIL(request->engine,
> > > > +			intel_ring_offset(request->ring, request->tail));
> > > >  }
> > > >  
> > > >  static void
> > > > @@ -2505,7 +2506,8 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
> > > >  		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
> > > >  
> > > >  	/* Now that the ring is fully powered up, update the tail */
> > > > -	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
> > > > +	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
> > > > +		      intel_ring_offset(request->ring, request->tail));
> > > >  	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
> > > >  
> > > >  	/* Let the ring send IDLE messages to the GT again,
> > > > diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > > > index 14d2ea36fb88..9ac96ddb01ee 100644
> > > > --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> > > > +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> > > > @@ -460,14 +460,23 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
> > > >  
> > > >  static inline void intel_ring_advance(struct intel_ring *ring)
> > > >  {
> > > > +	/* Dummy function.
> > > > +	 *
> > > > +	 * This serves as a placeholder in the code so that the reader
> > > > +	 * can compare against the preceding intel_ring_begin() and
> > > > +	 * check that the number of dwords emitted matches the space
> > > > +	 * reserved for the command packet (i.e. the value passed to
> > > > +	 * intel_ring_begin()).
> > > > +	 */
> > > > +}
> > > > +
> > > > +static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
> > > > +{
> > > >  	/* The modulus is required so that we avoid writing
> > > >  	 * request->tail == ring->size, rather than the expected 0,
> > > >  	 * into the RING_TAIL register as that can cause a GPU hang.
> > > > -	 * As this is only strictly required for the request->tail,
> > > > -	 * and only then as we write the value into hardware, we can
> > > > -	 * one day remove the modulus after every command packet.
> > > >  	 */
> > > > -	ring->tail &= ring->size - 1;
> > > > +	return value & (ring->size - 1);
> > > >  }
> > > The comment seems outdated-ish as it speaks of modulus which is nowhere
> > > to be seen. I'd speak of 'masking'. With that,
> > The operation is a modulus. The common optimisation for moduli with
> > a power-of-two divisor is being applied.
> 
> Yes, I'm perfectly aware of that, but as discussed on IRC, that
> information would be good to have locally. The modulus in this case is
> a trick to achieve value == ring->size ? 0 : value; which would again
> be more informative: we do not expect a wrap, but we have one special
> value.

Ditch all the verbiage, and go with the single line:
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
that just fits into 80cols.

Keep it to the why, not the how, and we need not argue about language.
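
For concreteness, that is the helper from the hunk above with the
one-liner dropped in:

static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
{
	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
	return value & (ring->size - 1);
}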
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* [PATCH v2] drm/i915: Unify request submission
  2016-08-01  9:10 ` [PATCH 14/73] drm/i915: Unify request submission Chris Wilson
  2016-08-01 14:30   ` Joonas Lahtinen
@ 2016-08-01 17:17   ` Chris Wilson
  1 sibling, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-01 17:17 UTC (permalink / raw)
  To: intel-gfx

Move request submission from emit_request into its own common vfunc
from i915_add_request().

v2: Convert I915_DISPATCH_flags to BIT(x) whilst passing
v3: Rename a few functions to match.
v4: Reenable execlists submission after disabling guc.
v5: Be aware that everyone calls i915_guc_submission_disable()!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-23-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> #v3
---
 drivers/gpu/drm/i915/i915_gem_request.c    |  8 +++-----
 drivers/gpu/drm/i915/i915_guc_submission.c | 15 ++++++++++++---
 drivers/gpu/drm/i915/intel_guc.h           |  1 -
 drivers/gpu/drm/i915/intel_lrc.c           | 26 +++++++++++++++-----------
 drivers/gpu/drm/i915/intel_lrc.h           |  2 ++
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 23 +++++++++--------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    | 27 +++++++++++++--------------
 7 files changed, 54 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index a885905df3bb..e378eb61979b 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -466,12 +466,9 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 */
 	request->postfix = ring->tail;
 
-	if (i915.enable_execlists)
-		ret = engine->emit_request(request);
-	else
-		ret = engine->add_request(request);
 	/* Not allowed to fail! */
-	WARN(ret, "emit|add_request failed: %d!\n", ret);
+	ret = engine->emit_request(request);
+	WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
 
 	/* Sanity check that the reserved size was large enough. */
 	ret = ring->tail - request_start;
@@ -483,6 +480,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 		  reserved_tail, ret);
 
 	i915_gem_mark_busy(engine);
+	engine->submit_request(request);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index eccd34832fe6..5de867585275 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -585,7 +585,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
  * The only error here arises if the doorbell hardware isn't functioning
  * as expected, which really shouldn't happen.
  */
-int i915_guc_submit(struct drm_i915_gem_request *rq)
+static void i915_guc_submit(struct drm_i915_gem_request *rq)
 {
 	unsigned int engine_id = rq->engine->id;
 	struct intel_guc *guc = &rq->i915->guc;
@@ -602,8 +602,6 @@ int i915_guc_submit(struct drm_i915_gem_request *rq)
 
 	guc->submissions[engine_id] += 1;
 	guc->last_seqno[engine_id] = rq->fence.seqno;
-
-	return b_ret;
 }
 
 /*
@@ -992,6 +990,7 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 	struct i915_guc_client *client;
+	struct intel_engine_cs *engine;
 
 	/* client for execbuf submission */
 	client = guc_client_alloc(dev_priv,
@@ -1006,6 +1005,10 @@ int i915_guc_submission_enable(struct drm_i915_private *dev_priv)
 	host2guc_sample_forcewake(guc, client);
 	guc_init_doorbell_hw(guc);
 
+	/* Take over from manual control of ELSP (execlists) */
+	for_each_engine(engine, dev_priv)
+		engine->submit_request = i915_guc_submit;
+
 	return 0;
 }
 
@@ -1013,8 +1016,14 @@ void i915_guc_submission_disable(struct drm_i915_private *dev_priv)
 {
 	struct intel_guc *guc = &dev_priv->guc;
 
+	if (!guc->execbuf_client)
+		return;
+
 	guc_client_free(dev_priv, guc->execbuf_client);
 	guc->execbuf_client = NULL;
+
+	/* Revert back to manual ELSP submission */
+	intel_execlists_enable_submission(dev_priv);
 }
 
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index 3e3e743740c0..623cf26cd784 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -160,7 +160,6 @@ extern int intel_guc_resume(struct drm_device *dev);
 int i915_guc_submission_init(struct drm_i915_private *dev_priv);
 int i915_guc_submission_enable(struct drm_i915_private *dev_priv);
 int i915_guc_wq_check_space(struct drm_i915_gem_request *rq);
-int i915_guc_submit(struct drm_i915_gem_request *rq);
 void i915_guc_submission_disable(struct drm_i915_private *dev_priv);
 void i915_guc_submission_fini(struct drm_i915_private *dev_priv);
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 824f7efe4e64..8a72ee86e825 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -738,7 +738,7 @@ err_unpin:
 }
 
 /*
- * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
+ * intel_logical_ring_advance() - advance the tail and prepare for submission
  * @request: Request to advance the logical ringbuffer of.
  *
  * The tail is updated in our logical ringbuffer struct, not in the actual context. What
@@ -747,7 +747,7 @@ err_unpin:
  * point, the tail *inside* the context is updated and the ELSP written to.
  */
 static int
-intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
+intel_logical_ring_advance(struct drm_i915_gem_request *request)
 {
 	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
@@ -773,12 +773,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
 	 */
 	request->previous_context = engine->last_context;
 	engine->last_context = request->ctx;
-
-	if (i915.enable_guc_submission)
-		i915_guc_submit(request);
-	else
-		execlists_context_queue(request);
-
 	return 0;
 }
 
@@ -1770,7 +1764,7 @@ static int gen8_emit_request(struct drm_i915_gem_request *request)
 	intel_ring_emit(ring, request->fence.seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
-	return intel_logical_ring_advance_and_submit(request);
+	return intel_logical_ring_advance(request);
 }
 
 static int gen8_emit_request_render(struct drm_i915_gem_request *request)
@@ -1801,7 +1795,7 @@ static int gen8_emit_request_render(struct drm_i915_gem_request *request)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
-	return intel_logical_ring_advance_and_submit(request);
+	return intel_logical_ring_advance(request);
 }
 
 static int intel_lr_context_render_state_init(struct drm_i915_gem_request *req)
@@ -1902,13 +1896,23 @@ void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
 	engine->i915 = NULL;
 }
 
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, dev_priv)
+		engine->submit_request = execlists_context_queue;
+}
+
 static void
 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
 {
 	/* Default vfuncs which can be overriden by each engine. */
 	engine->init_hw = gen8_init_common_ring;
-	engine->emit_request = gen8_emit_request;
 	engine->emit_flush = gen8_emit_flush;
+	engine->emit_request = gen8_emit_request;
+	engine->submit_request = execlists_context_queue;
+
 	engine->irq_enable = gen8_logical_ring_enable_irq;
 	engine->irq_disable = gen8_logical_ring_disable_irq;
 	engine->emit_bb_start = gen8_emit_bb_start;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 33e0193e5451..bdd764af6d06 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -95,6 +95,8 @@ uint64_t intel_lr_context_descriptor(struct i915_gem_context *ctx,
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_i915_private *dev_priv,
 				    int enable_execlists);
+void intel_execlists_enable_submission(struct drm_i915_private *dev_priv);
+
 struct i915_execbuffer_params;
 int intel_execlists_submission(struct i915_execbuffer_params *params,
 			       struct drm_i915_gem_execbuffer2 *args,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index a03935338019..8250db767a1a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1428,15 +1428,14 @@ static int gen6_signal(struct drm_i915_gem_request *signaller_req,
 }
 
 /**
- * gen6_add_request - Update the semaphore mailbox registers
+ * gen6_emit_request - Update the semaphore mailbox registers
  *
  * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
-static int
-gen6_add_request(struct drm_i915_gem_request *req)
+static int gen6_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
@@ -1457,13 +1456,11 @@ gen6_add_request(struct drm_i915_gem_request *req)
 	intel_ring_advance(ring);
 
 	req->tail = ring->tail;
-	engine->submit_request(req);
 
 	return 0;
 }
 
-static int
-gen8_render_add_request(struct drm_i915_gem_request *req)
+static int gen8_render_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
@@ -1487,9 +1484,9 @@ gen8_render_add_request(struct drm_i915_gem_request *req)
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
 
 	req->tail = ring->tail;
-	engine->submit_request(req);
 
 	return 0;
 }
@@ -1692,8 +1689,7 @@ bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
 	return 0;
 }
 
-static int
-i9xx_add_request(struct drm_i915_gem_request *req)
+static int i9xx_emit_request(struct drm_i915_gem_request *req)
 {
 	struct intel_ring *ring = req->ring;
 	int ret;
@@ -1709,7 +1705,6 @@ i9xx_add_request(struct drm_i915_gem_request *req)
 	intel_ring_advance(ring);
 
 	req->tail = ring->tail;
-	req->engine->submit_request(req);
 
 	return 0;
 }
@@ -2814,11 +2809,11 @@ static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
 				      struct intel_engine_cs *engine)
 {
 	engine->init_hw = init_ring_common;
-	engine->submit_request = i9xx_submit_request;
 
-	engine->add_request = i9xx_add_request;
+	engine->emit_request = i9xx_emit_request;
 	if (INTEL_GEN(dev_priv) >= 6)
-		engine->add_request = gen6_add_request;
+		engine->emit_request = gen6_emit_request;
+	engine->submit_request = i9xx_submit_request;
 
 	if (INTEL_GEN(dev_priv) >= 8)
 		engine->emit_bb_start = gen8_emit_bb_start;
@@ -2847,7 +2842,7 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->add_request = gen8_render_add_request;
+		engine->emit_request = gen8_render_emit_request;
 		engine->emit_flush = gen8_render_ring_flush;
 		if (i915.semaphores)
 			engine->semaphore.signal = gen8_rcs_signal;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 7d4a2814b331..5f44000f7138 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -204,7 +204,19 @@ struct intel_engine_cs {
 
 	int		(*init_context)(struct drm_i915_gem_request *req);
 
-	int		(*add_request)(struct drm_i915_gem_request *req);
+	int		(*emit_flush)(struct drm_i915_gem_request *request,
+				      u32 mode);
+#define EMIT_INVALIDATE	BIT(0)
+#define EMIT_FLUSH	BIT(1)
+#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
+					 u64 offset, u32 length,
+					 unsigned int dispatch_flags);
+#define I915_DISPATCH_SECURE BIT(0)
+#define I915_DISPATCH_PINNED BIT(1)
+#define I915_DISPATCH_RS     BIT(2)
+	int		(*emit_request)(struct drm_i915_gem_request *req);
+	void		(*submit_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last
@@ -282,19 +294,6 @@ struct intel_engine_cs {
 	unsigned int idle_lite_restore_wa;
 	bool disable_lite_restore_wa;
 	u32 ctx_desc_template;
-	int		(*emit_request)(struct drm_i915_gem_request *request);
-	int		(*emit_flush)(struct drm_i915_gem_request *request,
-				      u32 mode);
-#define EMIT_INVALIDATE	BIT(0)
-#define EMIT_FLUSH	BIT(1)
-#define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)
-	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
-					 u64 offset, u32 length,
-					 unsigned int dispatch_flags);
-#define I915_DISPATCH_SECURE 0x1
-#define I915_DISPATCH_PINNED 0x2
-#define I915_DISPATCH_RS     0x4
-	void		(*submit_request)(struct drm_i915_gem_request *req);
 
 	/**
 	 * List of objects currently involved in rendering from the
-- 
2.8.1
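
Taken together, the patch leaves request construction with a fixed
two-step flow: emit_request() writes the closing breadcrumb into the
ring, then submit_request() hands the request to whichever backend is
active. A schematic sketch of that flow, using simplified stand-in
types rather than the driver's exact definitions:

struct request;

struct engine {
	/* write the breadcrumb commands into the ring */
	int  (*emit_request)(struct request *rq);
	/* kick the hardware: legacy TAIL write, ELSP queue or GuC */
	void (*submit_request)(struct request *rq);
};

static void add_request(struct engine *engine, struct request *rq)
{
	/* Not allowed to fail: ring space was reserved up front
	 * (the real driver WARNs rather than unwinds).
	 */
	engine->emit_request(rq);

	/* A single indirect call, so GuC, execlists and legacy ring
	 * submission can be swapped at runtime by repointing
	 * submit_request, as i915_guc_submission_enable/disable do
	 * in the diff above.
	 */
	engine->submit_request(rq);
}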

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 94+ messages in thread

* Re: [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-01 16:32         ` Chris Wilson
@ 2016-08-02  9:42           ` Dave Gordon
  2016-08-02 10:14             ` Chris Wilson
  0 siblings, 1 reply; 94+ messages in thread
From: Dave Gordon @ 2016-08-02  9:42 UTC (permalink / raw)
  To: Chris Wilson, Joonas Lahtinen, intel-gfx

On 01/08/16 17:32, Chris Wilson wrote:
> On Mon, Aug 01, 2016 at 05:28:34PM +0300, Joonas Lahtinen wrote:
>> On ma, 2016-08-01 at 11:15 +0100, Chris Wilson wrote:
>>> On Mon, Aug 01, 2016 at 01:07:55PM +0300, Joonas Lahtinen wrote:
>>>>
>>>> On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
>>>>>
>>>>> Space reservation is already safe with respect to the ring->size
>>>>> modulus, but hardware only expects to see values in the range
>>>>> 0...ring->size-1 (inclusive) and so requires the modulus to prevent us
>>>>> writing the value ring->size instead of 0. As this is only required for
>>>>> the register itself, we can defer the modulus to the register update and
>>>>> not perform it after every command packet. We keep the
>>>>> intel_ring_advance() around in the code to provide demarcation for the
>>>>> end-of-packet (which then can be compared against intel_ring_begin() as
>>>>> the number of dwords emitted must match the reserved space).
>>>>>
>>>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>>>> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
>>>>> Cc: Dave Gordon <david.s.gordon@intel.com>
>>>>> ---
>>>>>  drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
>>>>>  drivers/gpu/drm/i915/intel_ringbuffer.c |  6 ++++--
>>>>>  drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++++++++----
>>>>>  3 files changed, 18 insertions(+), 7 deletions(-)
>>>>>
>>>>> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
>>>>> index bf42a66d6624..824f7efe4e64 100644
>>>>> --- a/drivers/gpu/drm/i915/intel_lrc.c
>>>>> +++ b/drivers/gpu/drm/i915/intel_lrc.c
>>>>> @@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
>>>>>  	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
>>>>>  	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
>>>>>
>>>>> -	reg_state[CTX_RING_TAIL+1] = rq->tail;
>>>>> +	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
>>>>>
>>>>>  	/* True 32b PPGTT with dynamic page allocation: update PDP
>>>>>  	 * registers and point the unallocated PDPs to scratch page.
>>>>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
>>>>> index 3142085b5cc0..21d5e8209400 100644
>>>>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
>>>>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
>>>>> @@ -1718,7 +1718,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
>>>>>  {
>>>>>  	struct drm_i915_private *dev_priv = request->i915;
>>>>>
>>>>> -	I915_WRITE_TAIL(request->engine, request->tail);
>>>>> +	I915_WRITE_TAIL(request->engine,
>>>>> +			intel_ring_offset(request->ring, request->tail));
>>>>>  }
>>>>>
>>>>>  static void
>>>>> @@ -2505,7 +2506,8 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
>>>>>  		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
>>>>>
>>>>>  	/* Now that the ring is fully powered up, update the tail */
>>>>> -	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
>>>>> +	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
>>>>> +		      intel_ring_offset(request->ring, request->tail));
>>>>>  	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
>>>>>
>>>>>  	/* Let the ring send IDLE messages to the GT again,
>>>>> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
>>>>> index 14d2ea36fb88..9ac96ddb01ee 100644
>>>>> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
>>>>> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
>>>>> @@ -460,14 +460,23 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
>>>>>
>>>>>  static inline void intel_ring_advance(struct intel_ring *ring)
>>>>>  {
>>>>> +	/* Dummy function.
>>>>> +	 *
>>>>> +	 * This serves as a placeholder in the code so that the reader
>>>>> +	 * can compare against the preceding intel_ring_begin() and
>>>>> +	 * check that the number of dwords emitted matches the space
>>>>> +	 * reserved for the command packet (i.e. the value passed to
>>>>> +	 * intel_ring_begin()).
>>>>> +	 */
>>>>> +}
>>>>> +
>>>>> +static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
>>>>> +{
>>>>>  	/* The modulus is required so that we avoid writing
>>>>>  	 * request->tail == ring->size, rather than the expected 0,
>>>>>  	 * into the RING_TAIL register as that can cause a GPU hang.
>>>>> -	 * As this is only strictly required for the request->tail,
>>>>> -	 * and only then as we write the value into hardware, we can
>>>>> -	 * one day remove the modulus after every command packet.
>>>>>  	 */
>>>>> -	ring->tail &= ring->size - 1;
>>>>> +	return value & (ring->size - 1);
>>>>>  }
>>>> The comment seems outdated-ish as it speaks of modulus which is nowhere
>>>> to be seen. I'd speak of 'masking'. With that,
>>> The operation is a modulus. The common optimisation for moduli with
>>> a power-of-two divisor is being applied.
>>
>> Yes, I'm perfectly aware of that, but as discussed on IRC, that
>> information would be good to have locally. The modulus in this case is
>> a trick to achieve value == ring->size ? 0 : value; which would again
>> be more informative: we do not expect a wrap, but we have one special
>> value.
>
> Ditch all the verbiage, and go with the single line:
> 	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
> that just fits into 80cols.
>
> Keep it to the why, not the how, and we need not argue about language.
> -Chris

Well yes, except this function isn't writing anything to anywhere, so it 
seems rather disconnected from the point of use. Perhaps

/* ensure value (written to TAIL) is strictly less than ring->size */

which is even shorter :)

BTW, a completely different way to avoid this problem would just be to
ensure the packet never finishes at the last word of the ring. Setting
effective_size = size-1 would do just that!

If we set effective_size < size for engines that don't like TAIL ==
ring->size (as well as the ones that don't like the last cacheline), we
could then remove the don't-wrap-midsequence restriction from the
others, which would make better use of the ring space :)
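
A sketch of the reservation rule being proposed; the struct and field
names below are hypothetical stand-ins, and the MI_NOOP padding on wrap
is elided:

struct ring {
	unsigned int tail;		/* next write offset, in bytes */
	unsigned int size;		/* total ring size, power of two */
	unsigned int effective_size;	/* usable limit, strictly < size */
};

static void ring_begin(struct ring *ring, unsigned int bytes)
{
	/* If the packet would run past effective_size, pad the rest of
	 * the ring with MI_NOOP (elided) and restart at offset 0. With
	 * effective_size strictly below size, no packet can ever end
	 * at offset size, so TAIL is never written as ring->size.
	 */
	if (ring->tail + bytes > ring->effective_size)
		ring->tail = 0;
}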

.Dave.

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

* Re: [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write
  2016-08-02  9:42           ` Dave Gordon
@ 2016-08-02 10:14             ` Chris Wilson
  0 siblings, 0 replies; 94+ messages in thread
From: Chris Wilson @ 2016-08-02 10:14 UTC (permalink / raw)
  To: Dave Gordon; +Cc: intel-gfx

On Tue, Aug 02, 2016 at 10:42:47AM +0100, Dave Gordon wrote:
> On 01/08/16 17:32, Chris Wilson wrote:
> >On Mon, Aug 01, 2016 at 05:28:34PM +0300, Joonas Lahtinen wrote:
> >>On ma, 2016-08-01 at 11:15 +0100, Chris Wilson wrote:
> >>>On Mon, Aug 01, 2016 at 01:07:55PM +0300, Joonas Lahtinen wrote:
> >>>>
> >>>>On ma, 2016-08-01 at 10:10 +0100, Chris Wilson wrote:
> >>>>>
> >>>>>Space reservation is already safe with respect to the ring->size
> >>>>>modulus, but hardware only expects to see values in the range
> >>>>>0...ring->size-1 (inclusive) and so requires the modulus to prevent us
> >>>>>writing the value ring->size instead of 0. As this is only required for
> >>>>>the register itself, we can defer the modulus to the register update and
> >>>>>not perform it after every command packet. We keep the
> >>>>>intel_ring_advance() around in the code to provide demarcation for the
> >>>>>end-of-packet (which then can be compared against intel_ring_begin() as
> >>>>>the number of dwords emitted must match the reserved space).
> >>>>>
> >>>>>Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>>>>Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> >>>>>Cc: Dave Gordon <david.s.gordon@intel.com>
> >>>>>---
> >>>>> drivers/gpu/drm/i915/intel_lrc.c        |  2 +-
> >>>>> drivers/gpu/drm/i915/intel_ringbuffer.c |  6 ++++--
> >>>>> drivers/gpu/drm/i915/intel_ringbuffer.h | 17 +++++++++++++----
> >>>>> 3 files changed, 18 insertions(+), 7 deletions(-)
> >>>>>
> >>>>>diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> >>>>>index bf42a66d6624..824f7efe4e64 100644
> >>>>>--- a/drivers/gpu/drm/i915/intel_lrc.c
> >>>>>+++ b/drivers/gpu/drm/i915/intel_lrc.c
> >>>>>@@ -373,7 +373,7 @@ static void execlists_update_context(struct drm_i915_gem_request *rq)
> >>>>> 	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
> >>>>> 	uint32_t *reg_state = rq->ctx->engine[engine->id].lrc_reg_state;
> >>>>>
> >>>>>-	reg_state[CTX_RING_TAIL+1] = rq->tail;
> >>>>>+	reg_state[CTX_RING_TAIL+1] = intel_ring_offset(rq->ring, rq->tail);
> >>>>>
> >>>>> 	/* True 32b PPGTT with dynamic page allocation: update PDP
> >>>>> 	 * registers and point the unallocated PDPs to scratch page.
> >>>>>diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >>>>>index 3142085b5cc0..21d5e8209400 100644
> >>>>>--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> >>>>>+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> >>>>>@@ -1718,7 +1718,8 @@ static void i9xx_submit_request(struct drm_i915_gem_request *request)
> >>>>> {
> >>>>> 	struct drm_i915_private *dev_priv = request->i915;
> >>>>>
> >>>>>-	I915_WRITE_TAIL(request->engine, request->tail);
> >>>>>+	I915_WRITE_TAIL(request->engine,
> >>>>>+			intel_ring_offset(request->ring, request->tail));
> >>>>> }
> >>>>>
> >>>>> static void
> >>>>>@@ -2505,7 +2506,8 @@ static void gen6_bsd_submit_request(struct drm_i915_gem_request *request)
> >>>>> 		DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
> >>>>>
> >>>>> 	/* Now that the ring is fully powered up, update the tail */
> >>>>>-	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base), request->tail);
> >>>>>+	I915_WRITE_FW(RING_TAIL(request->engine->mmio_base),
> >>>>>+		      intel_ring_offset(request->ring, request->tail));
> >>>>> 	POSTING_READ_FW(RING_TAIL(request->engine->mmio_base));
> >>>>>
> >>>>> 	/* Let the ring send IDLE messages to the GT again,
> >>>>>diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> >>>>>index 14d2ea36fb88..9ac96ddb01ee 100644
> >>>>>--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> >>>>>+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> >>>>>@@ -460,14 +460,23 @@ static inline void intel_ring_emit_reg(struct intel_ring *ring, i915_reg_t reg)
> >>>>>
> >>>>> static inline void intel_ring_advance(struct intel_ring *ring)
> >>>>> {
> >>>>>+	/* Dummy function.
> >>>>>+	 *
> >>>>>+	 * This serves as a placeholder in the code so that the reader
> >>>>>+	 * can compare against the preceding intel_ring_begin() and
> >>>>>+	 * check that the number of dwords emitted matches the space
> >>>>>+	 * reserved for the command packet (i.e. the value passed to
> >>>>>+	 * intel_ring_begin()).
> >>>>>+	 */
> >>>>>+}
> >>>>>+
> >>>>>+static inline u32 intel_ring_offset(struct intel_ring *ring, u32 value)
> >>>>>+{
> >>>>> 	/* The modulus is required so that we avoid writing
> >>>>> 	 * request->tail == ring->size, rather than the expected 0,
> >>>>> 	 * into the RING_TAIL register as that can cause a GPU hang.
> >>>>>-	 * As this is only strictly required for the request->tail,
> >>>>>-	 * and only then as we write the value into hardware, we can
> >>>>>-	 * one day remove the modulus after every command packet.
> >>>>> 	 */
> >>>>>-	ring->tail &= ring->size - 1;
> >>>>>+	return value & (ring->size - 1);
> >>>>> }
> >>>>The comment seems outdated-ish as it speaks of modulus which is nowhere
> >>>>to be seen. I'd speak of 'masking'. With that,
> >>>The operation is a modulus. The common optimisation for moduli with
> >>>a power-of-two divisor is being applied.
> >>
> >>Yes, I'm perfectly aware of that, but as discussed on IRC, that
> >>information would be good to have locally. The modulus in this case is
> >>a trick to achieve value == ring->size ? 0 : value; which would again
> >>be more informative: we do not expect a wrap, but we have one special
> >>value.
> >
> >Ditch all the verbiage, and go with the single line:
> >	/* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */
> >that just fits into 80cols.
> >
> >Keep it to the why, not the how, and we need not argue about language.
> >-Chris
> 
> Well yes, except this function isn't writing anything to anywhere,
> so it seems rather disconnected from the point of use. Perhaps
> 
> /* ensure value (written to TAIL) is strictly less than ring->size */
> 
> which is even shorter :)
> 
> BTW, a completely different way to avoid this problem would just be
> to ensure the packet never finishes at the last word of the ring.
> Setting effective_size = size-1 would do just that!
> 
> If we set effective_size < size for engines that don't like TAIL ==
> ring->size (as well as the ones that don't like the last cacheline),
> we could then remove the don't-wrap-midsequence restriction from the
> others, which would make better use of the ring space :)

We can't just remove that, as we still have the limitation that we
can't split instructions, and the intel_ring_begin() reservation is all
the information we have at that point regarding instruction length.
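
For illustration, the contract in question, modelled on the driver's
i9xx request emission (a fragment, not self-contained; error handling
as in the driver):

	ret = intel_ring_begin(req, 4);	/* reserve the whole 4-dword packet */
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, req->fence.seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);	/* end-of-packet, matches the 4 reserved */

The reservation covers the whole packet precisely because the ring code
may only wrap (or pad) before emission begins, never between the dwords
of a single instruction.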
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 94+ messages in thread

end of thread, other threads:[~2016-08-02 10:14 UTC | newest]

Thread overview: 94+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
2016-08-01  9:10 ` [PATCH 01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
2016-08-01  9:10 ` [PATCH 02/73] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
2016-08-01  9:10 ` [PATCH 03/73] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
2016-08-01  9:10 ` [PATCH 04/73] drm/i915: Rename struct intel_ringbuffer to struct intel_ring Chris Wilson
2016-08-01  9:10 ` [PATCH 05/73] drm/i915: Rename residual ringbuf parameters Chris Wilson
2016-08-01  9:10 ` [PATCH 06/73] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
2016-08-01  9:10 ` [PATCH 07/73] drm/i915: Remove obsolete engine->gpu_caches_dirty Chris Wilson
2016-08-01  9:10 ` [PATCH 08/73] drm/i915: Reduce engine->emit_flush() to a single mode parameter Chris Wilson
2016-08-01  9:10 ` [PATCH 09/73] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
2016-08-01  9:10 ` [PATCH 10/73] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
2016-08-01  9:10 ` [PATCH 11/73] drm/i915: Remove intel_ring_get_tail() Chris Wilson
2016-08-01  9:10 ` [PATCH 12/73] drm/i915: Convert engine->write_tail to operate on a request Chris Wilson
2016-08-01  9:10 ` [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write Chris Wilson
2016-08-01 10:07   ` Joonas Lahtinen
2016-08-01 10:15     ` Chris Wilson
2016-08-01 14:28       ` Joonas Lahtinen
2016-08-01 16:17         ` Chris Wilson
2016-08-01 16:23           ` Chris Wilson
2016-08-01 16:32         ` Chris Wilson
2016-08-02  9:42           ` Dave Gordon
2016-08-02 10:14             ` Chris Wilson
2016-08-01 10:21     ` Chris Wilson
2016-08-01  9:10 ` [PATCH 14/73] drm/i915: Unify request submission Chris Wilson
2016-08-01 14:30   ` Joonas Lahtinen
2016-08-01 17:17   ` [PATCH v2] " Chris Wilson
2016-08-01  9:10 ` [PATCH 15/73] drm/i915/lrc: Update function names to match request flow Chris Wilson
2016-08-01  9:10 ` [PATCH 16/73] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal() Chris Wilson
2016-08-01  9:10 ` [PATCH 17/73] drm/i915: Reuse legacy breadcrumbs + tail emission Chris Wilson
2016-08-01  9:10 ` [PATCH 18/73] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores Chris Wilson
2016-08-01  9:10 ` [PATCH 19/73] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
2016-08-01  9:10 ` [PATCH 20/73] drm/i915: Refactor golden render state emission to unconfuse gcc Chris Wilson
2016-08-01  9:10 ` [PATCH 21/73] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
2016-08-01  9:10 ` [PATCH 22/73] drm/i915: Simplify calling engine->sync_to Chris Wilson
2016-08-01  9:10 ` [PATCH 23/73] drm/i915: Rename engine->semaphore.sync_to, engine->sempahore.signal locals Chris Wilson
2016-08-01  9:10 ` [PATCH 24/73] drm/i915: Amalgamate GGTT/ppGTT vma debug list walkers Chris Wilson
2016-08-01  9:10 ` [PATCH 25/73] drm/i915: Split early global GTT initialisation Chris Wilson
2016-08-01  9:10 ` [PATCH 26/73] drm/i915: Store owning file on the i915_address_space Chris Wilson
2016-08-01  9:10 ` [PATCH 27/73] drm/i915: Count how many VMA are bound for an object Chris Wilson
2016-08-01  9:10 ` [PATCH 28/73] drm/i915: Be more careful when unbinding vma Chris Wilson
2016-08-01  9:10 ` [PATCH 29/73] drm/i915: Kill drop_pages() Chris Wilson
2016-08-01  9:10 ` [PATCH 30/73] drm/i915: Introduce i915_gem_active for request tracking Chris Wilson
2016-08-01  9:10 ` [PATCH 31/73] drm/i915: Prepare i915_gem_active for annotations Chris Wilson
2016-08-01  9:10 ` [PATCH 32/73] drm/i915: Mark up i915_gem_active for locking annotation Chris Wilson
2016-08-01  9:10 ` [PATCH 33/73] drm/i915: Refactor blocking waits Chris Wilson
2016-08-01  9:10 ` [PATCH 34/73] drm/i915: Rename request->list to link for consistency Chris Wilson
2016-08-01  9:10 ` [PATCH 35/73] drm/i915: Remove obsolete i915_gem_object_flush_active() Chris Wilson
2016-08-01  9:10 ` [PATCH 36/73] drm/i915: Refactor activity tracking for requests Chris Wilson
2016-08-01 12:52   ` Joonas Lahtinen
2016-08-01  9:10 ` [PATCH 37/73] drm/i915: Track requests inside each intel_ring Chris Wilson
2016-08-01  9:10 ` [PATCH 38/73] drm/i915: Convert intel_overlay to request tracking Chris Wilson
2016-08-01  9:10 ` [PATCH 39/73] drm/i915: Move the special case wait-request handling to its one caller Chris Wilson
2016-08-01  9:10 ` [PATCH 40/73] drm/i915: Disable waitboosting for a saturated engine Chris Wilson
2016-08-01  9:10 ` [PATCH 41/73] drm/i915: s/__i915_wait_request/i915_wait_request/ Chris Wilson
2016-08-01  9:10 ` [PATCH 42/73] drm/i915: Double check activity before relocations Chris Wilson
2016-08-01  9:10 ` [PATCH 43/73] drm/i915: Move request list retirement to i915_gem_request.c Chris Wilson
2016-08-01  9:10 ` [PATCH 44/73] drm/i915: i915_vma_move_to_active prep patch Chris Wilson
2016-08-01  9:10 ` [PATCH 45/73] drm/i915: Track active vma requests Chris Wilson
2016-08-01  9:10 ` [PATCH 46/73] drm/i915: Release vma when the handle is closed Chris Wilson
2016-08-01 11:26   ` Joonas Lahtinen
2016-08-01  9:10 ` [PATCH 47/73] drm/i915: Mark the context and address space as closed Chris Wilson
2016-08-01  9:10 ` [PATCH 48/73] Revert "drm/i915: Clean up associated VMAs on context destruction" Chris Wilson
2016-08-01  9:10 ` [PATCH 49/73] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
2016-08-01  9:10 ` [PATCH 50/73] drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something() Chris Wilson
2016-08-01  9:10 ` [PATCH 51/73] drm/i915: Double check the active status on the batch pool Chris Wilson
2016-08-01  9:11 ` [PATCH 52/73] drm/i915: Remove request retirement before each batch Chris Wilson
2016-08-01  9:11 ` [PATCH 53/73] drm/i915: Remove i915_gem_execbuffer_retire_commands() Chris Wilson
2016-08-01  9:11 ` [PATCH 54/73] drm/i915: Fix up vma alignment to be u64 Chris Wilson
2016-08-01 12:21   ` Joonas Lahtinen
2016-08-01  9:11 ` [PATCH 55/73] drm/i915: Pad GTT views of exec objects up to user specified size Chris Wilson
2016-08-01  9:11 ` [PATCH 56/73] drm/i915: Reduce WARN(i915_gem_valid_gtt_space) to a debug-only check Chris Wilson
2016-08-01  9:11 ` [PATCH 57/73] drm/i915: Split insertion/binding of an object into the VM Chris Wilson
2016-08-01  9:11 ` [PATCH 58/73] drm/i915: Convert 4096 alignment request to 0 for drm_mm allocations Chris Wilson
2016-08-01  9:11 ` [PATCH 59/73] drm/i915: Update the GGTT size/alignment query functions Chris Wilson
2016-08-01 12:27   ` Joonas Lahtinen
2016-08-01  9:11 ` [PATCH 60/73] drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private Chris Wilson
2016-08-01 12:30   ` Joonas Lahtinen
2016-08-01  9:11 ` [PATCH 61/73] drm/i915: Record allocated vma size Chris Wilson
2016-08-01 12:36   ` Joonas Lahtinen
2016-08-01 12:44     ` Chris Wilson
2016-08-01  9:11 ` [PATCH 62/73] drm/i915: Wrap vma->pin_count accessors with small inline helpers Chris Wilson
2016-08-01  9:11 ` [PATCH 63/73] drm/i915: Start passing around i915_vma from execbuffer Chris Wilson
2016-08-01  9:11 ` [PATCH 64/73] drm/i915: Combine all i915_vma bitfields into a single set of flags Chris Wilson
2016-08-01  9:11 ` [PATCH 65/73] drm/i915: Make i915_vma_pin() small and inline Chris Wilson
2016-08-01  9:11 ` [PATCH 66/73] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin() Chris Wilson
2016-08-01  9:11 ` [PATCH 67/73] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
2016-08-01  9:11 ` [PATCH 68/73] drm/i915: Use atomics to manipulate obj->frontbuffer_bits Chris Wilson
2016-08-01  9:11 ` [PATCH 69/73] drm/i915: Use dev_priv consistently through the intel_frontbuffer interface Chris Wilson
2016-08-01  9:11 ` [PATCH 70/73] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
2016-08-01 12:46   ` Joonas Lahtinen
2016-08-01  9:11 ` [PATCH 71/73] drm/i915: Move i915_gem_object_wait_rendering() Chris Wilson
2016-08-01  9:11 ` [PATCH 72/73] drm/i915: Enable lockless lookup of request tracking via RCU Chris Wilson
2016-08-01  9:11 ` [PATCH 73/73] drm/i915: Export our request as a dma-buf fence on the reservation object Chris Wilson
2016-08-01 11:45 ` ✗ Ro.CI.BAT: failure for series starting with [01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.