From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 07/73] drm/i915: Remove obsolete engine->gpu_caches_dirty
Date: Mon,  1 Aug 2016 10:10:15 +0100
Message-ID: <1470042681-25318-8-git-send-email-chris@chris-wilson.co.uk>
In-Reply-To: <1470042681-25318-1-git-send-email-chris@chris-wilson.co.uk>

Space for flushing the GPU cache prior to completing the request is
preallocated, so that flush cannot fail - the GPU caches will always be
flushed along with the completed request. This means we no longer have
to track whether the GPU cache is dirty between batches, as we had to
with the outstanding_lazy_seqno.

With the removal of the duplicated per-backend entry points for
emitting the obsolete lazy flush, we can then unify the backends
further around engine->emit_flush.
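
As an illustration (not part of the patch itself), the calling
convention collapses from the lazy-flush helpers into a direct call on
the backend; a condensed sketch of the before/after, drawn from the
hunks below:

	/* before: mark the caches dirty and rely on a per-backend
	 * helper to emit the flush lazily */
	req->engine->gpu_caches_dirty = true;
	ret = intel_engine_flush_all_caches(req);
	/* (or logical_ring_flush_all_caches(req) on the execlists path) */

	/* after: emit the flush directly; the request preallocates the
	 * ring space, so the flush accompanying request completion
	 * cannot fail */
	ret = req->engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);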

v2: Expand a bit on the legacy of gpu_caches_dirty

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: http://patchwork.freedesktop.org/patch/msgid/1469432687-22756-18-git-send-email-chris@chris-wilson.co.uk
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  9 +---
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 11 +++--
 drivers/gpu/drm/i915/i915_gem_request.c    |  8 ++--
 drivers/gpu/drm/i915/intel_lrc.c           | 47 +++----------------
 drivers/gpu/drm/i915/intel_lrc.h           |  2 -
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 72 +++++++-----------------------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |  7 ---
 8 files changed, 37 insertions(+), 121 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3336a5fcd029..beece8feb8fe 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -568,7 +568,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	 * itlb_before_ctx_switch.
 	 */
 	if (IS_GEN6(dev_priv)) {
-		ret = engine->flush(req, I915_GEM_GPU_DOMAINS, 0);
+		ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d0ef675fb169..35c4c595e5ba 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -998,10 +998,8 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	if (flush_domains & I915_GEM_DOMAIN_GTT)
 		wmb();
 
-	/* Unconditionally invalidate gpu caches and ensure that we do flush
-	 * any residual writes from the previous batch.
-	 */
-	return intel_engine_invalidate_all_caches(req);
+	/* Unconditionally invalidate GPU caches and TLBs. */
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 static bool
@@ -1163,9 +1161,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
-	/* Unconditionally force add_request to emit a full flush. */
-	params->engine->gpu_caches_dirty = true;
-
 	/* Add a breadcrumb for the completion of the batch buffer */
 	__i915_add_request(params->request, params->batch_obj, true);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index ebfa0406a6a1..39fa9eb10514 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1666,7 +1666,8 @@ static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req,
+				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1693,7 +1694,8 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 	int ret;
 
 	/* NB: TLBs must be flushed and invalidated before a switch */
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+	ret = engine->emit_flush(req,
+				 I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1711,8 +1713,9 @@ static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
 
 	/* XXX: RCS is the only one to auto invalidate the TLBs? */
 	if (engine->id != RCS) {
-		ret = engine->flush(req,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = engine->emit_flush(req,
+					 I915_GEM_GPU_DOMAINS,
+					 I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 942b5b1f1602..7e3206051ced 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -451,12 +451,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	 * what.
 	 */
 	if (flush_caches) {
-		if (i915.enable_execlists)
-			ret = logical_ring_flush_all_caches(request);
-		else
-			ret = intel_engine_flush_all_caches(request);
+		ret = engine->emit_flush(request, 0, I915_GEM_GPU_DOMAINS);
+
 		/* Not allowed to fail! */
-		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+		WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
 	}
 
 	trace_i915_gem_request_add(request);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e4f809f879c6..33ff9f6d588f 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -642,24 +642,6 @@ static void execlists_context_queue(struct drm_i915_gem_request *request)
 	spin_unlock_bh(&engine->execlist_lock);
 }
 
-static int logical_ring_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->emit_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 				 struct list_head *vmas)
 {
@@ -690,7 +672,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return logical_ring_invalidate_all_caches(req);
+	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -930,22 +912,6 @@ void intel_logical_ring_stop(struct intel_engine_cs *engine)
 	I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
 }
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->emit_flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 static int intel_lr_context_pin(struct i915_gem_context *ctx,
 				struct intel_engine_cs *engine)
 {
@@ -1026,15 +992,15 @@ void intel_lr_context_unpin(struct i915_gem_context *ctx,
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
 	int ret, i;
-	struct intel_engine_cs *engine = req->engine;
 	struct intel_ring *ring = req->ring;
 	struct i915_workarounds *w = &req->i915->workarounds;
 
 	if (w->count == 0)
 		return 0;
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -1051,8 +1017,9 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	engine->gpu_caches_dirty = true;
-	ret = logical_ring_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index d26fb44549e5..33e0193e5451 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -72,8 +72,6 @@ int logical_xcs_ring_init(struct intel_engine_cs *engine);
 
 int intel_engines_init(struct drm_device *dev);
 
-int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);
-
 /* Logical Ring Contexts */
 
 /* One extra page is added before LRC for GuC as shared data */
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e7a7f67ab06d..9e4b49644553 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -688,8 +688,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -706,8 +707,9 @@ static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	req->engine->gpu_caches_dirty = true;
-	ret = intel_engine_flush_all_caches(req);
+	ret = req->engine->emit_flush(req,
+				      I915_GEM_GPU_DOMAINS,
+				      I915_GEM_GPU_DOMAINS);
 	if (ret)
 		return ret;
 
@@ -2860,21 +2862,21 @@ int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
 	if (INTEL_GEN(dev_priv) >= 8) {
 		engine->init_context = intel_rcs_ctx_init;
 		engine->add_request = gen8_render_add_request;
-		engine->flush = gen8_render_ring_flush;
+		engine->emit_flush = gen8_render_ring_flush;
 		if (i915.semaphores)
 			engine->semaphore.signal = gen8_rcs_signal;
 	} else if (INTEL_GEN(dev_priv) >= 6) {
 		engine->init_context = intel_rcs_ctx_init;
-		engine->flush = gen7_render_ring_flush;
+		engine->emit_flush = gen7_render_ring_flush;
 		if (IS_GEN6(dev_priv))
-			engine->flush = gen6_render_ring_flush;
+			engine->emit_flush = gen6_render_ring_flush;
 	} else if (IS_GEN5(dev_priv)) {
-		engine->flush = gen4_render_ring_flush;
+		engine->emit_flush = gen4_render_ring_flush;
 	} else {
 		if (INTEL_GEN(dev_priv) < 4)
-			engine->flush = gen2_render_ring_flush;
+			engine->emit_flush = gen2_render_ring_flush;
 		else
-			engine->flush = gen4_render_ring_flush;
+			engine->emit_flush = gen4_render_ring_flush;
 		engine->irq_enable_mask = I915_USER_INTERRUPT;
 	}
 
@@ -2911,12 +2913,12 @@ int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev_priv))
 			engine->write_tail = gen6_bsd_ring_write_tail;
-		engine->flush = gen6_bsd_ring_flush;
+		engine->emit_flush = gen6_bsd_ring_flush;
 		if (INTEL_GEN(dev_priv) < 8)
 			engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
 	} else {
 		engine->mmio_base = BSD_RING_BASE;
-		engine->flush = bsd_ring_flush;
+		engine->emit_flush = bsd_ring_flush;
 		if (IS_GEN5(dev_priv))
 			engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
 		else
@@ -2935,7 +2937,7 @@ int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_bsd_ring_flush;
+	engine->emit_flush = gen6_bsd_ring_flush;
 
 	return intel_init_ring_buffer(engine);
 }
@@ -2946,7 +2948,7 @@ int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 	if (INTEL_GEN(dev_priv) < 8)
 		engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
 
@@ -2959,7 +2961,7 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 
 	intel_ring_default_vfuncs(dev_priv, engine);
 
-	engine->flush = gen6_ring_flush;
+	engine->emit_flush = gen6_ring_flush;
 
 	if (INTEL_GEN(dev_priv) < 8) {
 		engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
@@ -2970,46 +2972,6 @@ int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
 	return intel_init_ring_buffer(engine);
 }
 
-int
-intel_engine_flush_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (!engine->gpu_caches_dirty)
-		return 0;
-
-	ret = engine->flush(req, 0, I915_GEM_GPU_DOMAINS);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, 0, I915_GEM_GPU_DOMAINS);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
-int
-intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	uint32_t flush_domains;
-	int ret;
-
-	flush_domains = 0;
-	if (engine->gpu_caches_dirty)
-		flush_domains = I915_GEM_GPU_DOMAINS;
-
-	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-	if (ret)
-		return ret;
-
-	trace_i915_gem_ring_flush(req, I915_GEM_GPU_DOMAINS, flush_domains);
-
-	engine->gpu_caches_dirty = false;
-	return 0;
-}
-
 void intel_engine_stop(struct intel_engine_cs *engine)
 {
 	int ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index ba54ffcdd55a..00723401f98c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -206,9 +206,6 @@ struct intel_engine_cs {
 
 	void		(*write_tail)(struct intel_engine_cs *engine,
 				      u32 value);
-	int __must_check (*flush)(struct drm_i915_gem_request *req,
-				  u32	invalidate_domains,
-				  u32	flush_domains);
 	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
@@ -325,8 +322,6 @@ struct intel_engine_cs {
 	 */
 	u32 last_submitted_seqno;
 
-	bool gpu_caches_dirty;
-
 	struct i915_gem_context *last_context;
 
 	struct intel_engine_hangcheck hangcheck;
@@ -474,8 +469,6 @@ void intel_ring_update_space(struct intel_ring *ring);
 
 int __must_check intel_engine_idle(struct intel_engine_cs *engine);
 void intel_engine_init_seqno(struct intel_engine_cs *engine, u32 seqno);
-int intel_engine_flush_all_caches(struct drm_i915_gem_request *req);
-int intel_engine_invalidate_all_caches(struct drm_i915_gem_request *req);
 
 int intel_init_pipe_control(struct intel_engine_cs *engine, int size);
 void intel_fini_pipe_control(struct intel_engine_cs *engine);
-- 
2.8.1

Thread overview: 94+ messages
2016-08-01  9:10 A few bug fixes leading to exporting prime fences [mostly reviewed] Chris Wilson
2016-08-01  9:10 ` [PATCH 01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
2016-08-01  9:10 ` [PATCH 02/73] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
2016-08-01  9:10 ` [PATCH 03/73] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
2016-08-01  9:10 ` [PATCH 04/73] drm/i915: Rename struct intel_ringbuffer to struct intel_ring Chris Wilson
2016-08-01  9:10 ` [PATCH 05/73] drm/i915: Rename residual ringbuf parameters Chris Wilson
2016-08-01  9:10 ` [PATCH 06/73] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
2016-08-01  9:10 ` Chris Wilson [this message]
2016-08-01  9:10 ` [PATCH 08/73] drm/i915: Reduce engine->emit_flush() to a single mode parameter Chris Wilson
2016-08-01  9:10 ` [PATCH 09/73] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
2016-08-01  9:10 ` [PATCH 10/73] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
2016-08-01  9:10 ` [PATCH 11/73] drm/i915: Remove intel_ring_get_tail() Chris Wilson
2016-08-01  9:10 ` [PATCH 12/73] drm/i915: Convert engine->write_tail to operate on a request Chris Wilson
2016-08-01  9:10 ` [PATCH 13/73] drm/i915: Move the modulus for ring emission to the register write Chris Wilson
2016-08-01 10:07   ` Joonas Lahtinen
2016-08-01 10:15     ` Chris Wilson
2016-08-01 14:28       ` Joonas Lahtinen
2016-08-01 16:17         ` Chris Wilson
2016-08-01 16:23           ` Chris Wilson
2016-08-01 16:32         ` Chris Wilson
2016-08-02  9:42           ` Dave Gordon
2016-08-02 10:14             ` Chris Wilson
2016-08-01 10:21     ` Chris Wilson
2016-08-01  9:10 ` [PATCH 14/73] drm/i915: Unify request submission Chris Wilson
2016-08-01 14:30   ` Joonas Lahtinen
2016-08-01 17:17   ` [PATCH v2] " Chris Wilson
2016-08-01  9:10 ` [PATCH 15/73] drm/i915/lrc: Update function names to match request flow Chris Wilson
2016-08-01  9:10 ` [PATCH 16/73] drm/i915: Stop passing caller's num_dwords to engine->semaphore.signal() Chris Wilson
2016-08-01  9:10 ` [PATCH 17/73] drm/i915: Reuse legacy breadcrumbs + tail emission Chris Wilson
2016-08-01  9:10 ` [PATCH 18/73] drm/i915/ringbuffer: Specialise SNB+ request emission for semaphores Chris Wilson
2016-08-01  9:10 ` [PATCH 19/73] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
2016-08-01  9:10 ` [PATCH 20/73] drm/i915: Refactor golden render state emission to unconfuse gcc Chris Wilson
2016-08-01  9:10 ` [PATCH 21/73] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
2016-08-01  9:10 ` [PATCH 22/73] drm/i915: Simplify calling engine->sync_to Chris Wilson
2016-08-01  9:10 ` [PATCH 23/73] drm/i915: Rename engine->semaphore.sync_to, engine->sempahore.signal locals Chris Wilson
2016-08-01  9:10 ` [PATCH 24/73] drm/i915: Amalgamate GGTT/ppGTT vma debug list walkers Chris Wilson
2016-08-01  9:10 ` [PATCH 25/73] drm/i915: Split early global GTT initialisation Chris Wilson
2016-08-01  9:10 ` [PATCH 26/73] drm/i915: Store owning file on the i915_address_space Chris Wilson
2016-08-01  9:10 ` [PATCH 27/73] drm/i915: Count how many VMA are bound for an object Chris Wilson
2016-08-01  9:10 ` [PATCH 28/73] drm/i915: Be more careful when unbinding vma Chris Wilson
2016-08-01  9:10 ` [PATCH 29/73] drm/i915: Kill drop_pages() Chris Wilson
2016-08-01  9:10 ` [PATCH 30/73] drm/i915: Introduce i915_gem_active for request tracking Chris Wilson
2016-08-01  9:10 ` [PATCH 31/73] drm/i915: Prepare i915_gem_active for annotations Chris Wilson
2016-08-01  9:10 ` [PATCH 32/73] drm/i915: Mark up i915_gem_active for locking annotation Chris Wilson
2016-08-01  9:10 ` [PATCH 33/73] drm/i915: Refactor blocking waits Chris Wilson
2016-08-01  9:10 ` [PATCH 34/73] drm/i915: Rename request->list to link for consistency Chris Wilson
2016-08-01  9:10 ` [PATCH 35/73] drm/i915: Remove obsolete i915_gem_object_flush_active() Chris Wilson
2016-08-01  9:10 ` [PATCH 36/73] drm/i915: Refactor activity tracking for requests Chris Wilson
2016-08-01 12:52   ` Joonas Lahtinen
2016-08-01  9:10 ` [PATCH 37/73] drm/i915: Track requests inside each intel_ring Chris Wilson
2016-08-01  9:10 ` [PATCH 38/73] drm/i915: Convert intel_overlay to request tracking Chris Wilson
2016-08-01  9:10 ` [PATCH 39/73] drm/i915: Move the special case wait-request handling to its one caller Chris Wilson
2016-08-01  9:10 ` [PATCH 40/73] drm/i915: Disable waitboosting for a saturated engine Chris Wilson
2016-08-01  9:10 ` [PATCH 41/73] drm/i915: s/__i915_wait_request/i915_wait_request/ Chris Wilson
2016-08-01  9:10 ` [PATCH 42/73] drm/i915: Double check activity before relocations Chris Wilson
2016-08-01  9:10 ` [PATCH 43/73] drm/i915: Move request list retirement to i915_gem_request.c Chris Wilson
2016-08-01  9:10 ` [PATCH 44/73] drm/i915: i915_vma_move_to_active prep patch Chris Wilson
2016-08-01  9:10 ` [PATCH 45/73] drm/i915: Track active vma requests Chris Wilson
2016-08-01  9:10 ` [PATCH 46/73] drm/i915: Release vma when the handle is closed Chris Wilson
2016-08-01 11:26   ` Joonas Lahtinen
2016-08-01  9:10 ` [PATCH 47/73] drm/i915: Mark the context and address space as closed Chris Wilson
2016-08-01  9:10 ` [PATCH 48/73] Revert "drm/i915: Clean up associated VMAs on context destruction" Chris Wilson
2016-08-01  9:10 ` [PATCH 49/73] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
2016-08-01  9:10 ` [PATCH 50/73] drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something() Chris Wilson
2016-08-01  9:10 ` [PATCH 51/73] drm/i915: Double check the active status on the batch pool Chris Wilson
2016-08-01  9:11 ` [PATCH 52/73] drm/i915: Remove request retirement before each batch Chris Wilson
2016-08-01  9:11 ` [PATCH 53/73] drm/i915: Remove i915_gem_execbuffer_retire_commands() Chris Wilson
2016-08-01  9:11 ` [PATCH 54/73] drm/i915: Fix up vma alignment to be u64 Chris Wilson
2016-08-01 12:21   ` Joonas Lahtinen
2016-08-01  9:11 ` [PATCH 55/73] drm/i915: Pad GTT views of exec objects up to user specified size Chris Wilson
2016-08-01  9:11 ` [PATCH 56/73] drm/i915: Reduce WARN(i915_gem_valid_gtt_space) to a debug-only check Chris Wilson
2016-08-01  9:11 ` [PATCH 57/73] drm/i915: Split insertion/binding of an object into the VM Chris Wilson
2016-08-01  9:11 ` [PATCH 58/73] drm/i915: Convert 4096 alignment request to 0 for drm_mm allocations Chris Wilson
2016-08-01  9:11 ` [PATCH 59/73] drm/i915: Update the GGTT size/alignment query functions Chris Wilson
2016-08-01 12:27   ` Joonas Lahtinen
2016-08-01  9:11 ` [PATCH 60/73] drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private Chris Wilson
2016-08-01 12:30   ` Joonas Lahtinen
2016-08-01  9:11 ` [PATCH 61/73] drm/i915: Record allocated vma size Chris Wilson
2016-08-01 12:36   ` Joonas Lahtinen
2016-08-01 12:44     ` Chris Wilson
2016-08-01  9:11 ` [PATCH 62/73] drm/i915: Wrap vma->pin_count accessors with small inline helpers Chris Wilson
2016-08-01  9:11 ` [PATCH 63/73] drm/i915: Start passing around i915_vma from execbuffer Chris Wilson
2016-08-01  9:11 ` [PATCH 64/73] drm/i915: Combine all i915_vma bitfields into a single set of flags Chris Wilson
2016-08-01  9:11 ` [PATCH 65/73] drm/i915: Make i915_vma_pin() small and inline Chris Wilson
2016-08-01  9:11 ` [PATCH 66/73] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin() Chris Wilson
2016-08-01  9:11 ` [PATCH 67/73] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
2016-08-01  9:11 ` [PATCH 68/73] drm/i915: Use atomics to manipulate obj->frontbuffer_bits Chris Wilson
2016-08-01  9:11 ` [PATCH 69/73] drm/i915: Use dev_priv consistently through the intel_frontbuffer interface Chris Wilson
2016-08-01  9:11 ` [PATCH 70/73] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
2016-08-01 12:46   ` Joonas Lahtinen
2016-08-01  9:11 ` [PATCH 71/73] drm/i915: Move i915_gem_object_wait_rendering() Chris Wilson
2016-08-01  9:11 ` [PATCH 72/73] drm/i915: Enable lockless lookup of request tracking via RCU Chris Wilson
2016-08-01  9:11 ` [PATCH 73/73] drm/i915: Export our request as a dma-buf fence on the reservation object Chris Wilson
2016-08-01 11:45 ` ✗ Ro.CI.BAT: failure for series starting with [01/73] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Patchwork
