All of lore.kernel.org
 help / color / mirror / Atom feed
From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 26/70] drm/i915: Map the execlists context regs once during pinning
Date: Tue,  7 Apr 2015 16:20:50 +0100	[thread overview]
Message-ID: <1428420094-18352-27-git-send-email-chris@chris-wilson.co.uk> (raw)
In-Reply-To: <1428420094-18352-1-git-send-email-chris@chris-wilson.co.uk>

When we pin the execlists context on queuing, it is the ideal time to map
the register page that we need to update when we submit the request to
the hardware (and keep it around for future requests).

This avoids having to do an atomic kmap on every submission. On the
other hand, it does depend upon correct request construction.

v2: Rebase

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c         |  10 --
 drivers/gpu/drm/i915/intel_lrc.c        | 189 ++++++++++++--------------------
 drivers/gpu/drm/i915/intel_lrc.h        |   2 -
 drivers/gpu/drm/i915/intel_ringbuffer.h |   1 +
 4 files changed, 73 insertions(+), 129 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f94fe2ba4f6f..071800553a43 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2589,13 +2589,6 @@ void i915_gem_request_free(struct kref *req_ref)
 	struct intel_context *ctx = req->ctx;
 
 	if (ctx) {
-		if (i915.enable_execlists) {
-			struct intel_engine_cs *ring = req->ring;
-
-			if (ctx != ring->default_context)
-				intel_lr_context_unpin(ring, ctx);
-		}
-
 		i915_gem_context_unreference(ctx);
 	}
 
@@ -2699,9 +2692,6 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 				execlist_link);
 		list_del(&submit_req->execlist_link);
 
-		if (submit_req->ctx != ring->default_context)
-			intel_lr_context_unpin(ring, submit_req->ctx);
-
 		i915_gem_request_unreference(submit_req);
 	}
 
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 8acfcf39e72d..4c985e186e3a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -203,9 +203,6 @@ enum {
 };
 #define GEN8_CTX_ID_SHIFT 32
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
-
 /**
  * intel_sanitize_enable_execlists() - sanitize i915.enable_execlists
  * @dev: DRM device.
@@ -318,47 +315,18 @@ static void execlists_elsp_write(struct intel_engine_cs *ring,
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
 }
 
-static int execlists_update_context(struct drm_i915_gem_object *ctx_obj,
-				    struct drm_i915_gem_object *ring_obj,
-				    u32 tail)
-{
-	struct page *page;
-	uint32_t *reg_state;
-
-	page = i915_gem_object_get_page(ctx_obj, 1);
-	reg_state = kmap_atomic(page);
-
-	reg_state[CTX_RING_TAIL+1] = tail;
-	reg_state[CTX_RING_BUFFER_START+1] = i915_gem_obj_ggtt_offset(ring_obj);
-
-	kunmap_atomic(reg_state);
-
-	return 0;
-}
-
 static void execlists_submit_contexts(struct intel_engine_cs *ring,
 				      struct intel_context *to0, u32 tail0,
 				      struct intel_context *to1, u32 tail1)
 {
 	struct drm_i915_gem_object *ctx_obj0 = to0->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf0 = to0->engine[ring->id].ringbuf;
 	struct drm_i915_gem_object *ctx_obj1 = NULL;
-	struct intel_ringbuffer *ringbuf1 = NULL;
-
-	BUG_ON(!ctx_obj0);
-	WARN_ON(!i915_gem_obj_is_pinned(ctx_obj0));
-	WARN_ON(!i915_gem_obj_is_pinned(ringbuf0->obj));
 
-	execlists_update_context(ctx_obj0, ringbuf0->obj, tail0);
+	to0->engine[ring->id].ringbuf->regs[CTX_RING_TAIL+1] = tail0;
 
 	if (to1) {
-		ringbuf1 = to1->engine[ring->id].ringbuf;
+		to1->engine[ring->id].ringbuf->regs[CTX_RING_TAIL+1] = tail1;
 		ctx_obj1 = to1->engine[ring->id].state;
-		BUG_ON(!ctx_obj1);
-		WARN_ON(!i915_gem_obj_is_pinned(ctx_obj1));
-		WARN_ON(!i915_gem_obj_is_pinned(ringbuf1->obj));
-
-		execlists_update_context(ctx_obj1, ringbuf1->obj, tail1);
 	}
 
 	execlists_elsp_write(ring, ctx_obj0, ctx_obj1);
@@ -500,29 +468,17 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 				   struct drm_i915_gem_request *request)
 {
 	struct drm_i915_gem_request *cursor;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
 	int num_elements = 0;
 
-	if (to != ring->default_context)
-		intel_lr_context_pin(ring, to);
+	if (WARN_ON(request == NULL))
+		return -ENODEV;
+
+	if (WARN_ON(to->engine[ring->id].pin_count == 0))
+		return -ENODEV;
+
+	i915_gem_request_reference(request);
+	WARN_ON(to != request->ctx);
 
-	if (!request) {
-		/*
-		 * If there isn't a request associated with this submission,
-		 * create one as a temporary holder.
-		 */
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-		request->ring = ring;
-		request->ctx = to;
-		kref_init(&request->ref);
-		request->uniq = dev_priv->request_uniq++;
-		i915_gem_context_reference(request->ctx);
-	} else {
-		i915_gem_request_reference(request);
-		WARN_ON(to != request->ctx);
-	}
 	request->tail = tail;
 
 	spin_lock_irq(&ring->execlist_lock);
@@ -608,16 +564,47 @@ static int execlists_move_to_gpu(struct intel_ringbuffer *ringbuf,
 	return logical_ring_invalidate_all_caches(ringbuf, ctx);
 }
 
+static int intel_lr_context_pin(struct intel_engine_cs *ring,
+				struct intel_context *ctx)
+{
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+	if (ctx->engine[ring->id].pin_count++)
+		return 0;
+
+	ret = i915_gem_obj_ggtt_pin(ctx_obj, GEN8_LR_CONTEXT_ALIGN, 0);
+	if (ret)
+		goto reset_pin_count;
+
+	ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
+	if (ret)
+		goto unpin_ctx_obj;
+
+	ringbuf->regs = kmap(i915_gem_object_get_page(ctx_obj, 1));
+	ringbuf->regs[CTX_RING_BUFFER_START+1] =
+		i915_gem_obj_ggtt_offset(ringbuf->obj);
+
+	return 0;
+
+unpin_ctx_obj:
+	i915_gem_object_ggtt_unpin(ctx_obj);
+reset_pin_count:
+	ctx->engine[ring->id].pin_count = 0;
+
+	return ret;
+}
+
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request,
 					    struct intel_context *ctx)
 {
 	int ret;
 
-	if (ctx != request->ring->default_context) {
-		ret = intel_lr_context_pin(request->ring, ctx);
-		if (ret)
-			return ret;
-	}
+	ret = intel_lr_context_pin(request->ring, ctx);
+	if (ret)
+		return ret;
 
 	request->ringbuf = ctx->engine[request->ring->id].ringbuf;
 	request->ctx     = ctx;
@@ -929,30 +916,42 @@ int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
 	return 0;
 }
 
+static void intel_lr_context_unpin(struct intel_engine_cs *ring,
+				   struct intel_context *ctx)
+{
+	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
+	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+
+	if (--ctx->engine[ring->id].pin_count)
+		return;
+
+	kunmap(i915_gem_object_get_page(ctx_obj, 1));
+	ringbuf->regs = NULL;
+
+	intel_unpin_ringbuffer_obj(ringbuf);
+	i915_gem_object_ggtt_unpin(ctx_obj);
+}
+
 void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 {
-	struct drm_i915_gem_request *req, *tmp;
-	struct drm_i915_private *dev_priv = ring->dev->dev_private;
-	struct list_head retired_list;
+	struct list_head list;
 
 	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
 	if (list_empty(&ring->execlist_retired_req_list))
 		return;
 
-	INIT_LIST_HEAD(&retired_list);
 	spin_lock_irq(&ring->execlist_lock);
-	list_replace_init(&ring->execlist_retired_req_list, &retired_list);
+	list_replace_init(&ring->execlist_retired_req_list, &list);
 	spin_unlock_irq(&ring->execlist_lock);
 
-	list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
-		struct intel_context *ctx = req->ctx;
-		struct drm_i915_gem_object *ctx_obj =
-				ctx->engine[ring->id].state;
+	while (!list_empty(&list)) {
+		struct drm_i915_gem_request *rq;
+
+		rq = list_first_entry(&list, typeof(*rq), execlist_link);
+		list_del(&rq->execlist_link);
 
-		if (ctx_obj && (ctx != ring->default_context))
-			intel_lr_context_unpin(ring, ctx);
-		list_del(&req->execlist_link);
-		i915_gem_request_unreference(req);
+		intel_lr_context_unpin(ring, rq->ctx);
+		i915_gem_request_unreference(rq);
 	}
 }
 
@@ -995,50 +994,6 @@ int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf,
 	return 0;
 }
 
-static int intel_lr_context_pin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
-{
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
-	int ret = 0;
-
-	WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-	if (ctx->engine[ring->id].pin_count++ == 0) {
-		ret = i915_gem_obj_ggtt_pin(ctx_obj,
-				GEN8_LR_CONTEXT_ALIGN, 0);
-		if (ret)
-			goto reset_pin_count;
-
-		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
-		if (ret)
-			goto unpin_ctx_obj;
-	}
-
-	return ret;
-
-unpin_ctx_obj:
-	i915_gem_object_ggtt_unpin(ctx_obj);
-reset_pin_count:
-	ctx->engine[ring->id].pin_count = 0;
-
-	return ret;
-}
-
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx)
-{
-	struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-	struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
-
-	if (ctx_obj) {
-		WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-		if (--ctx->engine[ring->id].pin_count == 0) {
-			intel_unpin_ringbuffer_obj(ringbuf);
-			i915_gem_object_ggtt_unpin(ctx_obj);
-		}
-	}
-}
-
 static int intel_logical_ring_workarounds_emit(struct intel_engine_cs *ring,
 					       struct intel_context *ctx)
 {
@@ -1967,7 +1922,7 @@ error_unpin_ctx:
 }
 
 void intel_lr_context_reset(struct drm_device *dev,
-			struct intel_context *ctx)
+			    struct intel_context *ctx)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_engine_cs *ring;
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index 04d3a6d8b207..b6fd4c2e8b6e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -70,8 +70,6 @@ static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
 void intel_lr_context_free(struct intel_context *ctx);
 int intel_lr_context_deferred_create(struct intel_context *ctx,
 				     struct intel_engine_cs *ring);
-void intel_lr_context_unpin(struct intel_engine_cs *ring,
-		struct intel_context *ctx);
 void intel_lr_context_reset(struct drm_device *dev,
 			struct intel_context *ctx);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 39f6dfc0ee54..0f0325e88b5a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -97,6 +97,7 @@ struct intel_ring_hangcheck {
 struct intel_ringbuffer {
 	struct drm_i915_gem_object *obj;
 	void __iomem *virtual_start;
+	uint32_t *regs;
 
 	struct intel_engine_cs *ring;
 
-- 
2.1.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

  parent reply	other threads:[~2015-04-07 15:22 UTC|newest]

Thread overview: 113+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-04-07 15:20 Low hanging fruit take 2 Chris Wilson
2015-04-07 15:20 ` [PATCH 01/70] drm/i915: Cache last obj->pages location for i915_gem_object_get_page() Chris Wilson
2015-04-08 11:16   ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 02/70] drm/i915: Fix the flip synchronisation to consider mmioflips Chris Wilson
2015-04-07 15:20 ` [PATCH 03/70] drm/i915: Ensure cache flushes prior to doing CS flips Chris Wilson
2015-04-08 11:23   ` Daniel Vetter
2015-04-08 11:29     ` Chris Wilson
2015-04-07 15:20 ` [PATCH 04/70] drm/i915: Agressive downclocking on Baytrail Chris Wilson
2015-04-07 15:20 ` [PATCH 05/70] drm/i915: Fix computation of last_adjustment for RPS autotuning Chris Wilson
2015-04-07 15:20 ` [PATCH 06/70] drm/i915: Fix race on unreferencing the wrong mmio-flip-request Chris Wilson
2015-04-08 11:30   ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 07/70] drm/i915: Boost GPU frequency if we detect outstanding pageflips Chris Wilson
2015-04-08 11:31   ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 08/70] drm/i915: Deminish contribution of wait-boosting from clients Chris Wilson
2015-04-07 15:20 ` [PATCH 09/70] drm/i915: Re-enable RPS wait-boosting for all engines Chris Wilson
2015-04-07 15:20 ` [PATCH 10/70] drm/i915: Split i915_gem_batch_pool into its own header Chris Wilson
2015-04-07 15:20 ` [PATCH 11/70] drm/i915: Tidy batch pool logic Chris Wilson
2015-04-07 15:20 ` [PATCH 12/70] drm/i915: Split the batch pool by engine Chris Wilson
2015-04-07 15:20 ` [PATCH 13/70] drm/i915: Free batch pool when idle Chris Wilson
2015-04-07 15:20 ` [PATCH 14/70] drm/i915: Split batch pool into size buckets Chris Wilson
2015-04-07 15:20 ` [PATCH 15/70] drm/i915: Include active flag when describing objects in debugfs Chris Wilson
2015-04-08 11:33   ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 16/70] drm/i915: Suppress empty lines from debugfs/i915_gem_objects Chris Wilson
2015-04-08 11:34   ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 17/70] drm/i915: Optimistically spin for the request completion Chris Wilson
2015-04-08 11:39   ` Daniel Vetter
2015-04-08 13:43     ` Rantala, Valtteri
2015-04-08 14:15       ` Daniel Vetter
2015-04-13 11:34   ` Tvrtko Ursulin
2015-04-13 12:25     ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 18/70] drm/i915: Implement inter-engine read-read optimisations Chris Wilson
2015-04-14 13:51   ` Tvrtko Ursulin
2015-04-14 14:00     ` Chris Wilson
2015-04-07 15:20 ` [PATCH 19/70] drm/i915: Inline check required for object syncing prior to execbuf Chris Wilson
2015-04-07 15:20 ` [PATCH 20/70] drm/i915: Limit ring synchronisation (sw sempahores) RPS boosts Chris Wilson
2015-04-08 11:46   ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 21/70] drm/i915: Limit mmio flip " Chris Wilson
2015-04-07 15:20 ` [PATCH 22/70] drm/i915: Reduce frequency of unspecific HSW reg debugging Chris Wilson
2015-04-07 15:20 ` [PATCH 23/70] drm/i915: Record ring->start address in error state Chris Wilson
2015-04-08 11:47   ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 24/70] drm/i915: Use simpler form of spin_lock_irq(execlist_lock) Chris Wilson
2015-04-07 15:20 ` [PATCH 25/70] drm/i915: Use the global runtime-pm wakelock for a busy GPU for execlists Chris Wilson
2015-04-07 15:20 ` Chris Wilson [this message]
2015-04-07 15:20 ` [PATCH 27/70] drm/i915: Remove vestigal DRI1 ring quiescing code Chris Wilson
2015-04-09 15:02   ` Daniel Vetter
2015-04-09 15:24     ` Chris Wilson
2015-04-09 15:31       ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 28/70] drm/i915: Overhaul execlist submission Chris Wilson
2015-04-07 15:20 ` [PATCH 29/70] drm/i915: Move the execlists retirement to the right spot Chris Wilson
2015-04-07 15:20 ` [PATCH 30/70] drm/i915: Map the ringbuffer using WB on LLC machines Chris Wilson
2015-04-07 15:20 ` [PATCH 31/70] drm/i915: Refactor duplicate object vmap functions Chris Wilson
2015-04-07 15:20 ` [PATCH 32/70] drm/i915: Treat ringbuffer writes as write to normal memory Chris Wilson
2015-04-07 15:20 ` [PATCH 33/70] drm/i915: Use a separate slab for requests Chris Wilson
2015-05-22 14:48   ` Robert Beckett
2015-04-07 15:20 ` [PATCH 34/70] drm/i915: Use a separate slab for vmas Chris Wilson
2015-04-10  8:32   ` Daniel Vetter
2015-04-07 15:20 ` [PATCH 35/70] drm/i915: Use the new rq->i915 field where appropriate Chris Wilson
2015-04-07 15:21 ` [PATCH 36/70] drm/i915: Reduce the pointer dance of i915_is_ggtt() Chris Wilson
2015-04-07 15:21 ` [PATCH 37/70] drm/i915: Squash more pointer indirection for i915_is_gtt Chris Wilson
2015-04-07 15:21 ` [PATCH 38/70] drm/i915: Reduce locking in execlist command submission Chris Wilson
2015-04-07 15:21 ` [PATCH 39/70] drm/i915: Reduce more " Chris Wilson
2015-04-07 15:21 ` [PATCH 40/70] drm/i915: Reduce locking in gen8 IRQ handler Chris Wilson
2015-04-07 15:21 ` [PATCH 41/70] drm/i915: Tidy " Chris Wilson
2015-04-10  8:36   ` Daniel Vetter
2015-04-07 15:21 ` [PATCH 42/70] drm/i915: Remove request retirement before each batch Chris Wilson
2015-04-07 15:21 ` [PATCH 43/70] drm/i915: Cache the GGTT offset for the execlists context Chris Wilson
2015-04-07 15:21 ` [PATCH 44/70] drm/i915: Prefer to check for idleness in worker rather than sync-flush Chris Wilson
2015-04-10  8:37   ` Daniel Vetter
2015-04-07 15:21 ` [PATCH 45/70] drm/i915: Remove request->uniq Chris Wilson
2015-04-10  8:38   ` Daniel Vetter
2015-04-07 15:21 ` [PATCH 46/70] drm/i915: Cache the reset_counter for the request Chris Wilson
2015-04-07 15:21 ` [PATCH 47/70] drm/i915: Allocate context objects from stolen Chris Wilson
2015-04-10  8:39   ` Daniel Vetter
2015-04-07 15:21 ` [PATCH 48/70] drm/i915: Introduce an internal allocator for disposable private objects Chris Wilson
2015-04-07 15:21 ` [PATCH 49/70] drm/i915: Do not zero initialise page tables Chris Wilson
2015-04-07 15:21 ` [PATCH 50/70] drm/i915: The argument for postfix is redundant Chris Wilson
2015-04-10  8:53   ` Daniel Vetter
2015-04-10  9:00     ` Chris Wilson
2015-04-10  9:32       ` Daniel Vetter
2015-04-10  9:45         ` Chris Wilson
2015-04-07 15:21 ` [PATCH 51/70] drm/i915: Record the position of the start of the request Chris Wilson
2015-04-07 15:21 ` [PATCH 52/70] drm/i915: Cache the execlist ctx descriptor Chris Wilson
2015-04-07 15:21 ` [PATCH 53/70] drm/i915: Eliminate vmap overhead for cmd parser Chris Wilson
2015-04-07 15:21 ` [PATCH 54/70] drm/i915: Cache last cmd descriptor when parsing Chris Wilson
2015-04-07 15:21 ` [PATCH 55/70] drm/i915: Use WC copies on !llc platforms for the command parser Chris Wilson
2015-04-07 15:21 ` [PATCH 56/70] drm/i915: Cache kmap between relocations Chris Wilson
2015-04-07 15:21 ` [PATCH 57/70] drm/i915: intel_ring_initialized() must be simple and inline Chris Wilson
2015-12-08 15:02   ` [PATCH 0/1] " Dave Gordon
2015-12-08 15:02     ` [PATCH 1/1] " Dave Gordon
2015-12-10 10:24       ` Daniel Vetter
2015-04-07 15:21 ` [PATCH 58/70] drm/i915: Before shrink_all we only need to idle the GPU Chris Wilson
2015-04-07 15:21 ` [PATCH 59/70] drm/i915: Simplify object is-pinned checking for shrinker Chris Wilson
2015-04-07 16:28 ` Chris Wilson
2015-04-07 16:28   ` [PATCH 60/70] drm/i915: Make evict-everything more robust Chris Wilson
2015-04-07 16:28   ` [PATCH 61/70] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
2015-04-14 14:52     ` Tvrtko Ursulin
2015-04-14 15:05       ` Chris Wilson
2015-04-14 15:15         ` Tvrtko Ursulin
2015-04-07 16:28   ` [PATCH 62/70] drm/i915: Reduce locking inside busy ioctl Chris Wilson
2015-04-07 16:28   ` [PATCH 63/70] drm/i915: Reduce locking inside swfinish ioctl Chris Wilson
2015-04-10  9:14     ` Daniel Vetter
2015-04-15  9:03       ` Chris Wilson
2015-04-15  9:33         ` Daniel Vetter
2015-04-15  9:38           ` Chris Wilson
2015-04-07 16:28   ` [PATCH 64/70] drm/i915: Remove pinned check from madvise ioctl Chris Wilson
2015-04-07 16:28   ` [PATCH 65/70] drm/i915: Reduce locking for gen6+ GT interrupts Chris Wilson
2015-04-07 16:28   ` [PATCH 66/70] drm/i915: Remove obj->pin_mappable Chris Wilson
2015-04-13 11:35     ` Tvrtko Ursulin
2015-04-13 12:30       ` Daniel Vetter
2015-04-07 16:28   ` [PATCH 67/70] drm/i915: Start passing around i915_vma from execbuffer Chris Wilson
2015-04-07 16:28   ` [PATCH 68/70] drm/i915: Simplify vma-walker for i915_gem_objects Chris Wilson
2015-04-07 16:28   ` [PATCH 69/70] drm/i915: Skip holding an object reference for execbuf preparation Chris Wilson
2015-04-07 16:28   ` [PATCH 70/70] drm/i915: Use vma as the primary token for managing binding Chris Wilson

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=1428420094-18352-27-git-send-email-chris@chris-wilson.co.uk \
    --to=chris@chris-wilson.co.uk \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.