intel-gfx.lists.freedesktop.org archive mirror
* next tidbits - use LLC on SNB
@ 2011-03-22 13:51 Chris Wilson
  2011-03-22 13:51 ` [PATCH 01/15] drm/i915: Enable use of GPU semaphores to sync page-flips " Chris Wilson
                   ` (14 more replies)
  0 siblings, 15 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

I had some fun playing around with Eric's patch to enable use of the LLC on
SandyBridge and built up a fair number of complementary changes.
-Chris
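
For orientation, here is the headline interface the series converges on, as a
condensed sketch (the helper name alloc_snooped_bo is hypothetical; the
functions and cache levels it uses are the ones introduced in patches 04 and
06, and error handling is trimmed):

	/* Sketch: allocate a bo and let the GPU snoop it through the LLC. */
	static struct drm_i915_gem_object *
	alloc_snooped_bo(struct drm_device *dev, size_t size)
	{
		struct drm_i915_gem_object *obj;

		obj = i915_gem_alloc_object(dev, size);	/* starts I915_CACHE_NONE */
		if (obj == NULL)
			return NULL;

		/* Switch to the LLC: CPU accesses become snooped, so the
		 * clflush is skipped (patch 07) and the GTT bind uses
		 * AGP_USER_CACHED_MEMORY PTEs (patch 04). */
		if (i915_gem_object_set_cache_level(obj, I915_CACHE_LLC)) {
			drm_gem_object_unreference(&obj->base);
			return NULL;
		}

		return obj;
	}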


* [PATCH 01/15] drm/i915: Enable use of GPU semaphores to sync page-flips on SNB
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 20:44   ` Keith Packard
  2011-03-22 13:51 ` [PATCH 02/15] drm/i915: Accurately track flushed domains Chris Wilson
                   ` (13 subsequent siblings)
  14 siblings, 1 reply; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h            |    9 ++++-
 drivers/gpu/drm/i915/i915_gem.c            |   52 ++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   43 +----------------------
 drivers/gpu/drm/i915/intel_display.c       |    6 +++-
 4 files changed, 55 insertions(+), 55 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index bcbcb53..8ebf946 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -925,6 +925,7 @@ enum intel_chip_family {
 #define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
 #define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
+#define HAS_GPU_SEMAPHORES(dev) (INTEL_INFO(dev)->gen >= 6 && i915_semaphores)
 
 #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
@@ -1169,8 +1170,8 @@ int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
 				  bool write);
 int __must_check
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
-				     struct intel_ring_buffer *pipelined);
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj);
+
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				int id,
@@ -1180,6 +1181,10 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
+int __must_check
+i915_gem_object_move_to_ring(struct drm_i915_gem_object *obj,
+			     struct intel_ring_buffer *to);
+
 uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 45187e0..aa8a1b1 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1612,6 +1612,47 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 	obj->pages = NULL;
 }
 
+int
+i915_gem_object_move_to_ring(struct drm_i915_gem_object *obj,
+			     struct intel_ring_buffer *to)
+{
+	struct intel_ring_buffer *from = obj->ring;
+	u32 seqno;
+	int ret, idx;
+
+	if (from == NULL || to == from)
+		return 0;
+
+	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
+	if (to == NULL || !HAS_GPU_SEMAPHORES(obj->base.dev))
+		return i915_gem_object_wait_rendering(obj);
+
+	idx = intel_ring_sync_index(from, to);
+
+	seqno = obj->last_rendering_seqno;
+	if (seqno <= from->sync_seqno[idx])
+		return 0;
+
+	if (seqno == from->outstanding_lazy_request) {
+		struct drm_i915_gem_request *request;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request == NULL)
+			return -ENOMEM;
+
+		ret = i915_add_request(from, NULL, request);
+		if (ret) {
+			kfree(request);
+			return ret;
+		}
+
+		seqno = request->seqno;
+	}
+
+	from->sync_seqno[idx] = seqno;
+	return intel_ring_sync(to, from, seqno - 1);
+}
+
 void
 i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 			       struct intel_ring_buffer *ring,
@@ -3007,8 +3048,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
  * wait, as in modesetting process we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
-				     struct intel_ring_buffer *pipelined)
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj)
 {
 	uint32_t old_read_domains;
 	int ret;
@@ -3021,14 +3061,6 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-
-	/* Currently, we are always called from an non-interruptible context. */
-	if (pipelined != obj->ring) {
-		ret = i915_gem_object_wait_rendering(obj);
-		if (ret)
-			return ret;
-	}
-
 	i915_gem_object_flush_cpu_write_domain(obj);
 
 	old_read_domains = obj->base.read_domains;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f98a213..ae24eb9 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -739,47 +739,6 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
 }
 
 static int
-i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
-			       struct intel_ring_buffer *to)
-{
-	struct intel_ring_buffer *from = obj->ring;
-	u32 seqno;
-	int ret, idx;
-
-	if (from == NULL || to == from)
-		return 0;
-
-	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
-	if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
-		return i915_gem_object_wait_rendering(obj);
-
-	idx = intel_ring_sync_index(from, to);
-
-	seqno = obj->last_rendering_seqno;
-	if (seqno <= from->sync_seqno[idx])
-		return 0;
-
-	if (seqno == from->outstanding_lazy_request) {
-		struct drm_i915_gem_request *request;
-
-		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request == NULL)
-			return -ENOMEM;
-
-		ret = i915_add_request(from, NULL, request);
-		if (ret) {
-			kfree(request);
-			return ret;
-		}
-
-		seqno = request->seqno;
-	}
-
-	from->sync_seqno[idx] = seqno;
-	return intel_ring_sync(to, from, seqno - 1);
-}
-
-static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
 	u32 plane, flip_mask;
@@ -840,7 +799,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	}
 
 	list_for_each_entry(obj, objects, exec_list) {
-		ret = i915_gem_execbuffer_sync_rings(obj, ring);
+		ret = i915_gem_object_move_to_ring(obj, ring);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 432fc04..3b0733d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2099,7 +2099,11 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	if (ret)
 		goto err_interruptible;
 
-	ret = i915_gem_object_set_to_display_plane(obj, pipelined);
+	ret = i915_gem_object_set_to_display_plane(obj);
+	if (ret)
+		goto err_unpin;
+
+	ret = i915_gem_object_move_to_ring(obj, pipelined);
 	if (ret)
 		goto err_unpin;
 
-- 
1.7.4.1


* [PATCH 02/15] drm/i915: Accurately track flushed domains
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
  2011-03-22 13:51 ` [PATCH 01/15] drm/i915: Enable use of GPU semaphores to sync page-flips " Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 21:07   ` Keith Packard
  2011-03-22 13:51 ` [PATCH 03/15] drm/i915: Track last read/write seqno independently Chris Wilson
                   ` (12 subsequent siblings)
  14 siblings, 1 reply; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

---
 drivers/gpu/drm/i915/i915_drv.h            |   10 +-
 drivers/gpu/drm/i915/i915_gem.c            |  166 +++++++++++++++++-----------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   33 +-----
 drivers/gpu/drm/i915/intel_overlay.c       |    4 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    |   68 ++++--------
 drivers/gpu/drm/i915/intel_ringbuffer.h    |    7 +-
 6 files changed, 138 insertions(+), 150 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8ebf946..ed970bd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1134,18 +1134,21 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 	return (int32_t)(seq1 - seq2) >= 0;
 }
 
+u32 __i915_gem_get_seqno(struct drm_device *dev);
+
 static inline u32
 i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = ring->dev->dev_private;
-	return ring->outstanding_lazy_request = dev_priv->next_seqno;
+	if (ring->outstanding_lazy_request == 0)
+		ring->outstanding_lazy_request = __i915_gem_get_seqno(ring->dev);
+	return ring->outstanding_lazy_request;
 }
 
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
 					   struct intel_ring_buffer *pipelined);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
-void i915_gem_retire_requests(struct drm_device *dev);
+bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
@@ -1161,7 +1164,6 @@ void i915_gem_do_init(struct drm_device *dev,
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
-				  struct drm_file *file,
 				  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
 				   uint32_t seqno);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aa8a1b1..d8a0f7b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1640,13 +1640,11 @@ i915_gem_object_move_to_ring(struct drm_i915_gem_object *obj,
 		if (request == NULL)
 			return -ENOMEM;
 
-		ret = i915_add_request(from, NULL, request);
+		ret = i915_add_request(from, request);
 		if (ret) {
 			kfree(request);
 			return ret;
 		}
-
-		seqno = request->seqno;
 	}
 
 	from->sync_seqno[idx] = seqno;
@@ -1759,11 +1757,12 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 	return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
+static u32
 i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
 			       uint32_t flush_domains)
 {
 	struct drm_i915_gem_object *obj, *next;
+	u32 seqno = 0;
 
 	list_for_each_entry_safe(obj, next,
 				 &ring->gpu_write_list,
@@ -1771,21 +1770,37 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
 		if (obj->base.write_domain & flush_domains) {
 			uint32_t old_write_domain = obj->base.write_domain;
 
+			seqno = i915_gem_next_request_seqno(ring);
+
 			obj->base.write_domain = 0;
 			list_del_init(&obj->gpu_write_list);
-			i915_gem_object_move_to_active(obj, ring,
-						       i915_gem_next_request_seqno(ring));
+			i915_gem_object_move_to_active(obj, ring, seqno);
 
 			trace_i915_gem_object_change_domain(obj,
 							    obj->base.read_domains,
 							    old_write_domain);
 		}
 	}
+
+	return seqno;
+}
+
+u32 __i915_gem_get_seqno(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 seqno;
+
+	seqno = dev_priv->next_seqno;
+
+	/* reserve 0 for non-seqno */
+	if (++dev_priv->next_seqno == 0)
+		dev_priv->next_seqno = 1;
+
+	return seqno;
 }
 
 int
 i915_add_request(struct intel_ring_buffer *ring,
-		 struct drm_file *file,
 		 struct drm_i915_gem_request *request)
 {
 	drm_i915_private_t *dev_priv = ring->dev->dev_private;
@@ -1793,11 +1808,10 @@ i915_add_request(struct intel_ring_buffer *ring,
 	int was_empty;
 	int ret;
 
-	BUG_ON(request == NULL);
-
-	ret = ring->add_request(ring, &seqno);
+	seqno = ring->outstanding_lazy_request;
+	ret = ring->add_request(ring, seqno);
 	if (ret)
-	    return ret;
+		return ret;
 
 	trace_i915_gem_request_add(ring, seqno);
 
@@ -1807,17 +1821,18 @@ i915_add_request(struct intel_ring_buffer *ring,
 	was_empty = list_empty(&ring->request_list);
 	list_add_tail(&request->list, &ring->request_list);
 
-	if (file) {
-		struct drm_i915_file_private *file_priv = file->driver_priv;
+	if (ring->lazy_dispatch) {
+		struct drm_i915_file_private *file;
+
+		file = ring->lazy_dispatch->driver_priv;
 
-		spin_lock(&file_priv->mm.lock);
-		request->file_priv = file_priv;
-		list_add_tail(&request->client_list,
-			      &file_priv->mm.request_list);
-		spin_unlock(&file_priv->mm.lock);
+		spin_lock(&file->mm.lock);
+		request->file_priv = file;
+		list_add_tail(&request->client_list, &file->mm.request_list);
+		spin_unlock(&file->mm.lock);
 	}
 
-	ring->outstanding_lazy_request = false;
+	ring->outstanding_lazy_request = 0;
 
 	if (!dev_priv->mm.suspended) {
 		mod_timer(&dev_priv->hangcheck_timer,
@@ -1932,17 +1947,50 @@ void i915_gem_reset(struct drm_device *dev)
 	i915_gem_reset_fences(dev);
 }
 
+static bool
+i915_ring_outstanding_dispatch(struct intel_ring_buffer *ring)
+{
+	u32 last_request, last_dispatch;
+
+	if (!ring->outstanding_lazy_request)
+		return false;
+
+	if (list_empty(&ring->active_list))
+		return false;
+
+	if (list_empty(&ring->request_list))
+		return false;
+
+	last_request = list_entry(ring->request_list.prev,
+				  struct drm_i915_gem_request,
+				  list)->seqno;
+
+	last_dispatch = list_entry(ring->active_list.prev,
+				   struct drm_i915_gem_object,
+				   ring_list)->last_rendering_seqno;
+
+	return !i915_seqno_passed(last_request, last_dispatch);
+}
+
 /**
  * This function clears the request list as sequence numbers are passed.
  */
-static void
+static bool
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
 	uint32_t seqno;
 	int i;
 
+	if (i915_ring_outstanding_dispatch(ring)) {
+		struct drm_i915_gem_request *request;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request && i915_add_request(ring, request))
+			kfree(request);
+	}
+
 	if (list_empty(&ring->request_list))
-		return;
+		return false;
 
 	WARN_ON(i915_verify_lists(ring->dev));
 
@@ -1995,12 +2043,15 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 	}
 
 	WARN_ON(i915_verify_lists(ring->dev));
+
+	return !list_empty(&ring->active_list);
 }
 
-void
+bool
 i915_gem_retire_requests(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	bool active = false;
 	int i;
 
 	if (!list_empty(&dev_priv->mm.deferred_free_list)) {
@@ -2018,7 +2069,9 @@ i915_gem_retire_requests(struct drm_device *dev)
 	}
 
 	for (i = 0; i < I915_NUM_RINGS; i++)
-		i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+		active |= i915_gem_retire_requests_ring(&dev_priv->ring[i]);
+
+	return active;
 }
 
 static void
@@ -2026,7 +2079,6 @@ i915_gem_retire_work_handler(struct work_struct *work)
 {
 	drm_i915_private_t *dev_priv;
 	struct drm_device *dev;
-	bool idle;
 	int i;
 
 	dev_priv = container_of(work, drm_i915_private_t,
@@ -2039,31 +2091,20 @@ i915_gem_retire_work_handler(struct work_struct *work)
 		return;
 	}
 
-	i915_gem_retire_requests(dev);
-
 	/* Send a periodic flush down the ring so we don't hold onto GEM
 	 * objects indefinitely.
 	 */
-	idle = true;
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		struct intel_ring_buffer *ring = &dev_priv->ring[i];
+		int ret;
 
-		if (!list_empty(&ring->gpu_write_list)) {
-			struct drm_i915_gem_request *request;
-			int ret;
-
+		if (!list_empty(&ring->gpu_write_list))
 			ret = i915_gem_flush_ring(ring,
 						  0, I915_GEM_GPU_DOMAINS);
-			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (ret || request == NULL ||
-			    i915_add_request(ring, NULL, request))
-			    kfree(request);
-		}
-
-		idle &= list_empty(&ring->request_list);
+		(void)ret;
 	}
 
-	if (!dev_priv->mm.suspended && !idle)
+	if (i915_gem_retire_requests(dev))
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 
 	mutex_unlock(&dev->struct_mutex);
@@ -2103,13 +2144,11 @@ i915_wait_request(struct intel_ring_buffer *ring,
 		if (request == NULL)
 			return -ENOMEM;
 
-		ret = i915_add_request(ring, NULL, request);
+		ret = i915_add_request(ring, request);
 		if (ret) {
 			kfree(request);
 			return ret;
 		}
-
-		seqno = request->seqno;
 	}
 
 	if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
@@ -2254,23 +2293,32 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 }
 
 int
-i915_gem_flush_ring(struct intel_ring_buffer *ring,
-		    uint32_t invalidate_domains,
-		    uint32_t flush_domains)
+i915_gem_flush_ring(struct intel_ring_buffer *ring, u32 invalidate, u32 flush)
 {
 	int ret;
 
-	if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
+	if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
 		return 0;
 
-	trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
+	trace_i915_gem_ring_flush(ring, invalidate, flush);
 
-	ret = ring->flush(ring, invalidate_domains, flush_domains);
+	ret = ring->flush(ring, &invalidate, &flush);
 	if (ret)
 		return ret;
 
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		i915_gem_process_flushing_list(ring, flush_domains);
+	if (flush & I915_GEM_GPU_DOMAINS &&
+	    i915_gem_process_flushing_list(ring, flush)) {
+		struct drm_i915_gem_request *request;
+
+		request = kzalloc(sizeof(*request), GFP_KERNEL);
+		if (request) {
+			ret = i915_add_request(ring, request);
+			if (ret) {
+				kfree(request);
+				return ret;
+			}
+		}
+	}
 
 	return 0;
 }
@@ -2283,8 +2331,7 @@ static int i915_ring_idle(struct intel_ring_buffer *ring)
 		return 0;
 
 	if (!list_empty(&ring->gpu_write_list)) {
-		ret = i915_gem_flush_ring(ring,
-				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+		ret = i915_gem_flush_ring(ring, 0, I915_GEM_GPU_DOMAINS);
 		if (ret)
 			return ret;
 	}
@@ -3501,22 +3548,9 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		 * use this buffer rather sooner than later, so issuing the required
 		 * flush earlier is beneficial.
 		 */
-		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+		if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
 			ret = i915_gem_flush_ring(obj->ring,
 						  0, obj->base.write_domain);
-		} else if (obj->ring->outstanding_lazy_request ==
-			   obj->last_rendering_seqno) {
-			struct drm_i915_gem_request *request;
-
-			/* This ring is not being cleared by active usage,
-			 * so emit a request to do so.
-			 */
-			request = kzalloc(sizeof(*request), GFP_KERNEL);
-			if (request)
-				ret = i915_add_request(obj->ring, NULL,request);
-			else
-				ret = -ENOMEM;
-		}
 
 		/* Update the active list for the hardware's current position.
 		 * Otherwise this only updates on a delayed timer or when irqs
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index ae24eb9..60aaf99 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -873,36 +873,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 	}
 }
 
-static void
-i915_gem_execbuffer_retire_commands(struct drm_device *dev,
-				    struct drm_file *file,
-				    struct intel_ring_buffer *ring)
-{
-	struct drm_i915_gem_request *request;
-	u32 invalidate;
-
-	/*
-	 * Ensure that the commands in the batch buffer are
-	 * finished before the interrupt fires.
-	 *
-	 * The sampler always gets flushed on i965 (sigh).
-	 */
-	invalidate = I915_GEM_DOMAIN_COMMAND;
-	if (INTEL_INFO(dev)->gen >= 4)
-		invalidate |= I915_GEM_DOMAIN_SAMPLER;
-	if (ring->flush(ring, invalidate, 0)) {
-		i915_gem_next_request_seqno(ring);
-		return;
-	}
-
-	/* Add a breadcrumb for the completion of the batch buffer */
-	request = kzalloc(sizeof(*request), GFP_KERNEL);
-	if (request == NULL || i915_add_request(ring, file, request)) {
-		i915_gem_next_request_seqno(ring);
-		kfree(request);
-	}
-}
-
 static int
 i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		       struct drm_file *file,
@@ -1089,6 +1059,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 	}
 	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+	batch_obj->base.pending_write_domain = I915_GEM_DOMAIN_COMMAND;
 
 	ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
 	if (ret)
@@ -1132,7 +1103,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
-	i915_gem_execbuffer_retire_commands(dev, file, ring);
+	ring->lazy_dispatch = file;
 
 err:
 	eb_destroy(eb);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a670c00..25edf0e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -220,7 +220,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 	int ret;
 
 	BUG_ON(overlay->last_flip_req);
-	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+	ret = i915_add_request(LP_RING(dev_priv), request);
 	if (ret) {
 	    kfree(request);
 	    return ret;
@@ -361,7 +361,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	OUT_RING(flip_addr);
         ADVANCE_LP_RING();
 
-	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
+	ret = i915_add_request(LP_RING(dev_priv), request);
 	if (ret) {
 		kfree(request);
 		return ret;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e9e6f71..cd34e43 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -42,24 +42,9 @@ static inline int ring_space(struct intel_ring_buffer *ring)
 	return space;
 }
 
-static u32 i915_gem_get_seqno(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 seqno;
-
-	seqno = dev_priv->next_seqno;
-
-	/* reserve 0 for non-seqno */
-	if (++dev_priv->next_seqno == 0)
-		dev_priv->next_seqno = 1;
-
-	return seqno;
-}
-
 static int
 render_ring_flush(struct intel_ring_buffer *ring,
-		  u32	invalidate_domains,
-		  u32	flush_domains)
+		  u32 *invalidate, u32 *flush)
 {
 	struct drm_device *dev = ring->dev;
 	u32 cmd;
@@ -94,21 +79,23 @@ render_ring_flush(struct intel_ring_buffer *ring,
 	 */
 
 	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
-	if ((invalidate_domains|flush_domains) &
-	    I915_GEM_DOMAIN_RENDER)
+	if ((*invalidate | *flush) & I915_GEM_DOMAIN_RENDER) {
 		cmd &= ~MI_NO_WRITE_FLUSH;
+		*flush |= I915_GEM_DOMAIN_RENDER;
+	}
+
 	if (INTEL_INFO(dev)->gen < 4) {
 		/*
 		 * On the 965, the sampler cache always gets flushed
 		 * and this bit is reserved.
 		 */
-		if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+		if (*invalidate & I915_GEM_DOMAIN_SAMPLER)
 			cmd |= MI_READ_FLUSH;
 	}
-	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+	if (*invalidate & I915_GEM_DOMAIN_INSTRUCTION)
 		cmd |= MI_EXE_FLUSH;
 
-	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+	if (*invalidate & I915_GEM_DOMAIN_COMMAND &&
 	    (IS_G4X(dev) || IS_GEN5(dev)))
 		cmd |= MI_INVALIDATE_ISP;
 
@@ -120,6 +107,7 @@ render_ring_flush(struct intel_ring_buffer *ring,
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
 
+	*flush |= I915_GEM_DOMAIN_COMMAND;
 	return 0;
 }
 
@@ -336,16 +324,14 @@ update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
 
 static int
 gen6_add_request(struct intel_ring_buffer *ring,
-		 u32 *result)
+		 u32 seqno)
 {
-	u32 seqno;
 	int ret;
 
 	ret = intel_ring_begin(ring, 10);
 	if (ret)
 		return ret;
 
-	seqno = i915_gem_get_seqno(ring->dev);
 	update_semaphore(ring, 0, seqno);
 	update_semaphore(ring, 1, seqno);
 
@@ -355,7 +341,6 @@ gen6_add_request(struct intel_ring_buffer *ring,
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
 
-	*result = seqno;
 	return 0;
 }
 
@@ -394,10 +379,8 @@ do {									\
 
 static int
 pc_render_add_request(struct intel_ring_buffer *ring,
-		      u32 *result)
+		      u32 seqno)
 {
-	struct drm_device *dev = ring->dev;
-	u32 seqno = i915_gem_get_seqno(dev);
 	struct pipe_control *pc = ring->private;
 	u32 scratch_addr = pc->gtt_offset + 128;
 	int ret;
@@ -438,16 +421,13 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 	intel_ring_emit(ring, 0);
 	intel_ring_advance(ring);
 
-	*result = seqno;
 	return 0;
 }
 
 static int
 render_ring_add_request(struct intel_ring_buffer *ring,
-			u32 *result)
+			u32 seqno)
 {
-	struct drm_device *dev = ring->dev;
-	u32 seqno = i915_gem_get_seqno(dev);
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
@@ -460,7 +440,6 @@ render_ring_add_request(struct intel_ring_buffer *ring,
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
 
-	*result = seqno;
 	return 0;
 }
 
@@ -561,8 +540,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 
 static int
 bsd_ring_flush(struct intel_ring_buffer *ring,
-	       u32     invalidate_domains,
-	       u32     flush_domains)
+	       u32 *invalidate, u32 *flush)
 {
 	int ret;
 
@@ -573,29 +551,27 @@ bsd_ring_flush(struct intel_ring_buffer *ring,
 	intel_ring_emit(ring, MI_FLUSH);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
+
+	*flush |= I915_GEM_GPU_DOMAINS;
 	return 0;
 }
 
 static int
 ring_add_request(struct intel_ring_buffer *ring,
-		 u32 *result)
+		 u32 seqno)
 {
-	u32 seqno;
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
 	if (ret)
 		return ret;
 
-	seqno = i915_gem_get_seqno(ring->dev);
-
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
 	intel_ring_emit(ring, seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
 
-	*result = seqno;
 	return 0;
 }
 
@@ -1046,7 +1022,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 }
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
-			   u32 invalidate, u32 flush)
+			   u32 *invalidate, u32 *flush)
 {
 	uint32_t cmd;
 	int ret;
@@ -1056,13 +1032,15 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (invalidate & I915_GEM_GPU_DOMAINS)
+	if (*invalidate & I915_GEM_GPU_DOMAINS)
 		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
+
+	*flush |= I915_GEM_GPU_DOMAINS;
 	return 0;
 }
 
@@ -1217,7 +1195,7 @@ static int blt_ring_begin(struct intel_ring_buffer *ring,
 }
 
 static int blt_ring_flush(struct intel_ring_buffer *ring,
-			  u32 invalidate, u32 flush)
+			  u32 *invalidate, u32 *flush)
 {
 	uint32_t cmd;
 	int ret;
@@ -1227,13 +1205,15 @@ static int blt_ring_flush(struct intel_ring_buffer *ring,
 		return ret;
 
 	cmd = MI_FLUSH_DW;
-	if (invalidate & I915_GEM_DOMAIN_RENDER)
+	if (*invalidate & I915_GEM_DOMAIN_RENDER)
 		cmd |= MI_INVALIDATE_TLB;
 	intel_ring_emit(ring, cmd);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, 0);
 	intel_ring_emit(ring, MI_NOOP);
 	intel_ring_advance(ring);
+
+	*flush |= I915_GEM_GPU_DOMAINS;
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index f23cc5f..0241f7f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -70,10 +70,10 @@ struct  intel_ring_buffer {
 	void		(*write_tail)(struct intel_ring_buffer *ring,
 				      u32 value);
 	int __must_check (*flush)(struct intel_ring_buffer *ring,
-				  u32	invalidate_domains,
-				  u32	flush_domains);
+				  u32	*invalidate,
+				  u32	*flush);
 	int		(*add_request)(struct intel_ring_buffer *ring,
-				       u32 *seqno);
+				       u32 seqno);
 	u32		(*get_seqno)(struct intel_ring_buffer *ring);
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 					       u32 offset, u32 length);
@@ -110,6 +110,7 @@ struct  intel_ring_buffer {
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	u32 outstanding_lazy_request;
+	struct drm_file *lazy_dispatch;
 
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
-- 
1.7.4.1


* [PATCH 03/15] drm/i915: Track last read/write seqno independently
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
  2011-03-22 13:51 ` [PATCH 01/15] drm/i915: Enable use of GPU semaphores to sync page-flips " Chris Wilson
  2011-03-22 13:51 ` [PATCH 02/15] drm/i915: Accurately track flushed domains Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-23 17:16   ` Eric Anholt
  2011-03-22 13:51 ` [PATCH 04/15] drm/i915: Rename agp_type to cache_level Chris Wilson
                   ` (11 subsequent siblings)
  14 siblings, 1 reply; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |    5 ++-
 drivers/gpu/drm/i915/i915_drv.h            |   17 +++------
 drivers/gpu/drm/i915/i915_gem.c            |   55 +++++++++++++++++++---------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |    1 -
 drivers/gpu/drm/i915/i915_irq.c            |    4 +-
 5 files changed, 48 insertions(+), 34 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 87c8e29..2aedceb 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -118,14 +118,15 @@ static const char *agp_type_str(int type)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
+	seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d %d%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
 		   get_tiling_flag(obj),
 		   obj->base.size,
 		   obj->base.read_domains,
 		   obj->base.write_domain,
-		   obj->last_rendering_seqno,
+		   obj->last_read_seqno,
+		   obj->last_write_seqno,
 		   obj->last_fenced_seqno,
 		   agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
 		   obj->dirty ? " dirty" : "",
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ed970bd..967a599 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -554,7 +554,7 @@ typedef struct drm_i915_private {
 		 * List of objects currently involved in rendering.
 		 *
 		 * Includes buffers having the contents of their GPU caches
-		 * flushed, not necessarily primitives.  last_rendering_seqno
+		 * flushed, not necessarily primitives.  last_read_seqno
 		 * represents when the rendering involved will be completed.
 		 *
 		 * A reference is held on the buffer while on this list.
@@ -566,7 +566,7 @@ typedef struct drm_i915_private {
 		 * still have a write_domain which needs to be flushed before
 		 * unbinding.
 		 *
-		 * last_rendering_seqno is 0 while an object is in this list.
+		 * last_read_seqno is 0 while an object is in this list.
 		 *
 		 * A reference is held on the buffer while on this list.
 		 */
@@ -576,7 +576,7 @@ typedef struct drm_i915_private {
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
 		 *
-		 * last_rendering_seqno is 0 while an object is in this list.
+		 * All seqno are 0 while an object is in this list.
 		 *
 		 * A reference is not held on the buffer while on this list,
 		 * as merely being GTT-bound shouldn't prevent its being
@@ -734,12 +734,6 @@ struct drm_i915_gem_object {
 	unsigned int dirty : 1;
 
 	/**
-	 * This is set if the object has been written to since the last
-	 * GPU flush.
-	 */
-	unsigned int pending_gpu_write : 1;
-
-	/**
 	 * Fence register bits (if any) for this object.  Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
@@ -814,7 +808,8 @@ struct drm_i915_gem_object {
 	uint32_t gtt_offset;
 
 	/** Breadcrumb of last rendering to the buffer. */
-	uint32_t last_rendering_seqno;
+	uint32_t last_read_seqno;
+	uint32_t last_write_seqno;
 	struct intel_ring_buffer *ring;
 
 	/** Breadcrumb of last fenced GPU access to the buffer. */
@@ -859,7 +854,7 @@ struct drm_i915_gem_object {
  * and may be associated with active buffers to be retired.
  *
  * By keeping this list, we can avoid having to do questionable
- * sequence-number comparisons on buffer last_rendering_seqnos, and associate
+ * sequence-number comparisons on buffer last_read_seqnos, and associate
  * an emission time with seqnos for tracking how far ahead of the GPU we are.
  */
 struct drm_i915_gem_request {
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d8a0f7b..f63d33c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1623,13 +1623,17 @@ i915_gem_object_move_to_ring(struct drm_i915_gem_object *obj,
 	if (from == NULL || to == from)
 		return 0;
 
-	/* XXX gpu semaphores are implicated in various hard hangs on SNB */
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
+	if (ret)
+		return ret;
+
 	if (to == NULL || !HAS_GPU_SEMAPHORES(obj->base.dev))
-		return i915_gem_object_wait_rendering(obj);
+		return i915_wait_request(obj->ring,
+					 obj->last_read_seqno);
 
 	idx = intel_ring_sync_index(from, to);
 
-	seqno = obj->last_rendering_seqno;
+	seqno = obj->last_read_seqno;
 	if (seqno <= from->sync_seqno[idx])
 		return 0;
 
@@ -1672,7 +1676,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
-	obj->last_rendering_seqno = seqno;
+	obj->last_read_seqno = seqno;
 	if (obj->fenced_gpu_access) {
 		struct drm_i915_fence_reg *reg;
 
@@ -1689,7 +1693,7 @@ static void
 i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
 {
 	list_del_init(&obj->ring_list);
-	obj->last_rendering_seqno = 0;
+	obj->last_read_seqno = 0;
 }
 
 static void
@@ -1722,10 +1726,10 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	i915_gem_object_move_off_active(obj);
 	obj->fenced_gpu_access = false;
 
+	obj->last_write_seqno = 0;
 	obj->last_fenced_seqno = 0;
 
 	obj->active = 0;
-	obj->pending_gpu_write = false;
 	drm_gem_object_unreference(&obj->base);
 
 	WARN_ON(i915_verify_lists(dev));
@@ -1773,6 +1777,7 @@ i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
 			seqno = i915_gem_next_request_seqno(ring);
 
 			obj->base.write_domain = 0;
+			obj->last_write_seqno = seqno;
 			list_del_init(&obj->gpu_write_list);
 			i915_gem_object_move_to_active(obj, ring, seqno);
 
@@ -1967,7 +1972,7 @@ i915_ring_outstanding_dispatch(struct intel_ring_buffer *ring)
 
 	last_dispatch = list_entry(ring->active_list.prev,
 				   struct drm_i915_gem_object,
-				   ring_list)->last_rendering_seqno;
+				   ring_list)->last_read_seqno;
 
 	return !i915_seqno_passed(last_request, last_dispatch);
 }
@@ -2027,7 +2032,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 				      struct drm_i915_gem_object,
 				      ring_list);
 
-		if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
+		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
 			break;
 
 		if (obj->base.write_domain != 0)
@@ -2222,7 +2227,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
 	 * it.
 	 */
 	if (obj->active) {
-		ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
+		ret = i915_wait_request(obj->ring, obj->last_read_seqno);
 		if (ret)
 			return ret;
 	}
@@ -2565,7 +2570,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
 			return ret;
 
 		/* Since last_fence_seqno can retire much earlier than
-		 * last_rendering_seqno, we track that here for efficiency.
+		 * last_read_seqno, we track that here for efficiency.
 		 * (With a catch-all in move_to_inactive() to prevent very
 		 * old seqno from lying around.)
 		 */
@@ -3048,6 +3053,7 @@ int
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	uint32_t old_write_domain, old_read_domains;
+	u32 seqno;
 	int ret;
 
 	/* Not valid to be called on unbound objects. */
@@ -3061,10 +3067,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	if (obj->pending_gpu_write || write) {
-		ret = i915_gem_object_wait_rendering(obj);
+	seqno = write ? obj->last_read_seqno : obj->last_write_seqno;
+	if (seqno) {
+		ret = i915_wait_request(obj->ring, seqno);
 		if (ret)
 			return ret;
+
+		obj->last_write_seqno = 0;
 	}
 
 	i915_gem_object_flush_cpu_write_domain(obj);
@@ -3147,6 +3156,7 @@ static int
 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 {
 	uint32_t old_write_domain, old_read_domains;
+	u32 seqno;
 	int ret;
 
 	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
@@ -3156,9 +3166,14 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret)
-		return ret;
+	seqno = write ? obj->last_read_seqno : obj->last_write_seqno;
+	if (seqno) {
+		ret = i915_wait_request(obj->ring, seqno);
+		if (ret)
+			return ret;
+
+		obj->last_write_seqno = 0;
+	}
 
 	i915_gem_object_flush_gtt_write_domain(obj);
 
@@ -3254,9 +3269,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
 	if (ret)
 		return ret;
 
-	ret = i915_gem_object_wait_rendering(obj);
-	if (ret)
-		return ret;
+	if (obj->last_write_seqno) {
+		ret = i915_wait_request(obj->ring, obj->last_write_seqno);
+		if (ret)
+			return ret;
+
+		obj->last_write_seqno = 0;
+	}
 
 	i915_gem_object_flush_gtt_write_domain(obj);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 60aaf99..3c54911 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -863,7 +863,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
 		i915_gem_object_move_to_active(obj, ring, seqno);
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
-			obj->pending_gpu_write = true;
 			list_move_tail(&obj->gpu_write_list,
 				       &ring->gpu_write_list);
 			intel_mark_busy(ring->dev, obj);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 188b497..c1e4368 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -662,7 +662,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
 	list_for_each_entry(obj, head, mm_list) {
 		err->size = obj->base.size;
 		err->name = obj->base.name;
-		err->seqno = obj->last_rendering_seqno;
+		err->seqno = obj->last_read_seqno;
 		err->gtt_offset = obj->gtt_offset;
 		err->read_domains = obj->base.read_domains;
 		err->write_domain = obj->base.write_domain;
@@ -731,7 +731,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 		if (obj->ring != ring)
 			continue;
 
-		if (i915_seqno_passed(seqno, obj->last_rendering_seqno))
+		if (i915_seqno_passed(seqno, obj->last_read_seqno))
 			continue;
 
 		if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
-- 
1.7.4.1


* [PATCH 04/15] drm/i915: Rename agp_type to cache_level
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (2 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 03/15] drm/i915: Track last read/write seqno independently Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:51 ` [PATCH 05/15] drm/i915: Mark the cursor and the overlay as being part of the display planes Chris Wilson
                   ` (10 subsequent siblings)
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

... to clarify just how we use it inside the driver. We still need to
translate through agp_type at the interface to the fake AGP driver; the
mapping is summarised after the patch.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_debugfs.c     |   11 ++++++-----
 drivers/gpu/drm/i915/i915_drv.h         |   12 +++++++++---
 drivers/gpu/drm/i915/i915_gem.c         |    2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c     |   22 ++++++++++++++++------
 drivers/gpu/drm/i915/i915_irq.c         |    2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c |    4 ++--
 6 files changed, 35 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 2aedceb..a635df0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -106,11 +106,12 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
     }
 }
 
-static const char *agp_type_str(int type)
+static const char *cache_level_str(int type)
 {
 	switch (type) {
-	case 0: return " uncached";
-	case 1: return " snooped";
+	case I915_CACHE_NONE: return " uncached";
+	case I915_CACHE_LLC: return " snooped (LLC)";
+	case I915_CACHE_LLC_MLC: return " snooped (LLC+MLC)";
 	default: return "";
 	}
 }
@@ -128,7 +129,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->last_read_seqno,
 		   obj->last_write_seqno,
 		   obj->last_fenced_seqno,
-		   agp_type_str(obj->agp_type == AGP_USER_CACHED_MEMORY),
+		   cache_level_str(obj->cache_level),
 		   obj->dirty ? " dirty" : "",
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
@@ -715,7 +716,7 @@ static void print_error_buffers(struct seq_file *m,
 			   dirty_flag(err->dirty),
 			   purgeable_flag(err->purgeable),
 			   ring_str(err->ring),
-			   agp_type_str(err->agp_type));
+			   cache_level_str(err->cache_level));
 
 		if (err->name)
 			seq_printf(m, " (name: %d)", err->name);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 967a599..acc0a86 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -189,7 +189,7 @@ struct drm_i915_error_state {
 		u32 dirty:1;
 		u32 purgeable:1;
 		u32 ring:4;
-		u32 agp_type:1;
+		u32 cache_level:2;
 	} *active_bo, *pinned_bo;
 	u32 active_bo_count, pinned_bo_count;
 	struct intel_overlay_error_state *overlay;
@@ -705,6 +705,12 @@ typedef struct drm_i915_private {
 	struct drm_property *broadcast_rgb_property;
 } drm_i915_private_t;
 
+enum i915_cache_level {
+	I915_CACHE_NONE,
+	I915_CACHE_LLC,
+	I915_CACHE_LLC_MLC, /* gen6+ */
+};
+
 struct drm_i915_gem_object {
 	struct drm_gem_object base;
 
@@ -785,6 +791,8 @@ struct drm_i915_gem_object {
 	unsigned int pending_fenced_gpu_access:1;
 	unsigned int fenced_gpu_access:1;
 
+	unsigned int cache_level:2;
+
 	struct page **pages;
 
 	/**
@@ -821,8 +829,6 @@ struct drm_i915_gem_object {
 	/** Record of address bit 17 of each page at last unbind. */
 	unsigned long *bit_17;
 
-	/** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */
-	uint32_t agp_type;
 
 	/**
 	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f63d33c..937432e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3662,7 +3662,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-	obj->agp_type = AGP_USER_MEMORY;
+	obj->cache_level = I915_CACHE_NONE;
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj->mm_list);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b0abdc6..af3c0e6 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -29,6 +29,17 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/* XXX kill agp_type! */
+static uint32_t cache_level_to_agp_type(enum i915_cache_level cache_level)
+{
+	switch (cache_level) {
+	default:
+	case I915_CACHE_NONE: return AGP_USER_MEMORY;
+	case I915_CACHE_LLC: return AGP_USER_CACHED_MEMORY;
+	case I915_CACHE_LLC_MLC: return AGP_USER_CACHED_MEMORY_LLC_MLC;
+	}
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -46,15 +57,14 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 
 			intel_gtt_insert_sg_entries(obj->sg_list,
 						    obj->num_sg,
-						    obj->gtt_space->start
-							>> PAGE_SHIFT,
-						    obj->agp_type);
+						    obj->gtt_space->start >> PAGE_SHIFT,
+						    cache_level_to_agp_type(obj->cache_level));
 		} else
 			intel_gtt_insert_pages(obj->gtt_space->start
 						   >> PAGE_SHIFT,
 					       obj->base.size >> PAGE_SHIFT,
 					       obj->pages,
-					       obj->agp_type);
+					       cache_level_to_agp_type(obj->cache_level));
 	}
 
 	intel_gtt_chipset_flush();
@@ -77,12 +87,12 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 		intel_gtt_insert_sg_entries(obj->sg_list,
 					    obj->num_sg,
 					    obj->gtt_space->start >> PAGE_SHIFT,
-					    obj->agp_type);
+					    cache_level_to_agp_type(obj->cache_level));
 	} else
 		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
 				       obj->base.size >> PAGE_SHIFT,
 				       obj->pages,
-				       obj->agp_type);
+				       cache_level_to_agp_type(obj->cache_level));
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index c1e4368..a35ec39 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -676,7 +676,7 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
 		err->dirty = obj->dirty;
 		err->purgeable = obj->madv != I915_MADV_WILLNEED;
 		err->ring = obj->ring ? obj->ring->id : 0;
-		err->agp_type = obj->agp_type == AGP_USER_CACHED_MEMORY;
+		err->cache_level = obj->cache_level;
 
 		if (++i == count)
 			break;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index cd34e43..783b8b0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -224,7 +224,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->cache_level = I915_CACHE_LLC;
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret)
@@ -735,7 +735,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->agp_type = AGP_USER_CACHED_MEMORY;
+	obj->cache_level = I915_CACHE_LLC;
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
-- 
1.7.4.1
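
For quick reference, the translation retained for the fake AGP driver maps as
follows (condensed from cache_level_to_agp_type() in the patch above):

	/* enum i915_cache_level      ->  agp_type (fake AGP interface)  */
	/* I915_CACHE_NONE            ->  AGP_USER_MEMORY                */
	/* I915_CACHE_LLC             ->  AGP_USER_CACHED_MEMORY         */
	/* I915_CACHE_LLC_MLC (gen6+) ->  AGP_USER_CACHED_MEMORY_LLC_MLC */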


* [PATCH 05/15] drm/i915: Mark the cursor and the overlay as being part of the display planes
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (3 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 04/15] drm/i915: Rename agp_type to cache_level Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:51 ` [PATCH 06/15] drm/i915: Add an interface to dynamically change the cache level Chris Wilson
                   ` (9 subsequent siblings)
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_display.c |    2 +-
 drivers/gpu/drm/i915/intel_overlay.c |    2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b0733d..f96620d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5359,7 +5359,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 			goto fail_locked;
 		}
 
-		ret = i915_gem_object_set_to_gtt_domain(obj, 0);
+		ret = i915_gem_object_set_to_display_plane(obj);
 		if (ret) {
 			DRM_ERROR("failed to move cursor bo into the GTT\n");
 			goto fail_unpin;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 25edf0e..f0dbdc4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -777,7 +777,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
+	ret = i915_gem_object_set_to_display_plane(new_bo);
 	if (ret != 0)
 		goto out_unpin;
 
-- 
1.7.4.1


* [PATCH 06/15] drm/i915: Add an interface to dynamically change the cache level
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (4 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 05/15] drm/i915: Mark the cursor and the overlay as being part of the display planes Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:51 ` [PATCH 07/15] drm/i915: Do not clflush snooped objects Chris Wilson
                   ` (8 subsequent siblings)
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_drv.h         |    6 +++++-
 drivers/gpu/drm/i915/i915_gem.c         |   24 +++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem_gtt.c     |    8 +++++---
 drivers/gpu/drm/i915/intel_ringbuffer.c |    6 ++++--
 4 files changed, 37 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index acc0a86..3e187d6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1191,9 +1191,13 @@ i915_gem_object_move_to_ring(struct drm_i915_gem_object *obj,
 uint32_t
 i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj);
 
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level);
+
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+					  enum i915_cache_level cache_level);
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 
 /* i915_gem_evict.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 937432e..3f2e212 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2932,7 +2932,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 		return ret;
 	}
 
-	ret = i915_gem_gtt_bind_object(obj);
+	ret = i915_gem_gtt_bind_object(obj, obj->cache_level);
 	if (ret) {
 		i915_gem_object_put_pages_gtt(obj);
 		drm_mm_put_block(obj->gtt_space);
@@ -3099,6 +3099,28 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 	return 0;
 }
 
+int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
+				    enum i915_cache_level cache_level)
+{
+	int ret;
+
+	if (obj->cache_level == cache_level)
+		return 0;
+
+	if (obj->gtt_space) {
+		ret = i915_gem_object_flush_gpu(obj);
+		if (ret)
+			return ret;
+
+		ret = i915_gem_gtt_bind_object(obj, cache_level);
+		if (ret)
+			return ret;
+	}
+
+	obj->cache_level = cache_level;
+	return 0;
+}
+
 /*
  * Prepare buffer for display plane. Use uninterruptible for possible flush
  * wait, as in modesetting process we're not supposed to be interrupted.
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index af3c0e6..47d8bad 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -70,10 +70,12 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 	intel_gtt_chipset_flush();
 }
 
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+			     enum i915_cache_level cache_level)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	uint32_t agp_type = cache_level_to_agp_type(cache_level);
 	int ret;
 
 	if (dev_priv->mm.gtt->needs_dmar) {
@@ -87,12 +89,12 @@ int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 		intel_gtt_insert_sg_entries(obj->sg_list,
 					    obj->num_sg,
 					    obj->gtt_space->start >> PAGE_SHIFT,
-					    cache_level_to_agp_type(obj->cache_level));
+					    agp_type);
 	} else
 		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
 				       obj->base.size >> PAGE_SHIFT,
 				       obj->pages,
-				       cache_level_to_agp_type(obj->cache_level));
+				       agp_type);
 
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 783b8b0..1c5a7ed 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -224,7 +224,8 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->cache_level = I915_CACHE_LLC;
+
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret)
@@ -735,7 +736,8 @@ static int init_status_page(struct intel_ring_buffer *ring)
 		ret = -ENOMEM;
 		goto err;
 	}
-	obj->cache_level = I915_CACHE_LLC;
+
+	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
 	ret = i915_gem_object_pin(obj, 4096, true);
 	if (ret != 0) {
-- 
1.7.4.1


* [PATCH 07/15] drm/i915: Do not clflush snooped objects
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (5 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 06/15] drm/i915: Add an interface to dynamically change the cache level Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:51 ` [PATCH 08/15] drm/i915: Use the uncached domain for the display planes Chris Wilson
                   ` (7 subsequent siblings)
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

Rely on the GPU snooping the CPU cache on MI_FLUSH for appropriately bound
objects, instead of clflushing them; the resulting rule is condensed after
the patch. Or perhaps one day we will have a cache-coherent CPU/GPU
package...

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |    8 ++++++++
 1 files changed, 8 insertions(+), 0 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3f2e212..9d8a1e8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2979,6 +2979,14 @@ i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 	if (obj->pages == NULL)
 		return;
 
+	/* If the GPU is snooping the contents of the CPU cache,
+	 * we do not need to clear the CPU cache lines. Instead we need
+	 * to be sure to flush/invalidate the RENDER cache when the contents
+	 * must be refreshed.
+	 */
+	if (obj->cache_level != I915_CACHE_NONE)
+		return;
+
 	trace_i915_gem_object_clflush(obj);
 
 	drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
-- 
1.7.4.1
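
The rule this patch encodes, condensed for reference (the helper below is an
editorial sketch with a hypothetical name; the driver open-codes the test at
the top of i915_gem_clflush_object()):

	/* clflush is only required for uncached objects; snooped objects
	 * (LLC or LLC+MLC) are kept coherent by the GPU, which instead
	 * needs its RENDER cache flushed/invalidated when the contents
	 * must be refreshed. */
	static bool i915_needs_clflush(const struct drm_i915_gem_object *obj)
	{
		return obj->cache_level == I915_CACHE_NONE;
	}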


* [PATCH 08/15] drm/i915: Use the uncached domain for the display planes
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (6 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 07/15] drm/i915: Do not clflush snooped objects Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:51 ` [PATCH 09/15] drm/i915: Refactor pwrite/pread to use single copy of get_user_pages Chris Wilson
                   ` (6 subsequent siblings)
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

From: Eric Anholt <eric@anholt.net>

The simplest and most common method for ensuring scanout coherency on all
chipsets is to mark the scanout buffers as uncached (and for
userspace to remember to flush the render cache every so often).

We can improve upon this for later generations by marking scanout
objects as GFDT and only flushing those cachelines when required. However,
we start simple; the resulting ordering is sketched after the patch.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |   13 +++++++++++++
 1 files changed, 13 insertions(+), 0 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9d8a1e8..abfa855 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3149,6 +3149,19 @@ i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj)
 
 	i915_gem_object_flush_cpu_write_domain(obj);
 
+	/* The display engine is not coherent with the LLC cache on gen6.  As
+	 * a result, we make sure that the pinning that is about to occur is
+	 * done with uncached PTEs. This is lowest common denominator for all
+	 * chipsets.
+	 *
+	 * However for gen6+, we could do better by using the GFDT bit instead
+	 * of uncaching, which would allow us to flush all the LLC-cached data
+	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
+	 */
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_NONE);
+	if (ret)
+		return ret;
+
 	old_read_domains = obj->base.read_domains;
 	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
-- 
1.7.4.1

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 09/15] drm/i915: Refactor pwrite/pread to use single copy of get_user_pages
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (7 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 08/15] drm/i915: Use the uncached domain for the display planes Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:51 ` [PATCH 10/15] drm/i915: Use kmap_atomic for shmem pread Chris Wilson
                   ` (5 subsequent siblings)
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

Replace the three nearly identical copies of the code with a single
function. And take advantage of the opportunity to do some
micro-optimisation: avoid the vmalloc if at all possible and also avoid
dropping the lock unless we are forced to acquire the mm semaphore.
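
For reference, a sketch of the call pattern the helper gives every
path, distilled from the diff below with error handling trimmed. Note
that num_pages is in/out: on return it holds the count actually
pinned, so the cleanup loop is always safe:

    num_pages = last_data_page - first_data_page + 1;

    ret = i915_gem_get_user_pages(dev, data_ptr, write,
                                  &num_pages, &user_pages);
    if (ret)
            goto out;

    /* ... copy through user_pages[0 .. num_pages-1] ... */

out:
    for (i = 0; i < num_pages; i++)
            page_cache_release(user_pages[i]);
    drm_free_large(user_pages);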

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |  130 ++++++++++++++++++++++-----------------
 1 files changed, 73 insertions(+), 57 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index abfa855..7f9be7a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -257,6 +257,54 @@ static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 		obj->tiling_mode != I915_TILING_NONE;
 }
 
+static int
+i915_gem_get_user_pages(struct drm_device *dev,
+			unsigned long addr,
+			bool write,
+			int *num_pages,
+			struct page ***pages_out)
+{
+	struct page **pages;
+	int pinned, ret;
+	int n = *num_pages;
+
+	pages = kmalloc(n*sizeof(struct page *), GFP_KERNEL);
+	if (pages == NULL) {
+		pages = drm_malloc_ab(n, sizeof(struct page *));
+		if (pages == NULL) {
+			*num_pages = 0;
+			*pages_out = NULL;
+			return -ENOMEM;
+		}
+	}
+
+	pinned = __get_user_pages_fast(addr, n, write, pages);
+	if (pinned < n) {
+		struct mm_struct *mm = current->mm;
+
+		mutex_unlock(&dev->struct_mutex);
+		down_read(&mm->mmap_sem);
+		ret = get_user_pages(current, mm,
+				     addr + (pinned << PAGE_SHIFT),
+				     n - pinned,
+				     write, 0,
+				     pages + pinned,
+				     NULL);
+		up_read(&mm->mmap_sem);
+		mutex_lock(&dev->struct_mutex);
+		if (ret > 0)
+			pinned += ret;
+	}
+
+	ret = 0;
+	if (pinned < n)
+		ret = -EFAULT;
+
+	*num_pages = pinned;
+	*pages_out = pages;
+	return ret;
+}
+
 static inline void
 slow_shmem_copy(struct page *dst_page,
 		int dst_offset,
@@ -398,11 +446,11 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 			  struct drm_file *file)
 {
 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
-	loff_t offset, pinned_pages, i;
-	loff_t first_data_page, last_data_page, num_pages;
+	loff_t offset;
+	loff_t first_data_page, last_data_page;
+	int num_pages, i;
 	int shmem_page_offset;
 	int data_page_index, data_page_offset;
 	int page_length;
@@ -420,20 +468,10 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
-	if (user_pages == NULL)
-		return -ENOMEM;
-
-	mutex_unlock(&dev->struct_mutex);
-	down_read(&mm->mmap_sem);
-	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-				      num_pages, 1, 0, user_pages, NULL);
-	up_read(&mm->mmap_sem);
-	mutex_lock(&dev->struct_mutex);
-	if (pinned_pages < num_pages) {
-		ret = -EFAULT;
+	ret = i915_gem_get_user_pages(dev, data_ptr, true,
+				      &num_pages, &user_pages);
+	if (ret)
 		goto out;
-	}
 
 	ret = i915_gem_object_set_cpu_read_domain_range(obj,
 							args->offset,
@@ -494,7 +532,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 	}
 
 out:
-	for (i = 0; i < pinned_pages; i++) {
+	for (i = 0; i < num_pages; i++) {
 		SetPageDirty(user_pages[i]);
 		mark_page_accessed(user_pages[i]);
 		page_cache_release(user_pages[i]);
@@ -679,10 +717,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	ssize_t remain;
 	loff_t gtt_page_base, offset;
-	loff_t first_data_page, last_data_page, num_pages;
-	loff_t pinned_pages, i;
+	loff_t first_data_page, last_data_page;
+	int num_pages, i;
 	struct page **user_pages;
-	struct mm_struct *mm = current->mm;
 	int gtt_page_offset, data_page_offset, data_page_index, page_length;
 	int ret;
 	uint64_t data_ptr = args->data_ptr;
@@ -697,28 +734,18 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
-	if (user_pages == NULL)
-		return -ENOMEM;
-
-	mutex_unlock(&dev->struct_mutex);
-	down_read(&mm->mmap_sem);
-	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-				      num_pages, 0, 0, user_pages, NULL);
-	up_read(&mm->mmap_sem);
-	mutex_lock(&dev->struct_mutex);
-	if (pinned_pages < num_pages) {
-		ret = -EFAULT;
-		goto out_unpin_pages;
-	}
+	ret = i915_gem_get_user_pages(dev, data_ptr, false,
+				      &num_pages, &user_pages);
+	if (ret)
+		goto out;
 
 	ret = i915_gem_object_set_to_gtt_domain(obj, true);
 	if (ret)
-		goto out_unpin_pages;
+		goto out;
 
 	ret = i915_gem_object_put_fence(obj);
 	if (ret)
-		goto out_unpin_pages;
+		goto out;
 
 	offset = obj->gtt_offset + args->offset;
 
@@ -753,8 +780,8 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
 		data_ptr += page_length;
 	}
 
-out_unpin_pages:
-	for (i = 0; i < pinned_pages; i++)
+out:
+	for (i = 0; i < num_pages; i++)
 		page_cache_release(user_pages[i]);
 	drm_free_large(user_pages);
 
@@ -803,11 +830,11 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
-		vaddr = kmap_atomic(page, KM_USER0);
+		vaddr = kmap_atomic(page);
 		ret = __copy_from_user_inatomic(vaddr + page_offset,
 						user_data,
 						page_length);
-		kunmap_atomic(vaddr, KM_USER0);
+		kunmap_atomic(vaddr);
 
 		set_page_dirty(page);
 		mark_page_accessed(page);
@@ -842,11 +869,10 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 			   struct drm_file *file)
 {
 	struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
-	struct mm_struct *mm = current->mm;
 	struct page **user_pages;
 	ssize_t remain;
-	loff_t offset, pinned_pages, i;
-	loff_t first_data_page, last_data_page, num_pages;
+	loff_t first_data_page, last_data_page, offset;
+	int num_pages, i;
 	int shmem_page_offset;
 	int data_page_index,  data_page_offset;
 	int page_length;
@@ -864,20 +890,10 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = drm_malloc_ab(num_pages, sizeof(struct page *));
-	if (user_pages == NULL)
-		return -ENOMEM;
-
-	mutex_unlock(&dev->struct_mutex);
-	down_read(&mm->mmap_sem);
-	pinned_pages = get_user_pages(current, mm, (uintptr_t)args->data_ptr,
-				      num_pages, 0, 0, user_pages, NULL);
-	up_read(&mm->mmap_sem);
-	mutex_lock(&dev->struct_mutex);
-	if (pinned_pages < num_pages) {
-		ret = -EFAULT;
+	ret = i915_gem_get_user_pages(dev, data_ptr, false,
+				      &num_pages, &user_pages);
+	if (ret)
 		goto out;
-	}
 
 	ret = i915_gem_object_set_to_cpu_domain(obj, 1);
 	if (ret)
@@ -940,7 +956,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 	}
 
 out:
-	for (i = 0; i < pinned_pages; i++)
+	for (i = 0; i < num_pages; i++)
 		page_cache_release(user_pages[i]);
 	drm_free_large(user_pages);
 
-- 
1.7.4.1

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 10/15] drm/i915: Use kmap_atomic for shmem pread
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (8 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 09/15] drm/i915: Refactor pwrite/pread to use single copy of get_user_pages Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:51 ` [PATCH 11/15] drm/i915: Use the CPU domain for snooped pwrites Chris Wilson
                   ` (4 subsequent siblings)
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

We hold the mapped pages only for the duration of the memcpy, and never
sleep while holding them, so we can safely use the cheaper atomic
variants of kmap.
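
The pattern in general terms, as a sketch: kmap_atomic() disables
preemption on the mapping CPU, so the region between map and unmap
must not sleep:

    char *vaddr = kmap_atomic(page);
    memcpy(vaddr + offset, src, len);   /* short, non-sleeping copy */
    kunmap_atomic(vaddr);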

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |   16 ++++++++--------
 1 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7f9be7a..96d4e64 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -314,13 +314,13 @@ slow_shmem_copy(struct page *dst_page,
 {
 	char *dst_vaddr, *src_vaddr;
 
-	dst_vaddr = kmap(dst_page);
-	src_vaddr = kmap(src_page);
+	dst_vaddr = kmap_atomic(dst_page);
+	src_vaddr = kmap_atomic(src_page);
 
 	memcpy(dst_vaddr + dst_offset, src_vaddr + src_offset, length);
 
-	kunmap(src_page);
-	kunmap(dst_page);
+	kunmap_atomic(src_vaddr);
+	kunmap_atomic(dst_vaddr);
 }
 
 static inline void
@@ -343,8 +343,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
 					       cpu_page, cpu_offset, length);
 	}
 
-	gpu_vaddr = kmap(gpu_page);
-	cpu_vaddr = kmap(cpu_page);
+	gpu_vaddr = kmap_atomic(gpu_page);
+	cpu_vaddr = kmap_atomic(cpu_page);
 
 	/* Copy the data, XORing A6 with A17 (1). The user already knows he's
 	 * XORing with the other bits (A9 for Y, A9 and A10 for X)
@@ -368,8 +368,8 @@ slow_shmem_bit17_copy(struct page *gpu_page,
 		length -= this_length;
 	}
 
-	kunmap(cpu_page);
-	kunmap(gpu_page);
+	kunmap_atomic(cpu_vaddr);
+	kunmap_atomic(gpu_vaddr);
 }
 
 /**
-- 
1.7.4.1

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 11/15] drm/i915: Use the CPU domain for snooped pwrites
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (9 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 10/15] drm/i915: Use kmap_atomic for shmem pread Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:51 ` [PATCH 12/15] drm/i915: s/addr & ~PAGE_MASK/offset_in_page(addr)/ Chris Wilson
                   ` (3 subsequent siblings)
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |    1 +
 1 files changed, 1 insertions(+), 0 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 96d4e64..afb5aaf 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1017,6 +1017,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	if (obj->phys_obj)
 		ret = i915_gem_phys_pwrite(dev, obj, args, file);
 	else if (obj->gtt_space &&
+		 obj->cache_level == I915_CACHE_NONE &&
 		 obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
 		ret = i915_gem_object_pin(obj, 0, true);
 		if (ret)
-- 
1.7.4.1

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 12/15] drm/i915: s/addr & ~PAGE_MASK/offset_in_page(addr)/
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (10 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 11/15] drm/i915: Use the CPU domain for snooped pwrites Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:51 ` [PATCH 13/15] drm/i915: Implement GTT variants of pread Chris Wilson
                   ` (2 subsequent siblings)
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

Convert our open coded offset_in_page() to the common macro.
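
For reference, the common macro (from <linux/mm.h>) is simply

    #define offset_in_page(p)   ((unsigned long)(p) & ~PAGE_MASK)

so the open-coded spellings below, `offset & ~PAGE_MASK' and
`offset & (PAGE_SIZE-1)', are both equivalent to it.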

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |   21 ++++++++++-----------
 1 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index afb5aaf..37a8a29 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -404,7 +404,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
 		 * page_offset = offset within page
 		 * page_length = bytes to copy for this page
 		 */
-		page_offset = offset & (PAGE_SIZE-1);
+		page_offset = offset_in_page(offset);
 		page_length = remain;
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
@@ -493,9 +493,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
 		 * data_page_offset = offset with data_page_index page.
 		 * page_length = bytes to copy for this page
 		 */
-		shmem_page_offset = offset & ~PAGE_MASK;
+		shmem_page_offset = offset_in_page(offset);
 		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-		data_page_offset = data_ptr & ~PAGE_MASK;
+		data_page_offset = offset_in_page(data_ptr);
 
 		page_length = remain;
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
@@ -678,8 +678,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 		 * page_offset = offset within page
 		 * page_length = bytes to copy for this page
 		 */
-		page_base = (offset & ~(PAGE_SIZE-1));
-		page_offset = offset & (PAGE_SIZE-1);
+		page_base = offset & PAGE_MASK;
+		page_offset = offset_in_page(offset);
 		page_length = remain;
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
@@ -690,7 +690,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 		 */
 		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
 				    page_offset, user_data, page_length))
-
 			return -EFAULT;
 
 		remain -= page_length;
@@ -759,9 +758,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
 		 * page_length = bytes to copy for this page
 		 */
 		gtt_page_base = offset & PAGE_MASK;
-		gtt_page_offset = offset & ~PAGE_MASK;
+		gtt_page_offset = offset_in_page(offset);
 		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-		data_page_offset = data_ptr & ~PAGE_MASK;
+		data_page_offset = offset_in_page(data_ptr);
 
 		page_length = remain;
 		if ((gtt_page_offset + page_length) > PAGE_SIZE)
@@ -820,7 +819,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 		 * page_offset = offset within page
 		 * page_length = bytes to copy for this page
 		 */
-		page_offset = offset & (PAGE_SIZE-1);
+		page_offset = offset_in_page(offset);
 		page_length = remain;
 		if ((page_offset + remain) > PAGE_SIZE)
 			page_length = PAGE_SIZE - page_offset;
@@ -914,9 +913,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
 		 * data_page_offset = offset with data_page_index page.
 		 * page_length = bytes to copy for this page
 		 */
-		shmem_page_offset = offset & ~PAGE_MASK;
+		shmem_page_offset = offset_in_page(offset);
 		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
-		data_page_offset = data_ptr & ~PAGE_MASK;
+		data_page_offset = offset_in_page(data_ptr);
 
 		page_length = remain;
 		if ((shmem_page_offset + page_length) > PAGE_SIZE)
-- 
1.7.4.1

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 13/15] drm/i915: Implement GTT variants of pread
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (11 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 12/15] drm/i915: s/addr & ~PAGE_MASK/offset_in_page(addr)/ Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-23 17:23   ` Eric Anholt
  2011-03-22 13:51 ` [PATCH 14/15] drm/i915: Cleanup flush|invalidate domain tracking for set_to_gpu Chris Wilson
  2011-03-22 13:52 ` [PATCH 15/15] drm/i915: Use the LLC mode on gen6 for everything but display Chris Wilson
  14 siblings, 1 reply; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |  184 ++++++++++++++++++++++++++++++++++++--
 1 files changed, 174 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 37a8a29..8f60bc5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -542,6 +542,146 @@ out:
 	return ret;
 }
 
+static int
+i915_gem_gtt_pread_fast(struct drm_device *dev,
+			struct drm_i915_gem_object *obj,
+			struct drm_i915_gem_pread *args,
+			struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	ssize_t remain;
+	loff_t offset, page_base;
+	char __user *user_data;
+	int page_offset, page_length;
+
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	remain = args->size;
+
+	offset = obj->gtt_offset + args->offset;
+
+	while (remain > 0) {
+		u8 __iomem *vaddr;
+		unsigned long unwritten;
+
+		/* Operation in this page
+		 *
+		 * page_base = page offset within aperture
+		 * page_offset = offset within page
+		 * page_length = bytes to copy for this page
+		 */
+		page_base = offset & PAGE_MASK;
+		page_offset = offset_in_page(offset);
+		page_length = remain;
+		if ((page_offset + remain) > PAGE_SIZE)
+			page_length = PAGE_SIZE - page_offset;
+
+		/* If we get a fault while copying data, then (presumably) our
+		 * source page isn't available.  Return the error and we'll
+		 * retry in the slow path.
+		 */
+
+		vaddr = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						 page_base);
+		unwritten = __copy_to_user_inatomic(user_data,
+						    vaddr + page_offset,
+						    page_length);
+		io_mapping_unmap_atomic(vaddr);
+
+		if (unwritten)
+			return -EFAULT;
+
+		remain -= page_length;
+		user_data += page_length;
+		offset += page_length;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_gtt_pread_slow(struct drm_device *dev,
+			struct drm_i915_gem_object *obj,
+			struct drm_i915_gem_pread *args,
+			struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	ssize_t remain;
+	loff_t gtt_page_base, offset;
+	loff_t first_data_page, last_data_page;
+	int num_pages, i;
+	struct page **user_pages;
+	int gtt_page_offset, user_page_offset, user_page_index, page_length;
+	int ret;
+	uint64_t data_ptr = args->data_ptr;
+
+	remain = args->size;
+
+	/* Pin the user pages containing the data.  We can't fault while
+	 * holding the struct mutex, and all of the pread implementations
+	 * want to hold it while dereferencing the user data.
+	 */
+	first_data_page = data_ptr / PAGE_SIZE;
+	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
+	num_pages = last_data_page - first_data_page + 1;
+
+	ret = i915_gem_get_user_pages(dev, data_ptr, true,
+				      &num_pages, &user_pages);
+	if (ret)
+		goto out;
+
+	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+	if (ret)
+		goto out;
+
+	offset = obj->gtt_offset + args->offset;
+
+	while (remain > 0) {
+		u8 __iomem *src_vaddr;
+		u8 *dst_vaddr;
+
+		/* Operation in this page
+		 *
+		 * gtt_page_base = page offset within aperture
+		 * gtt_page_offset = offset within page in aperture
+		 * user_page_index = page number in get_user_pages return
+		 * user_page_offset = offset with user_page_index page.
+		 * page_length = bytes to copy for this page
+		 */
+		gtt_page_base = offset & PAGE_MASK;
+		gtt_page_offset = offset_in_page(offset);
+		user_page_index = data_ptr / PAGE_SIZE - first_data_page;
+		user_page_offset = offset_in_page(data_ptr);
+
+		page_length = remain;
+		if ((gtt_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - gtt_page_offset;
+		if ((user_page_offset + page_length) > PAGE_SIZE)
+			page_length = PAGE_SIZE - user_page_offset;
+
+		src_vaddr = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						     gtt_page_base);
+		dst_vaddr = kmap_atomic(user_pages[user_page_index]);
+
+		memcpy_fromio(dst_vaddr + user_page_offset,
+			      src_vaddr + gtt_page_offset,
+			      page_length);
+
+		kunmap_atomic(dst_vaddr);
+		io_mapping_unmap_atomic(src_vaddr);
+
+		remain -= page_length;
+		offset += page_length;
+		data_ptr += page_length;
+	}
+
+out:
+	for (i = 0; i < num_pages; i++)
+		page_cache_release(user_pages[i]);
+	drm_free_large(user_pages);
+
+	return ret;
+}
+
 /**
  * Reads data from the object referenced by handle.
  *
@@ -587,17 +727,41 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 
 	trace_i915_gem_object_pread(obj, args->offset, args->size);
 
-	ret = i915_gem_object_set_cpu_read_domain_range(obj,
-							args->offset,
-							args->size);
-	if (ret)
-		goto out;
+	if (obj->gtt_space &&
+	    obj->map_and_fenceable &&
+	    obj->cache_level == I915_CACHE_NONE &&
+	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+		ret = i915_gem_object_pin(obj, 0, true);
+		if (ret)
+			goto out;
+
+		ret = i915_gem_object_set_to_gtt_domain(obj, false);
+		if (ret)
+			goto out_unpin;
 
-	ret = -EFAULT;
-	if (!i915_gem_object_needs_bit17_swizzle(obj))
-		ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
-	if (ret == -EFAULT)
-		ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
+		ret = i915_gem_object_put_fence(obj);
+		if (ret)
+			goto out_unpin;
+
+		ret = i915_gem_gtt_pread_fast(dev, obj, args, file);
+		if (ret == -EFAULT)
+			ret = i915_gem_gtt_pread_slow(dev, obj, args, file);
+
+out_unpin:
+		i915_gem_object_unpin(obj);
+	} else {
+		ret = i915_gem_object_set_cpu_read_domain_range(obj,
+								args->offset,
+								args->size);
+		if (ret)
+			goto out;
+
+		ret = -EFAULT;
+		if (!i915_gem_object_needs_bit17_swizzle(obj))
+			ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
+		if (ret == -EFAULT)
+			ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
+	}
 
 out:
 	drm_gem_object_unreference(&obj->base);
-- 
1.7.4.1

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 14/15] drm/i915: Cleanup flush|invalidate domain tracking for set_to_gpu
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (12 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 13/15] drm/i915: Implement GTT variants of pread Chris Wilson
@ 2011-03-22 13:51 ` Chris Wilson
  2011-03-22 13:52 ` [PATCH 15/15] drm/i915: Use the LLC mode on gen6 for everything but display Chris Wilson
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:51 UTC (permalink / raw)
  To: intel-gfx

After moving the write flushing into move_to_ring, we can simplify the
execbuffer flush by no longer having to compute inter-ring flushes.
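
The shape of the simplified path, as a sketch condensed from the diff
below (move_to_gpu_tail is an illustrative name):

    static int move_to_gpu_tail(struct intel_ring_buffer *ring,
                                struct change_domains *cd)
    {
            if (cd->flush_domains & I915_GEM_DOMAIN_CPU)
                    intel_gtt_chipset_flush();

            if (cd->flush_domains & I915_GEM_DOMAIN_GTT)
                    wmb();

            /* A single flush on the target ring; inter-ring write
             * flushing is now handled by move_to_ring.
             */
            return i915_gem_flush_ring(ring,
                                       cd->invalidate_domains,
                                       cd->flush_domains);
    }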

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |   73 +++++++---------------------
 1 files changed, 17 insertions(+), 56 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3c54911..d04bd3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -36,7 +36,6 @@
 struct change_domains {
 	uint32_t invalidate_domains;
 	uint32_t flush_domains;
-	uint32_t flush_rings;
 	uint32_t flips;
 };
 
@@ -156,7 +155,7 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
 				  struct intel_ring_buffer *ring,
 				  struct change_domains *cd)
 {
-	uint32_t invalidate_domains = 0, flush_domains = 0;
+	uint32_t flush = 0;
 
 	/*
 	 * If the object isn't moving to a new write domain,
@@ -172,22 +171,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
 	 * write domain
 	 */
 	if (obj->base.write_domain &&
-	    (obj->base.write_domain != obj->base.pending_read_domains ||
-	     obj->ring != ring)) {
-		flush_domains |= obj->base.write_domain;
-		invalidate_domains |=
-			obj->base.pending_read_domains & ~obj->base.write_domain;
-	}
+	    obj->base.write_domain != obj->base.pending_read_domains)
+		flush = obj->base.write_domain;
 	/*
 	 * Invalidate any read caches which may have
 	 * stale data. That is, any new read domains.
 	 */
-	invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+	if (flush & I915_GEM_DOMAIN_CPU)
 		i915_gem_clflush_object(obj);
 
 	/* blow away mappings if mapped through GTT */
-	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+	if (flush & I915_GEM_DOMAIN_GTT)
 		i915_gem_release_mmap(obj);
 
 	if (obj->base.pending_write_domain)
@@ -199,15 +193,12 @@ i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
 	 * write_domains).  So if we have a current write domain that we
 	 * aren't changing, set pending_write_domain to that.
 	 */
-	if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+	if (flush == 0 && obj->base.pending_write_domain == 0)
 		obj->base.pending_write_domain = obj->base.write_domain;
 
-	cd->invalidate_domains |= invalidate_domains;
-	cd->flush_domains |= flush_domains;
-	if (flush_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= obj->ring->id;
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS)
-		cd->flush_rings |= ring->id;
+	cd->flush_domains |= flush;
+	cd->invalidate_domains |=
+		obj->base.pending_read_domains & ~obj->base.read_domains;
 }
 
 struct eb_objects {
@@ -710,35 +701,6 @@ err:
 }
 
 static int
-i915_gem_execbuffer_flush(struct drm_device *dev,
-			  uint32_t invalidate_domains,
-			  uint32_t flush_domains,
-			  uint32_t flush_rings)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i, ret;
-
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		intel_gtt_chipset_flush();
-
-	if (flush_domains & I915_GEM_DOMAIN_GTT)
-		wmb();
-
-	if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
-		for (i = 0; i < I915_NUM_RINGS; i++)
-			if (flush_rings & (1 << i)) {
-				ret = i915_gem_flush_ring(&dev_priv->ring[i],
-							  invalidate_domains,
-							  flush_domains);
-				if (ret)
-					return ret;
-			}
-	}
-
-	return 0;
-}
-
-static int
 i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring, u32 flips)
 {
 	u32 plane, flip_mask;
@@ -783,14 +745,11 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 	list_for_each_entry(obj, objects, exec_list)
 		i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
 
-	if (cd.invalidate_domains | cd.flush_domains) {
-		ret = i915_gem_execbuffer_flush(ring->dev,
-						cd.invalidate_domains,
-						cd.flush_domains,
-						cd.flush_rings);
-		if (ret)
-			return ret;
-	}
+	if (cd.flush_domains & I915_GEM_DOMAIN_CPU)
+		intel_gtt_chipset_flush();
+
+	if (cd.flush_domains & I915_GEM_DOMAIN_GTT)
+		wmb();
 
 	if (cd.flips) {
 		ret = i915_gem_execbuffer_wait_for_flips(ring, cd.flips);
@@ -804,7 +763,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
 			return ret;
 	}
 
-	return 0;
+	return i915_gem_flush_ring(ring,
+				   cd.invalidate_domains,
+				   cd.flush_domains);
 }
 
 static bool
-- 
1.7.4.1

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* [PATCH 15/15] drm/i915: Use the LLC mode on gen6 for everything but display.
  2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
                   ` (13 preceding siblings ...)
  2011-03-22 13:51 ` [PATCH 14/15] drm/i915: Cleanup flush|invalidate domain tracking for set_to_gpu Chris Wilson
@ 2011-03-22 13:52 ` Chris Wilson
  14 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-22 13:52 UTC (permalink / raw)
  To: intel-gfx

From: Eric Anholt <eric@anholt.net>

This provided a 10.4% +/- 1.5% (n=3) performance improvement on
openarena on my laptop.  We have more room to improve with doing LLC
caching for display using GFDT, and in doing LLC+MLC caching, but this
was an easy performance win and incremental improvement toward those
two.
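
The change itself is a one-line policy decision at object creation,
sketched here and spelled out with the full comment in the diff below:

    obj->cache_level = IS_GEN6(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;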

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c |   18 +++++++++++++++++-
 1 files changed, 17 insertions(+), 1 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8f60bc5..83792f4 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3885,7 +3885,23 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-	obj->cache_level = I915_CACHE_NONE;
+	if (IS_GEN6(dev)) {
+		/* On Gen6, we can have the GPU use the LLC (the CPU
+		 * cache) for about a 10% performance improvement
+		 * compared to uncached.  Graphics requests other than
+		 * display scanout are coherent with the CPU in
+		 * accessing this cache.  This means in this mode we
+		 * don't need to clflush on the CPU side, and on the
+		 * GPU side we only need to flush internal caches to
+		 * get data visible to the CPU.
+		 *
+		 * However, we maintain the display planes as UC, and so
+		 * need to rebind when first used as such.
+		 */
+		obj->cache_level = I915_CACHE_LLC;
+	} else
+		obj->cache_level = I915_CACHE_NONE;
+
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
 	INIT_LIST_HEAD(&obj->mm_list);
-- 
1.7.4.1

^ permalink raw reply related	[flat|nested] 23+ messages in thread

* Re: [PATCH 01/15] drm/i915: Enable use of GPU semaphores to sync page-flips on SNB
  2011-03-22 13:51 ` [PATCH 01/15] drm/i915: Enable use of GPU semaphores to sync page-flips " Chris Wilson
@ 2011-03-22 20:44   ` Keith Packard
  0 siblings, 0 replies; 23+ messages in thread
From: Keith Packard @ 2011-03-22 20:44 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


Might be nice to have the patch message mention that the bulk of this
patch is simply renaming and moving i915_gem_execbuffer_sync_rings. In
fact, doing this in two patches would make the actual change a whole lot
easier to find.

-- 
keith.packard@intel.com


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 02/15] drm/i915: Accurately track flushed domains
  2011-03-22 13:51 ` [PATCH 02/15] drm/i915: Accurately track flushed domains Chris Wilson
@ 2011-03-22 21:07   ` Keith Packard
  0 siblings, 0 replies; 23+ messages in thread
From: Keith Packard @ 2011-03-22 21:07 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


I'd love to see the API changes done in separate patches so they can be
reviewed without being mixed in with the actual fixes. This patch
conflates the changing of the sequence number management, ring
synchronization, elimination of the file argument to i915_add_request
with the changes in flush domains.

Also, a commit message which describes in detail what is changing and
why would really help verify that the patch does what it is supposed
to.

>  static inline u32
>  i915_gem_next_request_seqno(struct intel_ring_buffer *ring)
>  {
> -	drm_i915_private_t *dev_priv = ring->dev->dev_private;
> -	return ring->outstanding_lazy_request = dev_priv->next_seqno;
> +	if (ring->outstanding_lazy_request == 0)
> +		ring->outstanding_lazy_request = __i915_gem_get_seqno(ring->dev);
> +	return ring->outstanding_lazy_request;
>  }

Why did the code not allocate a seqno for this before? Why is it
necessary now?

> +	if (i915_ring_outstanding_dispatch(ring)) {
> +		struct drm_i915_gem_request *request;
> +
> +		request = kzalloc(sizeof(*request), GFP_KERNEL);
> +		if (request && i915_add_request(ring, request))
> +			kfree(request);
> +	}
> +

Why is it OK to not add this request when out of memory?

> +		if (!list_empty(&ring->gpu_write_list))
>  			ret = i915_gem_flush_ring(ring,
>  						  0, I915_GEM_GPU_DOMAINS);
> -			request = kzalloc(sizeof(*request), GFP_KERNEL);
> -			if (ret || request == NULL ||
> -			    i915_add_request(ring, NULL, request))
> -			    kfree(request);
> -		}
> -
> -		idle &= list_empty(&ring->request_list);
> +		(void)ret;

If you're going to ignore a __must_check return value, you might at
least justify it in a comment.

>  int
> -i915_gem_flush_ring(struct intel_ring_buffer *ring,
> -		    uint32_t invalidate_domains,
> -		    uint32_t flush_domains)
> +i915_gem_flush_ring(struct intel_ring_buffer *ring, u32 invalidate, u32 flush)

Seems like gratuitous variable renaming here.

-- 
keith.packard@intel.com


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 03/15] drm/i915: Track last read/write seqno independently
  2011-03-22 13:51 ` [PATCH 03/15] drm/i915: Track last read/write seqno independently Chris Wilson
@ 2011-03-23 17:16   ` Eric Anholt
  0 siblings, 0 replies; 23+ messages in thread
From: Eric Anholt @ 2011-03-23 17:16 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On Tue, 22 Mar 2011 13:51:48 +0000, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Changes like this (making this complicated driver more complicated) need
some sort of justification in the commit log.  Did it make some app
faster?  How much faster?


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 13/15] drm/i915: Implement GTT variants of pread
  2011-03-22 13:51 ` [PATCH 13/15] drm/i915: Implement GTT variants of pread Chris Wilson
@ 2011-03-23 17:23   ` Eric Anholt
  2011-03-23 17:30     ` Chris Wilson
  0 siblings, 1 reply; 23+ messages in thread
From: Eric Anholt @ 2011-03-23 17:23 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On Tue, 22 Mar 2011 13:51:58 +0000, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Again, needs more justification.  For example, I would expect us not to
bother with the GTT on SNB pread/pwrite, and just read the backing pages
(they're in our cache, no need to clflush or anything) like we already
have code to do.  And one note below.

> @@ -587,17 +727,41 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
>  
>  	trace_i915_gem_object_pread(obj, args->offset, args->size);
>  
> -	ret = i915_gem_object_set_cpu_read_domain_range(obj,
> -							args->offset,
> -							args->size);
> -	if (ret)
> -		goto out;
> +	if (obj->gtt_space &&
> +	    obj->map_and_fenceable &&
> +	    obj->cache_level == I915_CACHE_NONE &&
> +	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
> +		ret = i915_gem_object_pin(obj, 0, true);
> +		if (ret)
> +			goto out;
> +
> +		ret = i915_gem_object_set_to_gtt_domain(obj, false);
> +		if (ret)
> +			goto out_unpin;

So, previously if I pread a tiled object, I got the tiled data.  Now
you're giving me maybe the tiled data, maybe the data untiled by reading
through a fence.


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 13/15] drm/i915: Implement GTT variants of pread
  2011-03-23 17:23   ` Eric Anholt
@ 2011-03-23 17:30     ` Chris Wilson
  2011-03-24 17:44       ` Keith Packard
  0 siblings, 1 reply; 23+ messages in thread
From: Chris Wilson @ 2011-03-23 17:30 UTC (permalink / raw)
  To: Eric Anholt, intel-gfx

On Wed, 23 Mar 2011 10:23:46 -0700, Eric Anholt <eric@anholt.net> wrote:
> On Tue, 22 Mar 2011 13:51:58 +0000, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> 
> Again, needs more justification.  For example, I would expect us not to
> bother with the GTT on SNB pread/pwrite, and just read the backing pages
> (they're in our cache, no need to clflush or anything) like we already
> have code to do.  And one note below.

Right, so snooped pages go through the shmem paths; nothing changes. Not
everything is as nice as SNB, and keeping the pages in the GTT is a benefit
for everything else.

[snip]
> So, previously if I pread a tiled object, I got the tiled data.  Now
> you're giving me maybe the tiled data, maybe the data untiled by reading
> through a fence.

No, it clears the fence before reading, to maintain the existing API,
which isn't that useful in current practice.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 13/15] drm/i915: Implement GTT variants of pread
  2011-03-23 17:30     ` Chris Wilson
@ 2011-03-24 17:44       ` Keith Packard
  2011-03-24 19:47         ` Chris Wilson
  0 siblings, 1 reply; 23+ messages in thread
From: Keith Packard @ 2011-03-24 17:44 UTC (permalink / raw)
  To: Chris Wilson, Eric Anholt, intel-gfx


On Wed, 23 Mar 2011 17:30:22 +0000, Chris Wilson <chris@chris-wilson.co.uk> wrote:

> Right, so snooped pages go through the shmem paths; nothing changes. Not
> everything is as nice as SNB, and keeping the pages in the GTT is a benefit
> for everything else.

Where's the data.

-- 
keith.packard@intel.com


^ permalink raw reply	[flat|nested] 23+ messages in thread

* Re: [PATCH 13/15] drm/i915: Implement GTT variants of pread
  2011-03-24 17:44       ` Keith Packard
@ 2011-03-24 19:47         ` Chris Wilson
  0 siblings, 0 replies; 23+ messages in thread
From: Chris Wilson @ 2011-03-24 19:47 UTC (permalink / raw)
  To: Keith Packard, Eric Anholt, intel-gfx

On Thu, 24 Mar 2011 10:44:40 -0700, Keith Packard <keithp@keithp.com> wrote:
> On Wed, 23 Mar 2011 17:30:22 +0000, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> 
> > Right, so snooped pages go through the shmem paths; nothing changes. Not
> > everything is as nice as SNB, and keeping the pages in the GTT is a benefit
> > for everything else.
> 
> Where's the data.

Hmm. Right, I was thinking of the read-modify-write cycle of GDK, but none
of that will hit the linear pread. And even for that there is a better
method.

Shelved until proven useful.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

^ permalink raw reply	[flat|nested] 23+ messages in thread

end of thread, other threads:[~2011-03-24 19:47 UTC | newest]

Thread overview: 23+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-03-22 13:51 next tidbits - use LLC on SNB Chris Wilson
2011-03-22 13:51 ` [PATCH 01/15] drm/i915: Enable use of GPU semaphores to sync page-flips " Chris Wilson
2011-03-22 20:44   ` Keith Packard
2011-03-22 13:51 ` [PATCH 02/15] drm/i915: Accurately track flushed domains Chris Wilson
2011-03-22 21:07   ` Keith Packard
2011-03-22 13:51 ` [PATCH 03/15] drm/i915: Track last read/write seqno independently Chris Wilson
2011-03-23 17:16   ` Eric Anholt
2011-03-22 13:51 ` [PATCH 04/15] drm/i915: Rename agp_type to cache_level Chris Wilson
2011-03-22 13:51 ` [PATCH 05/15] drm/i915: Mark the cursor and the overlay as being part of the display planes Chris Wilson
2011-03-22 13:51 ` [PATCH 06/15] drm/i915: Add an interface to dynamically change the cache level Chris Wilson
2011-03-22 13:51 ` [PATCH 07/15] drm/i915: Do not clflush snooped objects Chris Wilson
2011-03-22 13:51 ` [PATCH 08/15] drm/i915: Use the uncached domain for the display planes Chris Wilson
2011-03-22 13:51 ` [PATCH 09/15] drm/i915: Refactor pwrite/pread to use single copy of get_user_pages Chris Wilson
2011-03-22 13:51 ` [PATCH 10/15] drm/i915: Use kmap_atomic for shmem pread Chris Wilson
2011-03-22 13:51 ` [PATCH 11/15] drm/i915: Use the CPU domain for snooped pwrites Chris Wilson
2011-03-22 13:51 ` [PATCH 12/15] drm/i915: s/addr & ~PAGE_MASK/offset_in_page(addr)/ Chris Wilson
2011-03-22 13:51 ` [PATCH 13/15] drm/i915: Implement GTT variants of pread Chris Wilson
2011-03-23 17:23   ` Eric Anholt
2011-03-23 17:30     ` Chris Wilson
2011-03-24 17:44       ` Keith Packard
2011-03-24 19:47         ` Chris Wilson
2011-03-22 13:51 ` [PATCH 14/15] drm/i915: Cleanup flush|invalidate domain tracking for set_to_gpu Chris Wilson
2011-03-22 13:52 ` [PATCH 15/15] drm/i915: Use the LLC mode on gen6 for everything but display Chris Wilson
