From: Zou Nan hai <nanhai.zou@intel.com>
To: intel-gfx <intel-gfx@lists.freedesktop.org>,
	Anholt Eric <eric.anholt@intel.com>
Subject: [PATCH 2/3] convert some gem structures to per-ring V1
Date: Wed, 19 May 2010 17:35:14 +0800
Message-ID: <1274261714-21098-1-git-send-email-nanhai.zou@intel.com>

Tag each GEM object with the ring it is executing on, derived from the
execbuffer exec flag. The request_list and active_list move from
drm_i915_private.mm into the per-ring intel_ring_buffer structure, and
request emission, seqno reads, flushes and waits now take the target
ring as a parameter.

Signed-off-by: Zou Nan hai <nanhai.zou@intel.com>
Signed-off-by: Xiang Hai hao <haihao.xiang@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c  |   12 +-
 drivers/gpu/drm/i915/i915_drv.c      |    2 +-
 drivers/gpu/drm/i915/i915_drv.h      |   43 ++---
 drivers/gpu/drm/i915/i915_gem.c      |  335 +++++++++++++---------------------
 drivers/gpu/drm/i915/i915_irq.c      |   24 ++-
 drivers/gpu/drm/i915/intel_overlay.c |   44 +++--
 6 files changed, 203 insertions(+), 257 deletions(-)
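
Note for reviewers: the per-ring fields and hooks used below belong to
struct intel_ring_buffer, introduced by patch 1/3 (intel_ringbuffer.h,
not part of this diff). Here is a rough sketch reconstructed from the
call sites in this patch -- the intel_hw_status_page name and the exact
layout are assumptions, everything else appears in the hunks below:

struct intel_ring_buffer {
	struct drm_gem_object	*gem_object;
	unsigned int		ring_flag;	/* e.g. I915_EXEC_RENDER */
	struct list_head	active_list;	/* objects in flight on this ring */
	struct list_head	request_list;	/* outstanding breadcrumbs */
	uint32_t		waiting_gem_seqno;
	wait_queue_head_t	irq_queue;	/* woken from the IRQ handler */
	struct intel_hw_status_page status_page;	/* ->page_addr */

	/* per-ring hooks replacing the old global/LP_RING helpers */
	u32  (*add_request)(struct drm_device *dev,
			    struct intel_ring_buffer *ring,
			    struct drm_file *file_priv,
			    u32 flush_domains);
	u32  (*get_gem_seqno)(struct drm_device *dev,
			      struct intel_ring_buffer *ring);
	void (*flush)(struct drm_device *dev,
		      struct intel_ring_buffer *ring,
		      u32 invalidate_domains, u32 flush_domains);
	void (*user_irq_get)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	void (*user_irq_put)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
	int  (*dispatch_gem_execbuffer)(struct drm_device *dev,
					struct intel_ring_buffer *ring,
					struct drm_i915_gem_execbuffer2 *exec,
					struct drm_clip_rect *cliprects,
					uint64_t exec_offset);
};

Callers then name the ring explicitly, e.g. for the render ring:

	seqno = i915_add_request(dev, file_priv, flush_domains,
				 &dev_priv->render_ring);
	ret = i915_do_wait_request(dev, seqno, 1, &dev_priv->render_ring);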

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4fddf09..c864858 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -77,7 +77,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
 		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->mm.active_list;
+		head = &dev_priv->render_ring.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
@@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_request *gem_request;
 
 	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
+			list) {
 		seq_printf(m, "    %d @ %d\n",
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
@@ -145,7 +146,7 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 
 	if (dev_priv->hw_status_page != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
@@ -197,7 +198,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   atomic_read(&dev_priv->irq_received));
 	if (dev_priv->hw_status_page != NULL) {
 		seq_printf(m, "Current sequence:    %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence:    hws uninitialized\n");
 	}
@@ -287,7 +288,8 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+			list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 		    ret = i915_gem_object_get_pages(obj, 0);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c57c54f..d40f62d 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -340,7 +340,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	/*
 	 * Clear request list
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 	if (need_display)
 		i915_save_display(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index aa6c9b5..258b4d3 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -505,18 +505,7 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head shrink_list;
 
-		/**
-		 * List of objects currently involved in rendering from the
-		 * ringbuffer.
-		 *
-		 * Includes buffers having the contents of their GPU caches
-		 * flushed, not necessarily primitives.  last_rendering_seqno
-		 * represents when the rendering involved will be completed.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
 		spinlock_t active_list_lock;
-		struct list_head active_list;
 
 		/**
 		 * List of objects which are not in the ringbuffer but which
@@ -554,12 +543,6 @@ typedef struct drm_i915_private {
 		struct list_head fence_list;
 
 		/**
-		 * List of breadcrumbs associated with GPU requests currently
-		 * outstanding.
-		 */
-		struct list_head request_list;
-
-		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
 		 * be retired once the system goes idle. Set a timer to
@@ -683,6 +666,9 @@ struct drm_i915_gem_object {
 	 */
 	uint32_t gtt_offset;
 
+	/* The ring this object is active on */
+	struct intel_ring_buffer *ring;
+
 	/**
 	 * Fake offset for use by mmap(2)
 	 */
@@ -756,6 +742,9 @@ struct drm_i915_gem_object {
  * an emission time with seqnos for tracking how far ahead of the GPU we are.
  */
 struct drm_i915_gem_request {
+	/** On which ring this request was generated */
+	struct intel_ring_buffer *ring;
+
 	/** GEM sequence number associated with this request. */
 	uint32_t seqno;
 
@@ -918,11 +907,13 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev,
+		 struct intel_ring_buffer *ring);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -933,9 +924,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-			  uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+uint32_t i915_add_request(struct drm_device *dev,
+		struct drm_file *file_priv,
+		uint32_t flush_domains,
+		struct intel_ring_buffer *ring);
+int i915_do_wait_request(struct drm_device *dev,
+		uint32_t seqno, int interruptible,
+		struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
@@ -1024,7 +1019,7 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {			\
 	if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
 			== NULL)					\
-		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
+			LOCK_TEST_WITH_RETURN(dev, file_priv);		\
 } while (0)
 
 #define I915_READ(reg)          readl(dev_priv->regs + (reg))
@@ -1041,7 +1036,7 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 #define BEGIN_LP_RING(n)  do { \
 	drm_i915_private_t *dev_priv = dev->dev_private;                \
-	if (I915_VERBOSE) 						\
+	if (I915_VERBOSE)						\
 		DRM_DEBUG("   BEGIN_LP_RING %x\n", (int)(n));		\
 	intel_ring_begin(dev, &dev_priv->render_ring, 4*(n));		\
 } while (0)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e2b97fd..bb5879f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -56,6 +56,8 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
 
+
+
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
@@ -1484,11 +1486,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	BUG_ON(ring == NULL);
+	obj_priv->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
 	if (!obj_priv->active) {
@@ -1497,8 +1502,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 	}
 	/* Move from whatever list we were on to the tail of execution. */
 	spin_lock(&dev_priv->mm.active_list_lock);
-	list_move_tail(&obj_priv->list,
-		       &dev_priv->mm.active_list);
+	list_move_tail(&obj_priv->list, &ring->active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
@@ -1551,6 +1555,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
 
 	obj_priv->last_rendering_seqno = 0;
+	obj_priv->ring = NULL;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -1560,7 +1565,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-			       uint32_t flush_domains, uint32_t seqno)
+			       uint32_t flush_domains, uint32_t seqno,
+			       struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv, *next;
@@ -1571,12 +1577,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 		struct drm_gem_object *obj = &obj_priv->base;
 
 		if ((obj->write_domain & flush_domains) ==
-		    obj->write_domain) {
+		    obj->write_domain &&
+		    obj_priv->ring->ring_flag == ring->ring_flag) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
 			list_del_init(&obj_priv->gpu_write_list);
-			i915_gem_object_move_to_active(obj, seqno);
+			i915_gem_object_move_to_active(obj, seqno, ring);
 
 			/* update the fence lru list */
 			if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -1593,13 +1600,6 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 	}
 }
 
-#define PIPE_CONTROL_FLUSH(addr)					\
-	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
-		 PIPE_CONTROL_DEPTH_STALL);				\
-	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);			\
-	OUT_RING(0);							\
-	OUT_RING(0);							\
-
 /**
  * Creates a new sequence number, emitting a write of it to the status page
  * plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1610,7 +1610,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
  */
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-		 uint32_t flush_domains)
+		 uint32_t flush_domains, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *i915_file_priv = NULL;
@@ -1624,63 +1624,14 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	request = kzalloc(sizeof(*request), GFP_KERNEL);
 	if (request == NULL)
 		return 0;
-
-	/* Grab the seqno we're going to make this request be, and bump the
-	 * next (skipping 0 so it can be the reserved no-seqno value).
-	 */
-	seqno = dev_priv->mm.next_gem_seqno;
-	dev_priv->mm.next_gem_seqno++;
-	if (dev_priv->mm.next_gem_seqno == 0)
-		dev_priv->mm.next_gem_seqno++;
-
-	if (HAS_PIPE_CONTROL(dev)) {
-		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
-
-		/*
-		 * Workaround qword write incoherence by flushing the
-		 * PIPE_NOTIFY buffers out to memory before requesting
-		 * an interrupt.
-		 */
-		BEGIN_LP_RING(32);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128; /* write to separate cachelines */
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		scratch_addr += 128;
-		PIPE_CONTROL_FLUSH(scratch_addr);
-		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
-			 PIPE_CONTROL_NOTIFY);
-		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
-		OUT_RING(seqno);
-		OUT_RING(0);
-		ADVANCE_LP_RING();
-	} else {
-		BEGIN_LP_RING(4);
-		OUT_RING(MI_STORE_DWORD_INDEX);
-		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-		OUT_RING(seqno);
-
-		OUT_RING(MI_USER_INTERRUPT);
-		ADVANCE_LP_RING();
-	}
-
-	DRM_DEBUG_DRIVER("%d\n", seqno);
+	seqno = ring->add_request(dev, ring, file_priv, flush_domains);
 
 	request->seqno = seqno;
+	request->ring = ring;
 	request->emitted_jiffies = jiffies;
-	was_empty = list_empty(&dev_priv->mm.request_list);
-	list_add_tail(&request->list, &dev_priv->mm.request_list);
+	was_empty = list_empty(&ring->request_list);
+	list_add_tail(&request->list, &ring->request_list);
+
 	if (i915_file_priv) {
 		list_add_tail(&request->client_list,
 			      &i915_file_priv->mm.request_list);
@@ -1692,7 +1643,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	 * domain we're flushing with our flush.
 	 */
 	if (flush_domains != 0) 
-		i915_gem_process_flushing_list(dev, flush_domains, seqno);
+		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
 
 	if (!dev_priv->mm.suspended) {
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1709,18 +1660,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * before signalling the CPU
  */
 static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
 	uint32_t flush_domains = 0;
 
 	/* The sampler always gets flushed on i965 (sigh) */
 	if (IS_I965G(dev))
 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-	BEGIN_LP_RING(2);
-	OUT_RING(cmd);
-	OUT_RING(0); /* noop */
-	ADVANCE_LP_RING();
+
+	ring->flush(dev, ring,
+			I915_GEM_DOMAIN_COMMAND, flush_domains);
 	return flush_domains;
 }
 
@@ -1740,11 +1689,11 @@ i915_gem_retire_request(struct drm_device *dev,
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
 	spin_lock(&dev_priv->mm.active_list_lock);
-	while (!list_empty(&dev_priv->mm.active_list)) {
+	while (!list_empty(&request->ring->active_list)) {
 		struct drm_gem_object *obj;
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = list_first_entry(&dev_priv->mm.active_list,
+		obj_priv = list_first_entry(&request->ring->active_list,
 					    struct drm_i915_gem_object,
 					    list);
 		obj = &obj_priv->base;
@@ -1753,6 +1702,7 @@ i915_gem_retire_request(struct drm_device *dev,
 		 * list, then the oldest in the list must still be newer than
 		 * this seqno.
 		 */
+
 		if (obj_priv->last_rendering_seqno != request->seqno)
 			goto out;
 
@@ -1791,35 +1741,33 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 }
 
 uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev,
+		 struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (HAS_PIPE_CONTROL(dev))
-		return ((volatile u32 *)(dev_priv->seqno_page))[0];
-	else
-		return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+	return ring->get_gem_seqno(dev, ring);
 }
 
 /**
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	if (!dev_priv->hw_status_page || list_empty(&dev_priv->mm.request_list))
+	if (!ring->status_page.page_addr
+			|| list_empty(&ring->request_list))
 		return;
 
-	seqno = i915_get_gem_seqno(dev);
+	seqno = i915_get_gem_seqno(dev, ring);
 
-	while (!list_empty(&dev_priv->mm.request_list)) {
+	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 		uint32_t retiring_seqno;
 
-		request = list_first_entry(&dev_priv->mm.request_list,
+		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 		retiring_seqno = request->seqno;
@@ -1837,7 +1785,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		i915_user_irq_put(dev);
+		ring->user_irq_put(dev, ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
 }
@@ -1853,15 +1801,17 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	dev = dev_priv->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
 	if (!dev_priv->mm.suspended &&
-	    !list_empty(&dev_priv->mm.request_list))
+			(!list_empty(&dev_priv->render_ring.request_list)))
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
 
 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+		int interruptible, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
@@ -1872,7 +1822,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EIO;
 
-	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1886,19 +1836,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
-		dev_priv->mm.waiting_gem_seqno = seqno;
-		i915_user_irq_get(dev);
+		ring->waiting_gem_seqno = seqno;
+		ring->user_irq_get(dev, ring);
 		if (interruptible)
-			ret = wait_event_interruptible(dev_priv->irq_queue,
-				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-				atomic_read(&dev_priv->mm.wedged));
+			ret = wait_event_interruptible(ring->irq_queue,
+				i915_seqno_passed(
+					ring->get_gem_seqno(dev, ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
 		else
-			wait_event(dev_priv->irq_queue,
-				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-				atomic_read(&dev_priv->mm.wedged));
+			wait_event(ring->irq_queue,
+				i915_seqno_passed(
+					ring->get_gem_seqno(dev, ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
 
-		i915_user_irq_put(dev);
-		dev_priv->mm.waiting_gem_seqno = 0;
+		ring->user_irq_put(dev, ring);
+		ring->waiting_gem_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
 	}
@@ -1907,7 +1859,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 
 	if (ret && ret != -ERESTARTSYS)
 		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, i915_get_gem_seqno(dev));
+			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
 
 	/* Directly dispatch request retiring.  While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1915,7 +1867,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 	 * a separate wait queue to handle that.
 	 */
 	if (ret == 0)
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, ring);
 
 	return ret;
 }
@@ -1925,12 +1877,12 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
  * request and object lists appropriately for that event.
  */
 static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno,
+		struct intel_ring_buffer *ring)
 {
-	return i915_do_wait_request(dev, seqno, 1);
+	return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
-
 static void
 i915_gem_flush(struct drm_device *dev,
 	       uint32_t invalidate_domains,
@@ -1944,6 +1896,19 @@ i915_gem_flush(struct drm_device *dev,
 			flush_domains);
 }
 
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+	       uint32_t invalidate_domains,
+	       uint32_t flush_domains,
+	       struct intel_ring_buffer *ring)
+{
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		drm_agp_chipset_flush(dev);
+	ring->flush(dev, ring,
+			invalidate_domains,
+			flush_domains);
+}
+
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1968,7 +1933,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 		DRM_INFO("%s: object %p wait for seqno %08x\n",
 			  __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+		ret = i915_wait_request(dev,
+				obj_priv->last_rendering_seqno, obj_priv->ring);
 		if (ret != 0)
 			return ret;
 	}
@@ -2085,10 +2051,11 @@ i915_gpu_idle(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
 	uint32_t seqno;
+	int ret;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
+		       list_empty(&dev_priv->render_ring.active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2096,11 +2063,13 @@ i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+			&dev_priv->render_ring);
 	if (seqno == 0)
 		return -ENOMEM;
+	ret = i915_wait_request(dev, seqno, &dev_priv->render_ring);
 
-	return i915_wait_request(dev, seqno);
+	return ret;
 }
 
 static int
@@ -2113,7 +2082,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+		       list_empty(&dev_priv->render_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2133,7 +2102,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+		       list_empty(&dev_priv->render_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!lists_empty);
 
@@ -2147,8 +2116,9 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 	struct drm_gem_object *obj;
 	int ret;
 
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 	for (;;) {
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, render_ring);
 
 		/* If there's an inactive buffer available now, grab it
 		 * and be done.
@@ -2172,14 +2142,15 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 		 * things, wait for the next to finish and hopefully leave us
 		 * a buffer to evict.
 		 */
-		if (!list_empty(&dev_priv->mm.request_list)) {
+		if (!list_empty(&render_ring->request_list)) {
 			struct drm_i915_gem_request *request;
 
-			request = list_first_entry(&dev_priv->mm.request_list,
-						   struct drm_i915_gem_request,
+			request = list_first_entry(&render_ring->request_list,
+						struct drm_i915_gem_request,
 						   list);
 
-			ret = i915_wait_request(dev, request->seqno);
+			ret = i915_wait_request(dev,
+					request->seqno, request->ring);
 			if (ret)
 				return ret;
 
@@ -2206,10 +2177,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 			if (obj != NULL) {
 				uint32_t seqno;
 
-				i915_gem_flush(dev,
+				i915_gem_flush_ring(dev,
+					       obj->write_domain,
 					       obj->write_domain,
-					       obj->write_domain);
-				seqno = i915_add_request(dev, NULL, obj->write_domain);
+					       obj_priv->ring);
+				seqno = i915_add_request(dev, NULL,
+						obj->write_domain,
+						obj_priv->ring);
 				if (seqno == 0)
 					return -ENOMEM;
 				continue;
@@ -2745,14 +2719,13 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
-
+	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return;
-
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
-	(void) i915_add_request(dev, NULL, obj->write_domain);
+	(void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
 	BUG_ON(obj->write_domain);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2829,11 +2802,9 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
-
 	/* Not valid to be called on unbound objects. */
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
-
 	i915_gem_object_flush_gpu_write_domain(obj);
 	/* Wait on any GPU rendering and flushing to occur. */
 	ret = i915_gem_object_wait_rendering(obj);
@@ -2892,7 +2863,10 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 		DRM_INFO("%s: object %p wait for seqno %08x\n",
 			  __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-		ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+		ret = i915_do_wait_request(dev,
+				obj_priv->last_rendering_seqno,
+				0,
+				obj_priv->ring);
 		if (ret != 0)
 			return ret;
 	}
@@ -3483,61 +3457,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	return 0;
 }
 
-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			      struct drm_i915_gem_execbuffer2 *exec,
-			      struct drm_clip_rect *cliprects,
-			      uint64_t exec_offset)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int nbox = exec->num_cliprects;
-	int i = 0, count;
-	uint32_t exec_start, exec_len;
-
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	exec_len = (uint32_t) exec->batch_len;
-
-	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
-	count = nbox ? nbox : 1;
-
-	for (i = 0; i < count; i++) {
-		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
-						exec->DR1, exec->DR4);
-			if (ret)
-				return ret;
-		}
-
-		if (IS_I830(dev) || IS_845G(dev)) {
-			BEGIN_LP_RING(4);
-			OUT_RING(MI_BATCH_BUFFER);
-			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-			OUT_RING(exec_start + exec_len - 4);
-			OUT_RING(0);
-			ADVANCE_LP_RING();
-		} else {
-			BEGIN_LP_RING(2);
-			if (IS_I965G(dev)) {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6) |
-					 MI_BATCH_NON_SECURE_I965);
-				OUT_RING(exec_start);
-			} else {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6));
-				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-			}
-			ADVANCE_LP_RING();
-		}
-	}
-
-	/* XXX breadcrumb */
-	return 0;
-}
-
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3566,7 +3485,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
-		ret = i915_wait_request(dev, request->seqno);
+		ret = i915_wait_request(dev, request->seqno, request->ring);
 		if (ret != 0)
 			break;
 	}
@@ -3723,6 +3642,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	uint32_t seqno, flush_domains, reloc_index;
 	int pin_tries, flips;
 
+	struct intel_ring_buffer *ring = NULL;
+
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
 		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3780,6 +3701,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
+	ring = &dev_priv->render_ring;
+
 	/* Look up object handles */
 	flips = 0;
 	for (i = 0; i < args->buffer_count; i++) {
@@ -3913,9 +3836,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
 			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains);
+					dev->flush_domains,
+					&dev_priv->render_ring);
+
+		}
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
@@ -3952,7 +3878,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif
 
 	/* Exec the batchbuffer */
-	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+			cliprects, exec_offset);
 	if (ret) {
 		DRM_ERROR("dispatch failed %d\n", ret);
 		goto err;
@@ -3962,7 +3889,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * Ensure that the commands in the batch buffer are
 	 * finished before the interrupt fires
 	 */
-	flush_domains = i915_retire_commands(dev);
+	flush_domains = i915_retire_commands(dev, ring);
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -3973,12 +3900,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * *some* interrupts representing completion of buffers that we can
 	 * wait on when trying to clear up gtt space).
 	 */
-	seqno = i915_add_request(dev, file_priv, flush_domains);
+	seqno = i915_add_request(dev, file_priv, flush_domains, ring);
 	BUG_ON(seqno == 0);
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		obj_priv = to_intel_bo(obj);
 
-		i915_gem_object_move_to_active(obj, seqno);
+		i915_gem_object_move_to_active(obj, seqno, ring);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -4090,7 +4018,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	exec2.DR4 = args->DR4;
 	exec2.num_cliprects = args->num_cliprects;
 	exec2.cliprects_ptr = args->cliprects_ptr;
-	exec2.flags = 0;
+	exec2.flags = I915_EXEC_RENDER;
 
 	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
 	if (!ret) {
@@ -4177,6 +4105,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 	int ret;
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
+
 	if (obj_priv->gtt_space == NULL) {
 		ret = i915_gem_object_bind_to_gtt(obj, alignment);
 		if (ret)
@@ -4329,6 +4258,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
@@ -4343,7 +4273,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * actually unmasked, and our working set ends up being larger than
 	 * required.
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 	obj_priv = to_intel_bo(obj);
 	/* Don't count being on the flushing list against the object being
@@ -4425,7 +4355,6 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
 	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 	if (obj == NULL)
 		return NULL;
-
 	if (drm_gem_object_init(dev, &obj->base, size) != 0) {
 		kfree(obj);
 		return NULL;
@@ -4511,7 +4440,7 @@ i915_gem_idle(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 
 	if (dev_priv->mm.suspended ||
-			dev_priv->render_ring.gem_object == NULL) {
+			(dev_priv->render_ring.gem_object == NULL)) {
 		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
@@ -4592,7 +4521,6 @@ err:
 	return ret;
 }
 
-
 static void
 i915_gem_cleanup_pipe_control(struct drm_device *dev)
 {
@@ -4666,12 +4594,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}
 
 	spin_lock(&dev_priv->mm.active_list_lock);
-	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	drm_irq_install(dev);
@@ -4710,18 +4638,16 @@ i915_gem_load(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock_init(&dev_priv->mm.active_list_lock);
-	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
-	dev_priv->mm.next_gem_seqno = 1;
-
 	spin_lock(&shrink_list_lock);
 	list_add(&dev_priv->mm.shrink_list, &shrink_list);
 	spin_unlock(&shrink_list_lock);
@@ -4953,7 +4879,7 @@ i915_gpu_is_active(struct drm_device *dev)
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
+		      list_empty(&dev_priv->render_ring.active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	return !lists_empty;
@@ -4998,8 +4924,7 @@ rescan:
 			continue;
 
 		spin_unlock(&shrink_list_lock);
-
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9ff3202..2d2a213 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -332,6 +332,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	int ret = IRQ_NONE;
 	u32 de_iir, gt_iir, de_ier, pch_iir;
 	struct drm_i915_master_private *master_priv;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
@@ -355,7 +356,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	}
 
 	if (gt_iir & GT_PIPE_NOTIFY) {
-		u32 seqno = i915_get_gem_seqno(dev);
+		u32 seqno = i915_get_gem_seqno(dev, render_ring);
 		dev_priv->mm.irq_gem_seqno = seqno;
 		trace_i915_gem_request_complete(dev, seqno);
 		DRM_WAKEUP(&dev_priv->irq_queue);
@@ -585,7 +586,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		return;
 	}
 
-	error->seqno = i915_get_gem_seqno(dev);
+	error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
@@ -613,7 +614,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 	batchbuffer[0] = NULL;
 	batchbuffer[1] = NULL;
 	count = 0;
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
+			list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 
 		if (batchbuffer[0] == NULL &&
@@ -650,7 +652,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	if (error->active_bo) {
 		int i = 0;
-		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		list_for_each_entry(obj_priv,
+				&dev_priv->render_ring.active_list, list) {
 			struct drm_gem_object *obj = &obj_priv->base;
 
 			error->active_bo[i].size = obj->size;
@@ -847,6 +850,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	unsigned long irqflags;
 	int irq_received;
 	int ret = IRQ_NONE;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -927,7 +931,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		}
 
 		if (iir & I915_USER_INTERRUPT) {
-			u32 seqno = i915_get_gem_seqno(dev);
+			u32 seqno =
+				i915_get_gem_seqno(dev, render_ring);
 			dev_priv->mm.irq_gem_seqno = seqno;
 			trace_i915_gem_request_complete(dev, seqno);
 			DRM_WAKEUP(&dev_priv->irq_queue);
@@ -1231,7 +1236,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 
 struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+	return list_entry(dev_priv->render_ring.request_list.prev,
+			struct drm_i915_gem_request, list);
 }
 
 /**
@@ -1256,8 +1262,10 @@ void i915_hangcheck_elapsed(unsigned long data)
 		acthd = I915_READ(ACTHD_I965);
 
 	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (list_empty(&dev_priv->mm.request_list) ||
-		       i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+	if (list_empty(&dev_priv->render_ring.request_list) ||
+			i915_seqno_passed(i915_get_gem_seqno(dev,
+					&dev_priv->render_ring),
+			       i915_get_tail_request(dev)->seqno)) {
 		dev_priv->hangcheck_count = 0;
 		return;
 	}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 93da837..d7ad513 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -212,6 +212,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	int ret;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	BUG_ON(overlay->active);
 
@@ -225,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev,
+			overlay->last_flip_req, 1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -262,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
 	OUT_RING(flip_addr);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 }
 
 static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -273,7 +277,8 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
 	u32 tmp;
 
 	if (overlay->last_flip_req != 0) {
-		ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+		ret = i915_do_wait_request(dev, overlay->last_flip_req,
+				1, &dev_priv->render_ring);
 		if (ret == 0) {
 			overlay->last_flip_req = 0;
 
@@ -292,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -310,6 +317,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
 	BUG_ON(!overlay->active);
@@ -330,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -348,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -385,6 +397,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_gem_object *obj;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 flip_addr;
 	int ret;
 
@@ -392,12 +405,14 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 		return -EIO;
 
 	if (overlay->last_flip_req == 0) {
-		overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+		overlay->last_flip_req =
+			i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 		if (overlay->last_flip_req == 0)
 			return -ENOMEM;
 	}
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			interruptible, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -421,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 			OUT_RING(MI_NOOP);
 			ADVANCE_LP_RING();
 
-			overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+			overlay->last_flip_req = i915_add_request(dev, NULL,
+					0, &dev_priv->render_ring);
 			if (overlay->last_flip_req == 0)
 				return -ENOMEM;
 
 			ret = i915_do_wait_request(dev, overlay->last_flip_req,
-					interruptible);
+					interruptible, &dev_priv->render_ring);
 			if (ret != 0)
 				return ret;
 
-- 
1.7.1
