From: Zou Nan hai <nanhai.zou@intel.com>
To: Anholt Eric <eric.anholt@intel.com>,
	Intel GFX <intel-gfx@lists.freedesktop.org>
Subject: [PATCH 4/4] adapt intel_ring_buffer into gem
Date: Thu,  6 May 2010 09:20:05 +0800
Message-ID: <1273108805-1354-4-git-send-email-nanhai.zou@intel.com>
In-Reply-To: <1273108805-1354-1-git-send-email-nanhai.zou@intel.com>

Remove the single active_list and request_list and move them into
per-ring lists in each intel_ring_buffer structure.
Use a flag in execbuffer2 to decide which ring a command runs on.
Legacy ioctls are considered to run on the render ring by default.
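
As an illustration (not part of the patch), a userspace caller would
select the ring through the execbuffer2 flags field roughly as in the
sketch below. I915_EXEC_RENDER/I915_EXEC_BSD are the flag values used
by this series, drmIoctl() and DRM_IOCTL_I915_GEM_EXECBUFFER2 are the
usual libdrm/i915_drm.h interfaces, and the buffer list setup is
abbreviated:

	/* sketch only: needs <stdio.h>, <string.h>, <stdint.h>,
	 * <xf86drm.h> and the series' i915_drm.h */
	struct drm_i915_gem_execbuffer2 execbuf;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)exec_objects;	/* validation list */
	execbuf.buffer_count = count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;
	/* run on the BSD ring; leave 0 or use I915_EXEC_RENDER for the
	 * default render ring */
	execbuf.flags = I915_EXEC_BSD;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
		perror("execbuffer2");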

Signed-off-by: Xiang Haihao <haihao.xiang@intel.com>
Signed-off-by: Zou Nanhai <nanhai.zou@intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c   |   23 ++-
 drivers/gpu/drm/i915/i915_dma.c       |    4 +-
 drivers/gpu/drm/i915/i915_drv.c       |    5 +-
 drivers/gpu/drm/i915/i915_drv.h       |   64 ++----
 drivers/gpu/drm/i915/i915_gem.c       |  355 ++++++++++++++++++---------------
 drivers/gpu/drm/i915/i915_gem_debug.c |   11 +-
 drivers/gpu/drm/i915/i915_irq.c       |   98 ++++-----
 drivers/gpu/drm/i915/intel_overlay.c  |   45 +++--
 8 files changed, 322 insertions(+), 283 deletions(-)
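
For reference, the intel_ring_buffer members this patch relies on are
sketched below. The real definition is introduced earlier in this
series (patches 1/4 and 3/4); the field types and exact layout here
are inferred from their use in this diff and may differ in detail:

	struct intel_ring_buffer {
		/* per-ring GEM state, split out of dev_priv->mm */
		struct list_head	active_list;
		struct list_head	request_list;
		u32			waiting_gem_seqno;
		u32			irq_gem_seqno;
		wait_queue_head_t	irq_queue;
		u32			ring_flag;	/* identifies the ring, assumed
							 * to mirror the execbuffer2 flag */
		struct drm_gem_object	*gem_object;
		struct {			/* hardware status page */
			void	*page_addr;
			u32	gfx_addr;
		} status_page;

		/* ring-specific hooks called from the common GEM code */
		int  (*init)(struct drm_device *dev,
			     struct intel_ring_buffer *ring);
		void (*flush)(struct drm_device *dev,
			      struct intel_ring_buffer *ring,
			      u32 invalidate_domains, u32 flush_domains);
		u32  (*add_request)(struct drm_device *dev,
				    struct intel_ring_buffer *ring,
				    struct drm_file *file_priv,
				    u32 flush_domains);
		u32  (*get_gem_seqno)(struct drm_device *dev,
				      struct intel_ring_buffer *ring);
		void (*user_irq_get)(struct drm_device *dev,
				     struct intel_ring_buffer *ring);
		void (*user_irq_put)(struct drm_device *dev,
				     struct intel_ring_buffer *ring);
		int  (*dispatch_gem_execbuffer)(struct drm_device *dev,
				struct intel_ring_buffer *ring,
				struct drm_i915_gem_execbuffer2 *exec,
				struct drm_clip_rect *cliprects,
				uint64_t exec_offset);
	};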

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ea81ec1..7bf08c5 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -76,7 +76,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	case ACTIVE_LIST:
 		seq_printf(m, "Active:\n");
 		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->mm.active_list;
+		head = &dev_priv->render_ring.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
@@ -129,7 +129,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
 	struct drm_i915_gem_request *gem_request;
 
 	seq_printf(m, "Request:\n");
-	list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
+	list_for_each_entry(gem_request,
+			&dev_priv->render_ring.request_list, list) {
 		seq_printf(m, "    %d @ %d\n",
 			   gem_request->seqno,
 			   (int) (jiffies - gem_request->emitted_jiffies));
@@ -145,13 +146,14 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
 
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence: hws uninitialized\n");
 	}
 	seq_printf(m, "Waiter sequence:  %d\n",
-			dev_priv->mm.waiting_gem_seqno);
-	seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+			dev_priv->render_ring.waiting_gem_seqno);
+	seq_printf(m, "IRQ sequence:     %d\n",
+			dev_priv->render_ring.irq_gem_seqno);
 	return 0;
 }
 
@@ -197,14 +199,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 		   atomic_read(&dev_priv->irq_received));
 	if (dev_priv->render_ring.status_page.page_addr != NULL) {
 		seq_printf(m, "Current sequence:    %d\n",
-			   i915_get_gem_seqno(dev));
+			   i915_get_gem_seqno(dev, &dev_priv->render_ring));
 	} else {
 		seq_printf(m, "Current sequence:    hws uninitialized\n");
 	}
 	seq_printf(m, "Waiter sequence:     %d\n",
-		   dev_priv->mm.waiting_gem_seqno);
+		   dev_priv->render_ring.waiting_gem_seqno);
 	seq_printf(m, "IRQ sequence:        %d\n",
-		   dev_priv->mm.irq_gem_seqno);
+		   dev_priv->render_ring.irq_gem_seqno);
 	return 0;
 }
 
@@ -287,7 +289,8 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv,
+			&dev_priv->render_ring.active_list, list) {
 		obj = obj_priv->obj;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 		    ret = i915_gem_object_get_pages(obj, 0);
@@ -682,7 +685,7 @@ i915_wedged_write(struct file *filp,
 
 	atomic_set(&dev_priv->mm.wedged, val);
 	if (val) {
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 		queue_work(dev_priv->wq, &dev_priv->error_work);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 063f355..0c0e1d7 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -81,7 +81,8 @@ static void i915_free_hws(struct drm_device *dev)
 		dev_priv->status_page_dmah = NULL;
 	}
 
-	if (dev_priv->status_gfx_addr) {
+	if (dev_priv->render_ring.status_page.gfx_addr) {
+		dev_priv->render_ring.status_page.gfx_addr = 0;
 		dev_priv->status_gfx_addr = 0;
 		drm_core_ioremapfree(&dev_priv->hws_map, dev);
 	}
@@ -1616,7 +1617,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	spin_lock_init(&dev_priv->user_irq_lock);
 	spin_lock_init(&dev_priv->error_lock);
-	dev_priv->user_irq_refcount = 0;
 	dev_priv->trace_irq_seqno = 0;
 
 	ret = drm_vblank_init(dev, I915_NUM_PIPE);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index be0f31b..a67680e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -339,7 +339,9 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	/*
 	 * Clear request list
 	 */
-	i915_gem_retire_requests(dev);
+
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+	i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
 
 	if (need_display)
 		i915_save_display(dev);
@@ -388,6 +390,7 @@ int i965_reset(struct drm_device *dev, u8 flags)
 	 */
 	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
 	    !dev_priv->mm.suspended) {
+
 		struct intel_ring_buffer *ring = &dev_priv->render_ring;
 		dev_priv->mm.suspended = 0;
 		ring->init(dev, ring);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ee0a15e..c066097 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -83,6 +83,7 @@ enum plane {
 #define I915_GEM_PHYS_OVERLAY_REGS 3
 #define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
 
+
 struct drm_i915_gem_phys_object {
 	int id;
 	struct page **page_list;
@@ -237,9 +238,10 @@ typedef struct drm_i915_private {
 	void __iomem *regs;
 
 	struct pci_dev *bridge_dev;
+
 	struct intel_ring_buffer render_ring;
-	struct intel_ring_buffer bsd_ring;
 
+	struct intel_ring_buffer bsd_ring;
 
 	drm_dma_handle_t *status_page_dmah;
 	dma_addr_t dma_status_page;
@@ -257,12 +259,10 @@ typedef struct drm_i915_private {
 	int current_page;
 	int page_flipping;
 
-	wait_queue_head_t irq_queue;
 	atomic_t irq_received;
 	/** Protects user_irq_refcount and irq_mask_reg */
 	spinlock_t user_irq_lock;
-	/** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
-	int user_irq_refcount;
+
 	u32 trace_irq_seqno;
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
@@ -508,18 +508,7 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head shrink_list;
 
-		/**
-		 * List of objects currently involved in rendering from the
-		 * ringbuffer.
-		 *
-		 * Includes buffers having the contents of their GPU caches
-		 * flushed, not necessarily primitives.  last_rendering_seqno
-		 * represents when the rendering involved will be completed.
-		 *
-		 * A reference is held on the buffer while on this list.
-		 */
 		spinlock_t active_list_lock;
-		struct list_head active_list;
 
 		/**
 		 * List of objects which are not in the ringbuffer but which
@@ -557,12 +546,6 @@ typedef struct drm_i915_private {
 		struct list_head fence_list;
 
 		/**
-		 * List of breadcrumbs associated with GPU requests currently
-		 * outstanding.
-		 */
-		struct list_head request_list;
-
-		/**
 		 * We leave the user IRQ off as much as possible,
 		 * but this means that requests will finish and never
 		 * be retired once the system goes idle. Set a timer to
@@ -571,18 +554,6 @@ typedef struct drm_i915_private {
 		 */
 		struct delayed_work retire_work;
 
-		uint32_t next_gem_seqno;
-
-		/**
-		 * Waiting sequence number, if any
-		 */
-		uint32_t waiting_gem_seqno;
-
-		/**
-		 * Last seq seen at irq time
-		 */
-		uint32_t irq_gem_seqno;
-
 		/**
 		 * Flag if the X Server, and thus DRM, is not currently in
 		 * control of the device.
@@ -735,6 +706,9 @@ struct drm_i915_gem_object {
 	 */
 	int madv;
 
+	/* Which ring this object belongs to */
+	struct intel_ring_buffer *ring;
+
 	/**
 	 * Number of crtcs where this object is currently the fb, but
 	 * will be page flipped away on the next vblank.  When it
@@ -756,6 +730,9 @@ struct drm_i915_gem_object {
  * an emission time with seqnos for tracking how far ahead of the GPU we are.
  */
 struct drm_i915_gem_request {
+	/** On which ring this request was generated */
+	struct intel_ring_buffer *ring;
+
 	/** GEM sequence number associated with this request. */
 	uint32_t seqno;
 
@@ -820,9 +797,7 @@ extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
-void i915_user_irq_get(struct drm_device *dev);
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno);
-void i915_user_irq_put(struct drm_device *dev);
 extern void i915_enable_interrupt (struct drm_device *dev);
 
 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
@@ -846,7 +821,6 @@ extern void ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv,
 extern void ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv,
 		u32 mask);
 
-
 void
 i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
@@ -917,11 +891,13 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev);
+uint32_t i915_get_gem_seqno(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
 bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
 int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
 int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
-void i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests(struct drm_device *dev,
+		struct intel_ring_buffer *ring);
 void i915_gem_retire_work_handler(struct work_struct *work);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
@@ -932,9 +908,13 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-			  uint32_t flush_domains);
-int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+		int interruptible,
+		struct intel_ring_buffer *ring);
+uint32_t i915_add_request(struct drm_device *dev,
+		struct drm_file *file_priv,
+		uint32_t flush_domains,
+		struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
@@ -1121,7 +1101,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 			 (dev)->pci_device == 0x2A42 ||		\
 			 (dev)->pci_device == 0x2E42)
 
-#define HAS_BSD(dev)           (IS_IRONLAKE(dev) || IS_G4X(dev))
+#define HAS_BSD(dev)		(IS_IRONLAKE(dev) || IS_G4X(dev))
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 450c67e..ec73a76 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -55,6 +55,8 @@ static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *o
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
 
+
+
 static LIST_HEAD(shrink_list);
 static DEFINE_SPINLOCK(shrink_list_lock);
 
@@ -1481,11 +1483,14 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
+i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno,
+			       struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+	BUG_ON(ring == NULL);
+	obj_priv->ring = ring;
 
 	/* Add a reference if we're newly entering the active list. */
 	if (!obj_priv->active) {
@@ -1494,8 +1499,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
 	}
 	/* Move from whatever list we were on to the tail of execution. */
 	spin_lock(&dev_priv->mm.active_list_lock);
-	list_move_tail(&obj_priv->list,
-		       &dev_priv->mm.active_list);
+	list_move_tail(&obj_priv->list, &ring->active_list);
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
@@ -1548,6 +1552,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
 
 	obj_priv->last_rendering_seqno = 0;
+	obj_priv->ring = NULL;
 	if (obj_priv->active) {
 		obj_priv->active = 0;
 		drm_gem_object_unreference(obj);
@@ -1557,7 +1562,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 
 static void
 i915_gem_process_flushing_list(struct drm_device *dev,
-			       uint32_t flush_domains, uint32_t seqno)
+			       uint32_t flush_domains, uint32_t seqno,
+			       struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv, *next;
@@ -1568,12 +1574,13 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 		struct drm_gem_object *obj = obj_priv->obj;
 
 		if ((obj->write_domain & flush_domains) ==
-		    obj->write_domain) {
+		    obj->write_domain &&
+		    obj_priv->ring->ring_flag == ring->ring_flag) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
 			list_del_init(&obj_priv->gpu_write_list);
-			i915_gem_object_move_to_active(obj, seqno);
+			i915_gem_object_move_to_active(obj, seqno, ring);
 
 			/* update the fence lru list */
 			if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
@@ -1597,7 +1604,7 @@ i915_gem_process_flushing_list(struct drm_device *dev,
  */
 uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
-		 uint32_t flush_domains)
+		 uint32_t flush_domains, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_file_private *i915_file_priv = NULL;
@@ -1612,28 +1619,13 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	if (request == NULL)
 		return 0;
 
-	/* Grab the seqno we're going to make this request be, and bump the
-	 * next (skipping 0 so it can be the reserved no-seqno value).
-	 */
-	seqno = dev_priv->mm.next_gem_seqno;
-	dev_priv->mm.next_gem_seqno++;
-	if (dev_priv->mm.next_gem_seqno == 0)
-		dev_priv->mm.next_gem_seqno++;
-
-	BEGIN_LP_RING(4);
-	OUT_RING(MI_STORE_DWORD_INDEX);
-	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	OUT_RING(seqno);
-
-	OUT_RING(MI_USER_INTERRUPT);
-	ADVANCE_LP_RING();
-
-	DRM_DEBUG_DRIVER("%d\n", seqno);
+	seqno = ring->add_request(dev, ring, file_priv, flush_domains);
 
 	request->seqno = seqno;
+	request->ring = ring;
 	request->emitted_jiffies = jiffies;
-	was_empty = list_empty(&dev_priv->mm.request_list);
-	list_add_tail(&request->list, &dev_priv->mm.request_list);
+	was_empty = list_empty(&ring->request_list);
+	list_add_tail(&request->list, &ring->request_list);
 	if (i915_file_priv) {
 		list_add_tail(&request->client_list,
 			      &i915_file_priv->mm.request_list);
@@ -1645,7 +1637,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	 * domain we're flushing with our flush.
 	 */
 	if (flush_domains != 0) 
-		i915_gem_process_flushing_list(dev, flush_domains, seqno);
+		i915_gem_process_flushing_list(dev, flush_domains, seqno, ring);
 
 	if (!dev_priv->mm.suspended) {
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1662,18 +1654,16 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
  * before signalling the CPU
  */
 static uint32_t
-i915_retire_commands(struct drm_device *dev)
+i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
 	uint32_t flush_domains = 0;
 
 	/* The sampler always gets flushed on i965 (sigh) */
 	if (IS_I965G(dev))
 		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-	BEGIN_LP_RING(2);
-	OUT_RING(cmd);
-	OUT_RING(0); /* noop */
-	ADVANCE_LP_RING();
+
+	ring->flush(dev, ring,
+			I915_GEM_DOMAIN_COMMAND, flush_domains);
 	return flush_domains;
 }
 
@@ -1693,11 +1683,11 @@ i915_gem_retire_request(struct drm_device *dev,
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
 	spin_lock(&dev_priv->mm.active_list_lock);
-	while (!list_empty(&dev_priv->mm.active_list)) {
+	while (!list_empty(&request->ring->active_list)) {
 		struct drm_gem_object *obj;
 		struct drm_i915_gem_object *obj_priv;
 
-		obj_priv = list_first_entry(&dev_priv->mm.active_list,
+		obj_priv = list_first_entry(&request->ring->active_list,
 					    struct drm_i915_gem_object,
 					    list);
 		obj = obj_priv->obj;
@@ -1706,6 +1696,7 @@ i915_gem_retire_request(struct drm_device *dev,
 		 * list, then the oldest in the list must still be newer than
 		 * this seqno.
 		 */
+
 		if (obj_priv->last_rendering_seqno != request->seqno)
 			goto out;
 
@@ -1744,39 +1735,37 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
 }
 
 uint32_t
-i915_get_gem_seqno(struct drm_device *dev)
+i915_get_gem_seqno(struct drm_device *dev, struct intel_ring_buffer *ring)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+	return ring->get_gem_seqno(dev, ring);
 }
 
 /**
  * This function clears the request list as sequence numbers are passed.
  */
 void
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_device *dev,
+		struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t seqno;
 
-	if (!dev_priv->render_ring.status_page.page_addr
-			|| list_empty(&dev_priv->mm.request_list))
+	if (!ring->status_page.page_addr || list_empty(&ring->request_list))
 		return;
 
-	seqno = i915_get_gem_seqno(dev);
+	seqno = i915_get_gem_seqno(dev, ring);
 
-	while (!list_empty(&dev_priv->mm.request_list)) {
+	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 		uint32_t retiring_seqno;
 
-		request = list_first_entry(&dev_priv->mm.request_list,
+		request = list_first_entry(&ring->request_list,
 					   struct drm_i915_gem_request,
 					   list);
 		retiring_seqno = request->seqno;
 
 		if (i915_seqno_passed(seqno, retiring_seqno) ||
-		    atomic_read(&dev_priv->mm.wedged)) {
+				atomic_read(&dev_priv->mm.wedged)) {
 			i915_gem_retire_request(dev, request);
 
 			list_del(&request->list);
@@ -1788,7 +1777,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 
 	if (unlikely (dev_priv->trace_irq_seqno &&
 		      i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
-		i915_user_irq_put(dev);
+		ring->user_irq_put(dev, ring);
 		dev_priv->trace_irq_seqno = 0;
 	}
 }
@@ -1804,15 +1793,22 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	dev = dev_priv->dev;
 
 	mutex_lock(&dev->struct_mutex);
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+	if (HAS_BSD(dev))
+		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
+
 	if (!dev_priv->mm.suspended &&
-	    !list_empty(&dev_priv->mm.request_list))
+		(!list_empty(&dev_priv->render_ring.request_list) ||
+			(HAS_BSD(dev) &&
+			 !list_empty(&dev_priv->bsd_ring.request_list))))
 		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
 	mutex_unlock(&dev->struct_mutex);
 }
 
 int
-i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
+		int interruptible, struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
@@ -1823,7 +1819,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 	if (atomic_read(&dev_priv->mm.wedged))
 		return -EIO;
 
-	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+	if (!i915_seqno_passed(ring->get_gem_seqno(dev, ring), seqno)) {
 		if (HAS_PCH_SPLIT(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
@@ -1837,19 +1833,21 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 
 		trace_i915_gem_request_wait_begin(dev, seqno);
 
-		dev_priv->mm.waiting_gem_seqno = seqno;
-		i915_user_irq_get(dev);
+		ring->waiting_gem_seqno = seqno;
+		ring->user_irq_get(dev, ring);
 		if (interruptible)
-			ret = wait_event_interruptible(dev_priv->irq_queue,
-				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-				atomic_read(&dev_priv->mm.wedged));
+			ret = wait_event_interruptible(ring->irq_queue,
+				i915_seqno_passed(
+					ring->get_gem_seqno(dev, ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
 		else
-			wait_event(dev_priv->irq_queue,
-				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
-				atomic_read(&dev_priv->mm.wedged));
+			wait_event(ring->irq_queue,
+				i915_seqno_passed(
+					ring->get_gem_seqno(dev, ring), seqno)
+				|| atomic_read(&dev_priv->mm.wedged));
 
-		i915_user_irq_put(dev);
-		dev_priv->mm.waiting_gem_seqno = 0;
+		ring->user_irq_put(dev, ring);
+		ring->waiting_gem_seqno = 0;
 
 		trace_i915_gem_request_wait_end(dev, seqno);
 	}
@@ -1858,7 +1856,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 
 	if (ret && ret != -ERESTARTSYS)
 		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
-			  __func__, ret, seqno, i915_get_gem_seqno(dev));
+			  __func__, ret, seqno, ring->get_gem_seqno(dev, ring));
 
 	/* Directly dispatch request retiring.  While we have the work queue
 	 * to handle this, the waiter on a request often wants an associated
@@ -1866,7 +1864,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 	 * a separate wait queue to handle that.
 	 */
 	if (ret == 0)
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, ring);
 
 	return ret;
 }
@@ -1876,9 +1874,10 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
  * request and object lists appropriately for that event.
  */
 static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+i915_wait_request(struct drm_device *dev, uint32_t seqno,
+		struct intel_ring_buffer *ring)
 {
-	return i915_do_wait_request(dev, seqno, 1);
+	return i915_do_wait_request(dev, seqno, 1, ring);
 }
 
 static void
@@ -1896,8 +1895,21 @@ i915_gem_flush(struct drm_device *dev,
 
 	if (HAS_BSD(dev))
 		dev_priv->bsd_ring.flush(dev, &dev_priv->bsd_ring,
-				invalidate_domains,
-				flush_domains);
+					invalidate_domains,
+					flush_domains);
+}
+
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+	       uint32_t invalidate_domains,
+	       uint32_t flush_domains,
+	       struct intel_ring_buffer *ring)
+{
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		drm_agp_chipset_flush(dev);
+	ring->flush(dev, ring,
+			invalidate_domains,
+			flush_domains);
 }
 
 /**
@@ -1924,7 +1936,8 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
 		DRM_INFO("%s: object %p wait for seqno %08x\n",
 			  __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+		ret = i915_wait_request(dev,
+				obj_priv->last_rendering_seqno, obj_priv->ring);
 		if (ret != 0)
 			return ret;
 	}
@@ -2040,11 +2053,14 @@ i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	bool lists_empty;
-	uint32_t seqno;
+	uint32_t seqno1, seqno2;
+	int ret;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2052,11 +2068,25 @@ i915_gpu_idle(struct drm_device *dev)
 
 	/* Flush everything onto the inactive list. */
 	i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-	seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-	if (seqno == 0)
+	seqno1 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+			&dev_priv->render_ring);
+	if (seqno1 == 0)
 		return -ENOMEM;
+	ret = i915_wait_request(dev, seqno1, &dev_priv->render_ring);
+
+	if (HAS_BSD(dev)) {
+		seqno2 = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS,
+				&dev_priv->bsd_ring);
+		if (seqno2 == 0)
+			return -ENOMEM;
+
+		ret = i915_wait_request(dev, seqno2, &dev_priv->bsd_ring);
+		if (ret)
+			return ret;
+	}
+
 
-	return i915_wait_request(dev, seqno);
+	return ret;
 }
 
 static int
@@ -2069,7 +2099,9 @@ i915_gem_evict_everything(struct drm_device *dev)
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	if (lists_empty)
@@ -2089,7 +2121,9 @@ i915_gem_evict_everything(struct drm_device *dev)
 	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!lists_empty);
 
@@ -2103,8 +2137,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 	struct drm_gem_object *obj;
 	int ret;
 
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
 	for (;;) {
-		i915_gem_retire_requests(dev);
+		i915_gem_retire_requests(dev, render_ring);
+
+		if (HAS_BSD(dev))
+			i915_gem_retire_requests(dev, bsd_ring);
 
 		/* If there's an inactive buffer available now, grab it
 		 * and be done.
@@ -2128,14 +2167,30 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 		 * things, wait for the next to finish and hopefully leave us
 		 * a buffer to evict.
 		 */
-		if (!list_empty(&dev_priv->mm.request_list)) {
+		if (!list_empty(&render_ring->request_list)) {
+			struct drm_i915_gem_request *request;
+
+			request = list_first_entry(&render_ring->request_list,
+						struct drm_i915_gem_request,
+						   list);
+
+			ret = i915_wait_request(dev,
+					request->seqno, request->ring);
+			if (ret)
+				return ret;
+
+			continue;
+		}
+
+		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
 			struct drm_i915_gem_request *request;
 
-			request = list_first_entry(&dev_priv->mm.request_list,
+			request = list_first_entry(&bsd_ring->request_list,
 						   struct drm_i915_gem_request,
 						   list);
 
-			ret = i915_wait_request(dev, request->seqno);
+			ret = i915_wait_request(dev,
+					request->seqno, request->ring);
 			if (ret)
 				return ret;
 
@@ -2162,10 +2217,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
 			if (obj != NULL) {
 				uint32_t seqno;
 
-				i915_gem_flush(dev,
+				i915_gem_flush_ring(dev,
+					       obj->write_domain,
 					       obj->write_domain,
-					       obj->write_domain);
-				seqno = i915_add_request(dev, NULL, obj->write_domain);
+					       obj_priv->ring);
+				seqno = i915_add_request(dev, NULL,
+						obj->write_domain,
+						obj_priv->ring);
 				if (seqno == 0)
 					return -ENOMEM;
 				continue;
@@ -2691,6 +2749,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
 
 	if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
 		return;
@@ -2698,7 +2757,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 	/* Queue the GPU write cache flushing we need. */
 	old_write_domain = obj->write_domain;
 	i915_gem_flush(dev, 0, obj->write_domain);
-	(void) i915_add_request(dev, NULL, obj->write_domain);
+	(void) i915_add_request(dev, NULL, obj->write_domain, obj_priv->ring);
 	BUG_ON(obj->write_domain);
 
 	trace_i915_gem_object_change_domain(obj,
@@ -2838,7 +2897,10 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
 		DRM_INFO("%s: object %p wait for seqno %08x\n",
 			  __func__, obj, obj_priv->last_rendering_seqno);
 #endif
-		ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
+		ret = i915_do_wait_request(dev,
+				obj_priv->last_rendering_seqno,
+				0,
+				obj_priv->ring);
 		if (ret != 0)
 			return ret;
 	}
@@ -3429,61 +3491,6 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
 	return 0;
 }
 
-/** Dispatch a batchbuffer to the ring
- */
-static int
-i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			      struct drm_i915_gem_execbuffer2 *exec,
-			      struct drm_clip_rect *cliprects,
-			      uint64_t exec_offset)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int nbox = exec->num_cliprects;
-	int i = 0, count;
-	uint32_t exec_start, exec_len;
-
-	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
-	exec_len = (uint32_t) exec->batch_len;
-
-	trace_i915_gem_request_submit(dev, dev_priv->mm.next_gem_seqno + 1);
-
-	count = nbox ? nbox : 1;
-
-	for (i = 0; i < count; i++) {
-		if (i < nbox) {
-			int ret = i915_emit_box(dev, cliprects, i,
-						exec->DR1, exec->DR4);
-			if (ret)
-				return ret;
-		}
-
-		if (IS_I830(dev) || IS_845G(dev)) {
-			BEGIN_LP_RING(4);
-			OUT_RING(MI_BATCH_BUFFER);
-			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-			OUT_RING(exec_start + exec_len - 4);
-			OUT_RING(0);
-			ADVANCE_LP_RING();
-		} else {
-			BEGIN_LP_RING(2);
-			if (IS_I965G(dev)) {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6) |
-					 MI_BATCH_NON_SECURE_I965);
-				OUT_RING(exec_start);
-			} else {
-				OUT_RING(MI_BATCH_BUFFER_START |
-					 (2 << 6));
-				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
-			}
-			ADVANCE_LP_RING();
-		}
-	}
-
-	/* XXX breadcrumb */
-	return 0;
-}
-
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3512,7 +3519,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
 		if (time_after_eq(request->emitted_jiffies, recent_enough))
 			break;
 
-		ret = i915_wait_request(dev, request->seqno);
+		ret = i915_wait_request(dev, request->seqno, request->ring);
 		if (ret != 0)
 			break;
 	}
@@ -3669,6 +3676,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	uint32_t seqno, flush_domains, reloc_index;
 	int pin_tries, flips;
 
+	struct intel_ring_buffer *ring = NULL;
+
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
 		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
@@ -3726,6 +3735,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto pre_mutex_err;
 	}
 
+	if (args->flags & I915_EXEC_BSD) {
+		BUG_ON(!HAS_BSD(dev));
+		ring = &dev_priv->bsd_ring;
+	} else {
+		ring = &dev_priv->render_ring;
+	}
+
 	/* Look up object handles */
 	flips = 0;
 	for (i = 0; i < args->buffer_count; i++) {
@@ -3859,9 +3875,16 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		i915_gem_flush(dev,
 			       dev->invalidate_domains,
 			       dev->flush_domains);
-		if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
+		if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
 			(void)i915_add_request(dev, file_priv,
-					       dev->flush_domains);
+					dev->flush_domains,
+					&dev_priv->render_ring);
+
+			if (HAS_BSD(dev))
+				(void)i915_add_request(dev, file_priv,
+						dev->flush_domains,
+						&dev_priv->bsd_ring);
+		}
 	}
 
 	for (i = 0; i < args->buffer_count; i++) {
@@ -3898,7 +3921,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 #endif
 
 	/* Exec the batchbuffer */
-	ret = i915_dispatch_gem_execbuffer(dev, args, cliprects, exec_offset);
+	ret = ring->dispatch_gem_execbuffer(dev, ring, args,
+			cliprects, exec_offset);
 	if (ret) {
 		DRM_ERROR("dispatch failed %d\n", ret);
 		goto err;
@@ -3908,7 +3932,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * Ensure that the commands in the batch buffer are
 	 * finished before the interrupt fires
 	 */
-	flush_domains = i915_retire_commands(dev);
+	flush_domains = i915_retire_commands(dev, ring);
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
@@ -3919,12 +3943,13 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	 * *some* interrupts representing completion of buffers that we can
 	 * wait on when trying to clear up gtt space).
 	 */
-	seqno = i915_add_request(dev, file_priv, flush_domains);
+	seqno = i915_add_request(dev, file_priv, flush_domains, ring);
 	BUG_ON(seqno == 0);
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
+		obj_priv = obj->driver_private;
 
-		i915_gem_object_move_to_active(obj, seqno);
+		i915_gem_object_move_to_active(obj, seqno, ring);
 #if WATCH_LRU
 		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
 #endif
@@ -4036,7 +4061,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	exec2.DR4 = args->DR4;
 	exec2.num_cliprects = args->num_cliprects;
 	exec2.cliprects_ptr = args->cliprects_ptr;
-	exec2.flags = 0;
+	exec2.flags = I915_EXEC_RENDER;
 
 	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
 	if (!ret) {
@@ -4275,6 +4300,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_busy *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL) {
@@ -4289,7 +4315,10 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * actually unmasked, and our working set ends up being larger than
 	 * required.
 	 */
-	i915_gem_retire_requests(dev);
+	i915_gem_retire_requests(dev, &dev_priv->render_ring);
+
+	if (HAS_BSD(dev))
+		i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
 
 	obj_priv = to_intel_bo(obj);
 	/* Don't count being on the flushing list against the object being
@@ -4451,7 +4480,9 @@ i915_gem_idle(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 
 	if (dev_priv->mm.suspended ||
-			dev_priv->render_ring.gem_object == NULL) {
+			(dev_priv->render_ring.gem_object == NULL) ||
+			(HAS_BSD(dev) &&
+			 dev_priv->bsd_ring.gem_object == NULL)) {
 		mutex_unlock(&dev->struct_mutex);
 		return 0;
 	}
@@ -4503,10 +4534,11 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
 			0, PAGE_SIZE);
 	}
 	ret = intel_init_ring_buffer(dev, &dev_priv->render_ring);
-	if (!ret && HAS_BSD(dev)) {
+	if (HAS_BSD(dev)) {
 		dev_priv->bsd_ring = bsd_ring;
 		ret = intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
 	}
+
 	return ret;
 }
 
@@ -4514,10 +4546,10 @@ void
 i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+
 	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
 	if (HAS_BSD(dev))
 		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
-
 }
 
 int
@@ -4545,12 +4577,14 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}
 
 	spin_lock(&dev_priv->mm.active_list_lock);
-	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
+	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
-	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+	BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
+	BUG_ON(HAS_BSD(dev) && !list_empty(&dev_priv->bsd_ring.request_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	drm_irq_install(dev);
@@ -4589,15 +4623,20 @@ i915_gem_load(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	spin_lock_init(&dev_priv->mm.active_list_lock);
-	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
-	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
+	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
+	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
+
+	if (HAS_BSD(dev)) {
+		INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
+		INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
+	}
+
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
-	dev_priv->mm.next_gem_seqno = 1;
 
 	spin_lock(&shrink_list_lock);
 	list_add(&dev_priv->mm.shrink_list, &shrink_list);
@@ -4860,8 +4899,10 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 			continue;
 
 		spin_unlock(&shrink_list_lock);
+		i915_gem_retire_requests(dev, &dev_priv->render_ring);
 
-		i915_gem_retire_requests(dev);
+		if (HAS_BSD(dev))
+			i915_gem_retire_requests(dev, &dev_priv->bsd_ring);
 
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 35507cf..830680b 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -106,12 +106,21 @@ i915_dump_lru(struct drm_device *dev, const char *where)
 
 	DRM_INFO("active list %s {\n", where);
 	spin_lock(&dev_priv->mm.active_list_lock);
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
 			    list)
 	{
 		DRM_INFO("    %p: %08x\n", obj_priv,
 			 obj_priv->last_rendering_seqno);
 	}
+
+	if (HAS_BSD(dev)) {
+		list_for_each_entry(obj_priv, &dev_priv->bsd_ring.active_list,
+				list)
+		{
+			DRM_INFO("    %p: %08x\n", obj_priv,
+				obj_priv->last_rendering_seqno);
+		}
+	}
 	spin_unlock(&dev_priv->mm.active_list_lock);
 	DRM_INFO("}\n");
 	DRM_INFO("flushing list %s {\n", where);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1e3299a..5312571 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -52,7 +52,7 @@
 	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 
 /** Interrupts that we mask and unmask at runtime. */
-#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT | I915_BSD_USER_INTERRUPT)
 
 #define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
 				 PIPE_VBLANK_INTERRUPT_STATUS)
@@ -330,7 +330,7 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	int ret = IRQ_NONE;
 	u32 de_iir, gt_iir, de_ier, pch_iir;
 	struct drm_i915_master_private *master_priv;
-
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
 	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
@@ -353,14 +353,17 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	}
 
 	if (gt_iir & GT_USER_INTERRUPT) {
-		u32 seqno = i915_get_gem_seqno(dev);
-		dev_priv->mm.irq_gem_seqno = seqno;
+		u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
+		render_ring->irq_gem_seqno = seqno;
 		trace_i915_gem_request_complete(dev, seqno);
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 		dev_priv->hangcheck_count = 0;
 		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 	}
 
+	if (gt_iir & GT_BSD_USER_INTERRUPT)
+		DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
 	if (de_iir & DE_GSE)
 		ironlake_opregion_gse_intr(dev);
 
@@ -552,7 +555,6 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
 	return bbaddr;
 }
 
-
 /**
  * i915_capture_error_state - capture an error record for later analysis
  * @dev: drm device
@@ -584,7 +586,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		return;
 	}
 
-	error->seqno = i915_get_gem_seqno(dev);
+	error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
@@ -612,7 +614,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 	batchbuffer[0] = NULL;
 	batchbuffer[1] = NULL;
 	count = 0;
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+	list_for_each_entry(obj_priv,
+			&dev_priv->render_ring.active_list, list) {
 		struct drm_gem_object *obj = obj_priv->obj;
 
 		if (batchbuffer[0] == NULL &&
@@ -649,7 +652,8 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	if (error->active_bo) {
 		int i = 0;
-		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+		list_for_each_entry(obj_priv,
+				&dev_priv->render_ring.active_list, list) {
 			struct drm_gem_object *obj = obj_priv->obj;
 
 			error->active_bo[i].size = obj->size;
@@ -827,7 +831,7 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 	}
 
 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -846,6 +850,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	unsigned long irqflags;
 	int irq_received;
 	int ret = IRQ_NONE;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -926,14 +931,18 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		}
 
 		if (iir & I915_USER_INTERRUPT) {
-			u32 seqno = i915_get_gem_seqno(dev);
-			dev_priv->mm.irq_gem_seqno = seqno;
+			u32 seqno =
+				render_ring->get_gem_seqno(dev, render_ring);
+			render_ring->irq_gem_seqno = seqno;
 			trace_i915_gem_request_complete(dev, seqno);
-			DRM_WAKEUP(&dev_priv->irq_queue);
+			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
 			dev_priv->hangcheck_count = 0;
 			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 		}
 
+		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
+			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+
 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
 			intel_prepare_page_flip(dev, 0);
 
@@ -982,9 +991,7 @@ static int i915_emit_irq(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
-
 	i915_kernel_lost_context(dev);
-
 	DRM_DEBUG_DRIVER("\n");
 
 	dev_priv->counter++;
@@ -1003,43 +1010,13 @@ static int i915_emit_irq(struct drm_device * dev)
 	return dev_priv->counter;
 }
 
-void i915_user_irq_get(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
-		else
-			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
-void i915_user_irq_put(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long irqflags;
-
-	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
-	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-		if (HAS_PCH_SPLIT(dev))
-			ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
-		else
-			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-	}
-	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
-}
-
 void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	if (dev_priv->trace_irq_seqno == 0)
-		i915_user_irq_get(dev);
+		render_ring->user_irq_get(dev, render_ring);
 
 	dev_priv->trace_irq_seqno = seqno;
 }
@@ -1049,6 +1026,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
@@ -1062,10 +1040,10 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	if (master_priv->sarea_priv)
 		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
-	i915_user_irq_get(dev);
-	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
+	render_ring->user_irq_get(dev, render_ring);
+	DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
-	i915_user_irq_put(dev);
+	render_ring->user_irq_put(dev, render_ring);
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1230,9 +1208,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 	return -EINVAL;
 }
 
-struct drm_i915_gem_request *i915_get_tail_request(struct drm_device *dev) {
+struct drm_i915_gem_request *
+i915_get_tail_request(struct drm_device *dev)
+{
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	return list_entry(dev_priv->mm.request_list.prev, struct drm_i915_gem_request, list);
+	return list_entry(dev_priv->render_ring.request_list.prev,
+			struct drm_i915_gem_request, list);
 }
 
 /**
@@ -1257,8 +1238,10 @@ void i915_hangcheck_elapsed(unsigned long data)
 		acthd = I915_READ(ACTHD_I965);
 
 	/* If all work is done then ACTHD clearly hasn't advanced. */
-	if (list_empty(&dev_priv->mm.request_list) ||
-		       i915_seqno_passed(i915_get_gem_seqno(dev), i915_get_tail_request(dev)->seqno)) {
+	if (list_empty(&dev_priv->render_ring.request_list) ||
+		i915_seqno_passed(i915_get_gem_seqno(dev,
+				&dev_priv->render_ring),
+			i915_get_tail_request(dev)->seqno)) {
 		dev_priv->hangcheck_count = 0;
 		return;
 	}
@@ -1311,7 +1294,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	/* enable kind of interrupts always enabled */
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
-	u32 render_mask = GT_USER_INTERRUPT;
+	u32 render_mask = GT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
 	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
 			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
@@ -1388,7 +1371,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
 	u32 error_mask;
 
-	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+
+	if (HAS_BSD(dev))
+		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2e490ad..d7ed6bc 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -212,6 +212,8 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 {
 	struct drm_device *dev = overlay->dev;
 	int ret;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
 	BUG_ON(overlay->active);
 
 	overlay->active = 1;
@@ -224,11 +226,13 @@ static int intel_overlay_on(struct intel_overlay *overlay)
 	OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev,
+			overlay->last_flip_req, 1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -261,7 +265,8 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
 	OUT_RING(flip_addr);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 }
 
 static int intel_overlay_wait_flip(struct intel_overlay *overlay)
@@ -272,7 +277,8 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
 	u32 tmp;
 
 	if (overlay->last_flip_req != 0) {
-		ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+		ret = i915_do_wait_request(dev, overlay->last_flip_req,
+				1, &dev_priv->render_ring);
 		if (ret == 0) {
 			overlay->last_flip_req = 0;
 
@@ -291,11 +297,13 @@ static int intel_overlay_wait_flip(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -309,6 +317,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
 {
 	u32 flip_addr = overlay->flip_addr;
 	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret;
 
 	BUG_ON(!overlay->active);
@@ -329,11 +338,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
         ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -347,11 +358,13 @@ static int intel_overlay_off(struct intel_overlay *overlay)
         OUT_RING(MI_NOOP);
 	ADVANCE_LP_RING();
 
-	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	overlay->last_flip_req =
+		i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 	if (overlay->last_flip_req == 0)
 		return -ENOMEM;
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			1, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -384,6 +397,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 {
 	struct drm_device *dev = overlay->dev;
 	struct drm_gem_object *obj;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 flip_addr;
 	int ret;
 
@@ -391,12 +405,14 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 		return -EIO;
 
 	if (overlay->last_flip_req == 0) {
-		overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+		overlay->last_flip_req =
+			i915_add_request(dev, NULL, 0, &dev_priv->render_ring);
 		if (overlay->last_flip_req == 0)
 			return -ENOMEM;
 	}
 
-	ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+	ret = i915_do_wait_request(dev, overlay->last_flip_req,
+			interruptible, &dev_priv->render_ring);
 	if (ret != 0)
 		return ret;
 
@@ -420,12 +436,13 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
 			OUT_RING(MI_NOOP);
 			ADVANCE_LP_RING();
 
-			overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+			overlay->last_flip_req = i915_add_request(dev, NULL,
+					0, &dev_priv->render_ring);
 			if (overlay->last_flip_req == 0)
 				return -ENOMEM;
 
 			ret = i915_do_wait_request(dev, overlay->last_flip_req,
-					interruptible);
+					interruptible, &dev_priv->render_ring);
 			if (ret != 0)
 				return ret;
 
-- 
1.7.1

Thread overview: 22+ messages
2010-05-06  1:20 [PATCH 1/4] introduce intel_ring_buffer structure Zou Nan hai
2010-05-06  1:20 ` [PATCH 2/4] convert render engine to use intel_ring_buffer Zou Nan hai
2010-05-06  1:20 ` [PATCH 3/4] add BSD ring buffer support Zou Nan hai
2010-05-06  1:20 ` Zou Nan hai [this message]
2010-05-07 21:34 ` [PATCH 1/4] introduce intel_ring_buffer structure Eric Anholt
2010-05-08 18:15   ` Thomas Bächler
2010-05-11 22:24 ` Daniel Vetter
2010-05-14  1:39   ` Zou, Nanhai
2010-05-14  9:51     ` Daniel Vetter
2010-05-14 15:03       ` Daniel Vetter
2010-05-17  1:59         ` Zou, Nanhai
2010-05-17 17:52           ` Daniel Vetter
2010-05-14 17:43     ` Owain Ainsworth
2010-05-17  1:43       ` Zou, Nanhai
2010-05-17 17:42         ` Daniel Vetter
2010-05-18  2:20           ` Zou, Nanhai
2010-05-18 16:19             ` Simon Farnsworth
2010-05-19  1:09               ` Zou, Nanhai
2010-05-19  9:00                 ` Simon Farnsworth
2010-05-19 16:54                   ` Eric Anholt
2010-05-19 17:33                     ` Simon Farnsworth
  -- strict thread matches above, loose matches on Subject: below --
2010-05-05  3:17 Zou Nan hai
2010-05-05  3:17 ` [PATCH 4/4] adapt intel_ring_buffer into gem Zou Nan hai
