From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 044/190] drm/i915: Move GEM request routines to i915_gem_request.c
Date: Mon, 11 Jan 2016 09:16:55 +0000
Message-ID: <1452503961-14837-44-git-send-email-chris@chris-wilson.co.uk>
In-Reply-To: <1452503961-14837-1-git-send-email-chris@chris-wilson.co.uk>

Migrate the request operations out of the main body of i915_gem.c and
into their own C file for easier expansion.

v2: Move __i915_add_request() across as well
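
As part of the move, __i915_gem_request_retire__upto() is renamed to
i915_gem_request_retire_upto() and exported for use by the retire and
reset paths, while i915_gem_get_seqno() becomes static to the new file.

For reference, the typical lifecycle of a request against the relocated
API reads roughly as follows (a sketch only, error unwinding elided):

	struct drm_i915_gem_request *req;
	int ret;

	ret = i915_gem_request_alloc(ring, ctx, &req);
	if (ret)
		return ret;

	/* ... emit commands into the reserved ring space ... */

	i915_add_request(req); /* cannot fail, space already reserved */

	/* optionally, synchronously wait for completion */
	ret = i915_wait_request(req);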

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile           |   1 +
 drivers/gpu/drm/i915/i915_drv.h         | 205 +---------
 drivers/gpu/drm/i915/i915_gem.c         | 652 +------------------------------
 drivers/gpu/drm/i915/i915_gem_request.c | 659 ++++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/i915_gem_request.h | 223 +++++++++++
 5 files changed, 895 insertions(+), 845 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_gem_request.c
 create mode 100644 drivers/gpu/drm/i915/i915_gem_request.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 99ce591c8574..b0a83215db80 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -31,6 +31,7 @@ i915-y += i915_cmd_parser.o \
 	  i915_gem_gtt.o \
 	  i915_gem.o \
 	  i915_gem_render_state.o \
+	  i915_gem_request.o \
 	  i915_gem_shrinker.o \
 	  i915_gem_stolen.o \
 	  i915_gem_tiling.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 57e450e25ad6..ee146ce02412 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -41,6 +41,7 @@
 #include "intel_lrc.h"
 #include "i915_gem_gtt.h"
 #include "i915_gem_render_state.h"
+#include "i915_gem_request.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
 #include <linux/i2c-algo-bit.h>
@@ -2162,179 +2163,15 @@ struct drm_i915_gem_object {
 };
 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
 
-void i915_gem_track_fb(struct drm_i915_gem_object *old,
-		       struct drm_i915_gem_object *new,
-		       unsigned frontbuffer_bits);
-
-/**
- * Request queue structure.
- *
- * The request queue allows us to note sequence numbers that have been emitted
- * and may be associated with active buffers to be retired.
- *
- * By keeping this list, we can avoid having to do questionable sequence
- * number comparisons on buffer last_read|write_seqno. It also allows an
- * emission time to be associated with the request for tracking how far ahead
- * of the GPU the submission is.
- *
- * The requests are reference counted, so upon creation they should have an
- * initial reference taken using kref_init
- */
-struct drm_i915_gem_request {
-	struct kref ref;
-
-	/** On Which ring this request was generated */
-	struct drm_i915_private *i915;
-	struct intel_engine_cs *ring;
-	unsigned reset_counter;
-
-	 /** GEM sequence number associated with the previous request,
-	  * when the HWS breadcrumb is equal to this the GPU is processing
-	  * this request.
-	  */
-	u32 previous_seqno;
-
-	 /** GEM sequence number associated with this request,
-	  * when the HWS breadcrumb is equal or greater than this the GPU
-	  * has finished processing this request.
-	  */
-	u32 seqno;
-
-	/** Position in the ringbuffer of the start of the request */
-	u32 head;
-
-	/**
-	 * Position in the ringbuffer of the start of the postfix.
-	 * This is required to calculate the maximum available ringbuffer
-	 * space without overwriting the postfix.
-	 */
-	 u32 postfix;
-
-	/** Position in the ringbuffer of the end of the whole request */
-	u32 tail;
-
-	/**
-	 * Context and ring buffer related to this request
-	 * Contexts are refcounted, so when this request is associated with a
-	 * context, we must increment the context's refcount, to guarantee that
-	 * it persists while any request is linked to it. Requests themselves
-	 * are also refcounted, so the request will only be freed when the last
-	 * reference to it is dismissed, and the code in
-	 * i915_gem_request_free() will then decrement the refcount on the
-	 * context.
-	 */
-	struct intel_context *ctx;
-	struct intel_ringbuffer *ringbuf;
-
-	/** Batch buffer related to this request if any (used for
-	    error state dump only) */
-	struct drm_i915_gem_object *batch_obj;
-
-	/** Time at which this request was emitted, in jiffies. */
-	unsigned long emitted_jiffies;
-
-	/** global list entry for this request */
-	struct list_head list;
-
-	struct drm_i915_file_private *file_priv;
-	/** file_priv list entry for this request */
-	struct list_head client_list;
-
-	/** process identifier submitting this request */
-	struct pid *pid;
-
-	/**
-	 * The ELSP only accepts two elements at a time, so we queue
-	 * context/tail pairs on a given queue (ring->execlist_queue) until the
-	 * hardware is available. The queue serves a double purpose: we also use
-	 * it to keep track of the up to 2 contexts currently in the hardware
-	 * (usually one in execution and the other queued up by the GPU): We
-	 * only remove elements from the head of the queue when the hardware
-	 * informs us that an element has been completed.
-	 *
-	 * All accesses to the queue are mediated by a spinlock
-	 * (ring->execlist_lock).
-	 */
-
-	/** Execlist link in the submission queue.*/
-	struct list_head execlist_link;
-
-	/** Execlists no. of times this request has been sent to the ELSP */
-	int elsp_submitted;
-
-};
-
 #ifdef CONFIG_DRM_I915_DEBUG_GEM
 #define GEM_BUG_ON(expr) BUG_ON(expr)
 #else
 #define GEM_BUG_ON(expr)
 #endif
 
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx,
-			   struct drm_i915_gem_request **req_out);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
-void i915_gem_request_free(struct kref *req_ref);
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-				   struct drm_file *file);
-
-static inline uint32_t
-i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
-{
-	return req ? req->seqno : 0;
-}
-
-static inline struct intel_engine_cs *
-i915_gem_request_get_ring(struct drm_i915_gem_request *req)
-{
-	return req ? req->ring : NULL;
-}
-
-static inline struct drm_i915_gem_request *
-i915_gem_request_reference(struct drm_i915_gem_request *req)
-{
-	if (req)
-		kref_get(&req->ref);
-	return req;
-}
-
-static inline void
-i915_gem_request_unreference(struct drm_i915_gem_request *req)
-{
-	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
-	kref_put(&req->ref, i915_gem_request_free);
-}
-
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
-	struct drm_device *dev;
-
-	if (!req)
-		return;
-
-	dev = req->ring->dev;
-	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
-		mutex_unlock(&dev->struct_mutex);
-}
-
-static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
-					   struct drm_i915_gem_request *src)
-{
-	if (src)
-		i915_gem_request_reference(src);
-
-	if (*pdst)
-		i915_gem_request_unreference(*pdst);
-
-	*pdst = src;
-}
-
-/*
- * XXX: i915_gem_request_completed should be here but currently needs the
- * definition of i915_seqno_passed() which is below. It will be moved in
- * a later patch when the call to i915_seqno_passed() is obsoleted...
- */
+void i915_gem_track_fb(struct drm_i915_gem_object *old,
+		       struct drm_i915_gem_object *new,
+		       unsigned frontbuffer_bits);
 
 /*
  * A command that requires special handling by the command parser.
@@ -2956,28 +2793,6 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_mode_create_dumb *args);
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
 		      uint32_t handle, uint64_t *offset);
-/**
- * Returns true if seq1 is later than seq2.
- */
-static inline bool
-i915_seqno_passed(uint32_t seq1, uint32_t seq2)
-{
-	return (int32_t)(seq1 - seq2) >= 0;
-}
-
-static inline bool i915_gem_request_started(struct drm_i915_gem_request *req)
-{
-	return i915_seqno_passed(intel_ring_get_seqno(req->ring),
-				 req->previous_seqno);
-}
-
-static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
-{
-	return i915_seqno_passed(intel_ring_get_seqno(req->ring),
-				 req->seqno);
-}
-
-int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
 int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 
 struct drm_i915_gem_request *
@@ -3036,18 +2851,6 @@ void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct drm_i915_gem_request *req,
-			struct drm_i915_gem_object *batch_obj,
-			bool flush_caches);
-#define i915_add_request(req) \
-	__i915_add_request(req, NULL, true)
-#define i915_add_request_no_flush(req) \
-	__i915_add_request(req, NULL, false)
-int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
-			s64 *timeout,
-			struct intel_rps_client *rps);
-int __must_check i915_wait_request(struct drm_i915_gem_request *req);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ea9344503bf6..68a25617ca7a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1103,365 +1103,6 @@ put_rpm:
 	return ret;
 }
 
-static int
-i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
-{
-	if (__i915_terminally_wedged(reset_counter))
-		return -EIO;
-
-	if (__i915_reset_in_progress(reset_counter)) {
-		/* Non-interruptible callers can't handle -EAGAIN, hence return
-		 * -EIO unconditionally for these. */
-		if (!interruptible)
-			return -EIO;
-
-		return -EAGAIN;
-	}
-
-	return 0;
-}
-
-static unsigned long local_clock_us(unsigned *cpu)
-{
-	unsigned long t;
-
-	/* Cheaply and approximately convert from nanoseconds to microseconds.
-	 * The result and subsequent calculations are also defined in the same
-	 * approximate microseconds units. The principal source of timing
-	 * error here is from the simple truncation.
-	 *
-	 * Note that local_clock() is only defined wrt to the current CPU;
-	 * the comparisons are no longer valid if we switch CPUs. Instead of
-	 * blocking preemption for the entire busywait, we can detect the CPU
-	 * switch and use that as indicator of system load and a reason to
-	 * stop busywaiting, see busywait_stop().
-	 */
-	*cpu = get_cpu();
-	t = local_clock() >> 10;
-	put_cpu();
-
-	return t;
-}
-
-static bool busywait_stop(unsigned long timeout, unsigned cpu)
-{
-	unsigned this_cpu;
-
-	if (time_after(local_clock_us(&this_cpu), timeout))
-		return true;
-
-	return this_cpu != cpu;
-}
-
-static bool __i915_spin_request(struct drm_i915_gem_request *req,
-				struct intel_wait *wait,
-				int state)
-{
-	unsigned long timeout;
-	unsigned cpu;
-
-	/* When waiting for high frequency requests, e.g. during synchronous
-	 * rendering split between the CPU and GPU, the finite amount of time
-	 * required to set up the irq and wait upon it limits the response
-	 * rate. By busywaiting on the request completion for a short while we
-	 * can service the high frequency waits as quick as possible. However,
-	 * if it is a slow request, we want to sleep as quickly as possible.
-	 * The tradeoff between waiting and sleeping is roughly the time it
-	 * takes to sleep on a request, on the order of a microsecond.
-	 */
-
-	/* Only spin if we know the GPU is processing this request */
-	if (!i915_gem_request_started(req))
-		return false;
-
-	timeout = local_clock_us(&cpu) + 5;
-	do {
-		if (i915_gem_request_completed(req))
-			return true;
-
-		if (signal_pending_state(state, wait->task))
-			break;
-
-		if (busywait_stop(timeout, cpu))
-			break;
-
-		cpu_relax_lowlatency();
-
-		/* Break the loop if we have consumed the timeslice (or been
-		 * preempted) or when either the background thread has
-		 * enabled the interrupt, or the IRQ itself has fired.
-		 */
-	} while (!need_resched() && wait->task->state == state);
-
-	return false;
-}
-
-/**
- * __i915_wait_request - wait until execution of request has finished
- * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- *
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
- *
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-int __i915_wait_request(struct drm_i915_gem_request *req,
-			bool interruptible,
-			s64 *timeout,
-			struct intel_rps_client *rps)
-{
-	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-	struct intel_wait wait;
-	unsigned long timeout_remain;
-	int ret = 0;
-
-	might_sleep();
-
-	if (list_empty(&req->list))
-		return 0;
-
-	if (i915_gem_request_completed(req))
-		return 0;
-
-	timeout_remain = MAX_SCHEDULE_TIMEOUT;
-	if (timeout) {
-		if (WARN_ON(*timeout < 0))
-			return -EINVAL;
-
-		if (*timeout == 0)
-			return -ETIME;
-
-		/* Record current time in case interrupted, or wedged */
-		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
-		*timeout += ktime_get_raw_ns();
-	}
-
-	trace_i915_gem_request_wait_begin(req);
-
-	/* This client is about to stall waiting for the GPU. In many cases
-	 * this is undesirable and limits the throughput of the system, as
-	 * many clients cannot continue processing user input/output whilst
-	 * blocked. RPS autotuning may take tens of milliseconds to respond
-	 * to the GPU load and thus incurs additional latency for the client.
-	 * We can circumvent that by promoting the GPU frequency to maximum
-	 * before we wait. This makes the GPU throttle up much more quickly
-	 * (good for benchmarks and user experience, e.g. window animations),
-	 * but at a cost of spending more power processing the workload
-	 * (bad for battery). Not all clients even want their results
-	 * immediately and for them we should just let the GPU select its own
-	 * frequency to maximise efficiency. To prevent a single client from
-	 * forcing the clocks too high for the whole system, we only allow
-	 * each client to waitboost once in a busy period.
-	 */
-	if (INTEL_INFO(req->i915)->gen >= 6)
-		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
-
-	intel_wait_init(&wait, req->seqno);
-	set_task_state(wait.task, state);
-
-	/* Optimistic spin for the next ~jiffie before touching IRQs */
-	if (intel_engine_add_wait(req->ring, &wait)) {
-		if (__i915_spin_request(req, &wait, state))
-			goto complete;
-
-		/* In order to check that we haven't missed the interrupt
-		 * as we enabled it, we need to kick ourselves to do a
-		 * coherent check on the seqno before we sleep.
-		 */
-		if (intel_engine_enable_wait_irq(req->ring, &wait))
-			goto wakeup;
-	}
-
-	for (;;) {
-		if (signal_pending_state(state, wait.task)) {
-			ret = -ERESTARTSYS;
-			break;
-		}
-
-		/* Ensure that even if the GPU hangs, we get woken up. */
-		i915_queue_hangcheck(req->i915);
-
-		timeout_remain = io_schedule_timeout(timeout_remain);
-		if (timeout_remain == 0) {
-			ret = -ETIME;
-			break;
-		}
-
-		if (intel_wait_complete(&wait))
-			break;
-
-wakeup:
-		set_task_state(wait.task, state);
-
-		/* Carefully check if the request is complete, giving time
-		 * for the seqno to be visible following the interrupt.
-		 * We also have to check in case we are kicked by the GPU
-		 * reset in order to drop the struct_mutex.
-		 */
-		if (__i915_request_irq_complete(req))
-			break;
-	}
-
-complete:
-	intel_engine_remove_wait(req->ring, &wait);
-	__set_task_state(wait.task, TASK_RUNNING);
-	trace_i915_gem_request_wait_end(req);
-
-	if (timeout) {
-		*timeout -= ktime_get_raw_ns();
-		if (*timeout < 0)
-			*timeout = 0;
-
-		/*
-		 * Apparently ktime isn't accurate enough and occasionally has a
-		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
-		 * things up to make the test happy. We allow up to 1 jiffy.
-		 *
-		 * This is a regrssion from the timespec->ktime conversion.
-		 */
-		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
-			*timeout = 0;
-	}
-
-	if (ret == 0 && rps && req->seqno == req->ring->last_submitted_seqno) {
-		/* The GPU is now idle and this client has stalled.
-		 * Since no other client has submitted a request in the
-		 * meantime, assume that this client is the only one
-		 * supplying work to the GPU but is unable to keep that
-		 * work supplied because it is waiting. Since the GPU is
-		 * then never kept fully busy, RPS autoclocking will
-		 * keep the clocks relatively low, causing further delays.
-		 * Compensate by giving the synchronous client credit for
-		 * a waitboost next time.
-		 */
-		spin_lock(&req->i915->rps.client_lock);
-		list_del_init(&rps->link);
-		spin_unlock(&req->i915->rps.client_lock);
-	}
-
-	return ret;
-}
-
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-				   struct drm_file *file)
-{
-	struct drm_i915_private *dev_private;
-	struct drm_i915_file_private *file_priv;
-
-	WARN_ON(!req || !file || req->file_priv);
-
-	if (!req || !file)
-		return -EINVAL;
-
-	if (req->file_priv)
-		return -EINVAL;
-
-	dev_private = req->ring->dev->dev_private;
-	file_priv = file->driver_priv;
-
-	spin_lock(&file_priv->mm.lock);
-	req->file_priv = file_priv;
-	list_add_tail(&req->client_list, &file_priv->mm.request_list);
-	spin_unlock(&file_priv->mm.lock);
-
-	req->pid = get_pid(task_pid(current));
-
-	return 0;
-}
-
-static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
-{
-	struct drm_i915_file_private *file_priv = request->file_priv;
-
-	if (!file_priv)
-		return;
-
-	spin_lock(&file_priv->mm.lock);
-	list_del(&request->client_list);
-	request->file_priv = NULL;
-	spin_unlock(&file_priv->mm.lock);
-
-	put_pid(request->pid);
-	request->pid = NULL;
-}
-
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
-{
-	trace_i915_gem_request_retire(request);
-
-	/* We know the GPU must have read the request to have
-	 * sent us the seqno + interrupt, so use the position
-	 * of tail of the request to update the last known position
-	 * of the GPU head.
-	 *
-	 * Note this requires that we are always called in request
-	 * completion order.
-	 */
-	request->ringbuf->last_retired_head = request->postfix;
-
-	list_del_init(&request->list);
-	i915_gem_request_remove_from_client(request);
-
-	i915_gem_request_unreference(request);
-}
-
-static void
-__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->ring;
-	struct drm_i915_gem_request *tmp;
-
-	lockdep_assert_held(&engine->dev->struct_mutex);
-
-	if (list_empty(&req->list))
-		return;
-
-	do {
-		tmp = list_first_entry(&engine->request_list,
-				       typeof(*tmp), list);
-
-		i915_gem_request_retire(tmp);
-	} while (tmp != req);
-
-	WARN_ON(i915_verify_lists(engine->dev));
-}
-
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct drm_i915_gem_request *req)
-{
-	struct drm_device *dev;
-	struct drm_i915_private *dev_priv;
-	bool interruptible;
-	int ret;
-
-	BUG_ON(req == NULL);
-
-	dev = req->ring->dev;
-	dev_priv = dev->dev_private;
-	interruptible = dev_priv->mm.interruptible;
-
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	ret = __i915_wait_request(req, interruptible, NULL, NULL);
-	if (ret)
-		return ret;
-
-	__i915_gem_request_retire__upto(req);
-	return 0;
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -1515,7 +1156,7 @@ i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
 	else if (obj->last_write_req == req)
 		i915_gem_object_retire__write(obj);
 
-	__i915_gem_request_retire__upto(req);
+	i915_gem_request_retire_upto(req);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -2441,94 +2082,6 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	drm_gem_object_unreference(&obj->base);
 }
 
-static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_engine_cs *ring;
-	int ret, i, j;
-
-	/* Carefully retire all requests without writing to the rings */
-	for_each_ring(ring, dev_priv, i) {
-		ret = intel_ring_idle(ring);
-		if (ret)
-			return ret;
-	}
-	i915_gem_retire_requests(dev);
-
-	/* Finally reset hw state */
-	for_each_ring(ring, dev_priv, i) {
-		intel_ring_init_seqno(ring, seqno);
-
-		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
-			ring->semaphore.sync_seqno[j] = 0;
-	}
-
-	return 0;
-}
-
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret;
-
-	if (seqno == 0)
-		return -EINVAL;
-
-	/* HWS page needs to be set less than what we
-	 * will inject to ring
-	 */
-	ret = i915_gem_init_seqno(dev, seqno - 1);
-	if (ret)
-		return ret;
-
-	/* Carefully set the last_seqno value so that wrap
-	 * detection still works
-	 */
-	dev_priv->next_seqno = seqno;
-	dev_priv->last_seqno = seqno - 1;
-	if (dev_priv->last_seqno == 0)
-		dev_priv->last_seqno--;
-
-	return 0;
-}
-
-int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	/* reserve 0 for non-seqno */
-	if (dev_priv->next_seqno == 0) {
-		int ret = i915_gem_init_seqno(dev, 0);
-		if (ret)
-			return ret;
-
-		dev_priv->next_seqno = 1;
-	}
-
-	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
-	return 0;
-}
-
-static void i915_gem_mark_busy(struct drm_i915_private *dev_priv)
-{
-	if (dev_priv->mm.busy)
-		return;
-
-	intel_runtime_pm_get_noresume(dev_priv);
-
-	i915_update_gfx_val(dev_priv);
-	if (INTEL_INFO(dev_priv)->gen >= 6)
-		gen6_rps_busy(dev_priv);
-
-	queue_delayed_work(dev_priv->wq,
-			   &dev_priv->mm.retire_work,
-			   round_jiffies_up_relative(HZ));
-
-	dev_priv->mm.busy = true;
-}
-
 static void i915_gem_mark_idle(struct drm_i915_private *dev_priv)
 {
 	dev_priv->mm.busy = false;
@@ -2542,92 +2095,6 @@ static void i915_gem_mark_idle(struct drm_i915_private *dev_priv)
 	intel_runtime_pm_put(dev_priv);
 }
 
-/*
- * NB: This function is not allowed to fail. Doing so would mean the the
- * request is not being tracked for completion but the work itself is
- * going to happen on the hardware. This would be a Bad Thing(tm).
- */
-void __i915_add_request(struct drm_i915_gem_request *request,
-			struct drm_i915_gem_object *obj,
-			bool flush_caches)
-{
-	struct intel_engine_cs *ring;
-	struct drm_i915_private *dev_priv;
-	struct intel_ringbuffer *ringbuf;
-	u32 request_start;
-	int ret;
-
-	if (WARN_ON(request == NULL))
-		return;
-
-	ring = request->ring;
-	dev_priv = ring->dev->dev_private;
-	ringbuf = request->ringbuf;
-
-	/*
-	 * To ensure that this call will not fail, space for its emissions
-	 * should already have been reserved in the ring buffer. Let the ring
-	 * know that it is time to use that space up.
-	 */
-	intel_ring_reserved_space_use(ringbuf);
-
-	request_start = intel_ring_get_tail(ringbuf);
-	/*
-	 * Emit any outstanding flushes - execbuf can fail to emit the flush
-	 * after having emitted the batchbuffer command. Hence we need to fix
-	 * things up similar to emitting the lazy request. The difference here
-	 * is that the flush _must_ happen before the next request, no matter
-	 * what.
-	 */
-	if (flush_caches) {
-		if (i915.enable_execlists)
-			ret = logical_ring_flush_all_caches(request);
-		else
-			ret = intel_ring_flush_all_caches(request);
-		/* Not allowed to fail! */
-		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
-	}
-
-	/* Record the position of the start of the request so that
-	 * should we detect the updated seqno part-way through the
-	 * GPU processing the request, we never over-estimate the
-	 * position of the head.
-	 */
-	request->postfix = intel_ring_get_tail(ringbuf);
-
-	if (i915.enable_execlists)
-		ret = ring->emit_request(request);
-	else {
-		ret = ring->add_request(request);
-
-		request->tail = intel_ring_get_tail(ringbuf);
-	}
-	/* Not allowed to fail! */
-	WARN(ret, "emit|add_request failed: %d!\n", ret);
-
-	request->head = request_start;
-
-	/* Whilst this request exists, batch_obj will be on the
-	 * active_list, and so will hold the active reference. Only when this
-	 * request is retired will the the batch_obj be moved onto the
-	 * inactive_list and lose its active reference. Hence we do not need
-	 * to explicitly hold another reference here.
-	 */
-	request->batch_obj = obj;
-
-	request->emitted_jiffies = jiffies;
-	request->previous_seqno = ring->last_submitted_seqno;
-	ring->last_submitted_seqno = request->seqno;
-	list_add_tail(&request->list, &ring->request_list);
-
-	trace_i915_gem_request_add(request);
-
-	i915_gem_mark_busy(dev_priv);
-
-	/* Sanity check that the reserved size was large enough. */
-	intel_ring_reserved_space_end(ringbuf);
-}
-
 static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
 				   const struct intel_context *ctx)
 {
@@ -2666,109 +2133,6 @@ static void i915_set_reset_status(struct drm_i915_private *dev_priv,
 	}
 }
 
-void i915_gem_request_free(struct kref *req_ref)
-{
-	struct drm_i915_gem_request *req = container_of(req_ref,
-						 typeof(*req), ref);
-	struct intel_context *ctx = req->ctx;
-
-	if (req->file_priv)
-		i915_gem_request_remove_from_client(req);
-
-	if (ctx) {
-		if (i915.enable_execlists) {
-			if (ctx != req->ring->default_context)
-				intel_lr_context_unpin(req);
-		}
-
-		i915_gem_context_unreference(ctx);
-	}
-
-	kmem_cache_free(req->i915->requests, req);
-}
-
-int i915_gem_request_alloc(struct intel_engine_cs *ring,
-			   struct intel_context *ctx,
-			   struct drm_i915_gem_request **req_out)
-{
-	struct drm_i915_private *dev_priv = to_i915(ring->dev);
-	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-	struct drm_i915_gem_request *req;
-	int ret;
-
-	if (!req_out)
-		return -EINVAL;
-
-	*req_out = NULL;
-
-	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
-	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
-	 * and restart.
-	 */
-	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
-	if (ret)
-		return ret;
-
-	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
-	if (req == NULL)
-		return -ENOMEM;
-
-	ret = i915_gem_get_seqno(ring->dev, &req->seqno);
-	if (ret)
-		goto err;
-
-	kref_init(&req->ref);
-	req->i915 = dev_priv;
-	req->ring = ring;
-	req->reset_counter = reset_counter;
-	req->ctx  = ctx;
-	i915_gem_context_reference(req->ctx);
-
-	if (i915.enable_execlists)
-		ret = intel_logical_ring_alloc_request_extras(req);
-	else
-		ret = intel_ring_alloc_request_extras(req);
-	if (ret) {
-		i915_gem_context_unreference(req->ctx);
-		goto err;
-	}
-
-	/*
-	 * Reserve space in the ring buffer for all the commands required to
-	 * eventually emit this request. This is to guarantee that the
-	 * i915_add_request() call can't fail. Note that the reserve may need
-	 * to be redone if the request is not actually submitted straight
-	 * away, e.g. because a GPU scheduler has deferred it.
-	 */
-	if (i915.enable_execlists)
-		ret = intel_logical_ring_reserve_space(req);
-	else
-		ret = intel_ring_reserve_space(req);
-	if (ret) {
-		/*
-		 * At this point, the request is fully allocated even if not
-		 * fully prepared. Thus it can be cleaned up using the proper
-		 * free code.
-		 */
-		i915_gem_request_cancel(req);
-		return ret;
-	}
-
-	*req_out = req;
-	return 0;
-
-err:
-	kmem_cache_free(dev_priv->requests, req);
-	return ret;
-}
-
-void i915_gem_request_cancel(struct drm_i915_gem_request *req)
-{
-	intel_ring_reserved_space_cancel(req->ringbuf);
-
-	i915_gem_request_unreference(req);
-}
-
 struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *ring)
 {
@@ -2850,14 +2214,14 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 	 * implicit references on things like e.g. ppgtt address spaces through
 	 * the request.
 	 */
-	while (!list_empty(&ring->request_list)) {
+	if (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
-		request = list_first_entry(&ring->request_list,
-					   struct drm_i915_gem_request,
-					   list);
+		request = list_last_entry(&ring->request_list,
+					  struct drm_i915_gem_request,
+					  list);
 
-		i915_gem_request_retire(request);
+		i915_gem_request_retire_upto(request);
 	}
 
 	/* Having flushed all requests from all queues, we know that all
@@ -2922,7 +2286,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 		if (!i915_gem_request_completed(request))
 			break;
 
-		i915_gem_request_retire(request);
+		i915_gem_request_retire_upto(request);
 	}
 
 	/* Move any buffers on the active list that are no longer referenced
@@ -3053,7 +2417,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
 			goto retire;
 
 		if (i915_gem_request_completed(req)) {
-			__i915_gem_request_retire__upto(req);
+			i915_gem_request_retire_upto(req);
 retire:
 			i915_gem_object_retire__read(obj, i);
 		}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
new file mode 100644
index 000000000000..b4ede6dd7b20
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -0,0 +1,659 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "i915_drv.h"
+
+static int
+i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
+{
+	if (__i915_terminally_wedged(reset_counter))
+		return -EIO;
+
+	if (__i915_reset_in_progress(reset_counter)) {
+		/* Non-interruptible callers can't handle -EAGAIN, hence return
+		 * -EIO unconditionally for these. */
+		if (!interruptible)
+			return -EIO;
+
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
+{
+	struct intel_engine_cs *ring;
+	int ret, i, j;
+
+	/* Carefully retire all requests without writing to the rings */
+	for_each_ring(ring, dev_priv, i) {
+		ret = intel_ring_idle(ring);
+		if (ret)
+			return ret;
+	}
+	i915_gem_retire_requests(dev_priv->dev);
+
+	/* Finally reset hw state */
+	for_each_ring(ring, dev_priv, i) {
+		intel_ring_init_seqno(ring, seqno);
+
+		for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
+			ring->semaphore.sync_seqno[j] = 0;
+	}
+
+	return 0;
+}
+
+int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	if (seqno == 0)
+		return -EINVAL;
+
+	/* The seqno in the HWS page must be set to one less than the
+	 * value we will inject into the ring
+	 */
+	ret = i915_gem_init_seqno(dev_priv, seqno - 1);
+	if (ret)
+		return ret;
+
+	/* Carefully set the last_seqno value so that wrap
+	 * detection still works
+	 */
+	dev_priv->next_seqno = seqno;
+	dev_priv->last_seqno = seqno - 1;
+	if (dev_priv->last_seqno == 0)
+		dev_priv->last_seqno--;
+
+	return 0;
+}
+
+static int
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
+{
+	/* reserve 0 for non-seqno */
+	if (unlikely(dev_priv->next_seqno == 0)) {
+		int ret = i915_gem_init_seqno(dev_priv, 0);
+		if (ret)
+			return ret;
+
+		dev_priv->next_seqno = 1;
+	}
+
+	*seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+	return 0;
+}
+
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out)
+{
+	struct drm_i915_private *dev_priv = to_i915(ring->dev);
+	unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
+	struct drm_i915_gem_request *req;
+	int ret;
+
+	if (!req_out)
+		return -EINVAL;
+
+	*req_out = NULL;
+
+	/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+	 * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
+	 * and restart.
+	 */
+	ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
+	if (ret)
+		return ret;
+
+	req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+	if (req == NULL)
+		return -ENOMEM;
+
+	ret = i915_gem_get_seqno(dev_priv, &req->seqno);
+	if (ret)
+		goto err;
+
+	kref_init(&req->ref);
+	req->i915 = dev_priv;
+	req->ring = ring;
+	req->reset_counter = reset_counter;
+	req->ctx  = ctx;
+	i915_gem_context_reference(req->ctx);
+
+	if (i915.enable_execlists)
+		ret = intel_logical_ring_alloc_request_extras(req);
+	else
+		ret = intel_ring_alloc_request_extras(req);
+	if (ret) {
+		i915_gem_context_unreference(req->ctx);
+		goto err;
+	}
+
+	/*
+	 * Reserve space in the ring buffer for all the commands required to
+	 * eventually emit this request. This is to guarantee that the
+	 * i915_add_request() call can't fail. Note that the reserve may need
+	 * to be redone if the request is not actually submitted straight
+	 * away, e.g. because a GPU scheduler has deferred it.
+	 */
+	if (i915.enable_execlists)
+		ret = intel_logical_ring_reserve_space(req);
+	else
+		ret = intel_ring_reserve_space(req);
+	if (ret) {
+		/*
+		 * At this point, the request is fully allocated even if not
+		 * fully prepared. Thus it can be cleaned up using the proper
+		 * free code.
+		 */
+		i915_gem_request_cancel(req);
+		return ret;
+	}
+
+	*req_out = req;
+	return 0;
+
+err:
+	kmem_cache_free(dev_priv->requests, req);
+	return ret;
+}
+
+void i915_gem_request_cancel(struct drm_i915_gem_request *req)
+{
+	intel_ring_reserved_space_cancel(req->ringbuf);
+
+	i915_gem_request_unreference(req);
+}
+
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file)
+{
+	struct drm_i915_private *dev_private;
+	struct drm_i915_file_private *file_priv;
+
+	WARN_ON(!req || !file || req->file_priv);
+
+	if (!req || !file)
+		return -EINVAL;
+
+	if (req->file_priv)
+		return -EINVAL;
+
+	dev_private = req->ring->dev->dev_private;
+	file_priv = file->driver_priv;
+
+	spin_lock(&file_priv->mm.lock);
+	req->file_priv = file_priv;
+	list_add_tail(&req->client_list, &file_priv->mm.request_list);
+	spin_unlock(&file_priv->mm.lock);
+
+	req->pid = get_pid(task_pid(current));
+
+	return 0;
+}
+
+static inline void
+i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
+{
+	struct drm_i915_file_private *file_priv = request->file_priv;
+
+	if (!file_priv)
+		return;
+
+	spin_lock(&file_priv->mm.lock);
+	list_del(&request->client_list);
+	request->file_priv = NULL;
+	spin_unlock(&file_priv->mm.lock);
+
+	put_pid(request->pid);
+	request->pid = NULL;
+}
+
+static void i915_gem_request_retire(struct drm_i915_gem_request *request)
+{
+	trace_i915_gem_request_retire(request);
+
+	/* We know the GPU must have read the request to have
+	 * sent us the seqno + interrupt, so use the position
+	 * of tail of the request to update the last known position
+	 * of the GPU head.
+	 *
+	 * Note this requires that we are always called in request
+	 * completion order.
+	 */
+	request->ringbuf->last_retired_head = request->postfix;
+
+	list_del_init(&request->list);
+	i915_gem_request_remove_from_client(request);
+
+	i915_gem_request_unreference(request);
+}
+
+void
+i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
+{
+	struct intel_engine_cs *engine = req->ring;
+	struct drm_i915_gem_request *tmp;
+
+	lockdep_assert_held(&engine->dev->struct_mutex);
+
+	if (list_empty(&req->list))
+		return;
+
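+	/* Requests are retired strictly in order of submission: walk from
+	 * the oldest request on the engine up to and including @req.
+	 */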
+	do {
+		tmp = list_first_entry(&engine->request_list,
+				       typeof(*tmp), list);
+
+		i915_gem_request_retire(tmp);
+	} while (tmp != req);
+
+	WARN_ON(i915_verify_lists(engine->dev));
+}
+
+static void i915_gem_mark_busy(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->mm.busy)
+		return;
+
+	intel_runtime_pm_get_noresume(dev_priv);
+
+	i915_update_gfx_val(dev_priv);
+	if (INTEL_INFO(dev_priv)->gen >= 6)
+		gen6_rps_busy(dev_priv);
+
+	queue_delayed_work(dev_priv->wq,
+			   &dev_priv->mm.retire_work,
+			   round_jiffies_up_relative(HZ));
+
+	dev_priv->mm.busy = true;
+}
+
+/*
+ * NB: This function is not allowed to fail. Doing so would mean the
+ * request is not being tracked for completion but the work itself is
+ * going to happen on the hardware. This would be a Bad Thing(tm).
+ */
+void __i915_add_request(struct drm_i915_gem_request *request,
+			struct drm_i915_gem_object *obj,
+			bool flush_caches)
+{
+	struct intel_engine_cs *ring;
+	struct drm_i915_private *dev_priv;
+	struct intel_ringbuffer *ringbuf;
+	u32 request_start;
+	int ret;
+
+	if (WARN_ON(request == NULL))
+		return;
+
+	ring = request->ring;
+	dev_priv = ring->dev->dev_private;
+	ringbuf = request->ringbuf;
+
+	/*
+	 * To ensure that this call will not fail, space for its emissions
+	 * should already have been reserved in the ring buffer. Let the ring
+	 * know that it is time to use that space up.
+	 */
+	intel_ring_reserved_space_use(ringbuf);
+
+	request_start = intel_ring_get_tail(ringbuf);
+	/*
+	 * Emit any outstanding flushes - execbuf can fail to emit the flush
+	 * after having emitted the batchbuffer command. Hence we need to fix
+	 * things up similar to emitting the lazy request. The difference here
+	 * is that the flush _must_ happen before the next request, no matter
+	 * what.
+	 */
+	if (flush_caches) {
+		if (i915.enable_execlists)
+			ret = logical_ring_flush_all_caches(request);
+		else
+			ret = intel_ring_flush_all_caches(request);
+		/* Not allowed to fail! */
+		WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+	}
+
+	/* Record the position of the start of the request so that
+	 * should we detect the updated seqno part-way through the
+	 * GPU processing the request, we never over-estimate the
+	 * position of the head.
+	 */
+	request->postfix = intel_ring_get_tail(ringbuf);
+
+	if (i915.enable_execlists)
+		ret = ring->emit_request(request);
+	else {
+		ret = ring->add_request(request);
+
+		request->tail = intel_ring_get_tail(ringbuf);
+	}
+	/* Not allowed to fail! */
+	WARN(ret, "emit|add_request failed: %d!\n", ret);
+
+	request->head = request_start;
+
+	/* Whilst this request exists, batch_obj will be on the
+	 * active_list, and so will hold the active reference. Only when this
+	 * request is retired will the batch_obj be moved onto the
+	 * inactive_list and lose its active reference. Hence we do not need
+	 * to explicitly hold another reference here.
+	 */
+	request->batch_obj = obj;
+
+	request->emitted_jiffies = jiffies;
+	request->previous_seqno = ring->last_submitted_seqno;
+	ring->last_submitted_seqno = request->seqno;
+	list_add_tail(&request->list, &ring->request_list);
+
+	trace_i915_gem_request_add(request);
+
+	i915_gem_mark_busy(dev_priv);
+
+	/* Sanity check that the reserved size was large enough. */
+	intel_ring_reserved_space_end(ringbuf);
+}
+
+static unsigned long local_clock_us(unsigned *cpu)
+{
+	unsigned long t;
+
+	/* Cheaply and approximately convert from nanoseconds to microseconds.
+	 * The result and subsequent calculations are also defined in the same
+	 * approximate microseconds units. The principal source of timing
+	 * error here is from the simple truncation.
+	 *
+	 * Note that local_clock() is only defined wrt the current CPU;
+	 * the comparisons are no longer valid if we switch CPUs. Instead of
+	 * blocking preemption for the entire busywait, we can detect the CPU
+	 * switch and use that as indicator of system load and a reason to
+	 * stop busywaiting, see busywait_stop().
+	 */
+	*cpu = get_cpu();
+	t = local_clock() >> 10;
+	put_cpu();
+
+	return t;
+}
+
+static bool busywait_stop(unsigned long timeout, unsigned cpu)
+{
+	unsigned this_cpu;
+
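+	/* Stop the busywait once the time budget is spent, or if we have
+	 * been migrated to another CPU (local_clock() values cannot be
+	 * compared across CPUs, see local_clock_us() above).
+	 */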
+	if (time_after(local_clock_us(&this_cpu), timeout))
+		return true;
+
+	return this_cpu != cpu;
+}
+
+static bool __i915_spin_request(struct drm_i915_gem_request *req,
+				struct intel_wait *wait,
+				int state)
+{
+	unsigned long timeout;
+	unsigned cpu;
+
+	/* When waiting for high frequency requests, e.g. during synchronous
+	 * rendering split between the CPU and GPU, the finite amount of time
+	 * required to set up the irq and wait upon it limits the response
+	 * rate. By busywaiting on the request completion for a short while we
+	 * can service the high frequency waits as quickly as possible. However,
+	 * if it is a slow request, we want to sleep as quickly as possible.
+	 * The tradeoff between waiting and sleeping is roughly the time it
+	 * takes to sleep on a request, on the order of a microsecond.
+	 */
+
+	/* Only spin if we know the GPU is processing this request */
+	if (!i915_gem_request_started(req))
+		return false;
+
+	timeout = local_clock_us(&cpu) + 5;
+	do {
+		if (i915_gem_request_completed(req))
+			return true;
+
+		if (signal_pending_state(state, wait->task))
+			break;
+
+		if (busywait_stop(timeout, cpu))
+			break;
+
+		cpu_relax_lowlatency();
+
+		/* Break the loop if we have consumed the timeslice (or been
+		 * preempted) or when either the background thread has
+		 * enabled the interrupt, or the IRQ itself has fired.
+		 */
+	} while (!need_resched() && wait->task->state == state);
+
+	return false;
+}
+
+/**
+ * __i915_wait_request - wait until execution of request has finished
+ * @req: the request to wait upon
+ * @interruptible: do an interruptible wait (normally yes)
+ * @timeout: in - how long to wait (NULL forever); out - how much time remaining
+ * @rps: RPS client to charge for any waitboost applied while waiting (or NULL)
+ *
+ * Note: It is of utmost importance that the passed in seqno and reset_counter
+ * values have been read by the caller in an smp safe manner. Where read-side
+ * locks are involved, it is sufficient to read the reset_counter before
+ * unlocking the lock that protects the seqno. For lockless tricks, the
+ * reset_counter _must_ be read before, and an appropriate smp_rmb must be
+ * inserted.
+ *
+ * Returns 0 if the request was found within the allotted time. Else returns the
+ * errno with remaining time filled in timeout argument.
+ */
+int __i915_wait_request(struct drm_i915_gem_request *req,
+			bool interruptible,
+			s64 *timeout,
+			struct intel_rps_client *rps)
+{
+	int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+	struct intel_wait wait;
+	unsigned long timeout_remain;
+	int ret = 0;
+
+	might_sleep();
+
+	if (list_empty(&req->list))
+		return 0;
+
+	if (i915_gem_request_completed(req))
+		return 0;
+
+	timeout_remain = MAX_SCHEDULE_TIMEOUT;
+	if (timeout) {
+		if (WARN_ON(*timeout < 0))
+			return -EINVAL;
+
+		if (*timeout == 0)
+			return -ETIME;
+
+		/* Record current time in case interrupted, or wedged */
+		timeout_remain = nsecs_to_jiffies_timeout(*timeout);
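+		/* Stash the absolute deadline in *timeout; the remaining
+		 * time is recovered by subtracting the clock again on exit.
+		 */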
+		*timeout += ktime_get_raw_ns();
+	}
+
+	trace_i915_gem_request_wait_begin(req);
+
+	/* This client is about to stall waiting for the GPU. In many cases
+	 * this is undesirable and limits the throughput of the system, as
+	 * many clients cannot continue processing user input/output whilst
+	 * blocked. RPS autotuning may take tens of milliseconds to respond
+	 * to the GPU load and thus incurs additional latency for the client.
+	 * We can circumvent that by promoting the GPU frequency to maximum
+	 * before we wait. This makes the GPU throttle up much more quickly
+	 * (good for benchmarks and user experience, e.g. window animations),
+	 * but at a cost of spending more power processing the workload
+	 * (bad for battery). Not all clients even want their results
+	 * immediately and for them we should just let the GPU select its own
+	 * frequency to maximise efficiency. To prevent a single client from
+	 * forcing the clocks too high for the whole system, we only allow
+	 * each client to waitboost once in a busy period.
+	 */
+	if (INTEL_INFO(req->i915)->gen >= 6)
+		gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
+
+	intel_wait_init(&wait, req->seqno);
+	set_task_state(wait.task, state);
+
+	/* Optimistic spin for the next ~jiffie before touching IRQs */
+	if (intel_engine_add_wait(req->ring, &wait)) {
+		if (__i915_spin_request(req, &wait, state))
+			goto complete;
+
+		/* In order to check that we haven't missed the interrupt
+		 * as we enabled it, we need to kick ourselves to do a
+		 * coherent check on the seqno before we sleep.
+		 */
+		if (intel_engine_enable_wait_irq(req->ring, &wait))
+			goto wakeup;
+	}
+
+	for (;;) {
+		if (signal_pending_state(state, wait.task)) {
+			ret = -ERESTARTSYS;
+			break;
+		}
+
+		/* Ensure that even if the GPU hangs, we get woken up. */
+		i915_queue_hangcheck(req->i915);
+
+		timeout_remain = io_schedule_timeout(timeout_remain);
+		if (timeout_remain == 0) {
+			ret = -ETIME;
+			break;
+		}
+
+		if (intel_wait_complete(&wait))
+			break;
+
+wakeup:
+		set_task_state(wait.task, state);
+
+		/* Carefully check if the request is complete, giving time
+		 * for the seqno to be visible following the interrupt.
+		 * We also have to check in case we are kicked by the GPU
+		 * reset in order to drop the struct_mutex.
+		 */
+		if (__i915_request_irq_complete(req))
+			break;
+	}
+
+complete:
+	intel_engine_remove_wait(req->ring, &wait);
+	__set_task_state(wait.task, TASK_RUNNING);
+	trace_i915_gem_request_wait_end(req);
+
+	if (timeout) {
+		*timeout -= ktime_get_raw_ns();
+		if (*timeout < 0)
+			*timeout = 0;
+
+		/*
+		 * Apparently ktime isn't accurate enough and occasionally has a
+		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
+		 * things up to make the test happy. We allow up to 1 jiffy.
+		 *
+		 * This is a regression from the timespec->ktime conversion.
+		 */
+		if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
+			*timeout = 0;
+	}
+
+	if (ret == 0 && rps && req->seqno == req->ring->last_submitted_seqno) {
+		/* The GPU is now idle and this client has stalled.
+		 * Since no other client has submitted a request in the
+		 * meantime, assume that this client is the only one
+		 * supplying work to the GPU but is unable to keep that
+		 * work supplied because it is waiting. Since the GPU is
+		 * then never kept fully busy, RPS autoclocking will
+		 * keep the clocks relatively low, causing further delays.
+		 * Compensate by giving the synchronous client credit for
+		 * a waitboost next time.
+		 */
+		spin_lock(&req->i915->rps.client_lock);
+		list_del_init(&rps->link);
+		spin_unlock(&req->i915->rps.client_lock);
+	}
+
+	return ret;
+}
+
+/**
+ * Waits for a request to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+int
+i915_wait_request(struct drm_i915_gem_request *req)
+{
+	struct drm_device *dev;
+	struct drm_i915_private *dev_priv;
+	bool interruptible;
+	int ret;
+
+	BUG_ON(req == NULL);
+
+	dev = req->ring->dev;
+	dev_priv = dev->dev_private;
+	interruptible = dev_priv->mm.interruptible;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	ret = __i915_wait_request(req, interruptible, NULL, NULL);
+	if (ret)
+		return ret;
+
+	i915_gem_request_retire_upto(req);
+	return 0;
+}
+
+void i915_gem_request_free(struct kref *req_ref)
+{
+	struct drm_i915_gem_request *req = container_of(req_ref,
+						 typeof(*req), ref);
+	struct intel_context *ctx = req->ctx;
+
+	if (req->file_priv)
+		i915_gem_request_remove_from_client(req);
+
+	if (ctx) {
+		if (i915.enable_execlists) {
+			if (ctx != req->ring->default_context)
+				intel_lr_context_unpin(req);
+		}
+
+		i915_gem_context_unreference(ctx);
+	}
+
+	kmem_cache_free(req->i915->requests, req);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
new file mode 100644
index 000000000000..d46f22f30b0a
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright © 2008-2015 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef I915_GEM_REQUEST_H
+#define I915_GEM_REQUEST_H
+
+#include <linux/kref.h>
+#include <linux/types.h>
+
+/**
+ * Request queue structure.
+ *
+ * The request queue allows us to note sequence numbers that have been emitted
+ * and may be associated with active buffers to be retired.
+ *
+ * By keeping this list, we can avoid having to do questionable sequence
+ * number comparisons on buffer last_read|write_seqno. It also allows an
+ * emission time to be associated with the request for tracking how far ahead
+ * of the GPU the submission is.
+ *
+ * The requests are reference counted, so upon creation they should have an
+ * initial reference taken using kref_init
+ */
+struct drm_i915_gem_request {
+	struct kref ref;
+
+	/** On which ring this request was generated */
+	struct drm_i915_private *i915;
+	struct intel_engine_cs *ring;
+	unsigned reset_counter;
+
+	/** GEM sequence number associated with the previous request;
+	 * when the HWS breadcrumb is equal to this, the GPU is processing
+	 * this request.
+	 */
+	u32 previous_seqno;
+
+	/** GEM sequence number associated with this request;
+	 * when the HWS breadcrumb is equal to or greater than this, the GPU
+	 * has finished processing this request.
+	 */
+	u32 seqno;
+
+	/** Position in the ringbuffer of the start of the request */
+	u32 head;
+
+	/**
+	 * Position in the ringbuffer of the start of the postfix.
+	 * This is required to calculate the maximum available ringbuffer
+	 * space without overwriting the postfix.
+	 */
+	u32 postfix;
+
+	/** Position in the ringbuffer of the end of the whole request */
+	u32 tail;
+
+	/**
+	 * Context and ring buffer related to this request
+	 * Contexts are refcounted, so when this request is associated with a
+	 * context, we must increment the context's refcount, to guarantee that
+	 * it persists while any request is linked to it. Requests themselves
+	 * are also refcounted, so the request will only be freed when the last
+	 * reference to it is dismissed, and the code in
+	 * i915_gem_request_free() will then decrement the refcount on the
+	 * context.
+	 */
+	struct intel_context *ctx;
+	struct intel_ringbuffer *ringbuf;
+
+	/** Batch buffer related to this request if any (used for
+	    error state dump only) */
+	struct drm_i915_gem_object *batch_obj;
+
+	/** Time at which this request was emitted, in jiffies. */
+	unsigned long emitted_jiffies;
+
+	/** global list entry for this request */
+	struct list_head list;
+
+	struct drm_i915_file_private *file_priv;
+	/** file_priv list entry for this request */
+	struct list_head client_list;
+
+	/** process identifier submitting this request */
+	struct pid *pid;
+
+	/**
+	 * The ELSP only accepts two elements at a time, so we queue
+	 * context/tail pairs on a given queue (ring->execlist_queue) until the
+	 * hardware is available. The queue serves a double purpose: we also use
+	 * it to keep track of the up to 2 contexts currently in the hardware
+	 * (usually one in execution and the other queued up by the GPU): We
+	 * only remove elements from the head of the queue when the hardware
+	 * informs us that an element has been completed.
+	 *
+	 * All accesses to the queue are mediated by a spinlock
+	 * (ring->execlist_lock).
+	 */
+
+	/** Execlist link in the submission queue. */
+	struct list_head execlist_link;
+
+	/** Execlists no. of times this request has been sent to the ELSP */
+	int elsp_submitted;
+};
+
+int i915_gem_request_alloc(struct intel_engine_cs *ring,
+			   struct intel_context *ctx,
+			   struct drm_i915_gem_request **req_out);
+void i915_gem_request_cancel(struct drm_i915_gem_request *req);
+void i915_gem_request_free(struct kref *req_ref);
+int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
+				   struct drm_file *file);
+void i915_gem_request_retire_upto(struct drm_i915_gem_request *req);
+
+static inline uint32_t
+i915_gem_request_get_seqno(struct drm_i915_gem_request *req)
+{
+	return req ? req->seqno : 0;
+}
+
+static inline struct intel_engine_cs *
+i915_gem_request_get_ring(struct drm_i915_gem_request *req)
+{
+	return req ? req->ring : NULL;
+}
+
+static inline struct drm_i915_gem_request *
+i915_gem_request_reference(struct drm_i915_gem_request *req)
+{
+	if (req)
+		kref_get(&req->ref);
+	return req;
+}
+
+static inline void
+i915_gem_request_unreference(struct drm_i915_gem_request *req)
+{
+	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
+	kref_put(&req->ref, i915_gem_request_free);
+}
+
+static inline void
+i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
+{
+	struct drm_device *dev;
+
+	if (!req)
+		return;
+
+	dev = req->ring->dev;
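+	/* kref_put_mutex() takes struct_mutex only if this is the final
+	 * reference; the release callback then runs with the mutex held,
+	 * so it must be dropped again afterwards.
+	 */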
+	if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
+		mutex_unlock(&dev->struct_mutex);
+}
+
+static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
+					   struct drm_i915_gem_request *src)
+{
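+	/* Take the new reference before dropping the old one so that the
+	 * assignment remains safe even when src == *pdst.
+	 */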
+	if (src)
+		i915_gem_request_reference(src);
+
+	if (*pdst)
+		i915_gem_request_unreference(*pdst);
+
+	*pdst = src;
+}
+
+void __i915_add_request(struct drm_i915_gem_request *req,
+			struct drm_i915_gem_object *batch_obj,
+			bool flush_caches);
+#define i915_add_request(req) \
+	__i915_add_request(req, NULL, true)
+#define i915_add_request_no_flush(req) \
+	__i915_add_request(req, NULL, false)
+
+struct intel_rps_client;
+
+int __i915_wait_request(struct drm_i915_gem_request *req,
+			bool interruptible,
+			s64 *timeout,
+			struct intel_rps_client *rps);
+int __must_check i915_wait_request(struct drm_i915_gem_request *req);
+
+/**
+ * Returns true if seq1 is later than seq2.
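+ *
+ * The comparison is done on the signed difference so that it stays
+ * correct across u32 wraparound: e.g. seq1 = 1 is later than
+ * seq2 = 0xffffffff, since (int32_t)(seq1 - seq2) == 2 >= 0.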
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+	return (int32_t)(seq1 - seq2) >= 0;
+}
+
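+/* A request has started once the GPU has passed the breadcrumb of the
+ * request submitted before it (previous_seqno), and is complete once
+ * the GPU has passed its own seqno, matching the semantics documented
+ * on struct drm_i915_gem_request above.
+ */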
+static inline bool i915_gem_request_started(struct drm_i915_gem_request *req)
+{
+	return i915_seqno_passed(intel_ring_get_seqno(req->ring),
+				 req->previous_seqno);
+}
+
+static inline bool i915_gem_request_completed(struct drm_i915_gem_request *req)
+{
+	return i915_seqno_passed(intel_ring_get_seqno(req->ring),
+				 req->seqno);
+}
+
+#endif /* I915_GEM_REQUEST_H */
-- 
2.7.0.rc3

Thread overview: 263+ messages
2016-01-11  9:16 [PATCH 001/190] drm: Release driver references to handle before making it available again Chris Wilson
2016-01-11  9:16 ` [PATCH 002/190] drm/i915: Move the mb() following release-mmap into release-mmap Chris Wilson
2016-01-11  9:16 ` [PATCH 003/190] drm/i915: Add an optional selection from i915 of CONFIG_MMU_NOTIFIER Chris Wilson
2016-02-17 12:59   ` Daniel Vetter
2016-01-11  9:16 ` [PATCH 004/190] drm/i915: Fix some invalid requests cancellations Chris Wilson
2016-01-12 18:16   ` [Intel-gfx] " Dave Gordon
2016-01-12 18:16     ` Dave Gordon
2016-01-13 20:06     ` [Intel-gfx] " Chris Wilson
2016-01-11  9:16 ` [PATCH 005/190] drm/i915: Force clean compilation with -Werror Chris Wilson
2016-01-11  9:16 ` [PATCH 006/190] drm/i915: Add GEM debugging Kconfig option Chris Wilson
2016-01-12 17:44   ` Dave Gordon
2016-01-11  9:16 ` [PATCH 007/190] drm/i915: Hide the atomic_read(reset_counter) behind a helper Chris Wilson
2016-01-11  9:16 ` [PATCH 008/190] drm/i915: Simplify checking of GPU reset_counter in display pageflips Chris Wilson
2016-01-11  9:16 ` [PATCH 009/190] drm/i915: Tighten reset_counter for reset status Chris Wilson
2016-01-11  9:16 ` [PATCH 010/190] drm/i915: Store the reset counter when constructing a request Chris Wilson
2016-01-11  9:16 ` [PATCH 011/190] drm/i915: Simplify reset_counter handling during atomic modesetting Chris Wilson
2016-01-11  9:16 ` [PATCH 012/190] drm/i915: Prevent leaking of -EIO from i915_wait_request() Chris Wilson
2016-01-11  9:16 ` [PATCH 013/190] drm/i915: Suppress error message when GPU resets are disabled Chris Wilson
2016-01-11  9:16 ` [PATCH 014/190] drm/i915: Delay queuing hangcheck to wait-request Chris Wilson
2016-01-11  9:16 ` [PATCH 015/190] drm/i915: Remove the dedicated hangcheck workqueue Chris Wilson
2016-01-11  9:16 ` [PATCH 016/190] drm/i915: Make queueing the hangcheck work inline Chris Wilson
2016-01-11  9:16 ` [PATCH 017/190] drm/i915: Remove forcewake dance from seqno/irq barrier on legacy gen6+ Chris Wilson
2016-01-11 14:02   ` Dave Gordon
2016-01-21 16:27     ` Mika Kuoppala
2016-03-24  6:39   ` David Weinehall
2016-01-11  9:16 ` [PATCH 018/190] drm/i915: Slaughter the thundering i915_wait_request herd Chris Wilson
2016-01-11  9:16 ` [PATCH 019/190] drm/i915: Separate out the seqno-barrier from engine->get_seqno Chris Wilson
2016-01-11 15:43   ` Dave Gordon
2016-01-11  9:16 ` [PATCH 020/190] drm/i915: Remove the lazy_coherency parameter from request-completed? Chris Wilson
2016-01-11 15:45   ` Dave Gordon
2016-01-11 16:24     ` Chris Wilson
2016-01-12 10:27   ` Mika Kuoppala
2016-01-12 10:51     ` Chris Wilson
2016-01-11  9:16 ` [PATCH 021/190] drm/i915: Use HWS for seqno tracking everywhere Chris Wilson
2016-01-11 20:03   ` Dave Gordon
2016-01-12 10:05   ` Mika Kuoppala
2016-01-12 11:03     ` Chris Wilson
2016-01-12 14:30       ` Mika Kuoppala
2016-01-12 14:46         ` Chris Wilson
2016-01-11  9:16 ` [PATCH 022/190] drm/i915: Check the CPU cached value of seqno after waking the waiter Chris Wilson
2016-01-11  9:16 ` [PATCH 023/190] drm/i915: Only apply one barrier after a breadcrumb interrupt is posted Chris Wilson
2016-01-11  9:16 ` [PATCH 024/190] drm/i915: Replace manual barrier() with READ_ONCE() in HWS accessor Chris Wilson
2016-01-12 14:17   ` Mika Kuoppala
2016-01-11  9:16 ` [PATCH 025/190] drm/i915: Broadwell execlists needs exactly the same seqno w/a as legacy Chris Wilson
2016-01-11  9:16 ` [PATCH 026/190] drm/i915: Stop setting wraparound seqno on initialisation Chris Wilson
2016-01-11  9:16 ` [PATCH 027/190] drm/i915: Only query timestamp when measuring elapsed time Chris Wilson
2016-01-11  9:16 ` [PATCH 028/190] drm/i915: On GPU reset, set the HWS breadcrumb to the last seqno Chris Wilson
2016-01-11  9:16 ` [PATCH 029/190] drm/i915: Convert trace-irq to the breadcrumb waiter Chris Wilson
2016-01-11  9:16 ` [PATCH 030/190] drm/i915: Move the get/put irq locking into the caller Chris Wilson
2016-01-11  9:16 ` [PATCH 031/190] drm/i915: Harden detection of missed interrupts Chris Wilson
2016-01-11  9:16 ` [PATCH 032/190] drm/i915: Remove debug noise on detecting fault-injection " Chris Wilson
2016-01-11  9:16 ` [PATCH 033/190] drm/i915: Only start retire worker when idle Chris Wilson
2016-01-11  9:16 ` [PATCH 034/190] drm/i915: Do not keep postponing the idle-work Chris Wilson
2016-01-11  9:16 ` [PATCH 035/190] drm/i915: Remove redundant queue_delayed_work() from throttle ioctl Chris Wilson
2016-01-11  9:16 ` [PATCH 036/190] drm/i915: Restore waitboost credit to the synchronous waiter Chris Wilson
2016-01-11 16:10   ` Jesse Barnes
2016-01-11  9:16 ` [PATCH 037/190] drm/i915: Add background commentary to "waitboosting" Chris Wilson
2016-01-11  9:16 ` [PATCH 038/190] drm/i915: Flush the RPS bottom-half when the GPU idles Chris Wilson
2016-01-11  9:16 ` [PATCH 039/190] drm/i915: Remove stop-rings debugfs interface Chris Wilson
2016-02-25 17:30   ` Arun Siluvery
2016-01-11  9:16 ` [PATCH 040/190] drm/i915: Record the ringbuffer associated with the request Chris Wilson
2016-01-11  9:16 ` [PATCH 041/190] drm/i915: Allow userspace to request no-error-capture upon GPU hangs Chris Wilson
2016-01-11  9:16 ` [PATCH 042/190] drm/i915: Clean up GPU hang message Chris Wilson
2016-02-25 17:40   ` Arun Siluvery
2016-01-11  9:16 ` [PATCH 043/190] drm/i915: Skip capturing an error state if we already have one Chris Wilson
2016-01-11  9:16 ` Chris Wilson [this message]
2016-02-25 17:52   ` [PATCH 044/190] drm/i915: Move GEM request routines to i915_gem_request.c Arun Siluvery
2016-03-08 12:58     ` Tvrtko Ursulin
2016-03-08 13:35       ` Arun Siluvery
2016-01-11  9:16 ` [PATCH 045/190] drm/i915: Move releasing of the GEM request from free to retire/cancel Chris Wilson
2016-03-08 13:15   ` Tvrtko Ursulin
2016-04-05 13:42     ` Tvrtko Ursulin
2016-04-05 14:09       ` Chris Wilson
2016-04-05 14:17         ` Tvrtko Ursulin
2016-04-05 14:27           ` Chris Wilson
2016-04-05 14:45             ` Chris Wilson
2016-04-05 14:10       ` Chris Wilson
2016-01-11  9:16 ` [PATCH 046/190] drm/i915: Derive GEM requests from dma-fence Chris Wilson
2016-01-11  9:16 ` [PATCH 047/190] drm/i915: Rename request reference/unreference to get/put Chris Wilson
2016-01-11  9:16 ` [PATCH 048/190] drm/i915: Disable waitboosting for fence_wait() Chris Wilson
2016-01-11  9:17 ` [PATCH 049/190] drm/i915: Disable waitboosting for mmioflips/semaphores Chris Wilson
2016-01-11  9:17 ` [PATCH 050/190] drm/i915: Refactor duplicate object vmap functions Chris Wilson
2016-01-11  9:17 ` [PATCH 051/190] drm,i915: Introduce drm_malloc_gfp() Chris Wilson
2016-01-11  9:17 ` [PATCH 052/190] drm/i915: Treat ringbuffer writes as write to normal memory Chris Wilson
2016-01-11  9:17 ` [PATCH 053/190] drm/i915: Convert i915_semaphores_is_enabled over to early sanitize Chris Wilson
2016-01-12 19:07   ` Dave Gordon
2016-01-11  9:17 ` [PATCH 054/190] drm/i915: Use the new rq->i915 field where appropriate Chris Wilson
2016-01-11  9:17 ` [PATCH 055/190] drm/i915: Unify intel_logical_ring_emit and intel_ring_emit Chris Wilson
2016-01-12 17:29   ` Dave Gordon
2016-01-11  9:17 ` [PATCH 056/190] drm/i915: Unify intel_ring_begin() Chris Wilson
2016-01-11  9:17 ` [PATCH 057/190] drm/i915: Remove the identical implementations of request space reservation Chris Wilson
2016-01-11  9:17 ` [PATCH 058/190] drm/i915: Rename request->ring to request->engine Chris Wilson
2016-01-28 11:45   ` Tvrtko Ursulin
2016-01-11  9:17 ` [PATCH 059/190] drm/i915: Rename request->ringbuf to request->ring Chris Wilson
2016-01-28 11:48   ` Tvrtko Ursulin
2016-01-11  9:17 ` [PATCH 060/190] drm/i915: Rename backpointer from intel_ringbuffer to intel_engine_cs Chris Wilson
2016-01-28 11:49   ` Tvrtko Ursulin
2016-01-11  9:17 ` [PATCH 061/190] drm/i915: Rename intel_context[engine].ringbuf Chris Wilson
2016-01-11  9:17 ` [PATCH 062/190] drm/i915: Rename extern functions operating on intel_engine_cs Chris Wilson
2016-01-11  9:17 ` [PATCH 063/190] drm/i915: Rename struct intel_ringbuffer to intel_ring Chris Wilson
2016-01-28 11:54   ` Tvrtko Ursulin
2016-01-11  9:17 ` [PATCH 064/190] drm/i915: Rename intel_pin_and_map_ring() Chris Wilson
2016-01-11  9:17 ` [PATCH 065/190] drm/i915: Remove obsolete engine->gpu_caches_dirty Chris Wilson
2016-01-11  9:17 ` [PATCH 066/190] drm/i915: Simplify request_alloc by returning the allocated request Chris Wilson
2016-01-12 17:11   ` Dave Gordon
2016-01-11  9:17 ` [PATCH 067/190] drm/i915: Unify legacy/execlists emission of MI_BATCHBUFFER_START Chris Wilson
2016-01-11  9:17 ` [PATCH 068/190] drm/i915: Unify adding requests between ringbuffer and execlists Chris Wilson
2016-01-11  9:17 ` [PATCH 069/190] drm/i915: Remove duplicate golden render state init from execlists Chris Wilson
2016-01-11  9:17 ` [PATCH 070/190] drm/i915: Unify legacy/execlists submit_execbuf callbacks Chris Wilson
2016-01-11  9:17 ` [PATCH 071/190] drm/i915: Simplify calling engine->sync_to Chris Wilson
2016-01-11  9:17 ` [PATCH 072/190] drm/i915: Execlists cannot pin a context without the object Chris Wilson
2016-01-11 15:24   ` Tvrtko Ursulin
2016-01-11  9:17 ` [PATCH 073/190] drm/i915: Introduce i915_gem_active for request tracking Chris Wilson
2016-01-11 17:32   ` Tvrtko Ursulin
2016-01-11 22:49     ` Chris Wilson
2016-01-12 10:04       ` Tvrtko Ursulin
2016-01-12 11:01         ` Chris Wilson
2016-01-12 13:42           ` Tvrtko Ursulin
2016-01-12 13:44           ` Tvrtko Ursulin
2016-01-12 14:08             ` Chris Wilson
2016-01-11  9:17 ` [PATCH 074/190] drm/i915: Rename request->list to link for consistency Chris Wilson
2016-01-12 13:47   ` Tvrtko Ursulin
2016-01-11  9:17 ` [PATCH 075/190] drm/i915: Refactor activity tracking for requests Chris Wilson
2016-01-28 11:41   ` Tvrtko Ursulin
2016-01-28 11:46     ` Chris Wilson
2016-01-28 11:56       ` Tvrtko Ursulin
2016-01-11  9:17 ` [PATCH 076/190] drm/i915: Rename vma->*_list to *_link for consistency Chris Wilson
2016-01-12 13:49   ` Tvrtko Ursulin
2016-01-11  9:17 ` [PATCH 077/190] drm/i915: Amalgamate GGTT/ppGTT vma debug list walkers Chris Wilson
2016-01-11  9:17 ` [PATCH 078/190] drm/i915: Split early global GTT initialisation Chris Wilson
2016-01-11  9:17 ` [PATCH 079/190] drm/i915: Reduce the pointer dance of i915_is_ggtt() Chris Wilson
2016-01-15 12:12   ` Dave Gordon
2016-01-15 12:24     ` Chris Wilson
2016-01-11  9:17 ` [PATCH 080/190] drm/i915: Store owning file on the i915_address_space Chris Wilson
2016-01-11  9:17 ` [PATCH 081/190] drm/i915: i915_vma_move_to_active prep patch Chris Wilson
2016-01-11  9:17 ` [PATCH 082/190] drm/i915: Count how many VMA are bound for an object Chris Wilson
2016-01-11  9:17 ` [PATCH 083/190] drm/i915: Be more careful when unbinding vma Chris Wilson
2016-01-11  9:17 ` [PATCH 084/190] drm/i915: Track active vma requests Chris Wilson
2016-01-11  9:17 ` [PATCH 085/190] drm/i915: Release vma when the handle is closed Chris Wilson
2016-01-11  9:17 ` [PATCH 086/190] drm/i915: Mark the context and address space as closed Chris Wilson
2016-01-11 10:44 ` [PATCH 087/190] Revert "drm/i915: Clean up associated VMAs on context destruction" Chris Wilson
2016-01-11 10:44   ` [PATCH 088/190] drm/i915: Move execlists interrupt based submission to a bottom-half Chris Wilson
2016-02-19 12:08     ` Tvrtko Ursulin
2016-02-19 12:29       ` Chris Wilson
2016-02-19 14:10         ` Tvrtko Ursulin
2016-02-19 14:34           ` Chris Wilson
2016-02-19 14:52             ` Tvrtko Ursulin
2016-02-19 15:02               ` Chris Wilson
2016-02-19 14:41           ` Chris Wilson
2016-01-11 10:44   ` [PATCH 089/190] drm/i915: Tidy execlists submission and tracking Chris Wilson
2016-01-11 10:44   ` [PATCH 090/190] drm/i915: Refactor execlists default context pinning Chris Wilson
2016-01-11 10:44   ` [PATCH 091/190] drm/i915: Move context initialisation to first-use Chris Wilson
2016-01-11 10:44   ` [PATCH 092/190] drm/i915: Move the magical deferred context allocation into the request Chris Wilson
2016-01-11 10:44   ` [PATCH 093/190] drm/i915: Move the forced switch back to the kernel context into eviction Chris Wilson
2016-01-11 10:44   ` [PATCH 094/190] drm/i915: Remove early l3-remap Chris Wilson
2016-01-11 10:44   ` [PATCH 095/190] drm/i915: Rearrange switch_context to load the aliasing ppgtt on first use Chris Wilson
2016-01-11 10:44   ` [PATCH 096/190] drm/i915: Eliminate early submission of context enabling request Chris Wilson
2016-01-11 10:44   ` [PATCH 097/190] drm/i915/shrinker: Flush active on objects before counting Chris Wilson
2016-01-11 10:44   ` [PATCH 098/190] drm/i915: Double check the active status on the batch pool Chris Wilson
2016-01-11 10:44   ` [PATCH 099/190] drm/i915: Check for request completion before choosing CS flips Chris Wilson
2016-01-11 10:44   ` [PATCH 100/190] drm/i915: Remove request retirement before each batch Chris Wilson
2016-01-11 10:44   ` [PATCH 101/190] drm/i915: Only retire if necessary when creating a userptr Chris Wilson
2016-01-11 10:44   ` [PATCH 102/190] drm/i915: Move the "per-ring" default_context to the device Chris Wilson
2016-01-11 14:40     ` Dave Gordon
2016-01-11 10:44   ` [PATCH 103/190] drm/i915: Move pinning of dev_priv->kernel_context into its creator Chris Wilson
2016-01-11 10:44   ` [PATCH 104/190] drm/i915: Remove i915_gem_execbuffer_retire_commands() Chris Wilson
2016-01-11 10:44   ` [PATCH 105/190] drm/i915: Pad GTT views of exec objects up to user specified size Chris Wilson
2016-03-22 14:32     ` David Weinehall
2016-01-11 10:44   ` [PATCH 106/190] drm/i915: Split insertion/binding of an object into the VM Chris Wilson
2016-01-11 10:44   ` [PATCH 107/190] drm/i915: Record allocated vma size Chris Wilson
2016-01-11 10:44   ` [PATCH 108/190] drm/i915: Start passing around i915_vma from execbuffer Chris Wilson
2016-01-11 10:44   ` [PATCH 109/190] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin() Chris Wilson
2016-01-11 10:44   ` [PATCH 110/190] drm/i915: Move vma->pin_count:4 to vma->flags Chris Wilson
2016-01-11 10:44   ` [PATCH 111/190] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
2016-01-11 10:44   ` [PATCH 112/190] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
2016-03-24 12:00     ` David Weinehall
2016-01-11 10:44   ` [PATCH 113/190] drm/i915: Enable lockless lookup of request tracking via RCU Chris Wilson
2016-01-11 10:44   ` [PATCH 114/190] drm/i915: Remove (struct_mutex) locking for wait-ioctl Chris Wilson
2016-01-11 10:44   ` [PATCH 115/190] drm/i915: Remove (struct_mutex) locking for busy-ioctl Chris Wilson
2016-01-11 10:45   ` [PATCH 116/190] drm/i915: Reduce locking inside swfinish ioctl Chris Wilson
2016-01-11 10:45   ` [PATCH 117/190] drm/i915: Remove pinned check from madvise ioctl Chris Wilson
2016-01-11 10:45   ` [PATCH 118/190] drm/i915: Remove locking for get_tiling Chris Wilson
2016-01-11 10:45   ` [PATCH 119/190] drm/i915: Reduce amount of duplicate buffer information captured on error Chris Wilson
2016-01-11 10:45   ` [PATCH 120/190] drm/i915: Stop the machine whilst capturing the GPU crash dump Chris Wilson
2016-01-11 10:45   ` [PATCH 121/190] drm/i915: Scan GGTT active list for context object Chris Wilson
2016-01-11 10:45   ` [PATCH 122/190] drm/i915: Move setting of request->batch into its single callsite Chris Wilson
2016-01-11 10:45   ` [PATCH 123/190] drm/i915: Mark unmappable GGTT entries as PIN_HIGH Chris Wilson
2016-01-11 10:45   ` [PATCH 124/190] drm/i915: Track pinned vma inside guc Chris Wilson
2016-01-11 10:45   ` [PATCH 125/190] drm/i915: Track pinned VMA Chris Wilson
2016-01-11 10:45   ` [PATCH 126/190] drm/i915: Print the batchbuffer offset next to BBADDR in error state Chris Wilson
2016-01-11 10:45   ` [PATCH 127/190] drm/i915: Cache kmap between relocations Chris Wilson
2016-01-11 10:45   ` [PATCH 128/190] drm/i915: Extract i915_gem_obj_prepare_shmem_write() Chris Wilson
2016-01-11 10:45   ` [PATCH 129/190] drm/i915: Before accessing an object via the cpu, flush GTT writes Chris Wilson
2016-01-11 10:45   ` [PATCH 130/190] drm/i915: Wait for writes through the GTT to land before reading back Chris Wilson
2016-01-11 10:45   ` [PATCH 131/190] drm/i915: Pin the pages first in shmem prepare read/write Chris Wilson
2016-01-11 10:45   ` [PATCH 132/190] drm/i915: Tidy up flush cpu/gtt write domains Chris Wilson
2016-01-11 10:45   ` [PATCH 133/190] drm/i915: Convert known clflush paths over to clflush_cache_range() Chris Wilson
2016-01-11 10:45   ` [PATCH 134/190] drm/i915: Refactor execbuffer relocation writing Chris Wilson
2016-01-11 10:45   ` [PATCH 135/190] drm/i915: Move map-and-fenceable tracking to the VMA Chris Wilson
2016-01-11 10:45   ` [PATCH 136/190] drm/i915: Move ioremap_wc tracking onto VMA Chris Wilson
2016-02-11 13:20     ` Tvrtko Ursulin
2016-02-11 13:29       ` Chris Wilson
2016-02-11 14:10         ` Tvrtko Ursulin
2016-02-19 15:11           ` Chris Wilson
2016-02-22 15:29             ` Tvrtko Ursulin
2016-02-23 10:21               ` Chris Wilson
2016-01-11 10:45   ` [PATCH 137/190] drm/i915: Shrink pages around failure to dma map Chris Wilson
2016-01-11 10:45   ` [PATCH 138/190] drm/i915/userptr: Make gup errors stickier Chris Wilson
2016-01-11 10:45   ` [PATCH 139/190] drm/i915: Move fence tracking from object to vma Chris Wilson
2016-01-11 10:45   ` [PATCH 140/190] drm/i915: Fix partial GGTT faulting Chris Wilson
2016-01-11 10:45   ` [PATCH 141/190] drm/i915: Choose not to evict faultable objects from the GGTT Chris Wilson
2016-01-11 11:00 ` [PATCH 142/190] drm/i915: Fallback to using unmappable memory for scanout Chris Wilson
2016-01-11 11:00   ` [PATCH 143/190] drm/i915: Track display alignment on VMA Chris Wilson
2016-01-11 11:00   ` [PATCH 144/190] drm/i915: Bump the inactive MRU tracking for all VMA accessed Chris Wilson
2016-01-11 11:00   ` [PATCH 145/190] drm/i915: Stop discarding GTT cache-domain on unbind vma Chris Wilson
2016-01-12 13:22     ` Joonas Lahtinen
2016-01-11 11:00   ` [PATCH 146/190] io-mapping: Always create a struct to hold metadata about the io-mapping Chris Wilson
2016-01-11 11:00   ` [PATCH 147/190] drm/i915: Use remap_io_mapping() to prefault all PTE in a single pass Chris Wilson
2016-01-11 11:00   ` [PATCH 148/190] drm/i915: Stop marking the unaccessible scratch page as UC Chris Wilson
2016-01-11 11:00   ` [PATCH 149/190] drm/i915: Use i915_vm_to_ppgtt() Chris Wilson
2016-01-11 11:00   ` [PATCH 150/190] drm/i915: Embed the scratch page struct into each VM Chris Wilson
2016-01-11 11:00   ` [PATCH 151/190] drm/i915: Allow DMA pagetables to use highmem Chris Wilson
2016-01-11 11:00   ` [PATCH 152/190] drm/i915: Replace request->postfix with ->head for space searching Chris Wilson
2016-01-11 11:00   ` [PATCH 153/190] drm/i915: Record the position of the start of the request Chris Wilson
2016-01-11 11:00   ` [PATCH 154/190] drm/i915: Move per-request pid from request to ctx Chris Wilson
2016-01-11 11:00   ` [PATCH 155/190] drm/i915: Merge legacy+execlists context structs Chris Wilson
2016-01-11 11:00   ` [PATCH 156/190] drm/i915: Store the active context object on all engines upon error Chris Wilson
2016-01-11 11:00   ` [PATCH 157/190] drm/i915: Tidy execlists by using intel_context_engine locals Chris Wilson
2016-01-11 11:00   ` [PATCH 158/190] drm/i915: Skip holding an object reference for execbuf preparation Chris Wilson
2016-01-11 11:01   ` [PATCH 159/190] drm/i915: Defer active reference until required Chris Wilson
2016-01-11 11:01   ` [PATCH 160/190] drm: Track drm_mm nodes with an interval tree Chris Wilson
2016-01-11 11:01   ` [PATCH 161/190] drm: Convert drm_vma_manager to embedded interval-tree in drm_mm Chris Wilson
2016-01-11 11:01   ` [PATCH 162/190] drm/i915: Allow the user to pass a context to any ring Chris Wilson
2016-01-11 11:01   ` [PATCH 163/190] drm/i915: Fix i915_gem_evict_for_vma (soft-pinning) Chris Wilson
2016-01-11 11:01   ` [PATCH 164/190] drm/i915: Move obj->dirty:1 to obj->flags Chris Wilson
2016-03-24  8:17     ` David Weinehall
2016-01-11 11:01   ` [PATCH 165/190] drm/i915: Use the precomputed value for whether to enable command parsing Chris Wilson
2016-01-11 11:01   ` [PATCH 166/190] drm/i915: Drop spinlocks around adding to the client request list Chris Wilson
2016-01-11 11:01   ` [PATCH 167/190] drm/i915: Amalgamate execbuffer parameter structures Chris Wilson
2016-01-11 11:01   ` [PATCH 168/190] drm/i915: Skip holding context reference for duration of execbuffer call Chris Wilson
2016-01-11 11:01   ` [PATCH 169/190] drm/i915: Use vma->exec_entry as our double-entry placeholder Chris Wilson
2016-01-11 11:01   ` [PATCH 170/190] drm/i915: Store a direct lookup from object handle to vma Chris Wilson
2016-01-11 11:01   ` [PATCH 171/190] drm/i915: Pass vma to relocate entry Chris Wilson
2016-01-11 11:01   ` [PATCH 172/190] drm/i915: Eliminate lots of iterations over the execobjects array Chris Wilson
2016-01-11 11:01   ` [PATCH 173/190] drm/i915: Wait upon userptr get-user-pages within execbuffer Chris Wilson
2016-01-11 11:01   ` [PATCH 174/190] drm/i915: Show context objects in debugfs/i915_gem_objects Chris Wilson
2016-03-24  7:58     ` David Weinehall
2016-01-11 11:01   ` [PATCH 175/190] drm/i915: Remove superfluous i915_add_request_no_flush() helper Chris Wilson
2016-01-11 11:01   ` [PATCH 176/190] drm/i915: Use the MRU stack search after evicting Chris Wilson
2016-01-11 11:01   ` [PATCH 177/190] drm/i915: Use VMA as the primary object for context state Chris Wilson
2016-01-11 11:01   ` [PATCH 178/190] drm/i915: Do an inline flush-active before dropping the mutex when waiting Chris Wilson
2016-01-11 11:01   ` [PATCH 179/190] drm/i915: Skip MI_SET_CONTEXT for the same context Chris Wilson
2016-01-11 11:01   ` [PATCH 180/190] drm/i915: Micro-optimise i915_gem_object_get_dirty_page() Chris Wilson
2016-01-11 11:01   ` [PATCH 181/190] drm/i915: Introduce an internal allocator for disposable private objects Chris Wilson
2016-01-11 11:01   ` [PATCH 182/190] drm/i915: Avoid allocating a vmap arena for a single page Chris Wilson
2016-01-11 11:01   ` [PATCH 183/190] drm/i915/cmdparser: Use cached vmappings Chris Wilson
2016-01-11 11:01   ` [PATCH 184/190] drm/i915/cmdparser: Only cache the dst vmap Chris Wilson
2016-01-11 11:01   ` [PATCH 185/190] drm/i915/cmdparser: Improve hash function Chris Wilson
2016-01-11 11:01   ` [PATCH 186/190] drm/i915/cmdparser: Compare against the previous command descriptor Chris Wilson
2016-01-11 11:01   ` [PATCH 187/190] drm/i915: Allow execbuffer to use the first object as the batch Chris Wilson
2016-01-11 11:01   ` [PATCH 188/190] drm/i915: Use VMA for ringbuffer tracking Chris Wilson
2016-01-11 11:01   ` [PATCH 189/190] drm/i915: Skip clearing the GGTT on full-ppgtt systems Chris Wilson
2016-01-11 11:01   ` [PATCH 190/190] drm/i915: Do a nonblocking wait first in pread/pwrite Chris Wilson
