* [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

A slight micro-optimisation: combine the loops so that gcc is able to
optimise the inner loops concisely. Since we are reviewing the loops, we
can update the comments to describe the current state of affairs, in
particular the distinction between evicting from the global GTT (which
may contain untracked items and transient global pins) and the
per-process GTT.
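
As a rough stand-alone illustration of the combined loop (a toy using
string arrays in place of the kernel's LRU lists, not the driver code),
the NULL-terminated phase array lets a single scan walk the inactive
list first and then the active list:

#include <stdio.h>

/* Toy stand-ins for vm->inactive_list and vm->active_list. */
static const char *inactive[] = { "vma-a", "vma-b", NULL };
static const char *active[]   = { "vma-c", NULL };

int main(void)
{
	/* NULL-terminated phase array: inactive first, then active. */
	const char **phases[] = { inactive, active, NULL }, ***phase;

	phase = phases;
	do {
		for (const char **vma = *phase; *vma; vma++)
			printf("scanning %s\n", *vma);
	} while (*++phase);

	return 0;
}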

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_evict.c | 143 +++++++++++++++++-----------------
 1 file changed, 70 insertions(+), 73 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 3437ced76cb6..016be7316676 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -34,6 +34,19 @@
 #include "i915_trace.h"
 
 static bool
+gpu_is_idle(struct drm_i915_private *dev_priv)
+{
+	struct intel_engine_cs *engine;
+
+	for_each_engine(engine, dev_priv) {
+		if (!list_empty(&engine->request_list))
+			return false;
+	}
+
+	return true;
+}
+
+static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
 	if (vma->pin_count)
@@ -76,37 +89,31 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 			 unsigned long start, unsigned long end,
 			 unsigned flags)
 {
-	struct list_head eviction_list, unwind_list;
-	struct i915_vma *vma;
-	int ret = 0;
-	int pass = 0;
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct list_head eviction_list;
+	struct list_head *phases[] = {
+		&vm->inactive_list,
+		&vm->active_list,
+		NULL,
+	}, **phase;
+	struct i915_vma *vma, *next;
+	int ret;
 
 	trace_i915_gem_evict(dev, min_size, alignment, flags);
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
 	 * The oldest idle objects reside on the inactive list, which is in
-	 * retirement order. The next objects to retire are those on the (per
-	 * ring) active list that do not have an outstanding flush. Once the
-	 * hardware reports completion (the seqno is updated after the
-	 * batchbuffer has been finished) the clean buffer objects would
-	 * be retired to the inactive list. Any dirty objects would be added
-	 * to the tail of the flushing list. So after processing the clean
-	 * active objects we need to emit a MI_FLUSH to retire the flushing
-	 * list, hence the retirement order of the flushing list is in
-	 * advance of the dirty objects on the active lists.
+	 * retirement order. The next objects to retire are those in flight,
+	 * on the active list, again in retirement order.
 	 *
 	 * The retirement sequence is thus:
 	 *   1. Inactive objects (already retired)
-	 *   2. Clean active objects
-	 *   3. Flushing list
-	 *   4. Dirty active objects.
+	 *   2. Active objects (will stall on unbinding)
 	 *
 	 * On each list, the oldest objects lie at the HEAD with the freshest
 	 * object on the TAIL.
 	 */
-
-	INIT_LIST_HEAD(&unwind_list);
 	if (start != 0 || end != vm->total) {
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
 					    alignment, cache_level,
@@ -114,79 +121,71 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	} else
 		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
-search_again:
-	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(vma, &vm->inactive_list, vm_link) {
-		if (mark_free(vma, &unwind_list))
-			goto found;
-	}
-
 	if (flags & PIN_NONBLOCK)
-		goto none;
+		phases[1] = NULL;
 
-	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(vma, &vm->active_list, vm_link) {
-		if (mark_free(vma, &unwind_list))
-			goto found;
-	}
+search_again:
+	INIT_LIST_HEAD(&eviction_list);
+	phase = phases;
+	do {
+		list_for_each_entry(vma, *phase, vm_link)
+			if (mark_free(vma, &eviction_list))
+				goto found;
+	} while (*++phase);
 
-none:
 	/* Nothing found, clean up and bail out! */
-	while (!list_empty(&unwind_list)) {
-		vma = list_first_entry(&unwind_list,
-				       struct i915_vma,
-				       exec_list);
+	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
 		ret = drm_mm_scan_remove_block(&vma->node);
 		BUG_ON(ret);
 
-		list_del_init(&vma->exec_list);
+		INIT_LIST_HEAD(&vma->exec_list);
 	}
 
 	/* Can we unpin some objects such as idle hw contents,
-	 * or pending flips?
+	 * or pending flips? But since only the GGTT has global entries
+	 * such as scanouts, ringbuffers and contexts, we can skip the
+	 * purge when inspecting per-process local address spaces.
 	 */
-	if (flags & PIN_NONBLOCK)
+	if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
-	/* Only idle the GPU and repeat the search once */
-	if (pass++ == 0) {
-		struct drm_i915_private *dev_priv = to_i915(dev);
-
-		if (i915_is_ggtt(vm)) {
-			ret = i915_gem_switch_to_kernel_context(dev_priv);
-			if (ret)
-				return ret;
-		}
-
-		ret = i915_gem_wait_for_idle(dev_priv);
-		if (ret)
-			return ret;
-
-		i915_gem_retire_requests(dev_priv);
-		goto search_again;
+	if (gpu_is_idle(dev_priv)) {
+		/* If we still have pending pageflip completions, drop
+		 * back to userspace to give our workqueues time to
+		 * acquire our locks and unpin the old scanouts.
+		 */
+		return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
 	}
 
-	/* If we still have pending pageflip completions, drop
-	 * back to userspace to give our workqueues time to
-	 * acquire our locks and unpin the old scanouts.
+	/* Not everything in the GGTT is tracked via vma (otherwise we
+	 * could evict as required with minimal stalling) so we are forced
+	 * to idle the GPU and explicitly retire outstanding requests in
+	 * the hopes that we can then remove contexts and the like only
+	 * bound by their active reference.
 	 */
-	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
+	ret = i915_gem_switch_to_kernel_context(dev_priv);
+	if (ret)
+		return ret;
+
+	ret = i915_gem_wait_for_idle(dev_priv);
+	if (ret)
+		return ret;
+
+	i915_gem_retire_requests(dev_priv);
+	goto search_again;
 
 found:
 	/* drm_mm doesn't allow any other operations while
-	 * scanning, therefore store to be evicted objects on a
-	 * temporary list. */
-	INIT_LIST_HEAD(&eviction_list);
-	while (!list_empty(&unwind_list)) {
-		vma = list_first_entry(&unwind_list,
-				       struct i915_vma,
-				       exec_list);
-		if (drm_mm_scan_remove_block(&vma->node)) {
+	 * scanning, therefore store to-be-evicted objects on a
+	 * temporary list and take a reference for all before
+	 * calling unbind (which may remove the active reference
+	 * of any of our objects, thus corrupting the list).
+	 */
+	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
+		if (drm_mm_scan_remove_block(&vma->node))
 			vma->pin_count++;
-			list_move(&vma->exec_list, &eviction_list);
-			continue;
-		}
-		list_del_init(&vma->exec_list);
+		else
+			list_del_init(&vma->exec_list);
 	}
 
 	/* Unbinding will emit any required flushes */
@@ -200,7 +199,6 @@ found:
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
 	}
-
 	return ret;
 }
 
@@ -279,7 +277,6 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 			return ret;
 
 		i915_gem_retire_requests(dev_priv);
-
 		WARN_ON(!list_empty(&vm->active_list));
 	}
 
-- 
2.8.1

* [CI 02/26] drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something()
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

Eviction is local to the VM, so the caller has no need to supply the
drm_device; i915_gem_evict_something() can derive everything it needs
from the address space itself.
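
A toy sketch of the idea (hypothetical names, not the driver code): the
address space carries a backpointer to its device, so the extra
parameter is redundant.

#include <stdio.h>

struct drm_device { int index; };
struct i915_address_space { struct drm_device *dev; };

static void evict_something(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev; /* was a surplus parameter */
	printf("evicting from vm on dev%d\n", dev->index);
}

int main(void)
{
	struct drm_device dev = { .index = 0 };
	struct i915_address_space vm = { .dev = &dev };

	evict_something(&vm);
	return 0;
}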

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h       |  3 +--
 drivers/gpu/drm/i915/i915_gem.c       |  2 +-
 drivers/gpu/drm/i915/i915_gem_evict.c |  9 ++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.c   |  2 +-
 drivers/gpu/drm/i915/i915_trace.h     | 14 ++++++++------
 5 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 66b98fa4715a..fbda38f25c6b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3398,8 +3398,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
 				       struct drm_file *file);
 
 /* i915_gem_evict.c */
-int __must_check i915_gem_evict_something(struct drm_device *dev,
-					  struct i915_address_space *vm,
+int __must_check i915_gem_evict_something(struct i915_address_space *vm,
 					  int min_size,
 					  unsigned alignment,
 					  unsigned cache_level,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 9cb838ef8d02..d8b2fbb9bcb9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3089,7 +3089,7 @@ search_free:
 							  search_flag,
 							  alloc_flag);
 		if (ret) {
-			ret = i915_gem_evict_something(dev, vm, size, alignment,
+			ret = i915_gem_evict_something(vm, size, alignment,
 						       obj->cache_level,
 						       start, end,
 						       flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 016be7316676..4bce72fa14c4 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -61,7 +61,6 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 
 /**
  * i915_gem_evict_something - Evict vmas to make room for binding a new one
- * @dev: drm_device
  * @vm: address space to evict from
  * @min_size: size of the desired free space
  * @alignment: alignment constraint of the desired free space
@@ -84,12 +83,12 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
  * memory in e.g. the shrinker.
  */
 int
-i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
+i915_gem_evict_something(struct i915_address_space *vm,
 			 int min_size, unsigned alignment, unsigned cache_level,
 			 unsigned long start, unsigned long end,
 			 unsigned flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(vm->dev);
 	struct list_head eviction_list;
 	struct list_head *phases[] = {
 		&vm->inactive_list,
@@ -99,7 +98,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	struct i915_vma *vma, *next;
 	int ret;
 
-	trace_i915_gem_evict(dev, min_size, alignment, flags);
+	trace_i915_gem_evict(vm, min_size, alignment, flags);
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
@@ -154,7 +153,7 @@ search_again:
 		 * back to userspace to give our workqueues time to
 		 * acquire our locks and unpin the old scanouts.
 		 */
-		return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;
+		return intel_has_pending_fb_unpin(vm->dev) ? -EAGAIN : -ENOSPC;
 	}
 
 	/* Not everything in the GGTT is tracked via vma (otherwise we
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 685ca2aaada8..f1f14e5a5a99 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2012,7 +2012,7 @@ alloc:
 						  0, ggtt->base.total,
 						  DRM_MM_TOPDOWN);
 	if (ret == -ENOSPC && !retried) {
-		ret = i915_gem_evict_something(dev, &ggtt->base,
+		ret = i915_gem_evict_something(&ggtt->base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
 					       I915_CACHE_NONE,
 					       0, ggtt->base.total,
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 9e43c0aa6e3b..178798002a73 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -394,25 +394,27 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );
 
 TRACE_EVENT(i915_gem_evict,
-	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
-	    TP_ARGS(dev, size, align, flags),
+	    TP_PROTO(struct i915_address_space *vm, u32 size, u32 align, unsigned int flags),
+	    TP_ARGS(vm, size, align, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
+			     __field(struct i915_address_space *, vm)
 			     __field(u32, size)
 			     __field(u32, align)
-			     __field(unsigned, flags)
+			     __field(unsigned int, flags)
 			    ),
 
 	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
+			   __entry->dev = vm->dev->primary->index;
+			   __entry->vm = vm;
 			   __entry->size = size;
 			   __entry->align = align;
 			   __entry->flags = flags;
 			  ),
 
-	    TP_printk("dev=%d, size=%d, align=%d %s",
-		      __entry->dev, __entry->size, __entry->align,
+	    TP_printk("dev=%d, vm=%p, size=%d, align=%d %s",
+		      __entry->dev, __entry->vm, __entry->size, __entry->align,
 		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
-- 
2.8.1

* [CI 03/26] drm/i915: Double check the active status on the batch pool
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

We should not rely on obj->active being up to date unless we manually
flush it. Instead, we can verify that the next available batch object is
idle by looking at its last active request (and checking it for
completion).

v2: remove the struct drm_device forward declaration added in the
process of removing its necessity
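
A stand-alone toy model of the check (a simplified seqno comparison,
not the driver's request tracking): rather than trusting a cached
active flag, ask whether the last request to read the batch has
completed.

#include <stdbool.h>
#include <stdio.h>

struct request { unsigned int seqno; };

struct pool_obj {
	struct request *last_read; /* last request that used this batch */
};

static bool obj_is_idle(const struct pool_obj *obj, unsigned int hw_seqno)
{
	/* Idle if never used, or the hardware has passed its seqno. */
	return !obj->last_read || hw_seqno >= obj->last_read->seqno;
}

int main(void)
{
	struct request rq = { .seqno = 8 };
	struct pool_obj obj = { .last_read = &rq };

	printf("idle at seqno 7? %d\n", obj_is_idle(&obj, 7)); /* 0 */
	printf("idle at seqno 9? %d\n", obj_is_idle(&obj, 9)); /* 1 */
	return 0;
}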

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_batch_pool.c | 15 ++++++++-------
 drivers/gpu/drm/i915/i915_gem_batch_pool.h |  6 ++++--
 drivers/gpu/drm/i915/intel_engine_cs.c     |  2 +-
 3 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.c b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
index 825981b5aa40..ed989596d9a3 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.c
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.c
@@ -41,15 +41,15 @@
 
 /**
  * i915_gem_batch_pool_init() - initialize a batch buffer pool
- * @dev: the drm device
+ * @engine: the associated request submission engine
  * @pool: the batch buffer pool
  */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
 			      struct i915_gem_batch_pool *pool)
 {
 	int n;
 
-	pool->dev = dev;
+	pool->engine = engine;
 
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++)
 		INIT_LIST_HEAD(&pool->cache_list[n]);
@@ -65,7 +65,7 @@ void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool)
 {
 	int n;
 
-	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
 	for (n = 0; n < ARRAY_SIZE(pool->cache_list); n++) {
 		struct drm_i915_gem_object *obj, *next;
@@ -101,7 +101,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	struct list_head *list;
 	int n;
 
-	WARN_ON(!mutex_is_locked(&pool->dev->struct_mutex));
+	lockdep_assert_held(&pool->engine->i915->drm.struct_mutex);
 
 	/* Compute a power-of-two bucket, but throw everything greater than
 	 * 16KiB into the same bucket: i.e. the buckets hold objects of
@@ -114,7 +114,8 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 
 	list_for_each_entry_safe(tmp, next, list, batch_pool_link) {
 		/* The batches are strictly LRU ordered */
-		if (tmp->active)
+		if (!i915_gem_active_is_idle(&tmp->last_read[pool->engine->id],
+					     &tmp->base.dev->struct_mutex))
 			break;
 
 		/* While we're looping, do some clean up */
@@ -133,7 +134,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
 	if (obj == NULL) {
 		int ret;
 
-		obj = i915_gem_object_create(pool->dev, size);
+		obj = i915_gem_object_create(&pool->engine->i915->drm, size);
 		if (IS_ERR(obj))
 			return obj;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_batch_pool.h b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
index 848e90703eed..10d5ac4c00d3 100644
--- a/drivers/gpu/drm/i915/i915_gem_batch_pool.h
+++ b/drivers/gpu/drm/i915/i915_gem_batch_pool.h
@@ -27,13 +27,15 @@
 
 #include "i915_drv.h"
 
+struct intel_engine_cs;
+
 struct i915_gem_batch_pool {
-	struct drm_device *dev;
+	struct intel_engine_cs *engine;
 	struct list_head cache_list[4];
 };
 
 /* i915_gem_batch_pool.c */
-void i915_gem_batch_pool_init(struct drm_device *dev,
+void i915_gem_batch_pool_init(struct intel_engine_cs *engine,
 			      struct i915_gem_batch_pool *pool);
 void i915_gem_batch_pool_fini(struct i915_gem_batch_pool *pool);
 struct drm_i915_gem_object*
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 202ad83b3dd1..f495969f749b 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -185,7 +185,7 @@ void intel_engine_setup_common(struct intel_engine_cs *engine)
 	engine->fence_context = fence_context_alloc(1);
 
 	intel_engine_init_hangcheck(engine);
-	i915_gem_batch_pool_init(&engine->i915->drm, &engine->batch_pool);
+	i915_gem_batch_pool_init(engine, &engine->batch_pool);
 }
 
 /**
-- 
2.8.1

* [CI 04/26] drm/i915: Remove request retirement before each batch
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

This reimplements the denial-of-service protection against igt from
commit 227f782e4667 ("drm/i915: Retire requests before creating a new
one") and transfers the stall from before each batch into get_pages().
The issue is that the stall is increasing latency between batches which
is detrimental in some cases (especially coupled with execlists) to
keeping the GPU well fed. Also, we have observed that retiring requests
can itself free objects (and requests) and therefore makes a good first
step when shrinking.

v2: Recycle objects prior to i915_gem_object_get_pages()
v3: Remove the reference to the ring from i915_gem_requests_ring() as it
operates on an intel_engine_cs.
v4: Since commit 9b5f4e5ed6fd ("drm/i915: Retire oldest completed request
before allocating next") we no longer need the safeguard to retire
requests before get_pages(). We no longer see the huge latencies when
hitting the shrinker between allocations.
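
A minimal sketch of the observation (toy numbers, not the shrinker
itself): retire completed work first, since that alone can release
objects and requests before any heavier eviction is attempted.

#include <stdio.h>

static int retire_completed(void)
{
	/* Pretend two buffers became idle and were released. */
	return 2;
}

static void shrink(int target)
{
	/* Retirement is the cheapest first step when reclaiming. */
	target -= retire_completed();
	if (target > 0)
		printf("still need %d objects, evicting further\n", target);
	else
		printf("retirement alone satisfied the request\n");
}

int main(void)
{
	shrink(3);
	shrink(1);
	return 0;
}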

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            | 1 -
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 --
 drivers/gpu/drm/i915/i915_gem_request.c    | 4 ++--
 3 files changed, 2 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index fbda38f25c6b..2de3d16f7b80 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3169,7 +3169,6 @@ struct drm_i915_gem_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
 
 void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
 
 static inline u32 i915_reset_counter(struct i915_gpu_error *error)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 5e3b5054f72d..0593ea3ba211 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -781,8 +781,6 @@ i915_gem_execbuffer_reserve(struct intel_engine_cs *engine,
 	bool has_fenced_gpu_access = INTEL_GEN(engine->i915) < 4;
 	int retry;
 
-	i915_gem_retire_requests_ring(engine);
-
 	vm = list_first_entry(vmas, struct i915_vma, exec_list)->vm;
 
 	INIT_LIST_HEAD(&ordered_vmas);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 6faa84832ade..773b942a3a5f 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -732,7 +732,7 @@ complete:
 	return ret;
 }
 
-void i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
+static void engine_retire_requests(struct intel_engine_cs *engine)
 {
 	struct drm_i915_gem_request *request, *next;
 
@@ -756,7 +756,7 @@ void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(!dev_priv->gt.awake);
 
 	for_each_engine(engine, dev_priv) {
-		i915_gem_retire_requests_ring(engine);
+		engine_retire_requests(engine);
 		if (list_empty(&engine->request_list))
 			dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
 	}
-- 
2.8.1

* [CI 05/26] drm/i915: Remove i915_gem_execbuffer_retire_commands()
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

Move the single line to the callsite as the name is now misleading, and
the purpose is solely to add the request to the execution queue. Here,
we can see that if we failed to dispatch the batch from the request, we
can forgo flushing the GPU when closing the request.
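
A toy model of the new callsite (hypothetical helpers, not the driver
functions): the request is always closed, but the flush is only
requested when the batch was actually submitted.

#include <stdbool.h>
#include <stdio.h>

static void add_request(bool flush)
{
	printf(flush ? "closing request with flush\n"
		     : "closing failed request, no flush\n");
}

static void do_execbuffer(bool submit_ok)
{
	int ret = submit_ok ? 0 : -5; /* pretend dispatch result (-EIO) */

	/* was: i915_gem_execbuffer_retire_commands(params) */
	add_request(ret == 0);
}

int main(void)
{
	do_execbuffer(true);
	do_execbuffer(false);
	return 0;
}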

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 9 +--------
 1 file changed, 1 insertion(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 0593ea3ba211..63984c4d8e5a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1211,13 +1211,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 	}
 }
 
-static void
-i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
-{
-	/* Add a breadcrumb for the completion of the batch buffer */
-	__i915_add_request(params->request, params->batch_obj, true);
-}
-
 static int
 i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 {
@@ -1692,7 +1685,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	ret = execbuf_submit(params, args, &eb->vmas);
 err_request:
-	i915_gem_execbuffer_retire_commands(params);
+	__i915_add_request(params->request, params->batch_obj, ret == 0);
 
 err_batch_unpin:
 	/*
-- 
2.8.1

* [CI 06/26] drm/i915: Fix up vma alignment to be u64
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

This is not the full fix, as we still need to percolate the u64 nature
down through the drm_mm stack, but it is required now to prevent
explosions due to a mismatch between execbuf (eb_vma_misplaced) and vma
binding (i915_vma_misplaced), and it reduces the risk of spurious
changes as we adjust the vma interface in the next patches.

v2: long long casts not required for u64 printk (%llx)
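
A small stand-alone demonstration of the hazard being fixed: a u64
request silently truncates through a u32 parameter, so two halves of
the driver can disagree about whether a vma is misplaced.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t requested = 1ULL << 32;          /* 4GiB alignment */
	uint32_t truncated = (uint32_t)requested; /* silently 0 */

	printf("u64 view: %" PRIu64 ", u32 view: %" PRIu32 "\n",
	       requested, truncated);
	return 0;
}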

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h       | 14 ++++++--------
 drivers/gpu/drm/i915/i915_gem.c       | 22 +++++++++++-----------
 drivers/gpu/drm/i915/i915_gem_evict.c |  5 +++--
 3 files changed, 20 insertions(+), 21 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2de3d16f7b80..74a31358fd87 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3032,13 +3032,13 @@ void i915_gem_free_object(struct drm_gem_object *obj);
 int __must_check
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
-		    uint32_t alignment,
-		    uint64_t flags);
+		    u64 alignment,
+		    u64 flags);
 int __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
-			 uint32_t alignment,
-			 uint64_t flags);
+			 u64 alignment,
+			 u64 flags);
 
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags);
@@ -3398,11 +3398,9 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data,
 
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct i915_address_space *vm,
-					  int min_size,
-					  unsigned alignment,
+					  u64 min_size, u64 alignment,
 					  unsigned cache_level,
-					  unsigned long start,
-					  unsigned long end,
+					  u64 start, u64 end,
 					  unsigned flags);
 int __must_check i915_gem_evict_for_vma(struct i915_vma *target);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d8b2fbb9bcb9..504414c4285c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2963,8 +2963,8 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   const struct i915_ggtt_view *ggtt_view,
-			   unsigned alignment,
-			   uint64_t flags)
+			   u64 alignment,
+			   u64 flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3023,7 +3023,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 		alignment = flags & PIN_MAPPABLE ? fence_alignment :
 						unfenced_alignment;
 	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
-		DRM_DEBUG("Invalid object (view type=%u) alignment requested %u\n",
+		DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
 			  ggtt_view ? ggtt_view->type : 0,
 			  alignment);
 		return ERR_PTR(-EINVAL);
@@ -3678,7 +3678,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 }
 
 static bool
-i915_vma_misplaced(struct i915_vma *vma, uint32_t alignment, uint64_t flags)
+i915_vma_misplaced(struct i915_vma *vma, u64 alignment, u64 flags)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 
@@ -3727,8 +3727,8 @@ static int
 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		       struct i915_address_space *vm,
 		       const struct i915_ggtt_view *ggtt_view,
-		       uint32_t alignment,
-		       uint64_t flags)
+		       u64 alignment,
+		       u64 flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
@@ -3757,7 +3757,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		if (i915_vma_misplaced(vma, alignment, flags)) {
 			WARN(vma->pin_count,
 			     "bo is already pinned in %s with incorrect alignment:"
-			     " offset=%08x %08x, req.alignment=%x, req.map_and_fenceable=%d,"
+			     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
 			     ggtt_view ? "ggtt" : "ppgtt",
 			     upper_32_bits(vma->node.start),
@@ -3798,8 +3798,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
-		    uint32_t alignment,
-		    uint64_t flags)
+		    u64 alignment,
+		    u64 flags)
 {
 	return i915_gem_object_do_pin(obj, vm,
 				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
@@ -3809,8 +3809,8 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
-			 uint32_t alignment,
-			 uint64_t flags)
+			 u64 alignment,
+			 u64 flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 4bce72fa14c4..ef12ecd2b182 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -84,8 +84,9 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
  */
 int
 i915_gem_evict_something(struct i915_address_space *vm,
-			 int min_size, unsigned alignment, unsigned cache_level,
-			 unsigned long start, unsigned long end,
+			 u64 min_size, u64 alignment,
+			 unsigned cache_level,
+			 u64 start, u64 end,
 			 unsigned flags)
 {
 	struct drm_i915_private *dev_priv = to_i915(vm->dev);
-- 
2.8.1

* [CI 07/26] drm/i915: Pad GTT views of exec objects up to user specified size
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

Our GPUs impose certain requirements upon buffers that depend upon how
exactly they are used. Typically this is expressed as a requirement for
a larger surface than would be naively computed by pitch * height.
Normally such requirements are hidden away in the userspace driver, but
when we accept pointers from strangers and later impose extra conditions
on them, the original client allocator has no idea about the
monstrosities in the GPU and we require the userspace driver to inform
the kernel how many padding pages are required beyond the client
allocation.

v2: Long time, no see
v3: Try an anonymous union for uapi struct compatibility
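
A userspace sketch of the new flag (assuming the uapi additions from
this patch are installed; error handling and the rest of the execbuffer
setup are omitted): pad the object's GTT view out to 4MiB regardless of
its backing size.

#include <string.h>
#include <drm/i915_drm.h>

static void setup_exec_object(struct drm_i915_gem_exec_object2 *entry,
			      unsigned int handle)
{
	memset(entry, 0, sizeof(*entry));
	entry->handle = handle;
	entry->flags = EXEC_OBJECT_PAD_TO_SIZE;
	entry->pad_to_size = 4 << 20; /* must be page-aligned */
}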

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            |  6 ++-
 drivers/gpu/drm/i915/i915_gem.c            | 80 ++++++++++++++----------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 17 ++++++-
 include/uapi/drm/i915_drm.h                |  8 ++-
 4 files changed, 64 insertions(+), 47 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 74a31358fd87..1e1369319326 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3032,11 +3032,13 @@ void i915_gem_free_object(struct drm_gem_object *obj);
 int __must_check
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
+		    u64 size,
 		    u64 alignment,
 		    u64 flags);
 int __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
+			 u64 size,
 			 u64 alignment,
 			 u64 flags);
 
@@ -3313,8 +3315,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 
-	return i915_gem_object_pin(obj, &ggtt->base,
-				   alignment, flags | PIN_GLOBAL);
+	return i915_gem_object_pin(obj, &ggtt->base, 0, alignment,
+				   flags | PIN_GLOBAL);
 }
 
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 504414c4285c..4e08aed93225 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1692,7 +1692,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* Now pin it into the GTT if needed */
-	ret = i915_gem_object_ggtt_pin(obj, &view, 0, PIN_MAPPABLE);
+	ret = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 	if (ret)
 		goto unlock;
 
@@ -2956,6 +2956,7 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
  * @obj: object to bind
  * @vm: address space to bind into
  * @ggtt_view: global gtt view if applicable
+ * @size: requested size in bytes (can be larger than the VMA)
  * @alignment: requested alignment
  * @flags: mask of PIN_* flags to use
  */
@@ -2963,21 +2964,20 @@ static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   const struct i915_ggtt_view *ggtt_view,
+			   u64 size,
 			   u64 alignment,
 			   u64 flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-	u32 fence_alignment, unfenced_alignment;
-	u32 search_flag, alloc_flag;
 	u64 start, end;
-	u64 size, fence_size;
+	u32 search_flag, alloc_flag;
 	struct i915_vma *vma;
 	int ret;
 
 	if (i915_is_ggtt(vm)) {
-		u32 view_size;
+		u32 fence_size, fence_alignment, unfenced_alignment;
+		u64 view_size;
 
 		if (WARN_ON(!ggtt_view))
 			return ERR_PTR(-EINVAL);
@@ -2995,48 +2995,39 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 								view_size,
 								obj->tiling_mode,
 								false);
-		size = flags & PIN_MAPPABLE ? fence_size : view_size;
+		size = max(size, view_size);
+		if (flags & PIN_MAPPABLE)
+			size = max_t(u64, size, fence_size);
+
+		if (alignment == 0)
+			alignment = flags & PIN_MAPPABLE ? fence_alignment :
+				unfenced_alignment;
+		if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
+			DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
+				  ggtt_view ? ggtt_view->type : 0,
+				  alignment);
+			return ERR_PTR(-EINVAL);
+		}
 	} else {
-		fence_size = i915_gem_get_gtt_size(dev,
-						   obj->base.size,
-						   obj->tiling_mode);
-		fence_alignment = i915_gem_get_gtt_alignment(dev,
-							     obj->base.size,
-							     obj->tiling_mode,
-							     true);
-		unfenced_alignment =
-			i915_gem_get_gtt_alignment(dev,
-						   obj->base.size,
-						   obj->tiling_mode,
-						   false);
-		size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
+		size = max_t(u64, size, obj->base.size);
+		alignment = 4096;
 	}
 
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
 	end = vm->total;
 	if (flags & PIN_MAPPABLE)
-		end = min_t(u64, end, ggtt->mappable_end);
+		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
 		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
 
-	if (alignment == 0)
-		alignment = flags & PIN_MAPPABLE ? fence_alignment :
-						unfenced_alignment;
-	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
-		DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
-			  ggtt_view ? ggtt_view->type : 0,
-			  alignment);
-		return ERR_PTR(-EINVAL);
-	}
-
 	/* If binding the object/GGTT view requires more space than the entire
 	 * aperture has, reject it early before evicting everything in a vain
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: size=%llu > %s aperture=%llu\n",
+		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
 			  ggtt_view ? ggtt_view->type : 0,
-			  size,
+			  size, obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
 		return ERR_PTR(-E2BIG);
@@ -3530,7 +3521,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_object_ggtt_pin(obj, view, alignment,
+	ret = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
 				       view->type == I915_GGTT_VIEW_NORMAL ?
 				       PIN_MAPPABLE : 0);
 	if (ret)
@@ -3678,12 +3669,14 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 }
 
 static bool
-i915_vma_misplaced(struct i915_vma *vma, u64 alignment, u64 flags)
+i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	if (alignment &&
-	    vma->node.start & (alignment - 1))
+	if (vma->node.size < size)
+		return true;
+
+	if (alignment && vma->node.start & (alignment - 1))
 		return true;
 
 	if (flags & PIN_MAPPABLE && !obj->map_and_fenceable)
@@ -3727,6 +3720,7 @@ static int
 i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		       struct i915_address_space *vm,
 		       const struct i915_ggtt_view *ggtt_view,
+		       u64 size,
 		       u64 alignment,
 		       u64 flags)
 {
@@ -3754,7 +3748,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
-		if (i915_vma_misplaced(vma, alignment, flags)) {
+		if (i915_vma_misplaced(vma, size, alignment, flags)) {
 			WARN(vma->pin_count,
 			     "bo is already pinned in %s with incorrect alignment:"
 			     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
@@ -3775,8 +3769,8 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 
 	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view, alignment,
-						 flags);
+		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view,
+						 size, alignment, flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 	} else {
@@ -3798,17 +3792,19 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
+		    u64 size,
 		    u64 alignment,
 		    u64 flags)
 {
 	return i915_gem_object_do_pin(obj, vm,
 				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
-				      alignment, flags);
+				      size, alignment, flags);
 }
 
 int
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
+			 u64 size,
 			 u64 alignment,
 			 u64 flags)
 {
@@ -3819,7 +3815,7 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	BUG_ON(!view);
 
 	return i915_gem_object_do_pin(obj, &ggtt->base, view,
-				      alignment, flags | PIN_GLOBAL);
+				      size, alignment, flags | PIN_GLOBAL);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 63984c4d8e5a..d2e27e730ecb 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -682,10 +682,14 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 			flags |= PIN_HIGH;
 	}
 
-	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
+	ret = i915_gem_object_pin(obj, vma->vm,
+				  entry->pad_to_size,
+				  entry->alignment,
+				  flags);
 	if ((ret == -ENOSPC  || ret == -E2BIG) &&
 	    only_mappable_for_reloc(entry->flags))
 		ret = i915_gem_object_pin(obj, vma->vm,
+					  entry->pad_to_size,
 					  entry->alignment,
 					  flags & ~PIN_MAPPABLE);
 	if (ret)
@@ -748,6 +752,9 @@ eb_vma_misplaced(struct i915_vma *vma)
 	    vma->node.start & (entry->alignment - 1))
 		return true;
 
+	if (vma->node.size < entry->pad_to_size)
+		return true;
+
 	if (entry->flags & EXEC_OBJECT_PINNED &&
 	    vma->node.start != entry->offset)
 		return true;
@@ -1091,6 +1098,14 @@ validate_exec_list(struct drm_device *dev,
 		if (exec[i].alignment && !is_power_of_2(exec[i].alignment))
 			return -EINVAL;
 
+		/* pad_to_size was once a reserved field, so sanitize it */
+		if (exec[i].flags & EXEC_OBJECT_PAD_TO_SIZE) {
+			if (offset_in_page(exec[i].pad_to_size))
+				return -EINVAL;
+		} else {
+			exec[i].pad_to_size = 0;
+		}
+
 		/* First check for malicious input causing overflow in
 		 * the worst case where we need to allocate the entire
 		 * relocation tree as a single array.
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 33ce5ff9556a..0f292733cffc 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -727,11 +727,15 @@ struct drm_i915_gem_exec_object2 {
 #define EXEC_OBJECT_WRITE		 (1<<2)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
 #define EXEC_OBJECT_PINNED		 (1<<4)
+#define EXEC_OBJECT_PAD_TO_SIZE		 (1<<5)
 /* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
-#define __EXEC_OBJECT_UNKNOWN_FLAGS	(-(EXEC_OBJECT_PINNED<<1))
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PAD_TO_SIZE<<1)
 	__u64 flags;
 
-	__u64 rsvd1;
+	union {
+		__u64 rsvd1;
+		__u64 pad_to_size;
+	};
 	__u64 rsvd2;
 };
 
-- 
2.8.1

* [CI 08/26] drm/i915: Reduce WARN(i915_gem_valid_gtt_space) to a debug-only check
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

i915_gem_valid_gtt_space() is used after inserting the VMA to
double-check the list - the location should have been chosen to pass
all the restrictions.
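
A toy model of the distinction (a local macro for illustration, not the
driver's GEM_BUG_ON): the check still fires loudly in debug builds but
compiles away entirely in production, where the invariant is trusted.

#include <assert.h>

#ifdef DEBUG_GEM
#define MY_BUG_ON(expr) assert(!(expr))
#else
#define MY_BUG_ON(expr) do { } while (0)
#endif

int main(void)
{
	int valid = 1;

	/* was: if (WARN_ON(!valid)) return -EINVAL; */
	MY_BUG_ON(!valid);
	return 0;
}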

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4e08aed93225..1e78ccc6538e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3090,10 +3090,7 @@ search_free:
 			goto err_vma;
 		}
 	}
-	if (WARN_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level))) {
-		ret = -EINVAL;
-		goto err_remove_node;
-	}
+	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
 	trace_i915_vma_bind(vma, flags);
 	ret = i915_vma_bind(vma, obj->cache_level, flags);
-- 
2.8.1

* [CI 09/26] drm/i915: Split insertion/binding of an object into the VM
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

Split the insertion into the address space's range manager and binding
of that object into the GTT to simplify the code flow when pinning a
VMA.
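
A toy model of the resulting flow (hypothetical names, not the driver
functions): reserving a range in the address space and binding the
pages become separate steps, leaving the pin path with a single bind
call whether or not the node already existed.

#include <stdbool.h>
#include <stdio.h>

struct vma { bool has_node; bool bound; };

static int insert_into_vm(struct vma *vma)
{
	vma->has_node = true; /* reserve an address range only */
	return 0;
}

static int vma_bind(struct vma *vma)
{
	vma->bound = true; /* write the page-table entries */
	return 0;
}

static int do_pin(struct vma *vma)
{
	if (!vma->has_node && insert_into_vm(vma))
		return -1;
	return vma_bind(vma); /* one bind path for both cases */
}

int main(void)
{
	struct vma vma = { 0 };

	printf("pin: %d, bound: %d\n", do_pin(&vma), vma.bound);
	return 0;
}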

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 35 +++++++++++++++--------------------
 1 file changed, 15 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1e78ccc6538e..00f8d8fa0b04 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2961,12 +2961,12 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
  * @flags: mask of PIN_* flags to use
  */
 static struct i915_vma *
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-			   struct i915_address_space *vm,
-			   const struct i915_ggtt_view *ggtt_view,
-			   u64 size,
-			   u64 alignment,
-			   u64 flags)
+i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
+			       struct i915_address_space *vm,
+			       const struct i915_ggtt_view *ggtt_view,
+			       u64 size,
+			       u64 alignment,
+			       u64 flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -3092,19 +3092,12 @@ search_free:
 	}
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
-	trace_i915_vma_bind(vma, flags);
-	ret = i915_vma_bind(vma, obj->cache_level, flags);
-	if (ret)
-		goto err_remove_node;
-
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
 	list_move_tail(&vma->vm_link, &vm->inactive_list);
 	obj->bind_count++;
 
 	return vma;
 
-err_remove_node:
-	drm_mm_remove_node(&vma->node);
 err_vma:
 	vma = ERR_PTR(ret);
 err_unpin:
@@ -3764,24 +3757,26 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 		}
 	}
 
-	bound = vma ? vma->bound : 0;
 	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_bind_to_vm(obj, vm, ggtt_view,
-						 size, alignment, flags);
+		vma = i915_gem_object_insert_into_vm(obj, vm, ggtt_view,
+						     size, alignment, flags);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
-	} else {
-		ret = i915_vma_bind(vma, obj->cache_level, flags);
-		if (ret)
-			return ret;
 	}
 
+	bound = vma->bound;
+	ret = i915_vma_bind(vma, obj->cache_level, flags);
+	if (ret)
+		return ret;
+
 	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
 	    (bound ^ vma->bound) & GLOBAL_BIND) {
 		__i915_vma_set_map_and_fenceable(vma);
 		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 	}
 
+	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
+
 	vma->pin_count++;
 	return 0;
 }
-- 
2.8.1

* [CI 10/26] drm/i915: Convert 4096 alignment request to 0 for drm_mm allocations
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

As we always allocate in chunks of 4096 (that being both the PAGE_SIZE
and our own GTT_PAGE_SIZE), we know that all results from the drm_mm are
aligned to at least 4096. The drm_mm allocator itself is optimised for
alignment == 0, and so by converting alignments of 4096 to 0 we can
satisfy our own requirements and still hit the faster path.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 00f8d8fa0b04..eedb80451dd5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3072,6 +3072,15 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 			alloc_flag = DRM_MM_CREATE_DEFAULT;
 		}
 
+		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
+		 * so we know that we always have a minimum alignment of 4096.
+		 * The drm_mm range manager is optimised to return results
+		 * with zero alignment, so where possible use the optimal
+		 * path.
+		 */
+		if (alignment <= 4096)
+			alignment = 0;
+
 search_free:
 		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 							  size, alignment,
-- 
2.8.1

* [CI 11/26] drm/i915: Update the GGTT size/alignment query functions
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

In order to be consistent with other address space functions, we want to
pass around 64-bit sizes, even though all known global GTTs are limited
to 4GiB. Similarly, we are trying to be consistent in using the _ggtt_
nomenclature when referring to the special global GTT.

v2: Update docs to consistently state "global GTT".
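
As a stand-alone worked example of the pre-gen4 rounding in the patch
(a replica for illustration, not the driver function): a 600KiB tiled
object on gen2 starts from the 512KiB minimum fence region and rounds
up to the next power of two, 1MiB.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t get_ggtt_size(uint64_t size, int gen, int tiled)
{
	uint64_t ggtt_size;

	if (gen >= 4 || !tiled)
		return size;

	/* Power-of-two fence region: 1MiB floor on gen3, 512KiB below. */
	ggtt_size = (gen == 3) ? 1024 * 1024 : 512 * 1024;
	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}

int main(void)
{
	printf("%" PRIu64 "\n", get_ggtt_size(600 * 1024, 2, 1)); /* 1048576 */
	return 0;
}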

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h        |  8 ++--
 drivers/gpu/drm/i915/i915_gem.c        | 81 +++++++++++++++++++---------------
 drivers/gpu/drm/i915/i915_gem_tiling.c |  3 +-
 3 files changed, 51 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1e1369319326..b6e56ecb8637 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3241,11 +3241,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-			    int tiling_mode, bool fenced);
+u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode);
+u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+				int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index eedb80451dd5..0be3c5e4d307 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1847,46 +1847,57 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
 		i915_gem_release_mmap(obj);
 }
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+/**
+ * i915_gem_get_ggtt_size - return required global GTT size for an object
+ * @dev: drm device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ *
+ * Return the required global GTT size for an object, taking into account
+ * potential fence register mapping.
+ */
+u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
 {
-	uint32_t gtt_size;
+	u64 ggtt_size;
 
-	if (INTEL_INFO(dev)->gen >= 4 ||
+	GEM_BUG_ON(size == 0);
+
+	if (INTEL_GEN(dev) >= 4 ||
 	    tiling_mode == I915_TILING_NONE)
 		return size;
 
 	/* Previous chips need a power-of-two fence region when tiling */
 	if (IS_GEN3(dev))
-		gtt_size = 1024*1024;
+		ggtt_size = 1024*1024;
 	else
-		gtt_size = 512*1024;
+		ggtt_size = 512*1024;
 
-	while (gtt_size < size)
-		gtt_size <<= 1;
+	while (ggtt_size < size)
+		ggtt_size <<= 1;
 
-	return gtt_size;
+	return ggtt_size;
 }
 
 /**
- * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * i915_gem_get_ggtt_alignment - return required global GTT alignment
  * @dev: drm device
  * @size: object size
  * @tiling_mode: tiling mode
- * @fenced: is fenced alignemned required or not
+ * @fenced: is fenced alignment required or not
  *
- * Return the required GTT alignment for an object, taking into account
+ * Return the required global GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-			   int tiling_mode, bool fenced)
+u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+				int tiling_mode, bool fenced)
 {
+	GEM_BUG_ON(size == 0);
+
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+	if (INTEL_GEN(dev) >= 4 || (!fenced && IS_G33(dev)) ||
 	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
@@ -1894,7 +1905,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	return i915_gem_get_gtt_size(dev, size, tiling_mode);
+	return i915_gem_get_ggtt_size(dev, size, tiling_mode);
 }
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2984,17 +2995,17 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 
 		view_size = i915_ggtt_view_size(obj, ggtt_view);
 
-		fence_size = i915_gem_get_gtt_size(dev,
-						   view_size,
-						   obj->tiling_mode);
-		fence_alignment = i915_gem_get_gtt_alignment(dev,
-							     view_size,
-							     obj->tiling_mode,
-							     true);
-		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
-								view_size,
-								obj->tiling_mode,
-								false);
+		fence_size = i915_gem_get_ggtt_size(dev,
+						    view_size,
+						    obj->tiling_mode);
+		fence_alignment = i915_gem_get_ggtt_alignment(dev,
+							      view_size,
+							      obj->tiling_mode,
+							      true);
+		unfenced_alignment = i915_gem_get_ggtt_alignment(dev,
+								 view_size,
+								 obj->tiling_mode,
+								 false);
 		size = max(size, view_size);
 		if (flags & PIN_MAPPABLE)
 			size = max_t(u64, size, fence_size);
@@ -3698,13 +3709,13 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	bool mappable, fenceable;
 	u32 fence_size, fence_alignment;
 
-	fence_size = i915_gem_get_gtt_size(obj->base.dev,
-					   obj->base.size,
-					   obj->tiling_mode);
-	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
-						     obj->base.size,
-						     obj->tiling_mode,
-						     true);
+	fence_size = i915_gem_get_ggtt_size(obj->base.dev,
+					    obj->base.size,
+					    obj->tiling_mode);
+	fence_alignment = i915_gem_get_ggtt_alignment(obj->base.dev,
+						      obj->base.size,
+						      obj->tiling_mode,
+						      true);
 
 	fenceable = (vma->node.size == fence_size &&
 		     (vma->node.start & (fence_alignment - 1)) == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index fa2eb4a4f212..4e42da691e4e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -133,7 +133,8 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 			return false;
 	}
 
-	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+	size = i915_gem_get_ggtt_size(obj->base.dev,
+				      obj->base.size, tiling_mode);
 	if (i915_gem_obj_ggtt_size(obj) != size)
 		return false;
 
-- 
2.8.1

* [CI 12/26] drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

For consistency, internal functions should take drm_i915_private rather
than drm_device. Now that we are subclassing drm_device, there are no
more code-size wins, but being consistent is its own blessing.
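
As a caller-side sketch of the conversion (the helper names are taken
from the hunks below; the surrounding locals are illustrative only):

	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	u64 size;

	/* fetch the i915 device once, then pass it down */
	size = i915_gem_get_ggtt_size(dev_priv, obj->base.size, tiling_mode);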

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h        |  5 +++--
 drivers/gpu/drm/i915/i915_gem.c        | 30 ++++++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_tiling.c |  8 ++++----
 3 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b6e56ecb8637..3d73394b52d7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3241,8 +3241,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode);
-u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv, u64 size,
+			   int tiling_mode);
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
 				int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0be3c5e4d307..b8f7c821d184 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1849,25 +1849,26 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
 
 /**
  * i915_gem_get_ggtt_size - return required global GTT size for an object
- * @dev: drm device
+ * @dev_priv: i915 device
  * @size: object size
  * @tiling_mode: tiling mode
  *
  * Return the required global GTT size for an object, taking into account
  * potential fence register mapping.
  */
-u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
+u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
+			   u64 size, int tiling_mode)
 {
 	u64 ggtt_size;
 
 	GEM_BUG_ON(size == 0);
 
-	if (INTEL_GEN(dev) >= 4 ||
+	if (INTEL_GEN(dev_priv) >= 4 ||
 	    tiling_mode == I915_TILING_NONE)
 		return size;
 
 	/* Previous chips need a power-of-two fence region when tiling */
-	if (IS_GEN3(dev))
+	if (IS_GEN3(dev_priv))
 		ggtt_size = 1024*1024;
 	else
 		ggtt_size = 512*1024;
@@ -1880,7 +1881,7 @@ u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
 
 /**
  * i915_gem_get_ggtt_alignment - return required global GTT alignment
- * @dev: drm device
+ * @dev_priv: i915 device
  * @size: object size
  * @tiling_mode: tiling mode
  * @fenced: is fenced alignment required or not
@@ -1888,7 +1889,7 @@ u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
  * Return the required global GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
 				int tiling_mode, bool fenced)
 {
 	GEM_BUG_ON(size == 0);
@@ -1897,7 +1898,7 @@ u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_GEN(dev) >= 4 || (!fenced && IS_G33(dev)) ||
+	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
 	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
@@ -1905,7 +1906,7 @@ u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	return i915_gem_get_ggtt_size(dev, size, tiling_mode);
+	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
 }
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2995,14 +2996,14 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 
 		view_size = i915_ggtt_view_size(obj, ggtt_view);
 
-		fence_size = i915_gem_get_ggtt_size(dev,
+		fence_size = i915_gem_get_ggtt_size(dev_priv,
 						    view_size,
 						    obj->tiling_mode);
-		fence_alignment = i915_gem_get_ggtt_alignment(dev,
+		fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
 							      view_size,
 							      obj->tiling_mode,
 							      true);
-		unfenced_alignment = i915_gem_get_ggtt_alignment(dev,
+		unfenced_alignment = i915_gem_get_ggtt_alignment(dev_priv,
 								 view_size,
 								 obj->tiling_mode,
 								 false);
@@ -3706,13 +3707,14 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	bool mappable, fenceable;
 	u32 fence_size, fence_alignment;
 
-	fence_size = i915_gem_get_ggtt_size(obj->base.dev,
+	fence_size = i915_gem_get_ggtt_size(dev_priv,
 					    obj->base.size,
 					    obj->tiling_mode);
-	fence_alignment = i915_gem_get_ggtt_alignment(obj->base.dev,
+	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
 						      obj->base.size,
 						      obj->tiling_mode,
 						      true);
@@ -3721,7 +3723,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 		     (vma->node.start & (fence_alignment - 1)) == 0);
 
 	mappable = (vma->node.start + fence_size <=
-		    to_i915(obj->base.dev)->ggtt.mappable_end);
+		    dev_priv->ggtt.mappable_end);
 
 	obj->map_and_fenceable = mappable && fenceable;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 4e42da691e4e..b7f9875f69b4 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -117,15 +117,16 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 static bool
 i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 {
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	u32 size;
 
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (INTEL_INFO(obj->base.dev)->gen >= 4)
+	if (INTEL_GEN(dev_priv) >= 4)
 		return true;
 
-	if (IS_GEN3(obj->base.dev)) {
+	if (IS_GEN3(dev_priv)) {
 		if (i915_gem_obj_ggtt_offset(obj) & ~I915_FENCE_START_MASK)
 			return false;
 	} else {
@@ -133,8 +134,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 			return false;
 	}
 
-	size = i915_gem_get_ggtt_size(obj->base.dev,
-				      obj->base.size, tiling_mode);
+	size = i915_gem_get_ggtt_size(dev_priv, obj->base.size, tiling_mode);
 	if (i915_gem_obj_ggtt_size(obj) != size)
 		return false;
 
-- 
2.8.1

* [CI 13/26] drm/i915: Record allocated vma size
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

Tracking the size of the VMA as allocated allows us to dramatically
reduce the complexity of later functions (like inserting the VMA into
the drm_mm range manager).
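
As a condensed restatement of the __i915_gem_vma_create() hunk below,
the allocated size is now recorded once at creation, per view type
(view parameters are in pages, hence the PAGE_SHIFT scaling):

	vma->size = obj->base.size;
	if (view->type == I915_GGTT_VIEW_PARTIAL) {
		vma->size = view->params.partial.size;
		vma->size <<= PAGE_SHIFT;
	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
		vma->size = intel_rotation_info_size(&view->params.rotated);
		vma->size <<= PAGE_SHIFT;
	}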

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c     | 103 ++++++++++++++----------------------
 drivers/gpu/drm/i915/i915_gem_gtt.c |  67 ++++++++---------------
 drivers/gpu/drm/i915/i915_gem_gtt.h |   5 +-
 3 files changed, 63 insertions(+), 112 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b8f7c821d184..2b91dee7c328 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2980,53 +2980,36 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 			       u64 alignment,
 			       u64 flags)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	u64 start, end;
-	u32 search_flag, alloc_flag;
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct i915_vma *vma;
+	u64 start, end;
+	u64 min_alignment;
 	int ret;
 
-	if (i915_is_ggtt(vm)) {
-		u32 fence_size, fence_alignment, unfenced_alignment;
-		u64 view_size;
-
-		if (WARN_ON(!ggtt_view))
-			return ERR_PTR(-EINVAL);
-
-		view_size = i915_ggtt_view_size(obj, ggtt_view);
-
-		fence_size = i915_gem_get_ggtt_size(dev_priv,
-						    view_size,
-						    obj->tiling_mode);
-		fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
-							      view_size,
-							      obj->tiling_mode,
-							      true);
-		unfenced_alignment = i915_gem_get_ggtt_alignment(dev_priv,
-								 view_size,
-								 obj->tiling_mode,
-								 false);
-		size = max(size, view_size);
-		if (flags & PIN_MAPPABLE)
-			size = max_t(u64, size, fence_size);
-
-		if (alignment == 0)
-			alignment = flags & PIN_MAPPABLE ? fence_alignment :
-				unfenced_alignment;
-		if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
-			DRM_DEBUG("Invalid object (view type=%u) alignment requested %llx\n",
-				  ggtt_view ? ggtt_view->type : 0,
-				  alignment);
-			return ERR_PTR(-EINVAL);
-		}
-	} else {
-		size = max_t(u64, size, obj->base.size);
-		alignment = 4096;
+	vma = ggtt_view ?
+		i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
+		i915_gem_obj_lookup_or_create_vma(obj, vm);
+	if (IS_ERR(vma))
+		return vma;
+
+	size = max(size, vma->size);
+	if (flags & PIN_MAPPABLE)
+		size = i915_gem_get_ggtt_size(dev_priv, size, obj->tiling_mode);
+
+	min_alignment =
+		i915_gem_get_ggtt_alignment(dev_priv, size, obj->tiling_mode,
+					    flags & PIN_MAPPABLE);
+	if (alignment == 0)
+		alignment = min_alignment;
+	if (alignment & (min_alignment - 1)) {
+		DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
+			  alignment, min_alignment);
+		return ERR_PTR(-EINVAL);
 	}
 
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
-	end = vm->total;
+
+	end = vma->vm->total;
 	if (flags & PIN_MAPPABLE)
 		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
 	if (flags & PIN_ZONE_4G)
@@ -3037,8 +3020,7 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 	 * attempt to find space.
 	 */
 	if (size > end) {
-		DRM_DEBUG("Attempting to bind an object (view type=%u) larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
-			  ggtt_view ? ggtt_view->type : 0,
+		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
 			  size, obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
@@ -3051,31 +3033,27 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_pin_pages(obj);
 
-	vma = ggtt_view ? i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
-			  i915_gem_obj_lookup_or_create_vma(obj, vm);
-
-	if (IS_ERR(vma))
-		goto err_unpin;
-
 	if (flags & PIN_OFFSET_FIXED) {
 		uint64_t offset = flags & PIN_OFFSET_MASK;
-
-		if (offset & (alignment - 1) || offset + size > end) {
+		if (offset & (alignment - 1) || offset > end - size) {
 			ret = -EINVAL;
-			goto err_vma;
+			goto err_unpin;
 		}
+
 		vma->node.start = offset;
 		vma->node.size = size;
 		vma->node.color = obj->cache_level;
-		ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
 		if (ret) {
 			ret = i915_gem_evict_for_vma(vma);
 			if (ret == 0)
-				ret = drm_mm_reserve_node(&vm->mm, &vma->node);
+				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
+			if (ret)
+				goto err_unpin;
 		}
-		if (ret)
-			goto err_vma;
 	} else {
+		u32 search_flag, alloc_flag;
+
 		if (flags & PIN_HIGH) {
 			search_flag = DRM_MM_SEARCH_BELOW;
 			alloc_flag = DRM_MM_CREATE_TOP;
@@ -3094,36 +3072,35 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 			alignment = 0;
 
 search_free:
-		ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
+		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
+							  &vma->node,
 							  size, alignment,
 							  obj->cache_level,
 							  start, end,
 							  search_flag,
 							  alloc_flag);
 		if (ret) {
-			ret = i915_gem_evict_something(vm, size, alignment,
+			ret = i915_gem_evict_something(vma->vm, size, alignment,
 						       obj->cache_level,
 						       start, end,
 						       flags);
 			if (ret == 0)
 				goto search_free;
 
-			goto err_vma;
+			goto err_unpin;
 		}
 	}
 	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_move_tail(&vma->vm_link, &vm->inactive_list);
+	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	obj->bind_count++;
 
 	return vma;
 
-err_vma:
-	vma = ERR_PTR(ret);
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-	return vma;
+	return ERR_PTR(ret);
 }
 
 bool
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index f1f14e5a5a99..b8c1e9d4fac8 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -184,7 +184,7 @@ static void ppgtt_unbind_vma(struct i915_vma *vma)
 {
 	vma->vm->clear_range(vma->vm,
 			     vma->node.start,
-			     vma->obj->base.size,
+			     vma->size,
 			     true);
 }
 
@@ -2695,28 +2695,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 static void ggtt_unbind_vma(struct i915_vma *vma)
 {
-	struct drm_device *dev = vma->vm->dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_object *obj = vma->obj;
-	const uint64_t size = min_t(uint64_t,
-				    obj->base.size,
-				    vma->node.size);
+	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
+	const u64 size = min(vma->size, vma->node.size);
 
-	if (vma->bound & GLOBAL_BIND) {
+	if (vma->bound & GLOBAL_BIND)
 		vma->vm->clear_range(vma->vm,
-				     vma->node.start,
-				     size,
+				     vma->node.start, size,
 				     true);
-	}
-
-	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
-		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
 
+	if (vma->bound & LOCAL_BIND && appgtt)
 		appgtt->base.clear_range(&appgtt->base,
-					 vma->node.start,
-					 size,
+					 vma->node.start, size,
 					 true);
-	}
 }
 
 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
@@ -3374,14 +3364,14 @@ void i915_vma_close(struct i915_vma *vma)
 static struct i915_vma *
 __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 		      struct i915_address_space *vm,
-		      const struct i915_ggtt_view *ggtt_view)
+		      const struct i915_ggtt_view *view)
 {
 	struct i915_vma *vma;
 	int i;
 
 	GEM_BUG_ON(vm->closed);
 
-	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
+	if (WARN_ON(i915_is_ggtt(vm) != !!view))
 		return ERR_PTR(-EINVAL);
 
 	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
@@ -3395,12 +3385,22 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	list_add(&vma->vm_link, &vm->unbound_list);
 	vma->vm = vm;
 	vma->obj = obj;
+	vma->size = obj->base.size;
 	vma->is_ggtt = i915_is_ggtt(vm);
 
-	if (i915_is_ggtt(vm))
-		vma->ggtt_view = *ggtt_view;
-	else
+	if (i915_is_ggtt(vm)) {
+		vma->ggtt_view = *view;
+		if (view->type == I915_GGTT_VIEW_PARTIAL) {
+			vma->size = view->params.partial.size;
+			vma->size <<= PAGE_SHIFT;
+		} else if (view->type == I915_GGTT_VIEW_ROTATED) {
+			vma->size =
+				intel_rotation_info_size(&view->params.rotated);
+			vma->size <<= PAGE_SHIFT;
+		}
+	} else {
 		i915_ppgtt_get(i915_vm_to_ppgtt(vm));
+	}
 
 	list_add_tail(&vma->obj_link, &obj->vma_list);
 
@@ -3685,29 +3685,6 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	return 0;
 }
 
-/**
- * i915_ggtt_view_size - Get the size of a GGTT view.
- * @obj: Object the view is of.
- * @view: The view in question.
- *
- * @return The size of the GGTT view in bytes.
- */
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
-		    const struct i915_ggtt_view *view)
-{
-	if (view->type == I915_GGTT_VIEW_NORMAL) {
-		return obj->base.size;
-	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
-		return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
-	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
-		return view->params.partial.size << PAGE_SHIFT;
-	} else {
-		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
-		return obj->base.size;
-	}
-}
-
 void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 {
 	void __iomem *ptr;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index f6cc3fe8e1ad..2c0360c1ab97 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -180,6 +180,7 @@ struct i915_vma {
 	struct drm_i915_gem_object *obj;
 	struct i915_address_space *vm;
 	void __iomem *iomap;
+	u64 size;
 
 	unsigned int active;
 	struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -608,10 +609,6 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 	return true;
 }
 
-size_t
-i915_ggtt_view_size(struct drm_i915_gem_object *obj,
-		    const struct i915_ggtt_view *view);
-
 /**
  * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
  * @vma: VMA to iomap
-- 
2.8.1

* [CI 14/26] drm/i915: Wrap vma->pin_count accessors with small inline helpers
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

In the next few patches, the VMA pinning API is overhauled; to reduce
the churn, we pull the accessor updates out into this prep patch.
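
A minimal before/after sketch (the helper definitions are in the
i915_gem_gtt.h hunk below; the surrounding logic is illustrative):

	if (i915_vma_is_pinned(vma))	/* was: vma->pin_count > 0 */
		return -EBUSY;

	__i915_vma_pin(vma);		/* was: vma->pin_count++ */
	/* ... work during which the vma must not be evicted ... */
	__i915_vma_unpin(vma);		/* was: vma->pin_count-- */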

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_gem.c            | 26 ++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_evict.c      | 12 ++++++------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2 +-
 drivers/gpu/drm/i915/i915_gem_fence.c      |  2 +-
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 10 +++++-----
 drivers/gpu/drm/i915/i915_gem_gtt.h        | 31 ++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gpu_error.c      |  4 ++--
 8 files changed, 58 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index b35e6174fa7d..4401a2ab9a74 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -168,7 +168,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->pin_count > 0)
+		if (i915_vma_is_pinned(vma))
 			pin_count++;
 	}
 	seq_printf(m, " (pinned x %d)", pin_count);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2b91dee7c328..6987cb22ff9c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -153,10 +153,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
-		if (vma->pin_count)
+		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
 	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
-		if (vma->pin_count)
+		if (i915_vma_is_pinned(vma))
 			pinned += vma->node.size;
 	mutex_unlock(&dev->struct_mutex);
 
@@ -2809,7 +2809,7 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
 
 static void __i915_vma_iounmap(struct i915_vma *vma)
 {
-	GEM_BUG_ON(vma->pin_count);
+	GEM_BUG_ON(i915_vma_is_pinned(vma));
 
 	if (vma->iomap == NULL)
 		return;
@@ -2836,7 +2836,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		 * take a pin on the vma so that the second unbind is
 		 * aborted.
 		 */
-		vma->pin_count++;
+		__i915_vma_pin(vma);
 
 		for_each_active(active, idx) {
 			ret = i915_gem_active_retire(&vma->last_read[idx],
@@ -2845,14 +2845,14 @@ int i915_vma_unbind(struct i915_vma *vma)
 				break;
 		}
 
-		vma->pin_count--;
+		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
 
 		GEM_BUG_ON(i915_vma_is_active(vma));
 	}
 
-	if (vma->pin_count)
+	if (i915_vma_is_pinned(vma))
 		return -EBUSY;
 
 	if (!drm_mm_node_allocated(&vma->node))
@@ -3297,7 +3297,7 @@ restart:
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		if (vma->pin_count) {
+		if (i915_vma_is_pinned(vma)) {
 			DRM_DEBUG("can not change the cache level of pinned objects\n");
 			return -EBUSY;
 		}
@@ -3734,11 +3734,11 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 			  i915_gem_obj_to_vma(obj, vm);
 
 	if (vma) {
-		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+		if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
 		if (i915_vma_misplaced(vma, size, alignment, flags)) {
-			WARN(vma->pin_count,
+			WARN(i915_vma_is_pinned(vma),
 			     "bo is already pinned in %s with incorrect alignment:"
 			     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
@@ -3776,7 +3776,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
 
-	vma->pin_count++;
+	__i915_vma_pin(vma);
 	return 0;
 }
 
@@ -3815,10 +3815,10 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 {
 	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
-	WARN_ON(vma->pin_count == 0);
+	WARN_ON(!i915_vma_is_pinned(vma));
 	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
 
-	--vma->pin_count;
+	__i915_vma_unpin(vma);
 }
 
 int
@@ -4686,7 +4686,7 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->pin_count > 0)
+		if (i915_vma_is_pinned(vma))
 			return true;
 
 	return false;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index ef12ecd2b182..7be425826539 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -49,7 +49,7 @@ gpu_is_idle(struct drm_i915_private *dev_priv)
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
-	if (vma->pin_count)
+	if (i915_vma_is_pinned(vma))
 		return false;
 
 	if (WARN_ON(!list_empty(&vma->exec_list)))
@@ -183,7 +183,7 @@ found:
 	 */
 	list_for_each_entry_safe(vma, next, &eviction_list, exec_list) {
 		if (drm_mm_scan_remove_block(&vma->node))
-			vma->pin_count++;
+			__i915_vma_pin(vma);
 		else
 			list_del_init(&vma->exec_list);
 	}
@@ -195,7 +195,7 @@ found:
 				       exec_list);
 
 		list_del_init(&vma->exec_list);
-		vma->pin_count--;
+		__i915_vma_unpin(vma);
 		if (ret == 0)
 			ret = i915_vma_unbind(vma);
 	}
@@ -220,8 +220,8 @@ i915_gem_evict_for_vma(struct i915_vma *target)
 
 		vma = container_of(node, typeof(*vma), node);
 
-		if (vma->pin_count) {
-			if (!vma->exec_entry || (vma->pin_count > 1))
+		if (i915_vma_is_pinned(vma)) {
+			if (!vma->exec_entry || i915_vma_pin_count(vma) > 1)
 				/* Object is pinned for some other use */
 				return -EBUSY;
 
@@ -281,7 +281,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 	}
 
 	list_for_each_entry_safe(vma, next, &vm->inactive_list, vm_link)
-		if (vma->pin_count == 0)
+		if (!i915_vma_is_pinned(vma))
 			WARN_ON(i915_vma_unbind(vma));
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d2e27e730ecb..82ed80f68103 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -261,7 +261,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 		i915_gem_object_unpin_fence(obj);
 
 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		vma->pin_count--;
+		__i915_vma_unpin(vma);
 
 	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_fence.c b/drivers/gpu/drm/i915/i915_gem_fence.c
index dbaab9ce29c9..3b462da612ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence.c
@@ -431,7 +431,7 @@ i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
 
 		WARN_ON(!ggtt_vma ||
 			dev_priv->fence_regs[obj->fence_reg].pin_count >
-			ggtt_vma->pin_count);
+			i915_vma_pin_count(ggtt_vma));
 		dev_priv->fence_regs[obj->fence_reg].pin_count++;
 		return true;
 	} else
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b8c1e9d4fac8..088b89b3ea48 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3334,7 +3334,7 @@ i915_vma_retire(struct i915_gem_active *active,
 		return;
 
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	if (unlikely(vma->closed && !vma->pin_count))
+	if (unlikely(vma->closed && !i915_vma_is_pinned(vma)))
 		WARN_ON(i915_vma_unbind(vma));
 }
 
@@ -3357,7 +3357,7 @@ void i915_vma_close(struct i915_vma *vma)
 	vma->closed = true;
 
 	list_del_init(&vma->obj_link);
-	if (!i915_vma_is_active(vma) && !vma->pin_count)
+	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
 		WARN_ON(i915_vma_unbind(vma));
 }
 
@@ -3666,12 +3666,12 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 
 	if (vma->bound == 0 && vma->vm->allocate_va_range) {
 		/* XXX: i915_vma_pin() will fix this +- hack */
-		vma->pin_count++;
+		__i915_vma_pin(vma);
 		trace_i915_va_alloc(vma);
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
 						 vma->node.size);
-		vma->pin_count--;
+		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
 	}
@@ -3707,6 +3707,6 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 		vma->iomap = ptr;
 	}
 
-	vma->pin_count++;
+	__i915_vma_pin(vma);
 	return ptr;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 2c0360c1ab97..eee22fcf5826 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -609,6 +609,34 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 	return true;
 }
 
+static inline int i915_vma_pin_count(const struct i915_vma *vma)
+{
+	return vma->pin_count;
+}
+
+static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
+{
+	return i915_vma_pin_count(vma);
+}
+
+static inline void __i915_vma_pin(struct i915_vma *vma)
+{
+	vma->pin_count++;
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+}
+
+static inline void __i915_vma_unpin(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+	vma->pin_count--;
+}
+
+static inline void i915_vma_unpin(struct i915_vma *vma)
+{
+	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
+	__i915_vma_unpin(vma);
+}
+
 /**
  * i915_vma_pin_iomap - calls ioremap_wc to map the GGTT VMA via the aperture
  * @vma: VMA to iomap
@@ -637,9 +665,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma);
 static inline void i915_vma_unpin_iomap(struct i915_vma *vma)
 {
 	lockdep_assert_held(&vma->vm->dev->struct_mutex);
-	GEM_BUG_ON(vma->pin_count == 0);
 	GEM_BUG_ON(vma->iomap == NULL);
-	vma->pin_count--;
+	i915_vma_unpin(vma);
 }
 
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index c19f72e1bcf7..d94eb907a23a 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -818,7 +818,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
 			break;
 
 		list_for_each_entry(vma, &obj->vma_list, obj_link)
-			if (vma->vm == vm && vma->pin_count > 0)
+			if (vma->vm == vm && i915_vma_is_pinned(vma))
 				capture_bo(err++, vma);
 	}
 
@@ -1230,7 +1230,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
 		list_for_each_entry(vma, &obj->vma_list, obj_link)
-			if (vma->vm == vm && vma->pin_count > 0)
+			if (vma->vm == vm && i915_vma_is_pinned(vma))
 				i++;
 	}
 	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
-- 
2.8.1

* [CI 15/26] drm/i915: Start passing around i915_vma from execbuffer
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

During execbuffer we look up each i915_vma in order to reserve it in
the VM. However, we then do a second lookup of the same vma in order to
pin it, all because we lack the necessary interfaces to operate on an
i915_vma directly - so introduce i915_vma_pin()!

v2: Tidy parameter lists to remove one level of redirection in the hot
path.
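
The single-lookup flow then reads roughly as below (a sketch assembled
from the i915_gem_object_ggtt_pin() hunk in this patch):

	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* pin the vma we already hold - no second lookup of the object */
	return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);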

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h            |  24 +---
 drivers/gpu/drm/i915/i915_gem.c            | 173 ++++++++++++-----------------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 142 ++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_gtt.c        |   3 -
 drivers/gpu/drm/i915/i915_gem_gtt.h        |  14 +++
 5 files changed, 149 insertions(+), 207 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3d73394b52d7..cda8238c952f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3018,23 +3018,6 @@ struct drm_i915_gem_object *i915_gem_object_create_from_data(
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file);
 void i915_gem_free_object(struct drm_gem_object *obj);
 
-/* Flags used by pin/bind&friends. */
-#define PIN_MAPPABLE	(1<<0)
-#define PIN_NONBLOCK	(1<<1)
-#define PIN_GLOBAL	(1<<2)
-#define PIN_OFFSET_BIAS	(1<<3)
-#define PIN_USER	(1<<4)
-#define PIN_UPDATE	(1<<5)
-#define PIN_ZONE_4G	(1<<6)
-#define PIN_HIGH	(1<<7)
-#define PIN_OFFSET_FIXED	(1<<8)
-#define PIN_OFFSET_MASK (~4095)
-int __must_check
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags);
 int __must_check
 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 const struct i915_ggtt_view *view,
@@ -3311,11 +3294,8 @@ i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
 		      unsigned flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
-	return i915_gem_object_pin(obj, &ggtt->base, 0, alignment,
-				   flags | PIN_GLOBAL);
+	return i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
+					0, alignment, flags);
 }
 
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6987cb22ff9c..325a546ff88e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2963,34 +2963,30 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 }
 
 /**
- * Finds free space in the GTT aperture and binds the object or a view of it
- * there.
- * @obj: object to bind
- * @vm: address space to bind into
- * @ggtt_view: global gtt view if applicable
+ * i915_vma_insert - finds a slot for the vma in its address space
+ * @vma: the vma
  * @size: requested size in bytes (can be larger than the VMA)
- * @alignment: requested alignment
+ * @alignment: required alignment
  * @flags: mask of PIN_* flags to use
+ *
+ * First we try to allocate some free space that meets the requirements for
+ * the VMA. Failing that, if the flags permit, it will evict an old VMA,
+ * preferably the oldest idle entry to make room for the new VMA.
+ *
+ * Returns:
+ * 0 on success, negative error code otherwise.
  */
-static struct i915_vma *
-i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
-			       struct i915_address_space *vm,
-			       const struct i915_ggtt_view *ggtt_view,
-			       u64 size,
-			       u64 alignment,
-			       u64 flags)
+static int
+i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_vma *vma;
+	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
+	struct drm_i915_gem_object *obj = vma->obj;
 	u64 start, end;
 	u64 min_alignment;
 	int ret;
 
-	vma = ggtt_view ?
-		i915_gem_obj_lookup_or_create_ggtt_vma(obj, ggtt_view) :
-		i915_gem_obj_lookup_or_create_vma(obj, vm);
-	if (IS_ERR(vma))
-		return vma;
+	GEM_BUG_ON(vma->bound);
+	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
 	size = max(size, vma->size);
 	if (flags & PIN_MAPPABLE)
@@ -3004,7 +3000,7 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 	if (alignment & (min_alignment - 1)) {
 		DRM_DEBUG("Invalid object alignment requested %llu, minimum %llu\n",
 			  alignment, min_alignment);
-		return ERR_PTR(-EINVAL);
+		return -EINVAL;
 	}
 
 	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
@@ -3024,17 +3020,17 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 			  size, obj->base.size,
 			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  end);
-		return ERR_PTR(-E2BIG);
+		return -E2BIG;
 	}
 
 	ret = i915_gem_object_get_pages(obj);
 	if (ret)
-		return ERR_PTR(ret);
+		return ret;
 
 	i915_gem_object_pin_pages(obj);
 
 	if (flags & PIN_OFFSET_FIXED) {
-		uint64_t offset = flags & PIN_OFFSET_MASK;
+		u64 offset = flags & PIN_OFFSET_MASK;
 		if (offset & (alignment - 1) || offset > end - size) {
 			ret = -EINVAL;
 			goto err_unpin;
@@ -3096,11 +3092,11 @@ search_free:
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
 	obj->bind_count++;
 
-	return vma;
+	return 0;
 
 err_unpin:
 	i915_gem_object_unpin_pages(obj);
-	return ERR_PTR(ret);
+	return ret;
 }
 
 bool
@@ -3661,6 +3657,9 @@ i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
 	struct drm_i915_gem_object *obj = vma->obj;
 
+	if (!drm_mm_node_allocated(&vma->node))
+		return false;
+
 	if (vma->node.size < size)
 		return true;
 
@@ -3705,91 +3704,42 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	obj->map_and_fenceable = mappable && fenceable;
 }
 
-static int
-i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
-		       struct i915_address_space *vm,
-		       const struct i915_ggtt_view *ggtt_view,
-		       u64 size,
-		       u64 alignment,
-		       u64 flags)
+int
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	struct i915_vma *vma;
-	unsigned bound;
+	unsigned int bound = vma->bound;
 	int ret;
 
-	if (WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base))
-		return -ENODEV;
-
-	if (WARN_ON(flags & (PIN_GLOBAL | PIN_MAPPABLE) && !i915_is_ggtt(vm)))
-		return -EINVAL;
-
-	if (WARN_ON((flags & (PIN_MAPPABLE | PIN_GLOBAL)) == PIN_MAPPABLE))
-		return -EINVAL;
-
-	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
-		return -EINVAL;
-
-	vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
-			  i915_gem_obj_to_vma(obj, vm);
-
-	if (vma) {
-		if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
-			return -EBUSY;
+	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
+	GEM_BUG_ON((flags & PIN_GLOBAL) && !vma->is_ggtt);
 
-		if (i915_vma_misplaced(vma, size, alignment, flags)) {
-			WARN(i915_vma_is_pinned(vma),
-			     "bo is already pinned in %s with incorrect alignment:"
-			     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
-			     " obj->map_and_fenceable=%d\n",
-			     ggtt_view ? "ggtt" : "ppgtt",
-			     upper_32_bits(vma->node.start),
-			     lower_32_bits(vma->node.start),
-			     alignment,
-			     !!(flags & PIN_MAPPABLE),
-			     obj->map_and_fenceable);
-			ret = i915_vma_unbind(vma);
-			if (ret)
-				return ret;
+	if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+		return -EBUSY;
 
-			vma = NULL;
-		}
-	}
+	/* Pin early to prevent the shrinker/eviction logic from destroying
+	 * our vma as we insert and bind.
+	 */
+	__i915_vma_pin(vma);
 
-	if (vma == NULL || !drm_mm_node_allocated(&vma->node)) {
-		vma = i915_gem_object_insert_into_vm(obj, vm, ggtt_view,
-						     size, alignment, flags);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+	if (!bound) {
+		ret = i915_vma_insert(vma, size, alignment, flags);
+		if (ret)
+			goto err;
 	}
 
-	bound = vma->bound;
-	ret = i915_vma_bind(vma, obj->cache_level, flags);
+	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
 	if (ret)
-		return ret;
+		goto err;
 
-	if (ggtt_view && ggtt_view->type == I915_GGTT_VIEW_NORMAL &&
-	    (bound ^ vma->bound) & GLOBAL_BIND) {
+	if ((bound ^ vma->bound) & GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
-		WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
-	}
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
-
-	__i915_vma_pin(vma);
 	return 0;
-}
 
-int
-i915_gem_object_pin(struct drm_i915_gem_object *obj,
-		    struct i915_address_space *vm,
-		    u64 size,
-		    u64 alignment,
-		    u64 flags)
-{
-	return i915_gem_object_do_pin(obj, vm,
-				      i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL,
-				      size, alignment, flags);
+err:
+	__i915_vma_unpin(vma);
+	return ret;
 }
 
 int
@@ -3799,14 +3749,35 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 			 u64 alignment,
 			 u64 flags)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct i915_ggtt *ggtt = &dev_priv->ggtt;
+	struct i915_vma *vma;
+	int ret;
 
 	BUG_ON(!view);
 
-	return i915_gem_object_do_pin(obj, &ggtt->base, view,
-				      size, alignment, flags | PIN_GLOBAL);
+	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	if (i915_vma_misplaced(vma, size, alignment, flags)) {
+		if (flags & PIN_NONBLOCK &&
+		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
+			return -ENOSPC;
+
+		WARN(i915_vma_is_pinned(vma),
+		     "bo is already pinned in ggtt with incorrect alignment:"
+		     " offset=%08x %08x, req.alignment=%llx, req.map_and_fenceable=%d,"
+		     " obj->map_and_fenceable=%d\n",
+		     upper_32_bits(vma->node.start),
+		     lower_32_bits(vma->node.start),
+		     alignment,
+		     !!(flags & PIN_MAPPABLE),
+		     obj->map_and_fenceable);
+		ret = i915_vma_unbind(vma);
+		if (ret)
+			return ret;
+	}
+
+	return i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 82ed80f68103..d95a0432678f 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -45,11 +45,10 @@
 struct i915_execbuffer_params {
 	struct drm_device               *dev;
 	struct drm_file                 *file;
-	u32				 dispatch_flags;
-	u32				 args_batch_start_offset;
-	u32				 batch_obj_vm_offset;
+	struct i915_vma			*batch;
+	u32				dispatch_flags;
+	u32				args_batch_start_offset;
 	struct intel_engine_cs          *engine;
-	struct drm_i915_gem_object      *batch_obj;
 	struct i915_gem_context         *ctx;
 	struct drm_i915_gem_request     *request;
 };
@@ -102,6 +101,26 @@ eb_reset(struct eb_vmas *eb)
 		memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
 }
 
+static struct i915_vma *
+eb_get_batch(struct eb_vmas *eb)
+{
+	struct i915_vma *vma = list_entry(eb->vmas.prev, typeof(*vma), exec_list);
+
+	/*
+	 * SNA is doing fancy tricks with compressing batch buffers, which leads
+	 * to negative relocation deltas. Usually that works out ok since the
+	 * relocate address is still positive, except when the batch is placed
+	 * very low in the GTT. Ensure this doesn't happen.
+	 *
+	 * Note that actual hangs have only been observed on gen7, but for
+	 * paranoia do it everywhere.
+	 */
+	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
+		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
+
+	return vma;
+}
+
 static int
 eb_lookup_vmas(struct eb_vmas *eb,
 	       struct drm_i915_gem_exec_object2 *exec,
@@ -198,35 +217,6 @@ err:
 	return ret;
 }
 
-static inline struct i915_vma *
-eb_get_batch_vma(struct eb_vmas *eb)
-{
-	/* The batch is always the LAST item in the VMA list */
-	struct i915_vma *vma = list_last_entry(&eb->vmas, typeof(*vma), exec_list);
-
-	return vma;
-}
-
-static struct drm_i915_gem_object *
-eb_get_batch(struct eb_vmas *eb)
-{
-	struct i915_vma *vma = eb_get_batch_vma(eb);
-
-	/*
-	 * SNA is doing fancy tricks with compressing batch buffers, which leads
-	 * to negative relocation deltas. Usually that works out ok since the
-	 * relocate address is still positive, except when the batch is placed
-	 * very low in the GTT. Ensure this doesn't happen.
-	 *
-	 * Note that actual hangs have only been observed on gen7, but for
-	 * paranoia do it everywhere.
-	 */
-	if ((vma->exec_entry->flags & EXEC_OBJECT_PINNED) == 0)
-		vma->exec_entry->flags |= __EXEC_OBJECT_NEEDS_BIAS;
-
-	return vma->obj;
-}
-
 static struct i915_vma *eb_get_vma(struct eb_vmas *eb, unsigned long handle)
 {
 	if (eb->and < 0) {
@@ -682,16 +672,16 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 			flags |= PIN_HIGH;
 	}
 
-	ret = i915_gem_object_pin(obj, vma->vm,
-				  entry->pad_to_size,
-				  entry->alignment,
-				  flags);
-	if ((ret == -ENOSPC  || ret == -E2BIG) &&
+	ret = i915_vma_pin(vma,
+			   entry->pad_to_size,
+			   entry->alignment,
+			   flags);
+	if ((ret == -ENOSPC || ret == -E2BIG) &&
 	    only_mappable_for_reloc(entry->flags))
-		ret = i915_gem_object_pin(obj, vma->vm,
-					  entry->pad_to_size,
-					  entry->alignment,
-					  flags & ~PIN_MAPPABLE);
+		ret = i915_vma_pin(vma,
+				   entry->pad_to_size,
+				   entry->alignment,
+				   flags & ~PIN_MAPPABLE);
 	if (ret)
 		return ret;
 
@@ -1252,11 +1242,11 @@ i915_reset_gen7_sol_offsets(struct drm_i915_gem_request *req)
 	return 0;
 }
 
-static struct drm_i915_gem_object*
+static struct i915_vma*
 i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 			  struct drm_i915_gem_exec_object2 *shadow_exec_entry,
-			  struct eb_vmas *eb,
 			  struct drm_i915_gem_object *batch_obj,
+			  struct eb_vmas *eb,
 			  u32 batch_start_offset,
 			  u32 batch_len,
 			  bool is_master)
@@ -1268,7 +1258,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 	shadow_batch_obj = i915_gem_batch_pool_get(&engine->batch_pool,
 						   PAGE_ALIGN(batch_len));
 	if (IS_ERR(shadow_batch_obj))
-		return shadow_batch_obj;
+		return ERR_CAST(shadow_batch_obj);
 
 	ret = intel_engine_cmd_parser(engine,
 				      batch_obj,
@@ -1293,14 +1283,12 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 	i915_gem_object_get(shadow_batch_obj);
 	list_add_tail(&vma->exec_list, &eb->vmas);
 
-	shadow_batch_obj->base.pending_read_domains = I915_GEM_DOMAIN_COMMAND;
-
-	return shadow_batch_obj;
+	return vma;
 
 err:
 	i915_gem_object_unpin_pages(shadow_batch_obj);
 	if (ret == -EACCES) /* unhandled chained batch */
-		return batch_obj;
+		return NULL;
 	else
 		return ERR_PTR(ret);
 }
@@ -1381,11 +1369,11 @@ execbuf_submit(struct i915_execbuffer_params *params,
 	}
 
 	exec_len   = args->batch_len;
-	exec_start = params->batch_obj_vm_offset +
+	exec_start = params->batch->node.start +
 		     params->args_batch_start_offset;
 
 	if (exec_len == 0)
-		exec_len = params->batch_obj->base.size;
+		exec_len = params->batch->size;
 
 	ret = params->engine->emit_bb_start(params->request,
 					    exec_start, exec_len,
@@ -1489,7 +1477,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct eb_vmas *eb;
-	struct drm_i915_gem_object *batch_obj;
 	struct drm_i915_gem_exec_object2 shadow_exec_entry;
 	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
@@ -1583,7 +1570,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		goto err;
 
 	/* take note of the batch buffer before we might reorder the lists */
-	batch_obj = eb_get_batch(eb);
+	params->batch = eb_get_batch(eb);
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
 	need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
@@ -1607,7 +1594,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Set the pending read domains for the batch buffer to COMMAND */
-	if (batch_obj->base.pending_write_domain) {
+	if (params->batch->obj->base.pending_write_domain) {
 		DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
 		ret = -EINVAL;
 		goto err;
@@ -1615,26 +1602,20 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	params->args_batch_start_offset = args->batch_start_offset;
 	if (intel_engine_needs_cmd_parser(engine) && args->batch_len) {
-		struct drm_i915_gem_object *parsed_batch_obj;
-
-		parsed_batch_obj = i915_gem_execbuffer_parse(engine,
-							     &shadow_exec_entry,
-							     eb,
-							     batch_obj,
-							     args->batch_start_offset,
-							     args->batch_len,
-							     drm_is_current_master(file));
-		if (IS_ERR(parsed_batch_obj)) {
-			ret = PTR_ERR(parsed_batch_obj);
+		struct i915_vma *vma;
+
+		vma = i915_gem_execbuffer_parse(engine, &shadow_exec_entry,
+						params->batch->obj,
+						eb,
+						args->batch_start_offset,
+						args->batch_len,
+						drm_is_current_master(file));
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
 			goto err;
 		}
 
-		/*
-		 * parsed_batch_obj == batch_obj means batch not fully parsed:
-		 * Accept, but don't promote to secure.
-		 */
-
-		if (parsed_batch_obj != batch_obj) {
+		if (vma) {
 			/*
 			 * Batch parsed and accepted:
 			 *
@@ -1646,16 +1627,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 			 */
 			dispatch_flags |= I915_DISPATCH_SECURE;
 			params->args_batch_start_offset = 0;
-			batch_obj = parsed_batch_obj;
+			params->batch = vma;
 		}
 	}
 
-	batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+	params->batch->obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
 	/* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
 	 * batch" bit. Hence we need to pin secure batches into the global gtt.
 	 * hsw should have this fixed, but bdw mucks it up again. */
 	if (dispatch_flags & I915_DISPATCH_SECURE) {
+		struct drm_i915_gem_object *obj = params->batch->obj;
+
 		/*
 		 * So on first glance it looks freaky that we pin the batch here
 		 * outside of the reservation loop. But:
@@ -1666,13 +1649,12 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 *   fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		ret = i915_gem_obj_ggtt_pin(batch_obj, 0, 0);
+		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
 		if (ret)
 			goto err;
 
-		params->batch_obj_vm_offset = i915_gem_obj_ggtt_offset(batch_obj);
-	} else
-		params->batch_obj_vm_offset = i915_gem_obj_offset(batch_obj, vm);
+		params->batch = i915_gem_obj_to_ggtt(obj);
+	}
 
 	/* Allocate a request for this batch buffer nice and early. */
 	params->request = i915_gem_request_alloc(engine, ctx);
@@ -1695,12 +1677,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->file                    = file;
 	params->engine                    = engine;
 	params->dispatch_flags          = dispatch_flags;
-	params->batch_obj               = batch_obj;
 	params->ctx                     = ctx;
 
 	ret = execbuf_submit(params, args, &eb->vmas);
 err_request:
-	__i915_add_request(params->request, params->batch_obj, ret == 0);
+	__i915_add_request(params->request, params->batch->obj, ret == 0);
 
 err_batch_unpin:
 	/*
@@ -1710,8 +1691,7 @@ err_batch_unpin:
 	 * active.
 	 */
 	if (dispatch_flags & I915_DISPATCH_SECURE)
-		i915_gem_object_ggtt_unpin(batch_obj);
-
+		i915_vma_unpin(params->batch);
 err:
 	/* the request owns the ref now */
 	i915_gem_context_put(ctx);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 088b89b3ea48..84b786cf490f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3665,13 +3665,10 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		return 0;
 
 	if (vma->bound == 0 && vma->vm->allocate_va_range) {
-		/* XXX: i915_vma_pin() will fix this +- hack */
-		__i915_vma_pin(vma);
 		trace_i915_va_alloc(vma);
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
 						 vma->node.size);
-		__i915_vma_unpin(vma);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index eee22fcf5826..c63cc1b4c437 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -609,6 +609,20 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 	return true;
 }
 
+int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
+/* Flags used by pin/bind&friends. */
+#define PIN_MAPPABLE		BIT(0)
+#define PIN_NONBLOCK		BIT(1)
+#define PIN_GLOBAL		BIT(2)
+#define PIN_OFFSET_BIAS		BIT(3)
+#define PIN_USER		BIT(4)
+#define PIN_UPDATE		BIT(5)
+#define PIN_ZONE_4G		BIT(6)
+#define PIN_HIGH		BIT(7)
+#define PIN_OFFSET_FIXED	BIT(8)
+#define PIN_OFFSET_MASK		(~4095)
+
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
 	return vma->pin_count;
-- 
2.8.1

* [CI 16/26] drm/i915: Combine all i915_vma bitfields into a single set of flags
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

In preparation for some magic to speed up i915_vma_pin(), which is
among the hottest of hot paths in execbuf, refactor all the bitfields
accessed by i915_vma_pin() into a single unified set of flags.
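
A sketch of how i915_vma_pin() below consumes the unified flags (the
names come from the hunks; the comments are illustrative):

	bound = vma->flags;
	if ((bound & I915_VMA_PIN_MASK) == I915_VMA_PIN_MASK)
		return -EBUSY; /* pin count saturated */

	if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0)
		ret = i915_vma_insert(vma, size, alignment, flags); /* first bind */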

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  8 ++---
 drivers/gpu/drm/i915/i915_gem.c            | 40 ++++++++++++----------
 drivers/gpu/drm/i915/i915_gem_context.c    |  2 +-
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  5 +--
 drivers/gpu/drm/i915/i915_gem_gtt.c        | 46 ++++++++++++-------------
 drivers/gpu/drm/i915/i915_gem_gtt.h        | 55 ++++++++++++++++++------------
 drivers/gpu/drm/i915/i915_gem_shrinker.c   |  2 +-
 drivers/gpu/drm/i915/i915_gem_stolen.c     |  2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      |  4 +--
 9 files changed, 90 insertions(+), 74 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4401a2ab9a74..8d47d1bf7b85 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -125,7 +125,7 @@ static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (vma->is_ggtt && drm_mm_node_allocated(&vma->node))
+		if (i915_vma_is_ggtt(vma) && drm_mm_node_allocated(&vma->node))
 			size += vma->node.size;
 	}
 
@@ -181,9 +181,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 			continue;
 
 		seq_printf(m, " (%sgtt offset: %08llx, size: %08llx",
-			   vma->is_ggtt ? "g" : "pp",
+			   i915_vma_is_ggtt(vma) ? "g" : "pp",
 			   vma->node.start, vma->node.size);
-		if (vma->is_ggtt)
+		if (i915_vma_is_ggtt(vma))
 			seq_printf(m, ", type: %u", vma->ggtt_view.type);
 		seq_puts(m, ")");
 	}
@@ -356,7 +356,7 @@ static int per_file_stats(int id, void *ptr, void *data)
 		if (!drm_mm_node_allocated(&vma->node))
 			continue;
 
-		if (vma->is_ggtt) {
+		if (i915_vma_is_ggtt(vma)) {
 			stats->global += vma->node.size;
 		} else {
 			struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vma->vm);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 325a546ff88e..c47ffe151c82 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2861,7 +2861,8 @@ int i915_vma_unbind(struct i915_vma *vma)
 	GEM_BUG_ON(obj->bind_count == 0);
 	GEM_BUG_ON(!obj->pages);
 
-	if (vma->is_ggtt && vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
+	if (i915_vma_is_ggtt(vma) &&
+	    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 		i915_gem_object_finish_gtt(obj);
 
 		/* release the fence reg _after_ flushing */
@@ -2876,12 +2877,12 @@ int i915_vma_unbind(struct i915_vma *vma)
 		trace_i915_vma_unbind(vma);
 		vma->vm->unbind_vma(vma);
 	}
-	vma->bound = 0;
+	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 
 	drm_mm_remove_node(&vma->node);
 	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);
 
-	if (vma->is_ggtt) {
+	if (i915_vma_is_ggtt(vma)) {
 		if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) {
 			obj->map_and_fenceable = false;
 		} else if (vma->ggtt_view.pages) {
@@ -2904,7 +2905,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 	i915_gem_object_unpin_pages(obj);
 
 destroy:
-	if (unlikely(vma->closed))
+	if (unlikely(i915_vma_is_closed(vma)))
 		i915_vma_destroy(vma);
 
 	return 0;
@@ -2985,7 +2986,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	u64 min_alignment;
 	int ret;
 
-	GEM_BUG_ON(vma->bound);
+	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
 	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
 
 	size = max(size, vma->size);
@@ -3707,13 +3708,14 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 int
 i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 {
-	unsigned int bound = vma->bound;
+	unsigned int bound;
 	int ret;
 
 	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
-	GEM_BUG_ON((flags & PIN_GLOBAL) && !vma->is_ggtt);
+	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
-	if (WARN_ON(i915_vma_pin_count(vma) == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+	bound = vma->flags;
+	if (WARN_ON((bound & I915_VMA_PIN_MASK) == I915_VMA_PIN_MASK))
 		return -EBUSY;
 
 	/* Pin early to prevent the shrinker/eviction logic from destroying
@@ -3721,7 +3723,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	 */
 	__i915_vma_pin(vma);
 
-	if (!bound) {
+	if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
 			goto err;
@@ -3731,7 +3733,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
 	if (ret)
 		goto err;
 
-	if ((bound ^ vma->bound) & GLOBAL_BIND)
+	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
 		__i915_vma_set_map_and_fenceable(vma);
 
 	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
@@ -4032,9 +4034,9 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	 * unbound now.
 	 */
 	list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link) {
-		GEM_BUG_ON(!vma->is_ggtt);
+		GEM_BUG_ON(!i915_vma_is_ggtt(vma));
 		GEM_BUG_ON(i915_vma_is_active(vma));
-		vma->pin_count = 0;
+		vma->flags &= ~I915_VMA_PIN_MASK;
 		i915_vma_close(vma);
 	}
 	GEM_BUG_ON(obj->bind_count);
@@ -4094,7 +4096,8 @@ struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
 	GEM_BUG_ON(!view);
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
+		if (i915_vma_is_ggtt(vma) &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma;
 	return NULL;
 }
@@ -4583,7 +4586,7 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 	WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm)
@@ -4601,7 +4604,8 @@ u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
+		if (i915_vma_is_ggtt(vma) &&
+		    i915_ggtt_view_equal(&vma->ggtt_view, view))
 			return vma->node.start;
 
 	WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -4614,7 +4618,7 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
 			continue;
 		if (vma->vm == vm && drm_mm_node_allocated(&vma->node))
@@ -4630,7 +4634,7 @@ bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &o->vma_list, obj_link)
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
 		    drm_mm_node_allocated(&vma->node))
 			return true;
@@ -4645,7 +4649,7 @@ unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 	GEM_BUG_ON(list_empty(&o->vma_list));
 
 	list_for_each_entry(vma, &o->vma_list, obj_link) {
-		if (vma->is_ggtt &&
+		if (i915_vma_is_ggtt(vma) &&
 		    vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
 			return vma->node.size;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index eff6d3953ecd..dc7c0ae73b62 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -219,7 +219,7 @@ static void i915_ppgtt_close(struct i915_address_space *vm)
 		struct i915_vma *vma, *vn;
 
 		list_for_each_entry_safe(vma, vn, *phase, vm_link)
-			if (!vma->closed)
+			if (!i915_vma_is_closed(vma))
 				i915_vma_close(vma);
 	}
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index d95a0432678f..a0c95ab15d57 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -717,7 +717,7 @@ need_reloc_mappable(struct i915_vma *vma)
 	if (entry->relocation_count == 0)
 		return false;
 
-	if (!vma->is_ggtt)
+	if (!i915_vma_is_ggtt(vma))
 		return false;
 
 	/* See also use_cpu_reloc() */
@@ -736,7 +736,8 @@ eb_vma_misplaced(struct i915_vma *vma)
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	struct drm_i915_gem_object *obj = vma->obj;
 
-	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP && !vma->is_ggtt);
+	WARN_ON(entry->flags & __EXEC_OBJECT_NEEDS_MAP &&
+		!i915_vma_is_ggtt(vma));
 
 	if (entry->alignment &&
 	    vma->node.start & (entry->alignment - 1))
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 84b786cf490f..af012b1cd1ec 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2652,7 +2652,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
 	 * upgrade to both bound if we bind either to avoid double-binding.
 	 */
-	vma->bound |= GLOBAL_BIND | LOCAL_BIND;
+	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
 
 	return 0;
 }
@@ -2674,14 +2674,14 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 		pte_flags |= PTE_READ_ONLY;
 
 
-	if (flags & GLOBAL_BIND) {
+	if (flags & I915_VMA_GLOBAL_BIND) {
 		vma->vm->insert_entries(vma->vm,
 					vma->ggtt_view.pages,
 					vma->node.start,
 					cache_level, pte_flags);
 	}
 
-	if (flags & LOCAL_BIND) {
+	if (flags & I915_VMA_LOCAL_BIND) {
 		struct i915_hw_ppgtt *appgtt =
 			to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
 		appgtt->base.insert_entries(&appgtt->base,
@@ -2698,12 +2698,12 @@ static void ggtt_unbind_vma(struct i915_vma *vma)
 	struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
 	const u64 size = min(vma->size, vma->node.size);
 
-	if (vma->bound & GLOBAL_BIND)
+	if (vma->flags & I915_VMA_GLOBAL_BIND)
 		vma->vm->clear_range(vma->vm,
 				     vma->node.start, size,
 				     true);
 
-	if (vma->bound & LOCAL_BIND && appgtt)
+	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
 		appgtt->base.clear_range(&appgtt->base,
 					 vma->node.start, size,
 					 true);
@@ -3334,7 +3334,7 @@ i915_vma_retire(struct i915_gem_active *active,
 		return;
 
 	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
-	if (unlikely(vma->closed && !i915_vma_is_pinned(vma)))
+	if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
 		WARN_ON(i915_vma_unbind(vma));
 }
 
@@ -3342,10 +3342,10 @@ void i915_vma_destroy(struct i915_vma *vma)
 {
 	GEM_BUG_ON(vma->node.allocated);
 	GEM_BUG_ON(i915_vma_is_active(vma));
-	GEM_BUG_ON(!vma->closed);
+	GEM_BUG_ON(!i915_vma_is_closed(vma));
 
 	list_del(&vma->vm_link);
-	if (!vma->is_ggtt)
+	if (!i915_vma_is_ggtt(vma))
 		i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
 
 	kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
@@ -3353,8 +3353,8 @@ void i915_vma_destroy(struct i915_vma *vma)
 
 void i915_vma_close(struct i915_vma *vma)
 {
-	GEM_BUG_ON(vma->closed);
-	vma->closed = true;
+	GEM_BUG_ON(i915_vma_is_closed(vma));
+	vma->flags |= I915_VMA_CLOSED;
 
 	list_del_init(&vma->obj_link);
 	if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
@@ -3386,9 +3386,9 @@ __i915_gem_vma_create(struct drm_i915_gem_object *obj,
 	vma->vm = vm;
 	vma->obj = obj;
 	vma->size = obj->base.size;
-	vma->is_ggtt = i915_is_ggtt(vm);
 
 	if (i915_is_ggtt(vm)) {
+		vma->flags |= I915_VMA_GGTT;
 		vma->ggtt_view = *view;
 		if (view->type == I915_GGTT_VIEW_PARTIAL) {
 			vma->size = view->params.partial.size;
@@ -3433,7 +3433,7 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, &ggtt->base, view);
 
-	GEM_BUG_ON(vma->closed);
+	GEM_BUG_ON(i915_vma_is_closed(vma));
 	return vma;
 
 }
@@ -3644,27 +3644,28 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 		  u32 flags)
 {
-	int ret;
 	u32 bind_flags;
+	u32 vma_flags;
+	int ret;
 
 	if (WARN_ON(flags == 0))
 		return -EINVAL;
 
 	bind_flags = 0;
 	if (flags & PIN_GLOBAL)
-		bind_flags |= GLOBAL_BIND;
+		bind_flags |= I915_VMA_GLOBAL_BIND;
 	if (flags & PIN_USER)
-		bind_flags |= LOCAL_BIND;
+		bind_flags |= I915_VMA_LOCAL_BIND;
 
+	vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
 	if (flags & PIN_UPDATE)
-		bind_flags |= vma->bound;
+		bind_flags |= vma_flags;
 	else
-		bind_flags &= ~vma->bound;
-
+		bind_flags &= ~vma_flags;
 	if (bind_flags == 0)
 		return 0;
 
-	if (vma->bound == 0 && vma->vm->allocate_va_range) {
+	if (vma_flags == 0 && vma->vm->allocate_va_range) {
 		trace_i915_va_alloc(vma);
 		ret = vma->vm->allocate_va_range(vma->vm,
 						 vma->node.start,
@@ -3677,8 +3678,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 	if (ret)
 		return ret;
 
-	vma->bound |= bind_flags;
-
+	vma->flags |= bind_flags;
 	return 0;
 }
 
@@ -3690,8 +3690,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 	if (WARN_ON(!vma->obj->map_and_fenceable))
 		return IO_ERR_PTR(-ENODEV);
 
-	GEM_BUG_ON(!vma->is_ggtt);
-	GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);
+	GEM_BUG_ON(!i915_vma_is_ggtt(vma));
+	GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
 
 	ptr = vma->iomap;
 	if (ptr == NULL) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index c63cc1b4c437..5134c0433abd 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -182,15 +182,28 @@ struct i915_vma {
 	void __iomem *iomap;
 	u64 size;
 
-	unsigned int active;
-	struct i915_gem_active last_read[I915_NUM_ENGINES];
+	unsigned int flags;
+	/**
+	 * How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, execbuffer
+	 * (objects are not allowed multiple times for the same batchbuffer),
+	 * and the framebuffer code. When switching/pageflipping, the
+	 * framebuffer code has at most two buffers pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits.
+	 */
+#define I915_VMA_PIN_MASK 0xf
 
 	/** Flags and address space this VMA is bound to */
-#define GLOBAL_BIND	(1<<0)
-#define LOCAL_BIND	(1<<1)
-	unsigned int bound : 4;
-	bool is_ggtt : 1;
-	bool closed : 1;
+#define I915_VMA_GLOBAL_BIND	BIT(5)
+#define I915_VMA_LOCAL_BIND	BIT(6)
+
+#define I915_VMA_GGTT	BIT(7)
+#define I915_VMA_CLOSED BIT(8)
+
+	unsigned int active;
+	struct i915_gem_active last_read[I915_NUM_ENGINES];
 
 	/**
 	 * Support different GGTT views into the same object.
@@ -215,20 +228,18 @@ struct i915_vma {
 	struct hlist_node exec_node;
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
-
-	/**
-	 * How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, execbuffer
-	 * (objects are not allowed multiple times for the same batchbuffer),
-	 * and the framebuffer code. When switching/pageflipping, the
-	 * framebuffer code has at most two buffers pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
+static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_GGTT;
+}
+
+static inline bool i915_vma_is_closed(const struct i915_vma *vma)
+{
+	return vma->flags & I915_VMA_CLOSED;
+}
+
 static inline unsigned int i915_vma_get_active(const struct i915_vma *vma)
 {
 	return vma->active;
@@ -625,7 +636,7 @@ i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
 
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
-	return vma->pin_count;
+	return vma->flags & I915_VMA_PIN_MASK;
 }
 
 static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
@@ -635,14 +646,14 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
 
 static inline void __i915_vma_pin(struct i915_vma *vma)
 {
-	vma->pin_count++;
+	vma->flags++;
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
 {
 	GEM_BUG_ON(!i915_vma_is_pinned(vma));
-	vma->pin_count--;
+	vma->flags--;
 }
 
 static inline void i915_vma_unpin(struct i915_vma *vma)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 64d179d4f684..b5776358c05e 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -53,7 +53,7 @@ static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 	struct i915_vma *vma;
 
 	list_for_each_entry(vma, &obj->vma_list, obj_link)
-		if (vma->pin_count)
+		if (i915_vma_is_pinned(vma))
 			return true;
 
 	return false;
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index bc91ffe614e2..13279610eeec 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -705,7 +705,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 		goto err;
 	}
 
-	vma->bound |= GLOBAL_BIND;
+	vma->flags |= I915_VMA_GLOBAL_BIND;
 	__i915_vma_set_map_and_fenceable(vma);
 	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);
 	obj->bind_count++;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d94eb907a23a..cc28ad429dd8 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -669,14 +669,14 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 	if (i915_is_ggtt(vm))
 		vma = i915_gem_obj_to_ggtt(src);
 	use_ggtt = (src->cache_level == I915_CACHE_NONE &&
-		   vma && (vma->bound & GLOBAL_BIND) &&
+		   vma && (vma->flags & I915_VMA_GLOBAL_BIND) &&
 		   reloc_offset + num_pages * PAGE_SIZE <= ggtt->mappable_end);
 
 	/* Cannot access stolen address directly, try to use the aperture */
 	if (src->stolen) {
 		use_ggtt = true;
 
-		if (!(vma && vma->bound & GLOBAL_BIND))
+		if (!(vma && vma->flags & I915_VMA_GLOBAL_BIND))
 			goto unwind;
 
 		reloc_offset = i915_gem_obj_ggtt_offset(src);
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 17/26] drm/i915: Make i915_vma_pin() small and inline
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (14 preceding siblings ...)
  2016-08-04 15:32 ` [CI 16/26] drm/i915: Combine all i915_vma bitfields into a single set of flags Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 15:32 ` [CI 18/26] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin() Chris Wilson
                   ` (9 subsequent siblings)
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

Not only is i915_vma_pin() called for every single object on every single
execbuf, it is usually a simple increment as the VMA is already bound for
execution by the GPU. Rearrange the tests for an unbound VMA and for
pin_count overflow so that the increment-and-test is cheap and compact
enough to inline the operation into execbuf. The trick used is
to note that we can check for an overflow bit (keeping space available
for it inside the flags) at the same time as checking the binding bits.
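
To spell the trick out: PIN_MBZ, PIN_GLOBAL and PIN_USER are defined to
occupy the same bits as I915_VMA_PIN_OVERFLOW, I915_VMA_GLOBAL_BIND and
I915_VMA_LOCAL_BIND, so a single XOR plus mask tests both bindings and
the pin-count overflow at once. A worked example (values taken from the
i915_gem_gtt.h hunk below):

	/* vma bound locally and pinned twice; caller asks for PIN_USER:
	 *
	 *   vma->flags == I915_VMA_LOCAL_BIND | 2
	 *   ++vma->flags -> I915_VMA_LOCAL_BIND | 3
	 *   (vma->flags ^ flags) & I915_VMA_BIND_MASK == 0
	 *     (since PIN_USER == I915_VMA_LOCAL_BIND and PIN_MBZ is
	 *      never set in the request)
	 *
	 * so the hot path costs one increment and one test. If a
	 * binding is missing, or the increment carries into
	 * I915_VMA_PIN_OVERFLOW, a bit within I915_VMA_BIND_MASK
	 * differs and we fall back to __i915_vma_do_pin().
	 */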

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c     | 18 +++++--------
 drivers/gpu/drm/i915/i915_gem_gtt.h | 53 ++++++++++++++++++++++++++-----------
 2 files changed, 44 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c47ffe151c82..6876af6914f9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3705,23 +3705,19 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	obj->map_and_fenceable = mappable && fenceable;
 }
 
-int
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+int __i915_vma_do_pin(struct i915_vma *vma,
+		      u64 size, u64 alignment, u64 flags)
 {
-	unsigned int bound;
+	unsigned int bound = vma->flags;
 	int ret;
 
 	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
 	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
 
-	bound = vma->flags;
-	if (WARN_ON((bound & I915_VMA_PIN_MASK) == I915_VMA_PIN_MASK))
-		return -EBUSY;
-
-	/* Pin early to prevent the shrinker/eviction logic from destroying
-	 * our vma as we insert and bind.
-	 */
-	__i915_vma_pin(vma);
+	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
+		ret = -EBUSY;
+		goto err;
+	}
 
 	if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index 5134c0433abd..cc56206a1600 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -194,13 +194,15 @@ struct i915_vma {
 	 * bits with absolutely no headroom. So use 4 bits.
 	 */
 #define I915_VMA_PIN_MASK 0xf
+#define I915_VMA_PIN_OVERFLOW	BIT(5)
 
 	/** Flags and address space this VMA is bound to */
-#define I915_VMA_GLOBAL_BIND	BIT(5)
-#define I915_VMA_LOCAL_BIND	BIT(6)
+#define I915_VMA_GLOBAL_BIND	BIT(6)
+#define I915_VMA_LOCAL_BIND	BIT(7)
+#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW)
 
-#define I915_VMA_GGTT	BIT(7)
-#define I915_VMA_CLOSED BIT(8)
+#define I915_VMA_GGTT	BIT(8)
+#define I915_VMA_CLOSED BIT(9)
 
 	unsigned int active;
 	struct i915_gem_active last_read[I915_NUM_ENGINES];
@@ -620,20 +622,39 @@ i915_ggtt_view_equal(const struct i915_ggtt_view *a,
 	return true;
 }
 
-int __must_check
-i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags);
 /* Flags used by pin/bind&friends. */
-#define PIN_MAPPABLE		BIT(0)
-#define PIN_NONBLOCK		BIT(1)
-#define PIN_GLOBAL		BIT(2)
-#define PIN_OFFSET_BIAS		BIT(3)
-#define PIN_USER		BIT(4)
-#define PIN_UPDATE		BIT(5)
-#define PIN_ZONE_4G		BIT(6)
-#define PIN_HIGH		BIT(7)
-#define PIN_OFFSET_FIXED	BIT(8)
+#define PIN_NONBLOCK		BIT(0)
+#define PIN_MAPPABLE		BIT(1)
+#define PIN_ZONE_4G		BIT(2)
+
+#define PIN_MBZ			BIT(5) /* I915_VMA_PIN_OVERFLOW */
+#define PIN_GLOBAL		BIT(6) /* I915_VMA_GLOBAL_BIND */
+#define PIN_USER		BIT(7) /* I915_VMA_LOCAL_BIND */
+#define PIN_UPDATE		BIT(8)
+
+#define PIN_HIGH		BIT(9)
+#define PIN_OFFSET_BIAS		BIT(10)
+#define PIN_OFFSET_FIXED	BIT(11)
 #define PIN_OFFSET_MASK		(~4095)
 
+int __i915_vma_do_pin(struct i915_vma *vma,
+		      u64 size, u64 alignment, u64 flags);
+static inline int __must_check
+i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
+{
+	BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW);
+	BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND);
+	BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND);
+
+	/* Pin early to prevent the shrinker/eviction logic from destroying
+	 * our vma as we insert and bind.
+	 */
+	if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0))
+		return 0;
+
+	return __i915_vma_do_pin(vma, size, alignment, flags);
+}
+
 static inline int i915_vma_pin_count(const struct i915_vma *vma)
 {
 	return vma->flags & I915_VMA_PIN_MASK;
@@ -647,7 +668,7 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma)
 static inline void __i915_vma_pin(struct i915_vma *vma)
 {
 	vma->flags++;
-	GEM_BUG_ON(!i915_vma_is_pinned(vma));
+	GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW);
 }
 
 static inline void __i915_vma_unpin(struct i915_vma *vma)
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 18/26] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin()
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (15 preceding siblings ...)
  2016-08-04 15:32 ` [CI 17/26] drm/i915: Make i915_vma_pin() small and inline Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 15:32 ` [CI 19/26] drm/i915: Separate intel_frontbuffer into its own header Chris Wilson
                   ` (8 subsequent siblings)
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

Since i915_gem_obj_ggtt_pin() is an idiom-breaking curry function for
i915_gem_object_ggtt_pin(), spare us the confusion and remove it.
Removing it now simplifies the later patches that change the
i915_vma_pin() (and friends) interface.
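
For anyone grepping call sites, the mechanical translation is (sketch
only; NULL now stands in for the normal GGTT view and the 0 is the
unspecified size):

	/* before */
	ret = i915_gem_obj_ggtt_pin(obj, alignment, flags);

	/* after */
	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, alignment, flags);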

v2: Add a redundant GEM_BUG_ON(!view) to
i915_gem_obj_lookup_or_create_ggtt_vma()

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h              |  9 ---------
 drivers/gpu/drm/i915/i915_gem.c              | 17 +++++++----------
 drivers/gpu/drm/i915/i915_gem_context.c      |  5 ++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c   |  4 ++--
 drivers/gpu/drm/i915/i915_gem_gtt.c          |  2 ++
 drivers/gpu/drm/i915/i915_gem_render_state.c |  2 +-
 drivers/gpu/drm/i915/i915_guc_submission.c   |  4 ++--
 drivers/gpu/drm/i915/intel_guc_loader.c      |  2 +-
 drivers/gpu/drm/i915/intel_lrc.c             |  8 +++++---
 drivers/gpu/drm/i915/intel_overlay.c         |  3 ++-
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 15 ++++++++-------
 11 files changed, 32 insertions(+), 39 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cda8238c952f..7ba99057c317 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3289,15 +3289,6 @@ static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
 unsigned long
 i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
 
-static inline int __must_check
-i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
-		      uint32_t alignment,
-		      unsigned flags)
-{
-	return i915_gem_object_ggtt_pin(obj, &i915_ggtt_view_normal,
-					0, alignment, flags);
-}
-
 void i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 				     const struct i915_ggtt_view *view);
 static inline void
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6876af6914f9..7dcc71fde4a5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -652,7 +652,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
 	uint64_t offset;
 	int ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 	if (ret) {
 		ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
 		if (ret)
@@ -949,7 +949,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
 	if (obj->tiling_mode != I915_TILING_NONE)
 		return -EFAULT;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
+	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+				       PIN_MAPPABLE | PIN_NONBLOCK);
 	if (ret) {
 		ret = insert_mappable_node(i915, &node, PAGE_SIZE);
 		if (ret)
@@ -3719,7 +3720,7 @@ int __i915_vma_do_pin(struct i915_vma *vma,
 		goto err;
 	}
 
-	if ((bound & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)) == 0) {
+	if ((bound & I915_VMA_BIND_MASK) == 0) {
 		ret = i915_vma_insert(vma, size, alignment, flags);
 		if (ret)
 			goto err;
@@ -3750,7 +3751,8 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	BUG_ON(!view);
+	if (!view)
+		view = &i915_ggtt_view_normal;
 
 	vma = i915_gem_obj_lookup_or_create_ggtt_vma(obj, view);
 	if (IS_ERR(vma))
@@ -3782,12 +3784,7 @@ void
 i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
 				const struct i915_ggtt_view *view)
 {
-	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
-
-	WARN_ON(!i915_vma_is_pinned(vma));
-	WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
-
-	__i915_vma_unpin(vma);
+	i915_vma_unpin(i915_gem_obj_to_ggtt_view(obj, view));
 }
 
 int
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index dc7c0ae73b62..bb72af5320b0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -763,9 +763,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	ret = i915_gem_obj_ggtt_pin(to->engine[RCS].state,
-				    to->ggtt_alignment,
-				    0);
+	ret = i915_gem_object_ggtt_pin(to->engine[RCS].state, NULL, 0,
+				       to->ggtt_alignment, 0);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a0c95ab15d57..109bb9d8df4a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1270,7 +1270,7 @@ i915_gem_execbuffer_parse(struct intel_engine_cs *engine,
 	if (ret)
 		goto err;
 
-	ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 0, 0);
+	ret = i915_gem_object_ggtt_pin(shadow_batch_obj, NULL, 0, 0, 0);
 	if (ret)
 		goto err;
 
@@ -1650,7 +1650,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 		 *   fitting due to fragmentation.
 		 * So this is actually safe.
 		 */
-		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
+		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 		if (ret)
 			goto err;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index af012b1cd1ec..db97155074d3 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -3430,6 +3430,8 @@ i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
 	struct i915_ggtt *ggtt = &dev_priv->ggtt;
 	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
 
+	GEM_BUG_ON(!view);
+
 	if (!vma)
 		vma = __i915_gem_vma_create(obj, &ggtt->base, view);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 90236672ac1e..57fd767a2d79 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -191,7 +191,7 @@ int i915_gem_render_state_init(struct drm_i915_gem_request *req)
 	if (IS_ERR(so.obj))
 		return PTR_ERR(so.obj);
 
-	ret = i915_gem_obj_ggtt_pin(so.obj, 4096, 0);
+	ret = i915_gem_object_ggtt_pin(so.obj, NULL, 0, 0, 0);
 	if (ret)
 		goto err_obj;
 
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index c9105f68cc4e..03a5cef353eb 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -635,8 +635,8 @@ gem_allocate_guc_obj(struct drm_i915_private *dev_priv, u32 size)
 		return NULL;
 	}
 
-	if (i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-			PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
+	if (i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
+				     PIN_OFFSET_BIAS | GUC_WOPCM_TOP)) {
 		i915_gem_object_put(obj);
 		return NULL;
 	}
diff --git a/drivers/gpu/drm/i915/intel_guc_loader.c b/drivers/gpu/drm/i915/intel_guc_loader.c
index b883efd35e3f..3763e30cc165 100644
--- a/drivers/gpu/drm/i915/intel_guc_loader.c
+++ b/drivers/gpu/drm/i915/intel_guc_loader.c
@@ -323,7 +323,7 @@ static int guc_ucode_xfer(struct drm_i915_private *dev_priv)
 		return ret;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(guc_fw->guc_fw_obj, 0, 0);
+	ret = i915_gem_object_ggtt_pin(guc_fw->guc_fw_obj, NULL, 0, 0, 0);
 	if (ret) {
 		DRM_DEBUG_DRIVER("pin failed %d\n", ret);
 		return ret;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 64ce198005da..a07da548ff49 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -799,8 +799,9 @@ static int intel_lr_context_pin(struct i915_gem_context *ctx,
 	if (ce->pin_count++)
 		return 0;
 
-	ret = i915_gem_obj_ggtt_pin(ce->state, GEN8_LR_CONTEXT_ALIGN,
-				    PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
+	ret = i915_gem_object_ggtt_pin(ce->state, NULL,
+				       0, GEN8_LR_CONTEXT_ALIGN,
+				       PIN_OFFSET_BIAS | GUC_WOPCM_TOP);
 	if (ret)
 		goto err;
 
@@ -1205,7 +1206,8 @@ static int lrc_setup_wa_ctx_obj(struct intel_engine_cs *engine, u32 size)
 		return ret;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(engine->wa_ctx.obj, PAGE_SIZE, 0);
+	ret = i915_gem_object_ggtt_pin(engine->wa_ctx.obj, NULL,
+				       0, PAGE_SIZE, 0);
 	if (ret) {
 		DRM_DEBUG_DRIVER("pin LRC WA ctx backing obj failed: %d\n",
 				 ret);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2c598d63c794..217fefc49bf9 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1401,7 +1401,8 @@ void intel_setup_overlay(struct drm_i915_private *dev_priv)
 		}
 		overlay->flip_addr = reg_bo->phys_handle->busaddr;
 	} else {
-		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
+		ret = i915_gem_object_ggtt_pin(reg_bo, NULL,
+					       0, PAGE_SIZE, PIN_MAPPABLE);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ac2e610ab37f..a862234ccf18 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -639,7 +639,7 @@ int intel_init_pipe_control(struct intel_engine_cs *engine, int size)
 		goto err;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_HIGH);
+	ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, PIN_HIGH);
 	if (ret)
 		goto err_unref;
 
@@ -1896,7 +1896,7 @@ static int init_status_page(struct intel_engine_cs *engine)
 			 * actually map it).
 			 */
 			flags |= PIN_MAPPABLE;
-		ret = i915_gem_obj_ggtt_pin(obj, 4096, flags);
+		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 4096, flags);
 		if (ret) {
 err_unref:
 			i915_gem_object_put(obj);
@@ -1943,7 +1943,7 @@ int intel_ring_pin(struct intel_ring *ring)
 	int ret;
 
 	if (HAS_LLC(dev_priv) && !obj->stolen) {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, flags);
+		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE, flags);
 		if (ret)
 			return ret;
 
@@ -1957,8 +1957,8 @@ int intel_ring_pin(struct intel_ring *ring)
 			goto err_unpin;
 		}
 	} else {
-		ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE,
-					    flags | PIN_MAPPABLE);
+		ret = i915_gem_object_ggtt_pin(obj, NULL, 0, PAGE_SIZE,
+					       flags | PIN_MAPPABLE);
 		if (ret)
 			return ret;
 
@@ -2092,7 +2092,8 @@ static int intel_ring_context_pin(struct i915_gem_context *ctx,
 		return 0;
 
 	if (ce->state) {
-		ret = i915_gem_obj_ggtt_pin(ce->state, ctx->ggtt_alignment, 0);
+		ret = i915_gem_object_ggtt_pin(ce->state, NULL, 0,
+					       ctx->ggtt_alignment, 0);
 		if (ret)
 			goto error;
 	}
@@ -2649,7 +2650,7 @@ static void intel_ring_init_semaphores(struct drm_i915_private *dev_priv,
 			i915.semaphores = 0;
 		} else {
 			i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
-			ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_NONBLOCK);
+			ret = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
 			if (ret != 0) {
 				i915_gem_object_put(obj);
 				DRM_ERROR("Failed to pin semaphore bo. Disabling semaphores\n");
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 19/26] drm/i915: Separate intel_frontbuffer into its own header
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (16 preceding siblings ...)
  2016-08-04 15:32 ` [CI 18/26] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin() Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 15:32 ` [CI 20/26] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
                   ` (7 subsequent siblings)
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

In preparation for adding inline functions to the intel_frontbuffer
section, we first split the header into its own file so that we can
integrate it more easily with kerneldoc.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 Documentation/gpu/i915.rst                 |  3 +++
 drivers/gpu/drm/i915/i915_gem.c            |  1 +
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  1 +
 drivers/gpu/drm/i915/intel_display.c       |  1 +
 drivers/gpu/drm/i915/intel_drv.h           | 11 --------
 drivers/gpu/drm/i915/intel_fbdev.c         |  1 +
 drivers/gpu/drm/i915/intel_frontbuffer.c   |  1 +
 drivers/gpu/drm/i915/intel_frontbuffer.h   | 42 ++++++++++++++++++++++++++++++
 drivers/gpu/drm/i915/intel_overlay.c       |  1 +
 drivers/gpu/drm/i915/intel_sprite.c        |  1 +
 10 files changed, 52 insertions(+), 11 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/intel_frontbuffer.h

diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index 2fe5952e90f1..87aaffc22920 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -70,6 +70,9 @@ Frontbuffer Tracking
 .. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
    :doc: frontbuffer tracking
 
+.. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.h
+   :internal:
+
 .. kernel-doc:: drivers/gpu/drm/i915/intel_frontbuffer.c
    :internal:
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7dcc71fde4a5..7396c99aa60a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -33,6 +33,7 @@
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include "intel_mocs.h"
 #include <linux/reservation.h>
 #include <linux/shmem_fs.h>
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 109bb9d8df4a..e8e194fa2e65 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -31,6 +31,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <linux/dma_remapping.h>
 #include <linux/uaccess.h>
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 483ddfe957f4..229b1c471df3 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -34,6 +34,7 @@
 #include <drm/drm_edid.h>
 #include <drm/drmP.h>
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 #include "i915_gem_dmabuf.h"
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ab873669415f..127eb651925f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1135,21 +1135,10 @@ void intel_ddi_clock_get(struct intel_encoder *encoder,
 void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state);
 uint32_t ddi_signal_levels(struct intel_dp *intel_dp);
 
-/* intel_frontbuffer.c */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin);
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
-				    unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
-				     unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_device *dev,
-			    unsigned frontbuffer_bits);
 unsigned int intel_fb_align_height(struct drm_device *dev,
 				   unsigned int height,
 				   uint32_t pixel_format,
 				   uint64_t fb_format_modifier);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
-			enum fb_op_origin origin);
 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
 			      uint64_t fb_modifier, uint32_t pixel_format);
 
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 6344999d3d6e..0436b4869d57 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -42,6 +42,7 @@
 #include <drm/drm_crtc.h>
 #include <drm/drm_fb_helper.h>
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index ac85357010b4..b83a70016b12 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -63,6 +63,7 @@
 #include <drm/drmP.h>
 
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include "i915_drv.h"
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
new file mode 100644
index 000000000000..3d0061454bd5
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2014-2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __INTEL_FRONTBUFFER_H__
+#define __INTEL_FRONTBUFFER_H__
+
+struct drm_device;
+struct drm_i915_private;
+struct drm_i915_gem_object;
+
+void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			     enum fb_op_origin origin);
+void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+				    unsigned frontbuffer_bits);
+void intel_frontbuffer_flip_complete(struct drm_device *dev,
+				     unsigned frontbuffer_bits);
+void intel_frontbuffer_flip(struct drm_device *dev,
+			    unsigned frontbuffer_bits);
+void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
+			enum fb_op_origin origin);
+
+#endif /* __INTEL_FRONTBUFFER_H__ */
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 217fefc49bf9..fd891fe16427 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -30,6 +30,7 @@
 #include "i915_drv.h"
 #include "i915_reg.h"
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 
 /* Limits for overlay size. According to intel doc, the real limits are:
  * Y width: 4095, UV width (planar): 2047, Y height: 2047,
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 7c08e4f29032..c47b74d9cfb1 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -36,6 +36,7 @@
 #include <drm/drm_atomic.h>
 #include <drm/drm_plane_helper.h>
 #include "intel_drv.h"
+#include "intel_frontbuffer.h"
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
 
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 20/26] drm/i915: Make fb_tracking.lock a spinlock
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (17 preceding siblings ...)
  2016-08-04 15:32 ` [CI 19/26] drm/i915: Separate intel_frontbuffer into its own header Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 15:32 ` [CI 21/26] drm/i915: Use atomics to manipulate obj->frontbuffer_bits Chris Wilson
                   ` (6 subsequent siblings)
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

We need only a very lightweight mechanism here, as the lock merely
co-ordinates updates to a bitfield.
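
For illustration, every critical section under this lock has the shape
below (a few bit operations, never sleeping, never nesting), for which
a spinlock is ample:

	spin_lock(&dev_priv->fb_tracking.lock);
	dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
	dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
	spin_unlock(&dev_priv->fb_tracking.lock);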

v2: Move the cheap unlikely tests into the caller
v3: Move the kerneldoc into the header (now separated out into
intel_frontbuffer.h for better kerneldoc and readability)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_drv.h          |  2 +-
 drivers/gpu/drm/i915/i915_gem.c          |  2 +-
 drivers/gpu/drm/i915/intel_frontbuffer.c | 73 ++++++++++----------------------
 drivers/gpu/drm/i915/intel_frontbuffer.h | 50 ++++++++++++++++++++--
 4 files changed, 71 insertions(+), 56 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7ba99057c317..b26f5b1f466b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1669,7 +1669,7 @@ struct intel_pipe_crc {
 };
 
 struct i915_frontbuffer_tracking {
-	struct mutex lock;
+	spinlock_t lock;
 
 	/*
 	 * Tracking bits for delayed frontbuffer flushing due to gpu activity or
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7396c99aa60a..1fc21871a23c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4455,7 +4455,7 @@ i915_gem_load_init(struct drm_device *dev)
 
 	dev_priv->mm.interruptible = true;
 
-	mutex_init(&dev_priv->fb_tracking.lock);
+	spin_lock_init(&dev_priv->fb_tracking.lock);
 }
 
 void i915_gem_load_cleanup(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index b83a70016b12..f15486aee7f8 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -66,35 +66,19 @@
 #include "intel_frontbuffer.h"
 #include "i915_drv.h"
 
-/**
- * intel_fb_obj_invalidate - invalidate frontbuffer object
- * @obj: GEM object to invalidate
- * @origin: which operation caused the invalidation
- *
- * This function gets called every time rendering on the given object starts and
- * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
- * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
- * until the rendering completes or a flip on this frontbuffer plane is
- * scheduled.
- */
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin)
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	if (origin == ORIGIN_CS) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits
-			|= obj->frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits
-			&= ~obj->frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
+		dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
 	intel_psr_invalidate(dev, obj->frontbuffer_bits);
@@ -121,9 +105,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
 	/* Delay flushing when rings are still busy.*/
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	if (!frontbuffer_bits)
 		return;
@@ -133,18 +117,9 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
-/**
- * intel_fb_obj_flush - flush frontbuffer object
- * @obj: GEM object to flush
- * @retire: set when retiring asynchronous rendering
- * @origin: which operation caused the flush
- *
- * This function gets called every time rendering on the given object has
- * completed and frontbuffer caching can be started again. If @retire is true
- * then any delayed flushes will be unblocked.
- */
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
-			bool retire, enum fb_op_origin origin)
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -152,21 +127,18 @@ void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 
 	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
 
-	if (!obj->frontbuffer_bits)
-		return;
-
 	frontbuffer_bits = obj->frontbuffer_bits;
 
 	if (retire) {
-		mutex_lock(&dev_priv->fb_tracking.lock);
+		spin_lock(&dev_priv->fb_tracking.lock);
 		/* Filter out new bits since rendering started. */
 		frontbuffer_bits &= dev_priv->fb_tracking.busy_bits;
-
 		dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-		mutex_unlock(&dev_priv->fb_tracking.lock);
+		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
 }
 
 /**
@@ -186,11 +158,11 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_psr_single_frame_update(dev, frontbuffer_bits);
 }
@@ -210,13 +182,14 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
 	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
 	dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+	if (frontbuffer_bits)
+		intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
@@ -235,10 +208,10 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	mutex_lock(&dev_priv->fb_tracking.lock);
+	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
-	mutex_unlock(&dev_priv->fb_tracking.lock);
+	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
 }
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 3d0061454bd5..60a0ec179108 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -28,15 +28,57 @@ struct drm_device;
 struct drm_i915_private;
 struct drm_i915_gem_object;
 
-void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			     enum fb_op_origin origin);
 void intel_frontbuffer_flip_prepare(struct drm_device *dev,
 				    unsigned frontbuffer_bits);
 void intel_frontbuffer_flip_complete(struct drm_device *dev,
 				     unsigned frontbuffer_bits);
 void intel_frontbuffer_flip(struct drm_device *dev,
 			    unsigned frontbuffer_bits);
-void intel_fb_obj_flush(struct drm_i915_gem_object *obj, bool retire,
-			enum fb_op_origin origin);
+
+void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+			       enum fb_op_origin origin);
+void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+			  bool retire,
+			  enum fb_op_origin origin);
+
+/**
+ * intel_fb_obj_invalidate - invalidate frontbuffer object
+ * @obj: GEM object to invalidate
+ * @origin: which operation caused the invalidation
+ *
+ * This function gets called every time rendering on the given object starts and
+ * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
+ * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
+ * until the rendering completes or a flip on this frontbuffer plane is
+ * scheduled.
+ */
+static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
+					   enum fb_op_origin origin)
+{
+	if (!obj->frontbuffer_bits)
+		return;
+
+	__intel_fb_obj_invalidate(obj, origin);
+}
+
+/**
+ * intel_fb_obj_flush - flush frontbuffer object
+ * @obj: GEM object to flush
+ * @retire: set when retiring asynchronous rendering
+ * @origin: which operation caused the flush
+ *
+ * This function gets called every time rendering on the given object has
+ * completed and frontbuffer caching can be started again. If @retire is true
+ * then any delayed flushes will be unblocked.
+ */
+static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
+				      bool retire,
+				      enum fb_op_origin origin)
+{
+	if (!obj->frontbuffer_bits)
+		return;
+
+	__intel_fb_obj_flush(obj, retire, origin);
+}
 
 #endif /* __INTEL_FRONTBUFFER_H__ */
-- 
2.8.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 21/26] drm/i915: Use atomics to manipulate obj->frontbuffer_bits
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (18 preceding siblings ...)
  2016-08-04 15:32 ` [CI 20/26] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 15:32 ` [CI 22/26] drm/i915: Use dev_priv consistently through the intel_frontbuffer interface Chris Wilson
                   ` (5 subsequent siblings)
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

The individual bits inside obj->frontbuffer_bits are protected by each
plane->mutex, but the whole bitfield may be accessed by multiple KMS
operations simultaneously, and so the read-modify-write cycles need to
be atomic. However, updating the single field does not need to be done
under the struct_mutex, which is one more step towards its removal as
the de facto BKL.
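
For illustration, the lost-update hazard that the atomics close
(a sketch, not taken from a real trace):

	/*
	 * CPU A (plane 1)                  CPU B (plane 2)
	 * old = obj->frontbuffer_bits;
	 *                                  old = obj->frontbuffer_bits;
	 * obj->frontbuffer_bits = old | A;
	 *                                  obj->frontbuffer_bits = old | B;
	 *
	 * Bit A is lost. atomic_or()/atomic_andnot() make each RMW
	 * indivisible without reintroducing a lock.
	 */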

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c      |  6 ++++--
 drivers/gpu/drm/i915/i915_drv.h          |  4 +---
 drivers/gpu/drm/i915/i915_gem.c          | 21 ++++++++++++++-------
 drivers/gpu/drm/i915/intel_display.c     | 18 ++++++------------
 drivers/gpu/drm/i915/intel_frontbuffer.c | 23 +++++++++--------------
 drivers/gpu/drm/i915/intel_frontbuffer.h | 20 ++++++++++++++------
 6 files changed, 48 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 8d47d1bf7b85..9796b07bdb0d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -138,6 +138,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 	struct intel_engine_cs *engine;
 	struct i915_vma *vma;
+	unsigned int frontbuffer_bits;
 	int pin_count = 0;
 	enum intel_engine_id id;
 
@@ -204,8 +205,9 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 	if (engine)
 		seq_printf(m, " (%s)", engine->name);
 
-	if (obj->frontbuffer_bits)
-		seq_printf(m, " (frontbuffer: 0x%03x)", obj->frontbuffer_bits);
+	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (frontbuffer_bits)
+		seq_printf(m, " (frontbuffer: 0x%03x)", frontbuffer_bits);
 }
 
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b26f5b1f466b..3de75e82ca76 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2127,8 +2127,6 @@ struct drm_i915_gem_object_ops {
  */
 #define INTEL_MAX_SPRITE_BITS_PER_PIPE 5
 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
-#define INTEL_FRONTBUFFER_BITS \
-	(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES)
 #define INTEL_FRONTBUFFER_PRIMARY(pipe) \
 	(1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)))
 #define INTEL_FRONTBUFFER_CURSOR(pipe) \
@@ -2216,7 +2214,7 @@ struct drm_i915_gem_object {
 	unsigned int cache_level:3;
 	unsigned int cache_dirty:1;
 
-	unsigned int frontbuffer_bits:INTEL_FRONTBUFFER_BITS;
+	atomic_t frontbuffer_bits;
 
 	unsigned int has_wc_mmap;
 	/** Count of VMA actually bound by this object */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 1fc21871a23c..3c0531ba8f60 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4040,7 +4040,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	if (obj->stolen)
 		i915_gem_object_unpin_pages(obj);
 
-	WARN_ON(obj->frontbuffer_bits);
+	WARN_ON(atomic_read(&obj->frontbuffer_bits));
 
 	if (obj->pages && obj->madv == I915_MADV_WILLNEED &&
 	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES &&
@@ -4557,16 +4557,23 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 		       struct drm_i915_gem_object *new,
 		       unsigned frontbuffer_bits)
 {
+	/* Control of individual bits within the mask are guarded by
+	 * the owning plane->mutex, i.e. we can never see concurrent
+	 * manipulation of individual bits. But since the bitfield as a whole
+	 * is updated using RMW, we need to use atomics in order to update
+	 * the bits.
+	 */
+	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
+		     sizeof(atomic_t) * BITS_PER_BYTE);
+
 	if (old) {
-		WARN_ON(!mutex_is_locked(&old->base.dev->struct_mutex));
-		WARN_ON(!(old->frontbuffer_bits & frontbuffer_bits));
-		old->frontbuffer_bits &= ~frontbuffer_bits;
+		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
+		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
 	}
 
 	if (new) {
-		WARN_ON(!mutex_is_locked(&new->base.dev->struct_mutex));
-		WARN_ON(new->frontbuffer_bits & frontbuffer_bits);
-		new->frontbuffer_bits |= frontbuffer_bits;
+		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
+		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
 	}
 }
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 229b1c471df3..5bc82064b219 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2601,7 +2601,8 @@ valid_fb:
 	primary->fb = primary->state->fb = fb;
 	primary->crtc = primary->state->crtc = &intel_crtc->base;
 	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
-	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
+	atomic_or(to_intel_plane(primary)->frontbuffer_bit,
+		  &obj->frontbuffer_bits);
 }
 
 static void i9xx_update_primary_plane(struct drm_plane *primary,
@@ -13810,19 +13811,12 @@ static void intel_atomic_track_fbs(struct drm_atomic_state *state)
 {
 	struct drm_plane_state *old_plane_state;
 	struct drm_plane *plane;
-	struct drm_i915_gem_object *obj, *old_obj;
-	struct intel_plane *intel_plane;
 	int i;
 
-	mutex_lock(&state->dev->struct_mutex);
-	for_each_plane_in_state(state, plane, old_plane_state, i) {
-		obj = intel_fb_obj(plane->state->fb);
-		old_obj = intel_fb_obj(old_plane_state->fb);
-		intel_plane = to_intel_plane(plane);
-
-		i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
-	}
-	mutex_unlock(&state->dev->struct_mutex);
+	for_each_plane_in_state(state, plane, old_plane_state, i)
+		i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
+				  intel_fb_obj(plane->state->fb),
+				  to_intel_plane(plane)->frontbuffer_bit);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index f15486aee7f8..0e5da902473c 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -67,23 +67,22 @@
 #include "i915_drv.h"
 
 void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			       enum fb_op_origin origin)
+			       enum fb_op_origin origin,
+			       unsigned int frontbuffer_bits)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
 	if (origin == ORIGIN_CS) {
 		spin_lock(&dev_priv->fb_tracking.lock);
-		dev_priv->fb_tracking.busy_bits |= obj->frontbuffer_bits;
-		dev_priv->fb_tracking.flip_bits &= ~obj->frontbuffer_bits;
+		dev_priv->fb_tracking.busy_bits |= frontbuffer_bits;
+		dev_priv->fb_tracking.flip_bits &= ~frontbuffer_bits;
 		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_psr_invalidate(dev, obj->frontbuffer_bits);
-	intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits);
-	intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin);
+	intel_psr_invalidate(dev, frontbuffer_bits);
+	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
+	intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
@@ -119,15 +118,11 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 
 void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 			  bool retire,
-			  enum fb_op_origin origin)
+			  enum fb_op_origin origin,
+			  unsigned int frontbuffer_bits)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = to_i915(dev);
-	unsigned frontbuffer_bits;
-
-	WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
-	frontbuffer_bits = obj->frontbuffer_bits;
 
 	if (retire) {
 		spin_lock(&dev_priv->fb_tracking.lock);
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 60a0ec179108..0c85b205d902 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -36,10 +36,12 @@ void intel_frontbuffer_flip(struct drm_device *dev,
 			    unsigned frontbuffer_bits);
 
 void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
-			       enum fb_op_origin origin);
+			       enum fb_op_origin origin,
+			       unsigned int frontbuffer_bits);
 void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 			  bool retire,
-			  enum fb_op_origin origin);
+			  enum fb_op_origin origin,
+			  unsigned int frontbuffer_bits);
 
 /**
  * intel_fb_obj_invalidate - invalidate frontbuffer object
@@ -55,10 +57,13 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 static inline void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 					   enum fb_op_origin origin)
 {
-	if (!obj->frontbuffer_bits)
+	unsigned int frontbuffer_bits;
+
+	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (!frontbuffer_bits)
 		return;
 
-	__intel_fb_obj_invalidate(obj, origin);
+	__intel_fb_obj_invalidate(obj, origin, frontbuffer_bits);
 }
 
 /**
@@ -75,10 +80,13 @@ static inline void intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 				      bool retire,
 				      enum fb_op_origin origin)
 {
-	if (!obj->frontbuffer_bits)
+	unsigned int frontbuffer_bits;
+
+	frontbuffer_bits = atomic_read(&obj->frontbuffer_bits);
+	if (!frontbuffer_bits)
 		return;
 
-	__intel_fb_obj_flush(obj, retire, origin);
+	__intel_fb_obj_flush(obj, retire, origin, frontbuffer_bits);
 }
 
 #endif /* __INTEL_FRONTBUFFER_H__ */
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 22/26] drm/i915: Use dev_priv consistently through the intel_frontbuffer interface
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (19 preceding siblings ...)
  2016-08-04 15:32 ` [CI 21/26] drm/i915: Use atomics to manipulate obj->frontbuffer_bits Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 15:32 ` [CI 23/26] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
                   ` (4 subsequent siblings)
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

Rather than a mishmash of struct drm_device *dev and struct
drm_i915_private *dev_priv being used freely within a function, be
consistent and only pass along dev_priv.
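
The shape of the conversion, on a hypothetical pair of functions:

        /* Illustrative only: resolve dev_priv once at the boundary and
         * pass it down, rather than mixing the two pointer types.
         */
        static void update_hw_state(struct drm_i915_private *dev_priv)
        {
                /* ... internal callees all take dev_priv ... */
        }

        static void entry_point(struct drm_device *dev)
        {
                update_hw_state(to_i915(dev));
        }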

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/intel_display.c     | 10 +++----
 drivers/gpu/drm/i915/intel_dp.c          | 14 ++++------
 drivers/gpu/drm/i915/intel_drv.h         | 15 +++++-----
 drivers/gpu/drm/i915/intel_frontbuffer.c | 47 +++++++++++++-------------------
 drivers/gpu/drm/i915/intel_frontbuffer.h |  7 ++---
 drivers/gpu/drm/i915/intel_overlay.c     |  3 +-
 drivers/gpu/drm/i915/intel_psr.c         | 26 ++++++++----------
 7 files changed, 53 insertions(+), 69 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5bc82064b219..9068676943bf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4566,12 +4566,11 @@ static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
 	struct drm_atomic_state *old_state = old_crtc_state->base.state;
 	struct intel_crtc_state *pipe_config =
 		to_intel_crtc_state(crtc->base.state);
-	struct drm_device *dev = crtc->base.dev;
 	struct drm_plane *primary = crtc->base.primary;
 	struct drm_plane_state *old_pri_state =
 		drm_atomic_get_existing_plane_state(old_state, primary);
 
-	intel_frontbuffer_flip(dev, pipe_config->fb_bits);
+	intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
 
 	crtc->wm.cxsr_allowed = true;
 
@@ -4694,7 +4693,7 @@ static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask
 	 * to compute the mask of flip planes precisely. For the time being
 	 * consider this a flip to a NULL plane.
 	 */
-	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
+	intel_frontbuffer_flip(to_i915(dev), INTEL_FRONTBUFFER_ALL_MASK(pipe));
 }
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -10952,7 +10951,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 
 	i915_gem_request_put(work->flip_queued_req);
 
-	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
+	intel_frontbuffer_flip_complete(to_i915(dev),
+					to_intel_plane(primary)->frontbuffer_bit);
 	intel_fbc_post_update(crtc);
 	drm_framebuffer_unreference(work->old_fb);
 
@@ -11727,7 +11727,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
 			  to_intel_plane(primary)->frontbuffer_bit);
 	mutex_unlock(&dev->struct_mutex);
 
-	intel_frontbuffer_flip_prepare(dev,
+	intel_frontbuffer_flip_prepare(to_i915(dev),
 				       to_intel_plane(primary)->frontbuffer_bit);
 
 	trace_i915_flip_request(intel_crtc->plane, obj);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 53d97f6af24b..0aadc65ff035 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -5228,7 +5228,7 @@ unlock:
 
 /**
  * intel_edp_drrs_invalidate - Disable Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called everytime rendering on the given planes start.
@@ -5236,10 +5236,9 @@ unlock:
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
-void intel_edp_drrs_invalidate(struct drm_device *dev,
-		unsigned frontbuffer_bits)
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+			       unsigned int frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -5271,7 +5270,7 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
 
 /**
  * intel_edp_drrs_flush - Restart Idleness DRRS
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called every time rendering on the given planes has
@@ -5281,10 +5280,9 @@ void intel_edp_drrs_invalidate(struct drm_device *dev,
  *
  * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
  */
-void intel_edp_drrs_flush(struct drm_device *dev,
-		unsigned frontbuffer_bits)
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+			  unsigned int frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 127eb651925f..e54e6c205f41 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1374,11 +1374,12 @@ uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes);
 void intel_plane_destroy(struct drm_plane *plane);
 void intel_edp_drrs_enable(struct intel_dp *intel_dp);
 void intel_edp_drrs_disable(struct intel_dp *intel_dp);
-void intel_edp_drrs_invalidate(struct drm_device *dev,
-		unsigned frontbuffer_bits);
-void intel_edp_drrs_flush(struct drm_device *dev, unsigned frontbuffer_bits);
+void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
+			       unsigned int frontbuffer_bits);
+void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
+			  unsigned int frontbuffer_bits);
 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
-					 struct intel_digital_port *port);
+				  struct intel_digital_port *port);
 
 void
 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
@@ -1551,13 +1552,13 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
 /* intel_psr.c */
 void intel_psr_enable(struct intel_dp *intel_dp);
 void intel_psr_disable(struct intel_dp *intel_dp);
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits);
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits,
 		     enum fb_op_origin origin);
 void intel_psr_init(struct drm_device *dev);
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 				   unsigned frontbuffer_bits);
 
 /* intel_runtime_pm.c */
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c
index 0e5da902473c..966de4c7c7a2 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.c
@@ -70,8 +70,7 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 			       enum fb_op_origin origin,
 			       unsigned int frontbuffer_bits)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	if (origin == ORIGIN_CS) {
 		spin_lock(&dev_priv->fb_tracking.lock);
@@ -80,14 +79,14 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
 		spin_unlock(&dev_priv->fb_tracking.lock);
 	}
 
-	intel_psr_invalidate(dev, frontbuffer_bits);
-	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
+	intel_psr_invalidate(dev_priv, frontbuffer_bits);
+	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);
 	intel_fbc_invalidate(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_frontbuffer_flush - flush frontbuffer
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
@@ -97,12 +96,10 @@ void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-static void intel_frontbuffer_flush(struct drm_device *dev,
+static void intel_frontbuffer_flush(struct drm_i915_private *dev_priv,
 				    unsigned frontbuffer_bits,
 				    enum fb_op_origin origin)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	/* Delay flushing when rings are still busy.*/
 	spin_lock(&dev_priv->fb_tracking.lock);
 	frontbuffer_bits &= ~dev_priv->fb_tracking.busy_bits;
@@ -111,8 +108,8 @@ static void intel_frontbuffer_flush(struct drm_device *dev,
 	if (!frontbuffer_bits)
 		return;
 
-	intel_edp_drrs_flush(dev, frontbuffer_bits);
-	intel_psr_flush(dev, frontbuffer_bits, origin);
+	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);
+	intel_psr_flush(dev_priv, frontbuffer_bits, origin);
 	intel_fbc_flush(dev_priv, frontbuffer_bits, origin);
 }
 
@@ -121,8 +118,7 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 			  enum fb_op_origin origin,
 			  unsigned int frontbuffer_bits)
 {
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
 
 	if (retire) {
 		spin_lock(&dev_priv->fb_tracking.lock);
@@ -133,12 +129,12 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
 	}
 
 	if (frontbuffer_bits)
-		intel_frontbuffer_flush(dev, frontbuffer_bits, origin);
+		intel_frontbuffer_flush(dev_priv, frontbuffer_bits, origin);
 }
 
 /**
  * intel_frontbuffer_flip_prepare - prepare asynchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. The actual
@@ -148,23 +144,21 @@ void __intel_fb_obj_flush(struct drm_i915_gem_object *obj,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
 				    unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	spin_lock(&dev_priv->fb_tracking.lock);
 	dev_priv->fb_tracking.flip_bits |= frontbuffer_bits;
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
 	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_psr_single_frame_update(dev, frontbuffer_bits);
+	intel_psr_single_frame_update(dev_priv, frontbuffer_bits);
 }
 
 /**
  * intel_frontbuffer_flip_complete - complete asynchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after the flip has been latched and will complete
@@ -172,11 +166,9 @@ void intel_frontbuffer_flip_prepare(struct drm_device *dev,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
 				     unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Mask any cancelled flips. */
 	frontbuffer_bits &= dev_priv->fb_tracking.flip_bits;
@@ -184,12 +176,13 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
 	spin_unlock(&dev_priv->fb_tracking.lock);
 
 	if (frontbuffer_bits)
-		intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+		intel_frontbuffer_flush(dev_priv,
+					frontbuffer_bits, ORIGIN_FLIP);
 }
 
 /**
  * intel_frontbuffer_flip - synchronous frontbuffer flip
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * This function gets called after scheduling a flip on @obj. This is for
@@ -198,15 +191,13 @@ void intel_frontbuffer_flip_complete(struct drm_device *dev,
  *
  * Can be called without any locks held.
  */
-void intel_frontbuffer_flip(struct drm_device *dev,
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
 			    unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
-
 	spin_lock(&dev_priv->fb_tracking.lock);
 	/* Remove stale busy bits due to the old buffer. */
 	dev_priv->fb_tracking.busy_bits &= ~frontbuffer_bits;
 	spin_unlock(&dev_priv->fb_tracking.lock);
 
-	intel_frontbuffer_flush(dev, frontbuffer_bits, ORIGIN_FLIP);
+	intel_frontbuffer_flush(dev_priv, frontbuffer_bits, ORIGIN_FLIP);
 }
diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.h b/drivers/gpu/drm/i915/intel_frontbuffer.h
index 0c85b205d902..76ceb539f9f0 100644
--- a/drivers/gpu/drm/i915/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/intel_frontbuffer.h
@@ -24,15 +24,14 @@
 #ifndef __INTEL_FRONTBUFFER_H__
 #define __INTEL_FRONTBUFFER_H__
 
-struct drm_device;
 struct drm_i915_private;
 struct drm_i915_gem_object;
 
-void intel_frontbuffer_flip_prepare(struct drm_device *dev,
+void intel_frontbuffer_flip_prepare(struct drm_i915_private *dev_priv,
 				    unsigned frontbuffer_bits);
-void intel_frontbuffer_flip_complete(struct drm_device *dev,
+void intel_frontbuffer_flip_complete(struct drm_i915_private *dev_priv,
 				     unsigned frontbuffer_bits);
-void intel_frontbuffer_flip(struct drm_device *dev,
+void intel_frontbuffer_flip(struct drm_i915_private *dev_priv,
 			    unsigned frontbuffer_bits);
 
 void __intel_fb_obj_invalidate(struct drm_i915_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index fd891fe16427..413a2038e6d1 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -840,8 +840,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	overlay->old_vid_bo = overlay->vid_bo;
 	overlay->vid_bo = new_bo;
 
-	intel_frontbuffer_flip(&dev_priv->drm,
-			       INTEL_FRONTBUFFER_OVERLAY(pipe));
+	intel_frontbuffer_flip(dev_priv, INTEL_FRONTBUFFER_OVERLAY(pipe));
 
 	return 0;
 
diff --git a/drivers/gpu/drm/i915/intel_psr.c b/drivers/gpu/drm/i915/intel_psr.c
index 2b0d1baf15b3..59a21c9d2e43 100644
--- a/drivers/gpu/drm/i915/intel_psr.c
+++ b/drivers/gpu/drm/i915/intel_psr.c
@@ -645,9 +645,8 @@ unlock:
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
-static void intel_psr_exit(struct drm_device *dev)
+static void intel_psr_exit(struct drm_i915_private *dev_priv)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct intel_dp *intel_dp = dev_priv->psr.enabled;
 	struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
 	enum pipe pipe = to_intel_crtc(crtc)->pipe;
@@ -656,7 +655,7 @@ static void intel_psr_exit(struct drm_device *dev)
 	if (!dev_priv->psr.active)
 		return;
 
-	if (HAS_DDI(dev)) {
+	if (HAS_DDI(dev_priv)) {
 		val = I915_READ(EDP_PSR_CTL);
 
 		WARN_ON(!(val & EDP_PSR_ENABLE));
@@ -691,7 +690,7 @@ static void intel_psr_exit(struct drm_device *dev)
 
 /**
  * intel_psr_single_frame_update - Single Frame Update
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Some platforms support a single frame update feature that is used to
@@ -699,10 +698,9 @@ static void intel_psr_exit(struct drm_device *dev)
  * So far it is only implemented for Valleyview and Cherryview because
  * hardware requires this to be done before a page flip.
  */
-void intel_psr_single_frame_update(struct drm_device *dev,
+void intel_psr_single_frame_update(struct drm_i915_private *dev_priv,
 				   unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 	u32 val;
@@ -711,7 +709,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
 	 * Single frame update is already supported on BDW+ but it requires
 	 * many W/A and it isn't really needed.
 	 */
-	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		return;
 
 	mutex_lock(&dev_priv->psr.lock);
@@ -737,7 +735,7 @@ void intel_psr_single_frame_update(struct drm_device *dev,
 
 /**
  * intel_psr_invalidate - Invalidade PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  *
  * Since the hardware frontbuffer tracking has gaps we need to integrate
@@ -747,10 +745,9 @@ void intel_psr_single_frame_update(struct drm_device *dev,
  *
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits."
  */
-void intel_psr_invalidate(struct drm_device *dev,
+void intel_psr_invalidate(struct drm_i915_private *dev_priv,
 			  unsigned frontbuffer_bits)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -767,14 +764,14 @@ void intel_psr_invalidate(struct drm_device *dev,
 	dev_priv->psr.busy_frontbuffer_bits |= frontbuffer_bits;
 
 	if (frontbuffer_bits)
-		intel_psr_exit(dev);
+		intel_psr_exit(dev_priv);
 
 	mutex_unlock(&dev_priv->psr.lock);
 }
 
 /**
  * intel_psr_flush - Flush PSR
- * @dev: DRM device
+ * @dev_priv: i915 device
  * @frontbuffer_bits: frontbuffer plane tracking bits
  * @origin: which operation caused the flush
  *
@@ -785,10 +782,9 @@ void intel_psr_invalidate(struct drm_device *dev,
  *
  * Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
  */
-void intel_psr_flush(struct drm_device *dev,
+void intel_psr_flush(struct drm_i915_private *dev_priv,
 		     unsigned frontbuffer_bits, enum fb_op_origin origin)
 {
-	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc *crtc;
 	enum pipe pipe;
 
@@ -806,7 +802,7 @@ void intel_psr_flush(struct drm_device *dev,
 
 	/* By definition flush = invalidate + flush */
 	if (frontbuffer_bits)
-		intel_psr_exit(dev);
+		intel_psr_exit(dev_priv);
 
 	if (!dev_priv->psr.active && !dev_priv->psr.busy_frontbuffer_bits)
 		if (!work_busy(&dev_priv->psr.work.work))
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 23/26] drm/i915: Move obj->active:5 to obj->flags
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (20 preceding siblings ...)
  2016-08-04 15:32 ` [CI 22/26] drm/i915: Use dev_priv consistently through the intel_frontbuffer interface Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 15:32 ` [CI 24/26] drm/i915: Move i915_gem_object_wait_rendering() Chris Wilson
                   ` (3 subsequent siblings)
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

We are motivated to avoid using a bitfield for obj->active for a couple
of reasons. Firstly, we wish to document our lockless read of obj->active
using READ_ONCE inside i915_gem_busy_ioctl(), and that requires an
integral type (i.e. not a bitfield). Secondly, gcc produces abysmal code
when presented with a bitfield, and that shows up high on the profiles of
request tracking (mainly due to excess memory traffic as it converts the
bitfield to a register and back, generating frequent AGI (address
generation interlock) stalls in the process).

v2: Use BIT(), break up a long line when computing the other engines,
new paint for i915_gem_object_is_active (now i915_gem_object_get_active).
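
The READ_ONCE requirement is the crux; a minimal sketch (field and
mask names shortened for illustration):

        /* Illustrative only: READ_ONCE() requires an addressable,
         * integral lvalue, which a C bitfield cannot provide, whereas
         * a plain unsigned long plus a mask can be read locklessly.
         */
        #define ACTIVE_SHIFT 0
        #define ACTIVE_MASK ((1UL << 5) - 1) /* I915_NUM_ENGINES bits */

        static unsigned long peek_active(const unsigned long *flags)
        {
                return (READ_ONCE(*flags) >> ACTIVE_SHIFT) & ACTIVE_MASK;
        }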

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_debugfs.c        |  2 +-
 drivers/gpu/drm/i915/i915_drv.h            | 37 +++++++++++++++++++++++++++++-
 drivers/gpu/drm/i915/i915_gem.c            | 16 ++++++-------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 20 ++++++++++++----
 drivers/gpu/drm/i915/i915_gem_shrinker.c   |  5 ++--
 drivers/gpu/drm/i915/i915_gem_userptr.c    |  2 +-
 6 files changed, 64 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 9796b07bdb0d..24d63e271f4b 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -91,7 +91,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
 
 static char get_active_flag(struct drm_i915_gem_object *obj)
 {
-	return obj->active ? '*' : ' ';
+	return i915_gem_object_is_active(obj) ? '*' : ' ';
 }
 
 static char get_pin_flag(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 3de75e82ca76..db5dc5bd78d8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2155,12 +2155,16 @@ struct drm_i915_gem_object {
 
 	struct list_head batch_pool_link;
 
+	unsigned long flags;
 	/**
 	 * This is set if the object is on the active lists (has pending
 	 * rendering and so a non-zero seqno), and is not set if it is on
 	 * inactive (ready to be unbound) list.
 	 */
-	unsigned int active:I915_NUM_ENGINES;
+#define I915_BO_ACTIVE_SHIFT 0
+#define I915_BO_ACTIVE_MASK ((1 << I915_NUM_ENGINES) - 1)
+#define __I915_BO_ACTIVE(bo) \
+	((READ_ONCE((bo)->flags) >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK)
 
 	/**
 	 * This is set if the object has been written to since last bound
@@ -2325,6 +2329,37 @@ i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 	return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE;
 }
 
+static inline unsigned long
+i915_gem_object_get_active(const struct drm_i915_gem_object *obj)
+{
+	return (obj->flags >> I915_BO_ACTIVE_SHIFT) & I915_BO_ACTIVE_MASK;
+}
+
+static inline bool
+i915_gem_object_is_active(const struct drm_i915_gem_object *obj)
+{
+	return i915_gem_object_get_active(obj);
+}
+
+static inline void
+i915_gem_object_set_active(struct drm_i915_gem_object *obj, int engine)
+{
+	obj->flags |= BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline void
+i915_gem_object_clear_active(struct drm_i915_gem_object *obj, int engine)
+{
+	obj->flags &= ~BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
+static inline bool
+i915_gem_object_has_active_engine(const struct drm_i915_gem_object *obj,
+				  int engine)
+{
+	return obj->flags & BIT(engine + I915_BO_ACTIVE_SHIFT);
+}
+
 /*
  * Optimised SGL iterator for GEM objects
  */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3c0531ba8f60..ce422e0dce8f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1358,7 +1358,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 
 	if (!readonly) {
 		active = obj->last_read;
-		active_mask = obj->active;
+		active_mask = i915_gem_object_get_active(obj);
 	} else {
 		active_mask = 1;
 		active = &obj->last_write;
@@ -1402,7 +1402,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
 	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 	BUG_ON(!dev_priv->mm.interruptible);
 
-	active_mask = obj->active;
+	active_mask = i915_gem_object_get_active(obj);
 	if (!active_mask)
 		return 0;
 
@@ -2365,10 +2365,10 @@ i915_gem_object_retire__read(struct i915_gem_active *active,
 	struct drm_i915_gem_object *obj =
 		container_of(active, struct drm_i915_gem_object, last_read[idx]);
 
-	GEM_BUG_ON((obj->active & (1 << idx)) == 0);
+	GEM_BUG_ON(!i915_gem_object_has_active_engine(obj, idx));
 
-	obj->active &= ~(1 << idx);
-	if (obj->active)
+	i915_gem_object_clear_active(obj, idx);
+	if (i915_gem_object_is_active(obj))
 		return;
 
 	/* Bump our place on the bound list to keep it roughly in LRU order
@@ -2672,7 +2672,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		return -ENOENT;
 	}
 
-	if (!obj->active)
+	if (!i915_gem_object_is_active(obj))
 		goto out;
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -2760,7 +2760,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
 
 	lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-	active_mask = obj->active;
+	active_mask = i915_gem_object_get_active(obj);
 	if (!active_mask)
 		return 0;
 
@@ -3811,7 +3811,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * become non-busy without any further actions.
 	 */
 	args->busy = 0;
-	if (obj->active) {
+	if (i915_gem_object_is_active(obj)) {
 		struct drm_i915_gem_request *req;
 		int i;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index e8e194fa2e65..a1da3028a949 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -434,7 +434,7 @@ relocate_entry_clflush(struct drm_i915_gem_object *obj,
 
 static bool object_is_idle(struct drm_i915_gem_object *obj)
 {
-	unsigned long active = obj->active;
+	unsigned long active = i915_gem_object_get_active(obj);
 	int idx;
 
 	for_each_active(active, idx) {
@@ -990,11 +990,21 @@ err:
 	return ret;
 }
 
+static unsigned int eb_other_engines(struct drm_i915_gem_request *req)
+{
+	unsigned int mask;
+
+	mask = ~intel_engine_flag(req->engine) & I915_BO_ACTIVE_MASK;
+	mask <<= I915_BO_ACTIVE_SHIFT;
+
+	return mask;
+}
+
 static int
 i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 				struct list_head *vmas)
 {
-	const unsigned other_rings = ~intel_engine_flag(req->engine);
+	const unsigned int other_rings = eb_other_engines(req);
 	struct i915_vma *vma;
 	uint32_t flush_domains = 0;
 	bool flush_chipset = false;
@@ -1003,7 +1013,7 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 	list_for_each_entry(vma, vmas, exec_list) {
 		struct drm_i915_gem_object *obj = vma->obj;
 
-		if (obj->active & other_rings) {
+		if (obj->flags & other_rings) {
 			ret = i915_gem_object_sync(obj, req);
 			if (ret)
 				return ret;
@@ -1166,9 +1176,9 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	 * add the active reference first and queue for it to be dropped
 	 * *last*.
 	 */
-	if (obj->active == 0)
+	if (!i915_gem_object_is_active(obj))
 		i915_gem_object_get(obj);
-	obj->active |= 1 << idx;
+	i915_gem_object_set_active(obj, idx);
 	i915_gem_active_set(&obj->last_read[idx], req);
 
 	if (flags & EXEC_OBJECT_WRITE) {
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index b5776358c05e..bcd85bdbc25f 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -182,7 +182,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			    !is_vmalloc_addr(obj->mapping))
 				continue;
 
-			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+			if ((flags & I915_SHRINK_ACTIVE) == 0 &&
+			    i915_gem_object_is_active(obj))
 				continue;
 
 			if (!can_release_pages(obj))
@@ -267,7 +268,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->active && can_release_pages(obj))
+		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 651a84ba840c..53f64fcc89ef 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -67,7 +67,7 @@ static void wait_rendering(struct drm_i915_gem_object *obj)
 	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
 	int i, n;
 
-	if (!obj->active)
+	if (!i915_gem_object_is_active(obj))
 		return;
 
 	n = 0;
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 24/26] drm/i915: Move i915_gem_object_wait_rendering()
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (21 preceding siblings ...)
  2016-08-04 15:32 ` [CI 23/26] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 15:32 ` [CI 25/26] drm/i915: Enable lockless lookup of request tracking via RCU Chris Wilson
                   ` (2 subsequent siblings)
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

Just move it earlier so that we can use the companion nonblocking
version in a couple more callsites without having to add a forward
declaration.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_gem.c | 204 ++++++++++++++++++++--------------------
 1 file changed, 103 insertions(+), 101 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ce422e0dce8f..852a14468804 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -302,6 +302,109 @@ i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 	return ret;
 }
 
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ * @obj: i915 gem object
+ * @readonly: waiting for just read access or read-write access
+ */
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+			       bool readonly)
+{
+	struct reservation_object *resv;
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int idx;
+
+	lockdep_assert_held(&obj->base.dev->struct_mutex);
+
+	if (!readonly) {
+		active = obj->last_read;
+		active_mask = i915_gem_object_get_active(obj);
+	} else {
+		active_mask = 1;
+		active = &obj->last_write;
+	}
+
+	for_each_active(active_mask, idx) {
+		int ret;
+
+		ret = i915_gem_active_wait(&active[idx],
+					   &obj->base.dev->struct_mutex);
+		if (ret)
+			return ret;
+	}
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (resv) {
+		long err;
+
+		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
+							  MAX_SCHEDULE_TIMEOUT);
+		if (err < 0)
+			return err;
+	}
+
+	return 0;
+}
+
+/* A nonblocking variant of the above wait. This is a highly dangerous routine
+ * as the object state may change during this call.
+ */
+static __must_check int
+i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
+					    struct intel_rps_client *rps,
+					    bool readonly)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+	struct i915_gem_active *active;
+	unsigned long active_mask;
+	int ret, i, n = 0;
+
+	lockdep_assert_held(&dev->struct_mutex);
+	GEM_BUG_ON(!to_i915(dev)->mm.interruptible);
+
+	active_mask = i915_gem_object_get_active(obj);
+	if (!active_mask)
+		return 0;
+
+	if (!readonly) {
+		active = obj->last_read;
+	} else {
+		active_mask = 1;
+		active = &obj->last_write;
+	}
+
+	for_each_active(active_mask, i) {
+		struct drm_i915_gem_request *req;
+
+		req = i915_gem_active_get(&active[i],
+					  &obj->base.dev->struct_mutex);
+		if (req)
+			requests[n++] = req;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+	ret = 0;
+	for (i = 0; ret == 0 && i < n; i++)
+		ret = i915_wait_request(requests[i], true, NULL, rps);
+	mutex_lock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++)
+		i915_gem_request_put(requests[i]);
+
+	return ret;
+}
+
+static struct intel_rps_client *to_rps_client(struct drm_file *file)
+{
+	struct drm_i915_file_private *fpriv = file->driver_priv;
+
+	return &fpriv->rps;
+}
+
 int
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 			    int align)
@@ -1339,107 +1442,6 @@ put_rpm:
 	return ret;
 }
 
-/**
- * Ensures that all rendering to the object has completed and the object is
- * safe to unbind from the GTT or access from the CPU.
- * @obj: i915 gem object
- * @readonly: waiting for read access or write
- */
-int
-i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
-			       bool readonly)
-{
-	struct reservation_object *resv;
-	struct i915_gem_active *active;
-	unsigned long active_mask;
-	int idx, ret;
-
-	lockdep_assert_held(&obj->base.dev->struct_mutex);
-
-	if (!readonly) {
-		active = obj->last_read;
-		active_mask = i915_gem_object_get_active(obj);
-	} else {
-		active_mask = 1;
-		active = &obj->last_write;
-	}
-
-	for_each_active(active_mask, idx) {
-		ret = i915_gem_active_wait(&active[idx],
-					   &obj->base.dev->struct_mutex);
-		if (ret)
-			return ret;
-	}
-
-	resv = i915_gem_object_get_dmabuf_resv(obj);
-	if (resv) {
-		long err;
-
-		err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
-							  MAX_SCHEDULE_TIMEOUT);
-		if (err < 0)
-			return err;
-	}
-
-	return 0;
-}
-
-/* A nonblocking variant of the above wait. This is a highly dangerous routine
- * as the object state may change during this call.
- */
-static __must_check int
-i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
-					    struct intel_rps_client *rps,
-					    bool readonly)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_i915_private *dev_priv = to_i915(dev);
-	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
-	struct i915_gem_active *active;
-	unsigned long active_mask;
-	int ret, i, n = 0;
-
-	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
-	BUG_ON(!dev_priv->mm.interruptible);
-
-	active_mask = i915_gem_object_get_active(obj);
-	if (!active_mask)
-		return 0;
-
-	if (!readonly) {
-		active = obj->last_read;
-	} else {
-		active_mask = 1;
-		active = &obj->last_write;
-	}
-
-	for_each_active(active_mask, i) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_active_get(&active[i],
-					  &obj->base.dev->struct_mutex);
-		if (req)
-			requests[n++] = req;
-	}
-
-	mutex_unlock(&dev->struct_mutex);
-	ret = 0;
-	for (i = 0; ret == 0 && i < n; i++)
-		ret = i915_wait_request(requests[i], true, NULL, rps);
-	mutex_lock(&dev->struct_mutex);
-
-	for (i = 0; i < n; i++)
-		i915_gem_request_put(requests[i]);
-
-	return ret;
-}
-
-static struct intel_rps_client *to_rps_client(struct drm_file *file)
-{
-	struct drm_i915_file_private *fpriv = file->driver_priv;
-	return &fpriv->rps;
-}
-
 static enum fb_op_origin
 write_origin(struct drm_i915_gem_object *obj, unsigned domain)
 {
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 25/26] drm/i915: Enable lockless lookup of request tracking via RCU
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (22 preceding siblings ...)
  2016-08-04 15:32 ` [CI 24/26] drm/i915: Move i915_gem_object_wait_rendering() Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 15:32 ` [CI 26/26] drm/i915: Export our request as a dma-buf fence on the reservation object Chris Wilson
  2016-08-04 16:11 ` ✗ Ro.CI.BAT: failure for series starting with [CI,01/26] drm/i915: Combine loops within i915_gem_evict_something Patchwork
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

If we enable RCU for the requests (providing a grace period where we can
inspect a "dead" request before it is freed), we can allow callers to
carefully perform lockless lookup of an active request.
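
In a hypothetical caller, that lookup ends up looking like (sketch
only, using the helpers added below):

        /* Illustrative: peek at the request tracked by @active without
         * holding struct_mutex; the reference keeps the request alive
         * after the RCU read section ends.
         */
        static bool tracker_busy(const struct i915_gem_active *active)
        {
                struct drm_i915_gem_request *rq;
                bool busy = false;

                rq = i915_gem_active_get_unlocked(active);
                if (rq) {
                        busy = !i915_gem_request_completed(rq);
                        i915_gem_request_put(rq);
                }

                return busy;
        }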

However, by enabling deferred freeing of requests, we can potentially
hog a lot of memory when dealing with tens of thousands of requests per
second - with a quick insertion of a synchronize_rcu() inside our
shrinker callback, that issue disappears.

v2: Currently, it is our responsibility to handle reclaim, i.e. to avoid
hogging memory with the delayed slab frees. At the moment, we wait for a
grace period in the shrinker, and block for all RCU callbacks on oom.
Suggested alternatives focus on flushing our RCU callback when we have a
certain number of outstanding request frees, and blocking on that flush
after a second high watermark. (So rather than wait for the system to
run out of memory, we stop issuing requests; both are nondeterministic.)

Paul E. McKenney wrote:

Another approach is synchronize_rcu() after some largish number of
requests.  The advantage of this approach is that it throttles the
production of callbacks at the source.  The corresponding disadvantage
is that it slows things up.

Another approach is to use call_rcu(), but if the previous call_rcu()
is still in flight, block waiting for it.  Yet another approach is
the get_state_synchronize_rcu() / cond_synchronize_rcu() pair.  The
idea is to do something like this:

        cond_synchronize_rcu(cookie);
        cookie = get_state_synchronize_rcu();

You would of course do an initial get_state_synchronize_rcu() to
get things going.  This would not block unless there was less than
one grace period's worth of time between invocations.  But this
assumes a busy system, where there is almost always a grace period
in flight.  But you can make that happen as follows:

        cond_synchronize_rcu(cookie);
        cookie = get_state_synchronize_rcu();
        call_rcu(&my_rcu_head, noop_function);

Note that you need additional code to make sure that the old callback
has completed before doing a new one.  Setting and clearing a flag
with appropriate memory ordering control suffices (e.g,. smp_load_acquire()
and smp_store_release()).
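
A minimal sketch of that last flag-based scheme (illustrative only,
not part of this patch):

        /* Keep at most one noop callback in flight; the flag is
         * cleared with release semantics in the callback and tested
         * with acquire semantics before rearming.
         */
        static struct rcu_head my_rcu_head;
        static int cb_in_flight;
        static unsigned long cookie;

        static void noop_function(struct rcu_head *head)
        {
                smp_store_release(&cb_in_flight, 0);
        }

        static void throttle_request_frees(void)
        {
                cond_synchronize_rcu(cookie);
                cookie = get_state_synchronize_rcu();

                if (!smp_load_acquire(&cb_in_flight)) {
                        cb_in_flight = 1;
                        call_rcu(&my_rcu_head, noop_function);
                }
        }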

v3: More comments on compiler and processor order of operations within
the RCU lookup and discover we can use rcu_access_pointer() here instead.

v4: Wrap i915_gem_active_get_rcu() to take the rcu_read_lock itself.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: "Goel, Akash" <akash.goel@intel.com>
Cc: Josh Triplett <josh@joshtriplett.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem.c          |   7 +-
 drivers/gpu/drm/i915/i915_gem_request.c  |   2 +-
 drivers/gpu/drm/i915/i915_gem_request.h  | 163 +++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_shrinker.c |  15 ++-
 4 files changed, 174 insertions(+), 13 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 852a14468804..aa40d6c35c30 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4431,7 +4431,9 @@ i915_gem_load_init(struct drm_device *dev)
 	dev_priv->requests =
 		kmem_cache_create("i915_gem_request",
 				  sizeof(struct drm_i915_gem_request), 0,
-				  SLAB_HWCACHE_ALIGN,
+				  SLAB_HWCACHE_ALIGN |
+				  SLAB_RECLAIM_ACCOUNT |
+				  SLAB_DESTROY_BY_RCU,
 				  NULL);
 
 	INIT_LIST_HEAD(&dev_priv->context_list);
@@ -4467,6 +4469,9 @@ void i915_gem_load_cleanup(struct drm_device *dev)
 	kmem_cache_destroy(dev_priv->requests);
 	kmem_cache_destroy(dev_priv->vmas);
 	kmem_cache_destroy(dev_priv->objects);
+
+	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
+	rcu_barrier();
 }
 
 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 773b942a3a5f..3fecb8f0e041 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -205,7 +205,7 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 		prefetchw(next);
 
 		INIT_LIST_HEAD(&active->link);
-		active->request = NULL;
+		RCU_INIT_POINTER(active->request, NULL);
 
 		active->retire(active, request);
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_request.h b/drivers/gpu/drm/i915/i915_gem_request.h
index 26ca697f5af7..6002adc43523 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.h
+++ b/drivers/gpu/drm/i915/i915_gem_request.h
@@ -183,6 +183,12 @@ i915_gem_request_get(struct drm_i915_gem_request *req)
 	return to_request(fence_get(&req->fence));
 }
 
+static inline struct drm_i915_gem_request *
+i915_gem_request_get_rcu(struct drm_i915_gem_request *req)
+{
+	return to_request(fence_get_rcu(&req->fence));
+}
+
 static inline void
 i915_gem_request_put(struct drm_i915_gem_request *req)
 {
@@ -286,7 +292,7 @@ typedef void (*i915_gem_retire_fn)(struct i915_gem_active *,
 				   struct drm_i915_gem_request *);
 
 struct i915_gem_active {
-	struct drm_i915_gem_request *request;
+	struct drm_i915_gem_request __rcu *request;
 	struct list_head link;
 	i915_gem_retire_fn retire;
 };
@@ -327,13 +333,19 @@ i915_gem_active_set(struct i915_gem_active *active,
 		    struct drm_i915_gem_request *request)
 {
 	list_move(&active->link, &request->active_list);
-	active->request = request;
+	rcu_assign_pointer(active->request, request);
 }
 
 static inline struct drm_i915_gem_request *
 __i915_gem_active_peek(const struct i915_gem_active *active)
 {
-	return active->request;
+	/* Inside the error capture (running with the driver in an unknown
+	 * state), we want to bend the rules slightly (a lot).
+	 *
+	 * Work is in progress to make it safer, in the meantime this keeps
+	 * the known issue from spamming the logs.
+	 */
+	return rcu_dereference_protected(active->request, 1);
 }
 
 /**
@@ -349,7 +361,29 @@ i915_gem_active_peek(const struct i915_gem_active *active, struct mutex *mutex)
 {
 	struct drm_i915_gem_request *request;
 
-	request = active->request;
+	request = rcu_dereference_protected(active->request,
+					    lockdep_is_held(mutex));
+	if (!request || i915_gem_request_completed(request))
+		return NULL;
+
+	return request;
+}
+
+/**
+ * i915_gem_active_peek_rcu - report the active request being monitored
+ * @active - the active tracker
+ *
+ * i915_gem_active_peek_rcu() returns the current request being tracked if
+ * still active, or NULL. It does not obtain a reference on the request
+ * for the caller, and inspection of the request is only valid under
+ * the RCU lock.
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_peek_rcu(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	request = rcu_dereference(active->request);
 	if (!request || i915_gem_request_completed(request))
 		return NULL;
 
@@ -370,6 +404,119 @@ i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 }
 
 /**
+ * __i915_gem_active_get_rcu - return a reference to the active request
+ * @active - the active tracker
+ *
+ * __i915_gem_active_get() returns a reference to the active request, or NULL
+ * if the active tracker is idle. The caller must hold the RCU read lock, but
+ * the returned pointer is safe to use outside of RCU.
+ */
+static inline struct drm_i915_gem_request *
+__i915_gem_active_get_rcu(const struct i915_gem_active *active)
+{
+	/* Performing a lockless retrieval of the active request is super
+	 * tricky. SLAB_DESTROY_BY_RCU merely guarantees that the backing
+	 * slab of request objects will not be freed whilst we hold the
+	 * RCU read lock. It does not guarantee that the request itself
+	 * will not be freed and then *reused*. Viz,
+	 *
+	 * Thread A			Thread B
+	 *
+	 * req = active.request
+	 *				retire(req) -> free(req);
+	 *				(req is now first on the slab freelist)
+	 *				active.request = NULL
+	 *
+	 *				req = new submission on a new object
+	 * ref(req)
+	 *
+	 * To prevent the request from being reused whilst the caller
+	 * uses it, we take a reference like normal. Whilst acquiring
+	 * the reference we check that it is not in a destroyed state
+	 * (refcnt == 0). That prevents the request being reallocated
+	 * whilst the caller holds on to it. To check that the request
+	 * was not reallocated as we acquired the reference we have to
+	 * check that our request remains the active request across
+	 * the lookup, in the same manner as a seqlock. The visibility
+	 * of the pointer versus the reference counting is controlled
+	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
+	 *
+	 * In the middle of all that, we inspect whether the request is
+	 * complete. Retiring is lazy so the request may be completed long
+	 * before the active tracker is updated. Querying whether the
+	 * request is complete is far cheaper (as it involves no locked
+	 * instructions setting cachelines to exclusive) than acquiring
+	 * the reference, so we do it first. The RCU read lock ensures the
+	 * pointer dereference is valid, but does not ensure that the
+	 * seqno nor HWS is the right one! However, if the request was
+	 * reallocated, that means the active tracker's request was complete.
+	 * If the new request is also complete, then both are and we can
+	 * just report the active tracker is idle. If the new request is
+	 * incomplete, then we acquire a reference on it and check that
+	 * it remained the active request.
+	 */
+	do {
+		struct drm_i915_gem_request *request;
+
+		request = rcu_dereference(active->request);
+		if (!request || i915_gem_request_completed(request))
+			return NULL;
+
+		request = i915_gem_request_get_rcu(request);
+
+		/* What stops the following rcu_access_pointer() from occurring
+		 * before the above i915_gem_request_get_rcu()? If we were
+		 * to read the value before pausing to get the reference to
+		 * the request, we may not notice a change in the active
+		 * tracker.
+		 *
+		 * The rcu_access_pointer() is a mere compiler barrier, which
+		 * means both the CPU and compiler are free to perform the
+		 * memory read without constraint. The compiler only has to
+		 * ensure that any operations after the rcu_access_pointer()
+		 * occur afterwards in program order. This means the read may
+		 * be performed earlier by an out-of-order CPU, or adventurous
+		 * compiler.
+		 *
+		 * The atomic operation at the heart of
+		 * i915_gem_request_get_rcu(), see fence_get_rcu(), is
+		 * atomic_inc_not_zero() which is only a full memory barrier
+		 * when successful. That is, if i915_gem_request_get_rcu()
+		 * returns the request (and so with the reference counted
+		 * incremented) then the following read for rcu_access_pointer()
+		 * must occur after the atomic operation and so confirm
+		 * that this request is the one currently being tracked.
+		 */
+		if (!request || request == rcu_access_pointer(active->request))
+			return rcu_pointer_handoff(request);
+
+		i915_gem_request_put(request);
+	} while (1);
+}
+
+/**
+ * i915_gem_active_get_unlocked - return a reference to the active request
+ * @active - the active tracker
+ *
+ * i915_gem_active_get_unlocked() returns a reference to the active request,
+ * or NULL if the active tracker is idle. The reference is obtained under RCU,
+ * so no locking is required by the caller.
+ *
+ * The reference should be freed with i915_gem_request_put().
+ */
+static inline struct drm_i915_gem_request *
+i915_gem_active_get_unlocked(const struct i915_gem_active *active)
+{
+	struct drm_i915_gem_request *request;
+
+	rcu_read_lock();
+	request = __i915_gem_active_get_rcu(active);
+	rcu_read_unlock();
+
+	return request;
+}
+
+/**
  * i915_gem_active_isset - report whether the active tracker is assigned
  * @active - the active tracker
  *
@@ -380,7 +527,7 @@ i915_gem_active_get(const struct i915_gem_active *active, struct mutex *mutex)
 static inline bool
 i915_gem_active_isset(const struct i915_gem_active *active)
 {
-	return active->request;
+	return rcu_access_pointer(active->request);
 }
 
 /**
@@ -437,7 +584,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
 	struct drm_i915_gem_request *request;
 	int ret;
 
-	request = active->request;
+	request = rcu_dereference_protected(active->request,
+					    lockdep_is_held(mutex));
 	if (!request)
 		return 0;
 
@@ -446,7 +594,8 @@ i915_gem_active_retire(struct i915_gem_active *active,
 		return ret;
 
 	list_del_init(&active->link);
-	active->request = NULL;
+	RCU_INIT_POINTER(active->request, NULL);
+
 	active->retire(active, request);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index bcd85bdbc25f..1341cb55b6f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -205,6 +205,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		intel_runtime_pm_put(dev_priv);
 
 	i915_gem_retire_requests(dev_priv);
+	/* expedite the RCU grace period to free some request slabs */
+	synchronize_rcu_expedited();
 
 	return count;
 }
@@ -225,10 +227,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
  */
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
-	return i915_gem_shrink(dev_priv, -1UL,
-			       I915_SHRINK_BOUND |
-			       I915_SHRINK_UNBOUND |
-			       I915_SHRINK_ACTIVE);
+	unsigned long freed;
+
+	freed = i915_gem_shrink(dev_priv, -1UL,
+				I915_SHRINK_BOUND |
+				I915_SHRINK_UNBOUND |
+				I915_SHRINK_ACTIVE);
+	rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+
+	return freed;
 }
 
 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
-- 
2.8.1

^ permalink raw reply related	[flat|nested] 29+ messages in thread

* [CI 26/26] drm/i915: Export our request as a dma-buf fence on the reservation object
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (23 preceding siblings ...)
  2016-08-04 15:32 ` [CI 25/26] drm/i915: Enable lockless lookup of request tracking via RCU Chris Wilson
@ 2016-08-04 15:32 ` Chris Wilson
  2016-08-04 16:11 ` ✗ Ro.CI.BAT: failure for series starting with [CI,01/26] drm/i915: Combine loops within i915_gem_evict_something Patchwork
  25 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 15:32 UTC (permalink / raw)
  To: intel-gfx

If the GEM objects being rendered within this request have been
exported via dma-buf to a third party, hook ourselves into the dma-buf
reservation object so that the third party can serialise with our
rendering via the dma-buf fences.

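With the fences exported, the third party does not need to know anything
about i915; it can wait on the reservation object directly. A sketch of the
importer side using the stock reservation API (the 100ms timeout is
arbitrary):

/* Wait for all fences (shared readers and the exclusive writer),
 * interruptibly, before touching the buffer contents.
 */
long wait_for_render(struct dma_buf *dma_buf)
{
	return reservation_object_wait_timeout_rcu(dma_buf->resv,
						   true, /* wait_all */
						   true, /* intr */
						   msecs_to_jiffies(100));
}
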
Testcase: igt/prime_busy
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_gem_dmabuf.c     | 58 ++++++++++++++++++++++++++++--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 31 ++++++++++++++--
 2 files changed, 84 insertions(+), 5 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index 3a00ab3ad06e..c60a8d5bbad0 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -23,9 +23,13 @@
  * Authors:
  *	Dave Airlie <airlied@redhat.com>
  */
+
+#include <linux/dma-buf.h>
+#include <linux/reservation.h>
+
 #include <drm/drmP.h>
+
 #include "i915_drv.h"
-#include <linux/dma-buf.h>
 
 static struct drm_i915_gem_object *dma_buf_to_obj(struct dma_buf *buf)
 {
@@ -218,25 +222,73 @@ static const struct dma_buf_ops i915_dmabuf_ops =  {
 	.end_cpu_access = i915_gem_end_cpu_access,
 };
 
+static void export_fences(struct drm_i915_gem_object *obj,
+			  struct dma_buf *dma_buf)
+{
+	struct reservation_object *resv = dma_buf->resv;
+	struct drm_i915_gem_request *req;
+	unsigned long active;
+	int idx;
+
+	active = __I915_BO_ACTIVE(obj);
+	if (!active)
+		return;
+
+	/* Serialise with execbuf to prevent concurrent fence-loops */
+	mutex_lock(&obj->base.dev->struct_mutex);
+
+	/* Mark the object for future fences before racily adding old fences */
+	obj->base.dma_buf = dma_buf;
+
+	ww_mutex_lock(&resv->lock, NULL);
+
+	for_each_active(active, idx) {
+		req = i915_gem_active_get(&obj->last_read[idx],
+					  &obj->base.dev->struct_mutex);
+		if (!req)
+			continue;
+
+		if (reservation_object_reserve_shared(resv) == 0)
+			reservation_object_add_shared_fence(resv, &req->fence);
+
+		i915_gem_request_put(req);
+	}
+
+	req = i915_gem_active_get(&obj->last_write,
+				  &obj->base.dev->struct_mutex);
+	if (req) {
+		reservation_object_add_excl_fence(resv, &req->fence);
+		i915_gem_request_put(req);
+	}
+
+	ww_mutex_unlock(&resv->lock);
+	mutex_unlock(&obj->base.dev->struct_mutex);
+}
+
 struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 				      struct drm_gem_object *gem_obj, int flags)
 {
 	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+	struct dma_buf *dma_buf;
 
 	exp_info.ops = &i915_dmabuf_ops;
 	exp_info.size = gem_obj->size;
 	exp_info.flags = flags;
 	exp_info.priv = gem_obj;
 
-
 	if (obj->ops->dmabuf_export) {
 		int ret = obj->ops->dmabuf_export(obj);
 		if (ret)
 			return ERR_PTR(ret);
 	}
 
-	return dma_buf_export(&exp_info);
+	dma_buf = dma_buf_export(&exp_info);
+	if (IS_ERR(dma_buf))
+		return dma_buf;
+
+	export_fences(obj, dma_buf);
+	return dma_buf;
 }
 
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index a1da3028a949..71834741bd87 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -26,14 +26,18 @@
  *
  */
 
+#include <linux/dma_remapping.h>
+#include <linux/reservation.h>
+#include <linux/uaccess.h>
+
 #include <drm/drmP.h>
 #include <drm/i915_drm.h>
+
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
-#include <linux/dma_remapping.h>
-#include <linux/uaccess.h>
 
 #define  __EXEC_OBJECT_HAS_PIN		(1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE	(1<<30)
@@ -1205,6 +1209,28 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 	list_move_tail(&vma->vm_link, &vma->vm->active_list);
 }
 
+static void eb_export_fence(struct drm_i915_gem_object *obj,
+			    struct drm_i915_gem_request *req,
+			    unsigned int flags)
+{
+	struct reservation_object *resv;
+
+	resv = i915_gem_object_get_dmabuf_resv(obj);
+	if (!resv)
+		return;
+
+	/* Ignore errors from failing to allocate the new fence; we can't
+	 * handle an error right now. Worst case should be missed
+	 * synchronisation leading to rendering corruption.
+	 */
+	ww_mutex_lock(&resv->lock, NULL);
+	if (flags & EXEC_OBJECT_WRITE)
+		reservation_object_add_excl_fence(resv, &req->fence);
+	else if (reservation_object_reserve_shared(resv) == 0)
+		reservation_object_add_shared_fence(resv, &req->fence);
+	ww_mutex_unlock(&resv->lock);
+}
+
 static void
 i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 				   struct drm_i915_gem_request *req)
@@ -1224,6 +1250,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		obj->base.read_domains = obj->base.pending_read_domains;
 
 		i915_vma_move_to_active(vma, req, vma->exec_entry->flags);
+		eb_export_fence(obj, req, vma->exec_entry->flags);
 		trace_i915_gem_object_change_domain(obj, old_read, old_write);
 	}
 }
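
A visible consequence, and what igt/prime_busy exercises: polling the
exported dma-buf fd now reports the state of these fences, with POLLIN
gated only on the exclusive (write) fence and POLLOUT on all fences. A
userspace sketch (dmabuf_fd is assumed to come from a PRIME export of the
busy object):

#include <poll.h>

struct pollfd pfd = { .fd = dmabuf_fd, .events = POLLIN };

/* Blocks until the exclusive fence signals, i.e. until the GPU
 * write queued by the execbuf above has completed.
 */
poll(&pfd, 1, -1);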
-- 
2.8.1


* ✗ Ro.CI.BAT: failure for series starting with [CI,01/26] drm/i915: Combine loops within i915_gem_evict_something
  2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
                   ` (24 preceding siblings ...)
  2016-08-04 15:32 ` [CI 26/26] drm/i915: Export our request as a dma-buf fence on the reservation object Chris Wilson
@ 2016-08-04 16:11 ` Patchwork
  2016-08-04 16:15   ` Chris Wilson
  25 siblings, 1 reply; 29+ messages in thread
From: Patchwork @ 2016-08-04 16:11 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [CI,01/26] drm/i915: Combine loops within i915_gem_evict_something
URL   : https://patchwork.freedesktop.org/series/10676/
State : failure

== Summary ==

Series 10676v1 Series without cover letter
http://patchwork.freedesktop.org/api/1.0/series/10676/revisions/1/mbox

Test gem_exec_suspend:
        Subgroup basic-s3:
                pass       -> INCOMPLETE (fi-hsw-i7-4770k)
                pass       -> DMESG-WARN (fi-skl-i5-6260u)
                pass       -> DMESG-WARN (fi-skl-i7-6700k)
Test kms_cursor_legacy:
        Subgroup basic-flip-vs-cursor-legacy:
                pass       -> FAIL       (ro-skl3-i5-6260u)
                fail       -> PASS       (ro-bdw-i5-5250u)
                fail       -> DMESG-FAIL (fi-skl-i7-6700k)
        Subgroup basic-flip-vs-cursor-varying-size:
                pass       -> FAIL       (ro-snb-i7-2620M)
                fail       -> DMESG-FAIL (fi-skl-i7-6700k)
Test kms_flip:
        Subgroup basic-flip-vs-wf_vblank:
                dmesg-warn -> PASS       (ro-byt-n2820)
Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-a:
                pass       -> DMESG-WARN (fi-skl-i5-6260u)
                pass       -> DMESG-WARN (fi-skl-i7-6700k)
        Subgroup suspend-read-crc-pipe-b:
                pass       -> DMESG-WARN (fi-skl-i5-6260u)
                pass       -> DMESG-WARN (fi-skl-i7-6700k)
        Subgroup suspend-read-crc-pipe-c:
                pass       -> DMESG-WARN (fi-skl-i5-6260u)
                pass       -> DMESG-WARN (fi-skl-i7-6700k)

fi-hsw-i7-4770k  total:107  pass:91   dwarn:0   dfail:0   fail:0   skip:15 
fi-kbl-qkkr      total:240  pass:182  dwarn:29  dfail:0   fail:3   skip:26 
fi-skl-i5-6260u  total:240  pass:220  dwarn:4   dfail:0   fail:2   skip:14 
fi-skl-i7-6700k  total:240  pass:204  dwarn:4   dfail:2   fail:2   skip:28 
fi-snb-i7-2600   total:240  pass:198  dwarn:0   dfail:0   fail:0   skip:42 
ro-bdw-i5-5250u  total:240  pass:219  dwarn:5   dfail:0   fail:0   skip:16 
ro-bdw-i7-5557U  total:240  pass:223  dwarn:1   dfail:0   fail:0   skip:16 
ro-bdw-i7-5600u  total:240  pass:205  dwarn:1   dfail:0   fail:2   skip:32 
ro-bsw-n3050     total:240  pass:193  dwarn:0   dfail:0   fail:5   skip:42 
ro-byt-n2820     total:240  pass:197  dwarn:0   dfail:0   fail:3   skip:40 
ro-hsw-i3-4010u  total:240  pass:214  dwarn:0   dfail:0   fail:0   skip:26 
ro-hsw-i7-4770r  total:240  pass:214  dwarn:0   dfail:0   fail:0   skip:26 
ro-ilk-i7-620lm  total:240  pass:173  dwarn:1   dfail:0   fail:1   skip:65 
ro-ilk1-i5-650   total:235  pass:173  dwarn:0   dfail:0   fail:2   skip:60 
ro-ivb-i7-3770   total:240  pass:205  dwarn:0   dfail:0   fail:0   skip:35 
ro-ivb2-i7-3770  total:240  pass:209  dwarn:0   dfail:0   fail:0   skip:31 
ro-skl3-i5-6260u total:240  pass:221  dwarn:1   dfail:0   fail:4   skip:14 
ro-snb-i7-2620M  total:240  pass:197  dwarn:0   dfail:0   fail:2   skip:41 

Results at /archive/results/CI_IGT_test/RO_Patchwork_1716/

8b0dfb8 drm-intel-nightly: 2016y-08m-04d-13h-05m-22s UTC integration manifest
7f486e0 drm/i915: Export our request as a dma-buf fence on the reservation object
0f30360 drm/i915: Enable lockless lookup of request tracking via RCU
eb2c665 drm/i915: Move i915_gem_object_wait_rendering()
0178728 drm/i915: Move obj->active:5 to obj->flags
84cfd14 drm/i915: Use dev_priv consistently through the intel_frontbuffer interface
24dd1f6 drm/i915: Use atomics to manipulate obj->frontbuffer_bits
37632ba drm/i915: Make fb_tracking.lock a spinlock
906079b drm/i915: Separate intel_frontbuffer into its own header
5eaf46b drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin()
5e03638 drm/i915: Make i915_vma_pin() small and inline
145bec5 drm/i915: Combine all i915_vma bitfields into a single set of flags
1ac38f9 drm/i915: Start passing around i915_vma from execbuffer
eccd55b drm/i915: Wrap vma->pin_count accessors with small inline helpers
c2950db drm/i915: Record allocated vma size
74443a7 drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private
7c2ad8f drm/i915: Update the GGTT size/alignment query functions
afae76e drm/i915: Convert 4096 alignment request to 0 for drm_mm allocations
4eacdf5 drm/i915: Split insertion/binding of an object into the VM
b0226a9 drm/i915: Reduce WARN(i915_gem_valid_gtt_space) to a debug-only check
6184390 drm/i915: Pad GTT views of exec objects up to user specified size
8e9c271 drm/i915: Fix up vma alignment to be u64
c01d3bc drm/i915: Remove i915_gem_execbuffer_retire_commands()
d881daf drm/i915: Remove request retirement before each batch
570049d drm/i915: Double check the active status on the batch pool
a9b6ee1 drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something()
a7da93d drm/i915: Combine loops within i915_gem_evict_something


* Re: ✗ Ro.CI.BAT: failure for series starting with [CI,01/26]  drm/i915: Combine loops within i915_gem_evict_something
  2016-08-04 16:11 ` ✗ Ro.CI.BAT: failure for series starting with [CI,01/26] drm/i915: Combine loops within i915_gem_evict_something Patchwork
@ 2016-08-04 16:15   ` Chris Wilson
  0 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04 16:15 UTC (permalink / raw)
  To: intel-gfx

On Thu, Aug 04, 2016 at 04:11:24PM -0000, Patchwork wrote:
> == Series Details ==
> 
> Series: series starting with [CI,01/26] drm/i915: Combine loops within i915_gem_evict_something
> URL   : https://patchwork.freedesktop.org/series/10676/
> State : failure
> 
> == Summary ==
> 
> Series 10676v1 Series without cover letter
> http://patchwork.freedesktop.org/api/1.0/series/10676/revisions/1/mbox
> 
> Test gem_exec_suspend:
>         Subgroup basic-s3:
>                 pass       -> DMESG-WARN (fi-skl-i5-6260u)
>                 pass       -> DMESG-WARN (fi-skl-i7-6700k)
> Test kms_cursor_legacy:
>         Subgroup basic-flip-vs-cursor-legacy:
>                 pass       -> FAIL       (ro-skl3-i5-6260u)
>                 fail       -> PASS       (ro-bdw-i5-5250u)
>                 fail       -> DMESG-FAIL (fi-skl-i7-6700k)
>         Subgroup basic-flip-vs-cursor-varying-size:
>                 pass       -> FAIL       (ro-snb-i7-2620M)
>                 fail       -> DMESG-FAIL (fi-skl-i7-6700k)
> Test kms_flip:
>         Subgroup basic-flip-vs-wf_vblank:
>                 dmesg-warn -> PASS       (ro-byt-n2820)
> Test kms_pipe_crc_basic:
>         Subgroup suspend-read-crc-pipe-a:
>                 pass       -> DMESG-WARN (fi-skl-i5-6260u)
>                 pass       -> DMESG-WARN (fi-skl-i7-6700k)
>         Subgroup suspend-read-crc-pipe-b:
>                 pass       -> DMESG-WARN (fi-skl-i5-6260u)
>                 pass       -> DMESG-WARN (fi-skl-i7-6700k)
>         Subgroup suspend-read-crc-pipe-c:
>                 pass       -> DMESG-WARN (fi-skl-i5-6260u)
>                 pass       -> DMESG-WARN (fi-skl-i7-6700k)

The skylake noise is in the base -nightly :|
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* [CI 11/26] drm/i915: Update the GGTT size/alignment query functions
  2016-08-04  7:32 [CI 01/26] " Chris Wilson
@ 2016-08-04  7:33 ` Chris Wilson
  0 siblings, 0 replies; 29+ messages in thread
From: Chris Wilson @ 2016-08-04  7:33 UTC (permalink / raw)
  To: intel-gfx

In order to be consistent with other address space functions, we want to
pass around 64-bit sizes, even though all known global GTTs are limited
to 4GiB. Similarly, we are trying to be consistent in using the _ggtt_
nomenclature when referring to the special global GTT.

v2: Update docs to consistently state "global GTT".

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h        |  8 ++--
 drivers/gpu/drm/i915/i915_gem.c        | 81 +++++++++++++++++++---------------
 drivers/gpu/drm/i915/i915_gem_tiling.c |  3 +-
 3 files changed, 51 insertions(+), 41 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1e1369319326..b6e56ecb8637 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3241,11 +3241,9 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-			    int tiling_mode, bool fenced);
+u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode);
+u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+				int tiling_mode, bool fenced);
 
 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 				    enum i915_cache_level cache_level);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4042c407f5b9..9104758a0403 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1844,46 +1844,57 @@ i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
 		i915_gem_release_mmap(obj);
 }
 
-uint32_t
-i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
+/**
+ * i915_gem_get_ggtt_size - return required global GTT size for an object
+ * @dev: drm device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ *
+ * Return the required global GTT size for an object, taking into account
+ * potential fence register mapping.
+ */
+u64 i915_gem_get_ggtt_size(struct drm_device *dev, u64 size, int tiling_mode)
 {
-	uint32_t gtt_size;
+	u64 ggtt_size;
 
-	if (INTEL_INFO(dev)->gen >= 4 ||
+	GEM_BUG_ON(size == 0);
+
+	if (INTEL_GEN(dev) >= 4 ||
 	    tiling_mode == I915_TILING_NONE)
 		return size;
 
 	/* Previous chips need a power-of-two fence region when tiling */
 	if (IS_GEN3(dev))
-		gtt_size = 1024*1024;
+		ggtt_size = 1024*1024;
 	else
-		gtt_size = 512*1024;
+		ggtt_size = 512*1024;
 
-	while (gtt_size < size)
-		gtt_size <<= 1;
+	while (ggtt_size < size)
+		ggtt_size <<= 1;
 
-	return gtt_size;
+	return ggtt_size;
 }
 
 /**
- * i915_gem_get_gtt_alignment - return required GTT alignment for an object
+ * i915_gem_get_ggtt_alignment - return required global GTT alignment
  * @dev: drm device
  * @size: object size
  * @tiling_mode: tiling mode
- * @fenced: is fenced alignemned required or not
+ * @fenced: is fenced alignment required or not
  *
- * Return the required GTT alignment for an object, taking into account
+ * Return the required global GTT alignment for an object, taking into account
  * potential fence register mapping.
  */
-uint32_t
-i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
-			   int tiling_mode, bool fenced)
+u64 i915_gem_get_ggtt_alignment(struct drm_device *dev, u64 size,
+				int tiling_mode, bool fenced)
 {
+	GEM_BUG_ON(size == 0);
+
 	/*
 	 * Minimum alignment is 4k (GTT page size), but might be greater
 	 * if a fence register is needed for the object.
 	 */
-	if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
+	if (INTEL_GEN(dev) >= 4 || (!fenced && IS_G33(dev)) ||
 	    tiling_mode == I915_TILING_NONE)
 		return 4096;
 
@@ -1891,7 +1902,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 	 * Previous chips need to be aligned to the size of the smallest
 	 * fence register that can contain the object.
 	 */
-	return i915_gem_get_gtt_size(dev, size, tiling_mode);
+	return i915_gem_get_ggtt_size(dev, size, tiling_mode);
 }
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
@@ -2981,17 +2992,17 @@ i915_gem_object_insert_into_vm(struct drm_i915_gem_object *obj,
 
 		view_size = i915_ggtt_view_size(obj, ggtt_view);
 
-		fence_size = i915_gem_get_gtt_size(dev,
-						   view_size,
-						   obj->tiling_mode);
-		fence_alignment = i915_gem_get_gtt_alignment(dev,
-							     view_size,
-							     obj->tiling_mode,
-							     true);
-		unfenced_alignment = i915_gem_get_gtt_alignment(dev,
-								view_size,
-								obj->tiling_mode,
-								false);
+		fence_size = i915_gem_get_ggtt_size(dev,
+						    view_size,
+						    obj->tiling_mode);
+		fence_alignment = i915_gem_get_ggtt_alignment(dev,
+							      view_size,
+							      obj->tiling_mode,
+							      true);
+		unfenced_alignment = i915_gem_get_ggtt_alignment(dev,
+								 view_size,
+								 obj->tiling_mode,
+								 false);
 		size = max(size, view_size);
 		if (flags & PIN_MAPPABLE)
 			size = max_t(u64, size, fence_size);
@@ -3695,13 +3706,13 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
 	bool mappable, fenceable;
 	u32 fence_size, fence_alignment;
 
-	fence_size = i915_gem_get_gtt_size(obj->base.dev,
-					   obj->base.size,
-					   obj->tiling_mode);
-	fence_alignment = i915_gem_get_gtt_alignment(obj->base.dev,
-						     obj->base.size,
-						     obj->tiling_mode,
-						     true);
+	fence_size = i915_gem_get_ggtt_size(obj->base.dev,
+					    obj->base.size,
+					    obj->tiling_mode);
+	fence_alignment = i915_gem_get_ggtt_alignment(obj->base.dev,
+						      obj->base.size,
+						      obj->tiling_mode,
+						      true);
 
 	fenceable = (vma->node.size == fence_size &&
 		     (vma->node.start & (fence_alignment - 1)) == 0);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index fa2eb4a4f212..4e42da691e4e 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -133,7 +133,8 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 			return false;
 	}
 
-	size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
+	size = i915_gem_get_ggtt_size(obj->base.dev,
+				      obj->base.size, tiling_mode);
 	if (i915_gem_obj_ggtt_size(obj) != size)
 		return false;
 
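To make the fence sizing behind these helpers concrete: on gen3 the fence
region starts at the 1MiB minimum and doubles until it covers the object,
so (a worked example, not part of the diff):

/* 700KiB X-tiled object on gen3: covered by the 1MiB minimum */
size = i915_gem_get_ggtt_size(dev, 700 * 1024, I915_TILING_X); /* 1MiB */

/* 3MiB X-tiled object: rounded up to the next power of two */
size = i915_gem_get_ggtt_size(dev, 3 * 1024 * 1024, I915_TILING_X); /* 4MiB */

/* Untiled, or gen4+: the object size is returned unchanged */
size = i915_gem_get_ggtt_size(dev, 700 * 1024, I915_TILING_NONE); /* 700KiB */
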
-- 
2.8.1


end of thread, other threads:[~2016-08-04 16:15 UTC | newest]

Thread overview: 29+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-08-04 15:32 [CI 01/26] drm/i915: Combine loops within i915_gem_evict_something Chris Wilson
2016-08-04 15:32 ` [CI 02/26] drm/i915: Remove surplus drm_device parameter to i915_gem_evict_something() Chris Wilson
2016-08-04 15:32 ` [CI 03/26] drm/i915: Double check the active status on the batch pool Chris Wilson
2016-08-04 15:32 ` [CI 04/26] drm/i915: Remove request retirement before each batch Chris Wilson
2016-08-04 15:32 ` [CI 05/26] drm/i915: Remove i915_gem_execbuffer_retire_commands() Chris Wilson
2016-08-04 15:32 ` [CI 06/26] drm/i915: Fix up vma alignment to be u64 Chris Wilson
2016-08-04 15:32 ` [CI 07/26] drm/i915: Pad GTT views of exec objects up to user specified size Chris Wilson
2016-08-04 15:32 ` [CI 08/26] drm/i915: Reduce WARN(i915_gem_valid_gtt_space) to a debug-only check Chris Wilson
2016-08-04 15:32 ` [CI 09/26] drm/i915: Split insertion/binding of an object into the VM Chris Wilson
2016-08-04 15:32 ` [CI 10/26] drm/i915: Convert 4096 alignment request to 0 for drm_mm allocations Chris Wilson
2016-08-04 15:32 ` [CI 11/26] drm/i915: Update the GGTT size/alignment query functions Chris Wilson
2016-08-04 15:32 ` [CI 12/26] drm/i915: Update i915_gem_get_ggtt_size/_alignment to use drm_i915_private Chris Wilson
2016-08-04 15:32 ` [CI 13/26] drm/i915: Record allocated vma size Chris Wilson
2016-08-04 15:32 ` [CI 14/26] drm/i915: Wrap vma->pin_count accessors with small inline helpers Chris Wilson
2016-08-04 15:32 ` [CI 15/26] drm/i915: Start passing around i915_vma from execbuffer Chris Wilson
2016-08-04 15:32 ` [CI 16/26] drm/i915: Combine all i915_vma bitfields into a single set of flags Chris Wilson
2016-08-04 15:32 ` [CI 17/26] drm/i915: Make i915_vma_pin() small and inline Chris Wilson
2016-08-04 15:32 ` [CI 18/26] drm/i915: Remove highly confusing i915_gem_obj_ggtt_pin() Chris Wilson
2016-08-04 15:32 ` [CI 19/26] drm/i915: Separate intel_frontbuffer into its own header Chris Wilson
2016-08-04 15:32 ` [CI 20/26] drm/i915: Make fb_tracking.lock a spinlock Chris Wilson
2016-08-04 15:32 ` [CI 21/26] drm/i915: Use atomics to manipulate obj->frontbuffer_bits Chris Wilson
2016-08-04 15:32 ` [CI 22/26] drm/i915: Use dev_priv consistently through the intel_frontbuffer interface Chris Wilson
2016-08-04 15:32 ` [CI 23/26] drm/i915: Move obj->active:5 to obj->flags Chris Wilson
2016-08-04 15:32 ` [CI 24/26] drm/i915: Move i915_gem_object_wait_rendering() Chris Wilson
2016-08-04 15:32 ` [CI 25/26] drm/i915: Enable lockless lookup of request tracking via RCU Chris Wilson
2016-08-04 15:32 ` [CI 26/26] drm/i915: Export our request as a dma-buf fence on the reservation object Chris Wilson
2016-08-04 16:11 ` ✗ Ro.CI.BAT: failure for series starting with [CI,01/26] drm/i915: Combine loops within i915_gem_evict_something Patchwork
2016-08-04 16:15   ` Chris Wilson
  -- strict thread matches above, loose matches on Subject: below --
2016-08-04  7:32 [CI 01/26] " Chris Wilson
2016-08-04  7:33 ` [CI 11/26] drm/i915: Update the GGTT size/alignment query functions Chris Wilson
