From: Daniel Vetter <daniel.vetter@ffwll.ch>
To: Intel Graphics Development <intel-gfx@lists.freedesktop.org>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>,
	Ben Widawsky <benjamin.widawsky@intel.com>
Subject: [PATCH 01/11] drm/i915: Consolidate binding parameters into flags
Date: Fri, 14 Feb 2014 14:01:11 +0100	[thread overview]
Message-ID: <1392382881-19073-2-git-send-email-daniel.vetter@ffwll.ch> (raw)
In-Reply-To: <1392382881-19073-1-git-send-email-daniel.vetter@ffwll.ch>

Anything more than a single bool parameter is a pain to read;
symbolic constants are much better.
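
For illustration (taken from the i915_gem_gtt_pwrite_fast hunk below), a
call site changes from

	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);

to the self-documenting

	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);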

Split out from Chris' vma-binding rework patch.

v2: Undo the behaviour change in object_pin that Chris spotted.

v3: Split out misplaced hunk to handle set_cache_level errors,
spotted by Jani.

v4: Keep the current over-zealous binding logic in the execbuffer code
working with a quick hack while the overall binding code gets shuffled
around.

v5: Reorder the PIN_ flags for more natural patch splitup.

v6: Pull out the PIN_GLOBAL split-up again.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <benjamin.widawsky@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
---
 drivers/gpu/drm/i915/i915_drv.h            | 14 +++----
 drivers/gpu/drm/i915/i915_gem.c            | 62 ++++++++++++------------------
 drivers/gpu/drm/i915/i915_gem_context.c    |  9 ++---
 drivers/gpu/drm/i915/i915_gem_evict.c      | 10 ++---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 19 ++++++---
 drivers/gpu/drm/i915/i915_gem_gtt.c        |  2 +-
 drivers/gpu/drm/i915/i915_trace.h          | 20 +++++-----
 drivers/gpu/drm/i915/intel_overlay.c       |  2 +-
 drivers/gpu/drm/i915/intel_pm.c            |  2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 11 +++---
 10 files changed, 70 insertions(+), 81 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2572a954555b..7536e4ba427d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2084,11 +2084,12 @@ void i915_init_vm(struct drm_i915_private *dev_priv,
 void i915_gem_free_object(struct drm_gem_object *obj);
 void i915_gem_vma_destroy(struct i915_vma *vma);
 
+#define PIN_MAPPABLE 0x1
+#define PIN_NONBLOCK 0x2
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm,
 				     uint32_t alignment,
-				     bool map_and_fenceable,
-				     bool nonblocking);
+				     unsigned flags);
 void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
@@ -2291,11 +2292,9 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
-		      bool map_and_fenceable,
-		      bool nonblocking)
+		      unsigned flags)
 {
-	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
-				   map_and_fenceable, nonblocking);
+	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags);
 }
 
 /* i915_gem_context.c */
@@ -2339,8 +2338,7 @@ int __must_check i915_gem_evict_something(struct drm_device *dev,
 					  int min_size,
 					  unsigned alignment,
 					  unsigned cache_level,
-					  bool mappable,
-					  bool nonblock);
+					  unsigned flags);
 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dee560267b1d..aa263e371ebc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -43,12 +43,6 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static __must_check int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
 			       bool readonly);
-static __must_check int
-i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
-			   struct i915_address_space *vm,
-			   unsigned alignment,
-			   bool map_and_fenceable,
-			   bool nonblocking);
 static int i915_gem_phys_pwrite(struct drm_device *dev,
 				struct drm_i915_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
@@ -605,7 +599,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	char __user *user_data;
 	int page_offset, page_length, ret;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 0, true, true);
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
 	if (ret)
 		goto out;
 
@@ -1411,7 +1405,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	/* Now bind it into the GTT if needed */
-	ret = i915_gem_obj_ggtt_pin(obj,  0, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 	if (ret)
 		goto unlock;
 
@@ -2721,7 +2715,6 @@ int i915_vma_unbind(struct i915_vma *vma)
 
 	if (!drm_mm_node_allocated(&vma->node)) {
 		i915_gem_vma_destroy(vma);
-
 		return 0;
 	}
 
@@ -3219,14 +3212,13 @@ static int
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 			   struct i915_address_space *vm,
 			   unsigned alignment,
-			   bool map_and_fenceable,
-			   bool nonblocking)
+			   unsigned flags)
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	size_t gtt_max =
-		map_and_fenceable ? dev_priv->gtt.mappable_end : vm->total;
+		flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total;
 	struct i915_vma *vma;
 	int ret;
 
@@ -3238,18 +3230,18 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 						     obj->tiling_mode, true);
 	unfenced_alignment =
 		i915_gem_get_gtt_alignment(dev,
-						    obj->base.size,
-						    obj->tiling_mode, false);
+					   obj->base.size,
+					   obj->tiling_mode, false);
 
 	if (alignment == 0)
-		alignment = map_and_fenceable ? fence_alignment :
+		alignment = flags & PIN_MAPPABLE ? fence_alignment :
 						unfenced_alignment;
-	if (map_and_fenceable && alignment & (fence_alignment - 1)) {
+	if (flags & PIN_MAPPABLE && alignment & (fence_alignment - 1)) {
 		DRM_DEBUG("Invalid object alignment requested %u\n", alignment);
 		return -EINVAL;
 	}
 
-	size = map_and_fenceable ? fence_size : obj->base.size;
+	size = flags & PIN_MAPPABLE ? fence_size : obj->base.size;
 
 	/* If the object is bigger than the entire aperture, reject it early
 	 * before evicting everything in a vain attempt to find space.
@@ -3257,7 +3249,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
 	if (obj->base.size > gtt_max) {
 		DRM_DEBUG("Attempting to bind an object larger than the aperture: object=%zd > %s aperture=%zu\n",
 			  obj->base.size,
-			  map_and_fenceable ? "mappable" : "total",
+			  flags & PIN_MAPPABLE ? "mappable" : "total",
 			  gtt_max);
 		return -E2BIG;
 	}
@@ -3281,9 +3273,7 @@ search_free:
 						  DRM_MM_SEARCH_DEFAULT);
 	if (ret) {
 		ret = i915_gem_evict_something(dev, vm, size, alignment,
-					       obj->cache_level,
-					       map_and_fenceable,
-					       nonblocking);
+					       obj->cache_level, flags);
 		if (ret == 0)
 			goto search_free;
 
@@ -3314,9 +3304,9 @@ search_free:
 		obj->map_and_fenceable = mappable && fenceable;
 	}
 
-	WARN_ON(map_and_fenceable && !obj->map_and_fenceable);
+	WARN_ON(flags & PIN_MAPPABLE && !obj->map_and_fenceable);
 
-	trace_i915_vma_bind(vma, map_and_fenceable);
+	trace_i915_vma_bind(vma, flags);
 	i915_gem_verify_gtt(dev);
 	return 0;
 
@@ -3687,7 +3677,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 	 * (e.g. libkms for the bootup splash), we have to ensure that we
 	 * always use map_and_fenceable for all scanout buffers.
 	 */
-	ret = i915_gem_obj_ggtt_pin(obj, alignment, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, alignment, PIN_MAPPABLE);
 	if (ret)
 		goto err_unpin_display;
 
@@ -3843,30 +3833,28 @@ int
 i915_gem_object_pin(struct drm_i915_gem_object *obj,
 		    struct i915_address_space *vm,
 		    uint32_t alignment,
-		    bool map_and_fenceable,
-		    bool nonblocking)
+		    unsigned flags)
 {
-	const u32 flags = map_and_fenceable ? GLOBAL_BIND : 0;
 	struct i915_vma *vma;
 	int ret;
 
-	WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
+	if (WARN_ON(flags & PIN_MAPPABLE && !i915_is_ggtt(vm)))
+		return -EINVAL;
 
 	vma = i915_gem_obj_to_vma(obj, vm);
-
 	if (vma) {
 		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
 			return -EBUSY;
 
 		if ((alignment &&
 		     vma->node.start & (alignment - 1)) ||
-		    (map_and_fenceable && !obj->map_and_fenceable)) {
+		    (flags & PIN_MAPPABLE && !obj->map_and_fenceable)) {
 			WARN(vma->pin_count,
 			     "bo is already pinned with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
 			     i915_gem_obj_offset(obj, vm), alignment,
-			     map_and_fenceable,
+			     flags & PIN_MAPPABLE,
 			     obj->map_and_fenceable);
 			ret = i915_vma_unbind(vma);
 			if (ret)
@@ -3875,9 +3863,7 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	}
 
 	if (!i915_gem_obj_bound(obj, vm)) {
-		ret = i915_gem_object_bind_to_vm(obj, vm, alignment,
-						 map_and_fenceable,
-						 nonblocking);
+		ret = i915_gem_object_bind_to_vm(obj, vm, alignment, flags);
 		if (ret)
 			return ret;
 
@@ -3885,10 +3871,12 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 
 	vma = i915_gem_obj_to_vma(obj, vm);
 
-	vma->bind_vma(vma, obj->cache_level, flags);
+	vma->bind_vma(vma, obj->cache_level,
+		      flags & PIN_MAPPABLE ? GLOBAL_BIND : 0);
 
 	i915_gem_obj_to_vma(obj, vm)->pin_count++;
-	obj->pin_mappable |= map_and_fenceable;
+	if (flags & PIN_MAPPABLE)
+		obj->pin_mappable |= true;
 
 	return 0;
 }
@@ -3946,7 +3934,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	}
 
 	if (obj->user_pin_count == 0) {
-		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, args->alignment, PIN_MAPPABLE);
 		if (ret)
 			goto out;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 19fd3629795c..f8c21a6dd663 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -258,8 +258,7 @@ i915_gem_create_context(struct drm_device *dev,
 		 * context.
 		 */
 		ret = i915_gem_obj_ggtt_pin(ctx->obj,
-					    get_context_alignment(dev),
-					    false, false);
+					    get_context_alignment(dev), 0);
 		if (ret) {
 			DRM_DEBUG_DRIVER("Couldn't pin %d\n", ret);
 			goto err_destroy;
@@ -335,8 +334,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 
 		if (i == RCS) {
 			WARN_ON(i915_gem_obj_ggtt_pin(dctx->obj,
-						      get_context_alignment(dev),
-						      false, false));
+						      get_context_alignment(dev), 0));
 			/* Fake a finish/inactive */
 			dctx->obj->base.write_domain = 0;
 			dctx->obj->active = 0;
@@ -612,8 +610,7 @@ static int do_switch(struct intel_ring_buffer *ring,
 	/* Trying to pin first makes error handling easier. */
 	if (ring == &dev_priv->ring[RCS]) {
 		ret = i915_gem_obj_ggtt_pin(to->obj,
-					    get_context_alignment(ring->dev),
-					    false, false);
+					    get_context_alignment(ring->dev), 0);
 		if (ret)
 			return ret;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 5168d6a08054..8a78f7885cba 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -68,7 +68,7 @@ mark_free(struct i915_vma *vma, struct list_head *unwind)
 int
 i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 			 int min_size, unsigned alignment, unsigned cache_level,
-			 bool mappable, bool nonblocking)
+			 unsigned flags)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
@@ -76,7 +76,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	int ret = 0;
 	int pass = 0;
 
-	trace_i915_gem_evict(dev, min_size, alignment, mappable);
+	trace_i915_gem_evict(dev, min_size, alignment, flags);
 
 	/*
 	 * The goal is to evict objects and amalgamate space in LRU order.
@@ -102,7 +102,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
 	 */
 
 	INIT_LIST_HEAD(&unwind_list);
-	if (mappable) {
+	if (flags & PIN_MAPPABLE) {
 		BUG_ON(!i915_is_ggtt(vm));
 		drm_mm_init_scan_with_range(&vm->mm, min_size,
 					    alignment, cache_level, 0,
@@ -117,7 +117,7 @@ search_again:
 			goto found;
 	}
 
-	if (nonblocking)
+	if (flags & PIN_NONBLOCK)
 		goto none;
 
 	/* Now merge in the soon-to-be-expired objects... */
@@ -141,7 +141,7 @@ none:
 	/* Can we unpin some objects such as idle hw contents,
 	 * or pending flips?
 	 */
-	if (nonblocking)
+	if (flags & PIN_NONBLOCK)
 		return -ENOSPC;
 
 	/* Only idle the GPU and repeat the search once */
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 032def901f98..013bd5ab3913 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -544,19 +544,23 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 	struct drm_i915_gem_object *obj = vma->obj;
 	struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
 	bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
-	bool need_fence, need_mappable;
-	u32 flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
-		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
+	bool need_fence;
+	unsigned flags;
 	int ret;
 
+	flags = 0;
+
 	need_fence =
 		has_fenced_gpu_access &&
 		entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
 		obj->tiling_mode != I915_TILING_NONE;
-	need_mappable = need_fence || need_reloc_mappable(vma);
+	if (need_fence || need_reloc_mappable(vma))
+		flags |= PIN_MAPPABLE;
 
-	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, need_mappable,
-				  false);
+	if (entry->flags & EXEC_OBJECT_NEEDS_GTT)
+		flags |= PIN_MAPPABLE;
+
+	ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags);
 	if (ret)
 		return ret;
 
@@ -585,6 +589,9 @@ i915_gem_execbuffer_reserve_vma(struct i915_vma *vma,
 		obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
 	}
 
+	/* Temporary hack while we rework the binding logic. */
+	flags = (entry->flags & EXEC_OBJECT_NEEDS_GTT) &&
+		!vma->obj->has_global_gtt_mapping ? GLOBAL_BIND : 0;
 	vma->bind_vma(vma, obj->cache_level, flags);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 6e858e17bb0c..154e1ad8bfab 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -868,7 +868,7 @@ alloc:
 	if (ret == -ENOSPC && !retried) {
 		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
 					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
-					       I915_CACHE_NONE, false, true);
+					       I915_CACHE_NONE, PIN_NONBLOCK);
 		if (ret)
 			return ret;
 
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 6e580c98dede..b95a380958db 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -34,15 +34,15 @@ TRACE_EVENT(i915_gem_object_create,
 );
 
 TRACE_EVENT(i915_vma_bind,
-	    TP_PROTO(struct i915_vma *vma, bool mappable),
-	    TP_ARGS(vma, mappable),
+	    TP_PROTO(struct i915_vma *vma, unsigned flags),
+	    TP_ARGS(vma, flags),
 
 	    TP_STRUCT__entry(
 			     __field(struct drm_i915_gem_object *, obj)
 			     __field(struct i915_address_space *, vm)
 			     __field(u32, offset)
 			     __field(u32, size)
-			     __field(bool, mappable)
+			     __field(unsigned, flags)
 			     ),
 
 	    TP_fast_assign(
@@ -50,12 +50,12 @@ TRACE_EVENT(i915_vma_bind,
 			   __entry->vm = vma->vm;
 			   __entry->offset = vma->node.start;
 			   __entry->size = vma->node.size;
-			   __entry->mappable = mappable;
+			   __entry->flags = flags;
 			   ),
 
 	    TP_printk("obj=%p, offset=%08x size=%x%s vm=%p",
 		      __entry->obj, __entry->offset, __entry->size,
-		      __entry->mappable ? ", mappable" : "",
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "",
 		      __entry->vm)
 );
 
@@ -196,26 +196,26 @@ DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 );
 
 TRACE_EVENT(i915_gem_evict,
-	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, bool mappable),
-	    TP_ARGS(dev, size, align, mappable),
+	    TP_PROTO(struct drm_device *dev, u32 size, u32 align, unsigned flags),
+	    TP_ARGS(dev, size, align, flags),
 
 	    TP_STRUCT__entry(
 			     __field(u32, dev)
 			     __field(u32, size)
 			     __field(u32, align)
-			     __field(bool, mappable)
+			     __field(unsigned, flags)
 			    ),
 
 	    TP_fast_assign(
 			   __entry->dev = dev->primary->index;
 			   __entry->size = size;
 			   __entry->align = align;
-			   __entry->mappable = mappable;
+			   __entry->flags = flags;
 			  ),
 
 	    TP_printk("dev=%d, size=%d, align=%d %s",
 		      __entry->dev, __entry->size, __entry->align,
-		      __entry->mappable ? ", mappable" : "")
+		      __entry->flags & PIN_MAPPABLE ? ", mappable" : "")
 );
 
 TRACE_EVENT(i915_gem_evict_everything,
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 424f0946d8c4..ac519cb46f22 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -1349,7 +1349,7 @@ void intel_setup_overlay(struct drm_device *dev)
 		}
 		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
-		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, true, false);
+		ret = i915_gem_obj_ggtt_pin(reg_bo, PAGE_SIZE, PIN_MAPPABLE);
 		if (ret) {
 			DRM_ERROR("failed to pin overlay register bo\n");
 			goto out_free_bo;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e4a0c9cc226d..136647037244 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2741,7 +2741,7 @@ intel_alloc_context_page(struct drm_device *dev)
 		return NULL;
 	}
 
-	ret = i915_gem_obj_ggtt_pin(ctx, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ctx, 4096, PIN_MAPPABLE);
 	if (ret) {
 		DRM_ERROR("failed to pin power context: %d\n", ret);
 		goto err_unref;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index ff4f47f1f52a..1c6b52fec32c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -533,7 +533,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
 
-	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
 	if (ret)
 		goto err_unref;
 
@@ -1273,10 +1273,9 @@ static int init_status_page(struct intel_ring_buffer *ring)
 
 	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
-	if (ret != 0) {
+	ret = i915_gem_obj_ggtt_pin(obj, 4096, PIN_MAPPABLE);
+	if (ret)
 		goto err_unref;
-	}
 
 	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
@@ -1356,7 +1355,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	ring->obj = obj;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
 	if (ret)
 		goto err_unref;
 
@@ -1940,7 +1939,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 			return -ENOMEM;
 		}
 
-		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
 		if (ret != 0) {
 			drm_gem_object_unreference(&obj->base);
 			DRM_ERROR("Failed to ping batch bo\n");
-- 
1.8.5.2

Thread overview: 14+ messages
2014-02-14 13:01 Shuffled full-ppgtt vma binding performance fix Daniel Vetter
2014-02-14 13:01 ` Daniel Vetter [this message]
2014-02-14 13:01 ` [PATCH 02/11] drm/i915: split PIN_GLOBAL out from PIN_MAPPABLE Daniel Vetter
2014-02-14 13:01 ` [PATCH 03/11] drm/i915: Handle set_cache_level errors in the pipe control scratch setup Daniel Vetter
2014-02-14 13:01 ` [PATCH 04/11] drm/i915: Don't set PIN_MAPPABLE for legacy ringbuffers Daniel Vetter
2014-02-14 13:01 ` [PATCH 05/11] drm/i915: Don't pin the status page as mappable Daniel Vetter
2014-02-14 13:01 ` [PATCH 06/11] drm/i915: Handle set_cache_level errors in the status page setup Daniel Vetter
2014-02-14 13:01 ` [PATCH 07/11] drm/i915: Don't allocate context pages as mappable Daniel Vetter
2014-02-14 13:01 ` [PATCH 08/11] drm/i915: Allow blocking in the PDE alloc when running low on gtt space Daniel Vetter
2014-02-14 13:01 ` [PATCH 09/11] drm/i915: Simplify i915_gem_object_ggtt_unpin Daniel Vetter
2014-02-14 13:01 ` [PATCH 10/11] drm/i915: Directly return the vma from bind_to_vm Daniel Vetter
2014-02-14 13:01 ` [PATCH 11/11] drm/i915: Only bind each object rather than for every execbuffer Daniel Vetter
2014-02-14 13:08 ` Shuffled full-ppgtt vma binding performance fix Chris Wilson
2014-02-14 13:19   ` Daniel Vetter
