From: Ben Widawsky <benjamin.widawsky@intel.com>
To: Intel GFX <intel-gfx@lists.freedesktop.org>
Cc: Ben Widawsky <ben@bwidawsk.net>,
	Ben Widawsky <benjamin.widawsky@intel.com>
Subject: [PATCH 10/48] drm/i915: Make pin count per VMA
Date: Fri,  6 Dec 2013 14:11:43 -0800
Message-ID: <1386367941-7131-58-git-send-email-benjamin.widawsky@intel.com>
In-Reply-To: <1386367941-7131-1-git-send-email-benjamin.widawsky@intel.com>

Pin tracking currently lives on the object, which stops making sense once an
object can be bound into more than one address space. Move the pin count into
struct i915_vma so that pins are tracked per binding, rename the GGTT-only
unpin path to i915_gem_object_ggtt_unpin(), and add an i915_gem_obj_is_pinned()
helper for callers that only need to know whether any VMA of the object is
pinned.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_debugfs.c        | 14 ++++++---
 drivers/gpu/drm/i915/i915_drv.h            | 32 +++++++++++--------
 drivers/gpu/drm/i915/i915_gem.c            | 49 ++++++++++++++++--------------
 drivers/gpu/drm/i915/i915_gem_context.c    | 14 ++++-----
 drivers/gpu/drm/i915/i915_gem_evict.c      |  5 +--
 drivers/gpu/drm/i915/i915_gem_execbuffer.c |  6 ++--
 drivers/gpu/drm/i915/i915_gem_tiling.c     |  2 +-
 drivers/gpu/drm/i915/i915_gpu_error.c      |  6 ++--
 drivers/gpu/drm/i915/intel_fbdev.c         |  4 +--
 drivers/gpu/drm/i915/intel_overlay.c       |  8 ++---
 drivers/gpu/drm/i915/intel_pm.c            |  6 ++--
 drivers/gpu/drm/i915/intel_ringbuffer.c    | 12 ++++----
 12 files changed, 88 insertions(+), 70 deletions(-)

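As a quick orientation for reviewers, here is a minimal standalone sketch of
the ownership change the diff below makes. It is not the kernel code: struct
vma, struct object, object_is_pinned(), vma_pin() and vma_unpin() are made-up
stand-ins for illustration only. The point it shows is that the pin count now
lives on each VMA, that an object counts as pinned when any of its VMAs holds
a pin (which is what the new i915_gem_obj_is_pinned() helper checks), and that
the 4-bit field keeps the same overflow guard the per-object count had.

#include <stdbool.h>

/* Illustrative stand-ins for struct i915_vma / drm_i915_gem_object. */
struct vma {
	struct vma *next;		/* next VMA belonging to the same object */
	unsigned int pin_count;		/* per-VMA pin count (was per-object) */
};

struct object {
	struct vma *vma_list;		/* head of this object's VMA list */
};

#define MAX_PIN_COUNT 0xf		/* 4-bit field, as in the patch */

/* Any pinned VMA pins the object; same idea as i915_gem_obj_is_pinned(). */
static bool object_is_pinned(const struct object *obj)
{
	const struct vma *vma;

	for (vma = obj->vma_list; vma; vma = vma->next)
		if (vma->pin_count > 0)
			return true;
	return false;
}

/* Pinning and unpinning now target one specific binding. */
static bool vma_pin(struct vma *vma)
{
	if (vma->pin_count == MAX_PIN_COUNT)
		return false;		/* the real code WARNs and returns -EBUSY */
	vma->pin_count++;
	return true;
}

static void vma_unpin(struct vma *vma)
{
	if (vma->pin_count > 0)
		vma->pin_count--;
}
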
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 13accf7..4c610ee 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -100,7 +100,7 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
 	if (obj->user_pin_count > 0)
 		return "P";
-	else if (obj->pin_count > 0)
+	else if (i915_gem_obj_is_pinned(obj))
 		return "p";
 	else
 		return " ";
@@ -125,6 +125,8 @@ static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
+	int pin_count = 0;
+
 	seq_printf(m, "%pK: %s%s%s %8zdKiB %02x %02x %u %u %u%s%s%s",
 		   &obj->base,
 		   get_pin_flag(obj),
@@ -141,8 +143,10 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 	if (obj->base.name)
 		seq_printf(m, " (name: %d)", obj->base.name);
-	if (obj->pin_count)
-		seq_printf(m, " (pinned x %d)", obj->pin_count);
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->pin_count > 0)
+			pin_count++;
+	seq_printf(m, " (pinned x %d)", pin_count);
 	if (obj->pin_display)
 		seq_printf(m, " (display)");
 	if (obj->fence_reg != I915_FENCE_REG_NONE)
@@ -439,7 +443,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data)
 
 	total_obj_size = total_gtt_size = count = 0;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (list == PINNED_LIST && obj->pin_count == 0)
+		if (list == PINNED_LIST && !i915_gem_obj_is_pinned(obj))
 			continue;
 
 		seq_puts(m, "   ");
@@ -2843,7 +2847,7 @@ i915_drop_caches_set(void *data, u64 val)
 		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
 			list_for_each_entry_safe(vma, x, &vm->inactive_list,
 						 mm_list) {
-				if (vma->obj->pin_count)
+				if (vma->pin_count)
 					continue;
 
 				ret = i915_vma_unbind(vma);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index efc57fe..ce80a72 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -651,6 +651,17 @@ struct i915_vma {
 	unsigned long exec_handle;
 	struct drm_i915_gem_exec_object2 *exec_entry;
 
+	/** How many users have pinned this object in GTT space. The following
+	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
+	 * (via user_pin_count), execbuffer (objects are not allowed multiple
+	 * times for the same batchbuffer), and the framebuffer code. When
+	 * switching/pageflipping, the framebuffer code has at most two buffers
+	 * pinned per crtc.
+	 *
+	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
+	 * bits with absolutely no headroom. So use 4 bits. */
+	unsigned int pin_count:4;
+#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 };
 
 struct i915_ctx_hang_stats {
@@ -1617,18 +1628,6 @@ struct drm_i915_gem_object {
 	 */
 	unsigned int fence_dirty:1;
 
-	/** How many users have pinned this object in GTT space. The following
-	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
-	 * (via user_pin_count), execbuffer (objects are not allowed multiple
-	 * times for the same batchbuffer), and the framebuffer code. When
-	 * switching/pageflipping, the framebuffer code has at most two buffers
-	 * pinned per crtc.
-	 *
-	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
-	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count:4;
-#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
-
 	/**
 	 * Is the object at the current location in the gtt mappable and
 	 * fenceable? Used to avoid costly recalculations.
@@ -2003,7 +2002,7 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     uint32_t alignment,
 				     bool map_and_fenceable,
 				     bool nonblocking);
-void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 int __must_check i915_vma_unbind(struct i915_vma *vma);
 int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
@@ -2166,6 +2165,13 @@ i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 				  struct i915_address_space *vm);
 
 struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->pin_count > 0)
+			return true;
+	return false;
+}
 
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 356d8fa..66b8dbc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -204,7 +204,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	pinned = 0;
 	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-		if (obj->pin_count)
+		if (i915_gem_obj_is_pinned(obj))
 			pinned += i915_gem_obj_ggtt_size(obj);
 	mutex_unlock(&dev->struct_mutex);
 
@@ -651,7 +651,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
 	}
 
 out_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 out:
 	return ret;
 }
@@ -1418,7 +1418,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	/* Finally, remap it using the new GTT offset */
 	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
 unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
 out:
@@ -2721,7 +2721,7 @@ int i915_vma_unbind(struct i915_vma *vma)
 		return 0;
 	}
 
-	if (obj->pin_count)
+	if (vma->pin_count)
 		return -EBUSY;
 
 	BUG_ON(obj->pages == NULL);
@@ -2785,7 +2785,7 @@ i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
 	if (!i915_gem_obj_ggtt_bound(obj))
 		return 0;
 
-	if (obj->pin_count)
+	if (i915_gem_obj_to_ggtt(obj)->pin_count)
 		return -EBUSY;
 
 	BUG_ON(obj->pages == NULL);
@@ -3486,7 +3486,7 @@ int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
 	if (obj->cache_level == cache_level)
 		return 0;
 
-	if (obj->pin_count) {
+	if (i915_gem_obj_is_pinned(obj)) {
 		DRM_DEBUG("can not change the cache level of pinned objects\n");
 		return -EBUSY;
 	}
@@ -3646,7 +3646,7 @@ static bool is_pin_display(struct drm_i915_gem_object *obj)
 	 * subtracting the potential reference by the user, any pin_count
 	 * remains, it must be due to another use by the display engine.
 	 */
-	return obj->pin_count - !!obj->user_pin_count;
+	return i915_gem_obj_to_ggtt(obj)->pin_count - !!obj->user_pin_count;
 }
 
 /*
@@ -3720,7 +3720,7 @@ err_unpin_display:
 void
 i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj)
 {
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 	obj->pin_display = is_pin_display(obj);
 }
 
@@ -3853,18 +3853,18 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	struct i915_vma *vma;
 	int ret;
 
-	if (WARN_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
-		return -EBUSY;
-
 	WARN_ON(map_and_fenceable && !i915_is_ggtt(vm));
 
 	vma = i915_gem_obj_to_vma(obj, vm);
 
 	if (vma) {
+		if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
+			return -EBUSY;
+
 		if ((alignment &&
 		     vma->node.start & (alignment - 1)) ||
 		    (map_and_fenceable && !obj->map_and_fenceable)) {
-			WARN(obj->pin_count,
+			WARN(vma->pin_count,
 			     "bo is already pinned with incorrect alignment:"
 			     " offset=%lx, req.alignment=%x, req.map_and_fenceable=%d,"
 			     " obj->map_and_fenceable=%d\n",
@@ -3893,19 +3893,22 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj,
 	if (!obj->has_global_gtt_mapping && map_and_fenceable)
 		i915_gem_gtt_bind_object(obj, obj->cache_level);
 
-	obj->pin_count++;
+	i915_gem_obj_to_vma(obj, vm)->pin_count++;
 	obj->pin_mappable |= map_and_fenceable;
 
 	return 0;
 }
 
 void
-i915_gem_object_unpin(struct drm_i915_gem_object *obj)
+i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj)
 {
-	BUG_ON(obj->pin_count == 0);
-	BUG_ON(!i915_gem_obj_bound_any(obj));
+	struct i915_vma *vma = i915_gem_obj_to_ggtt(obj);
 
-	if (--obj->pin_count == 0)
+	BUG_ON(!vma);
+	BUG_ON(vma->pin_count == 0);
+	BUG_ON(!i915_gem_obj_ggtt_bound(obj));
+
+	if (--vma->pin_count == 0)
 		obj->pin_mappable = false;
 }
 
@@ -3989,7 +3992,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 	obj->user_pin_count--;
 	if (obj->user_pin_count == 0) {
 		obj->pin_filp = NULL;
-		i915_gem_object_unpin(obj);
+		i915_gem_object_ggtt_unpin(obj);
 	}
 
 out:
@@ -4069,7 +4072,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
 		goto unlock;
 	}
 
-	if (obj->pin_count) {
+	if (i915_gem_obj_is_pinned(obj)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -4178,12 +4181,14 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 	if (obj->phys_obj)
 		i915_gem_detach_phys_object(dev, obj);
 
-	obj->pin_count = 0;
 	/* NB: 0 or 1 elements */
 	WARN_ON(!list_empty(&obj->vma_list) &&
 		!list_is_singular(&obj->vma_list));
 	list_for_each_entry_safe(vma, next, &obj->vma_list, vma_link) {
-		int ret = i915_vma_unbind(vma);
+		int ret;
+
+		vma->pin_count = 0;
+		ret = i915_vma_unbind(vma);
 		if (WARN_ON(ret == -ERESTARTSYS)) {
 			bool was_interruptible;
 
@@ -4962,7 +4967,7 @@ i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
 		if (obj->active)
 			continue;
 
-		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
+		if (!i915_gem_obj_is_pinned(obj) && obj->pages_pin_count == 0)
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e08acab..9775d8b 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -241,7 +241,7 @@ static int create_default_context(struct drm_i915_private *dev_priv)
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(ctx->obj);
+	i915_gem_object_ggtt_unpin(ctx->obj);
 err_destroy:
 	i915_gem_context_unreference(ctx);
 	return ret;
@@ -300,11 +300,11 @@ void i915_gem_context_fini(struct drm_device *dev)
 	if (dev_priv->ring[RCS].last_context == dctx) {
 		/* Fake switch to NULL context */
 		WARN_ON(dctx->obj->active);
-		i915_gem_object_unpin(dctx->obj);
+		i915_gem_object_ggtt_unpin(dctx->obj);
 		i915_gem_context_unreference(dctx);
 	}
 
-	i915_gem_object_unpin(dctx->obj);
+	i915_gem_object_ggtt_unpin(dctx->obj);
 	i915_gem_context_unreference(dctx);
 	dev_priv->ring[RCS].default_context = NULL;
 	dev_priv->ring[RCS].last_context = NULL;
@@ -410,7 +410,7 @@ static int do_switch(struct i915_hw_context *to)
 	u32 hw_flags = 0;
 	int ret, i;
 
-	BUG_ON(from != NULL && from->obj != NULL && from->obj->pin_count == 0);
+	BUG_ON(from != NULL && from->obj != NULL && !i915_gem_obj_is_pinned(from->obj));
 
 	if (from == to && !to->remap_slice)
 		return 0;
@@ -436,7 +436,7 @@ static int do_switch(struct i915_hw_context *to)
 	 */
 	ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
 	if (ret) {
-		i915_gem_object_unpin(to->obj);
+		i915_gem_object_ggtt_unpin(to->obj);
 		return ret;
 	}
 
@@ -448,7 +448,7 @@ static int do_switch(struct i915_hw_context *to)
 
 	ret = mi_set_context(ring, to, hw_flags);
 	if (ret) {
-		i915_gem_object_unpin(to->obj);
+		i915_gem_object_ggtt_unpin(to->obj);
 		return ret;
 	}
 
@@ -484,7 +484,7 @@ static int do_switch(struct i915_hw_context *to)
 		BUG_ON(from->obj->ring != ring);
 
 		/* obj is kept alive until the next request by its active ref */
-		i915_gem_object_unpin(from->obj);
+		i915_gem_object_ggtt_unpin(from->obj);
 		i915_gem_context_unreference(from);
 	}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b737653..5cb0aa4 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -34,7 +34,8 @@
 static bool
 mark_free(struct i915_vma *vma, struct list_head *unwind)
 {
-	if (vma->obj->pin_count)
+	/* Freeing up memory requires no VMAs are pinned */
+	if (i915_gem_obj_is_pinned(vma->obj))
 		return false;
 
 	if (WARN_ON(!list_empty(&vma->exec_list)))
@@ -186,7 +187,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
 	}
 
 	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
-		if (vma->obj->pin_count == 0)
+		if (vma->pin_count == 0)
 			WARN_ON(i915_vma_unbind(vma));
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index cdab6e4..742f920 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -212,7 +212,7 @@ i915_gem_execbuffer_unreserve_vma(struct i915_vma *vma)
 		i915_gem_object_unpin_fence(obj);
 
 	if (entry->flags & __EXEC_OBJECT_HAS_PIN)
-		i915_gem_object_unpin(obj);
+		vma->pin_count--;
 
 	entry->flags &= ~(__EXEC_OBJECT_HAS_FENCE | __EXEC_OBJECT_HAS_PIN);
 }
@@ -934,7 +934,9 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 		if (obj->base.write_domain) {
 			obj->dirty = 1;
 			obj->last_write_seqno = intel_ring_get_seqno(ring);
-			if (obj->pin_count) /* check for potential scanout */
+			/* check for potential scanout */
+			if (i915_gem_obj_ggtt_bound(obj) &&
+			    i915_gem_obj_to_ggtt(obj)->pin_count)
 				intel_mark_fb_busy(obj, ring);
 		}
 
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index b139053..eb99358 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -308,7 +308,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	if (obj->pin_count || obj->framebuffer_references) {
+	if (i915_gem_obj_is_pinned(obj) || obj->framebuffer_references) {
 		drm_gem_object_unreference_unlocked(&obj->base);
 		return -EBUSY;
 	}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 9e0ff69..b553824 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -578,7 +578,7 @@ static void capture_bo(struct drm_i915_error_buffer *err,
 	err->write_domain = obj->base.write_domain;
 	err->fence_reg = obj->fence_reg;
 	err->pinned = 0;
-	if (obj->pin_count > 0)
+	if (i915_gem_obj_is_pinned(obj))
 		err->pinned = 1;
 	if (obj->user_pin_count > 0)
 		err->pinned = -1;
@@ -611,7 +611,7 @@ static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
 	int i = 0;
 
 	list_for_each_entry(obj, head, global_list) {
-		if (obj->pin_count == 0)
+		if (!i915_gem_obj_is_pinned(obj))
 			continue;
 
 		capture_bo(err++, obj);
@@ -874,7 +874,7 @@ static void i915_gem_capture_vm(struct drm_i915_private *dev_priv,
 		i++;
 	error->active_bo_count[ndx] = i;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
-		if (obj->pin_count)
+		if (i915_gem_obj_is_pinned(obj))
 			i++;
 	error->pinned_bo_count[ndx] = i - error->active_bo_count[ndx];
 
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index 284c3eb..d53c17d 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -104,7 +104,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
 	return 0;
 
 out_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 out_unref:
 	drm_gem_object_unreference(&obj->base);
 out:
@@ -208,7 +208,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
 	return 0;
 
 out_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 out_unlock:
 	mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index a98a990..a1397b1 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -293,7 +293,7 @@ static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
 	struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 
 	overlay->old_vid_bo = NULL;
@@ -306,7 +306,7 @@ static void intel_overlay_off_tail(struct intel_overlay *overlay)
 	/* never have the overlay hw on without showing a frame */
 	BUG_ON(!overlay->vid_bo);
 
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	overlay->vid_bo = NULL;
 
@@ -782,7 +782,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	return 0;
 
 out_unpin:
-	i915_gem_object_unpin(new_bo);
+	i915_gem_object_ggtt_unpin(new_bo);
 	return ret;
 }
 
@@ -1386,7 +1386,7 @@ void intel_setup_overlay(struct drm_device *dev)
 
 out_unpin_bo:
 	if (!OVERLAY_NEEDS_PHYSICAL(dev))
-		i915_gem_object_unpin(reg_bo);
+		i915_gem_object_ggtt_unpin(reg_bo);
 out_free_bo:
 	drm_gem_object_unreference(&reg_bo->base);
 out_free:
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index e6cb36e..0731338 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3299,7 +3299,7 @@ intel_alloc_context_page(struct drm_device *dev)
 	return ctx;
 
 err_unpin:
-	i915_gem_object_unpin(ctx);
+	i915_gem_object_ggtt_unpin(ctx);
 err_unref:
 	drm_gem_object_unreference(&ctx->base);
 	return NULL;
@@ -4167,13 +4167,13 @@ void ironlake_teardown_rc6(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->ips.renderctx) {
-		i915_gem_object_unpin(dev_priv->ips.renderctx);
+		i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx);
 		drm_gem_object_unreference(&dev_priv->ips.renderctx->base);
 		dev_priv->ips.renderctx = NULL;
 	}
 
 	if (dev_priv->ips.pwrctx) {
-		i915_gem_object_unpin(dev_priv->ips.pwrctx);
+		i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx);
 		drm_gem_object_unreference(&dev_priv->ips.pwrctx->base);
 		dev_priv->ips.pwrctx = NULL;
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e05a021..75c8883 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -549,7 +549,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(ring->scratch.obj);
+	i915_gem_object_ggtt_unpin(ring->scratch.obj);
 err_unref:
 	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
@@ -625,7 +625,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 
 	if (INTEL_INFO(dev)->gen >= 5) {
 		kunmap(sg_page(ring->scratch.obj->pages->sgl));
-		i915_gem_object_unpin(ring->scratch.obj);
+		i915_gem_object_ggtt_unpin(ring->scratch.obj);
 	}
 
 	drm_gem_object_unreference(&ring->scratch.obj->base);
@@ -1250,7 +1250,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 		return;
 
 	kunmap(sg_page(obj->pages->sgl));
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
 }
@@ -1290,7 +1290,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 err_unref:
 	drm_gem_object_unreference(&obj->base);
 err:
@@ -1387,7 +1387,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 err_unmap:
 	iounmap(ring->virtual_start);
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 err_unref:
 	drm_gem_object_unreference(&obj->base);
 	ring->obj = NULL;
@@ -1415,7 +1415,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	iounmap(ring->virtual_start);
 
-	i915_gem_object_unpin(ring->obj);
+	i915_gem_object_ggtt_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
 	ring->obj = NULL;
 	ring->preallocated_lazy_request = NULL;
-- 
1.8.4.2
