intel-gfx.lists.freedesktop.org archive mirror
* [PATCH] drm/i915: Protect mmaped buffers from casual eviction.
@ 2010-05-11 15:55 Chris Wilson
  2010-05-11 16:38 ` Eric Anholt
  0 siblings, 1 reply; 3+ messages in thread
From: Chris Wilson @ 2010-05-11 15:55 UTC (permalink / raw)
  To: intel-gfx

Keep buffers that are in use by the CPU, having been mmapped and moved
to the CPU or GTT domain since their last rendering, on a separate
inactive list. This prevents the first-pass eviction process from
unbinding one of these buffers. Those buffers are still evicted as
normal during evict-everything, so that the memory can be recovered
under high memory pressure or a forced idle.

References:

  Bug 20152 - [G45/GM965 UXA] cannot view JPG in firefox when running UXA
  https://bugs.freedesktop.org/show_bug.cgi?id=20152

  Bug 24369 - Hang when scrolling firefox page with window in front
  https://bugs.freedesktop.org/show_bug.cgi?id=24369

  Bug 15911 - Intermittent X crash (freeze)
  https://bugzilla.kernel.org/show_bug.cgi?id=15911

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Tested-by: Christian von Schultz <kernel@vonschultz.se>
---
 drivers/gpu/drm/i915/i915_drv.h |   13 +++++++
 drivers/gpu/drm/i915/i915_gem.c |   71 ++++++++++++++++++++++++++++++++++----
 2 files changed, 76 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 317c9bf..f99936f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -557,6 +557,19 @@ typedef struct drm_i915_private {
 		 */
 		struct list_head inactive_list;
 
+		/**
+		 * LRU list of objects which are not in the ringbuffer and
+		 * are ready to unbind, but are still in the GTT and currently
+		 * mapped and in use by the CPU.
+		 *
+		 * last_rendering_seqno is 0 while an object is in this list.
+		 *
+		 * A reference is not held on the buffer while on this list,
+		 * as merely being GTT-bound shouldn't prevent its being
+		 * freed, and we'll pull it off the list in the free path.
+		 */
+		struct list_head mmap_list;
+
 		/** LRU list of objects with fence regs on them. */
 		struct list_head fence_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 229354e..9a73b20 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -52,6 +52,7 @@ static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
 static int i915_gem_evict_something(struct drm_device *dev, int min_size);
 static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
+static int i915_gem_evict_from_mmap_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
@@ -1064,6 +1065,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 		ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 	}
 
+	if (ret == 0 && obj_priv->gtt_space && !obj_priv->active)
+		list_move_tail(&obj_priv->list, &dev_priv->mm.mmap_list);
+
 	drm_gem_object_unreference(obj);
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
@@ -1197,6 +1201,9 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 			goto unlock;
 	}
 
+	if (!obj_priv->active)
+		list_move_tail(&obj_priv->list, &dev_priv->mm.mmap_list);
+
 	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
 		page_offset;
 
@@ -2162,7 +2169,8 @@ i915_gem_evict_everything(struct drm_device *dev)
 	bool lists_empty;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+	lists_empty = (list_empty(&dev_priv->mm.mmap_list) &&
+		       list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
@@ -2177,12 +2185,17 @@ i915_gem_evict_everything(struct drm_device *dev)
 
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 
+	ret = i915_gem_evict_from_mmap_list(dev);
+	if (ret)
+		return ret;
+
 	ret = i915_gem_evict_from_inactive_list(dev);
 	if (ret)
 		return ret;
 
 	spin_lock(&dev_priv->mm.active_list_lock);
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+	lists_empty = (list_empty(&dev_priv->mm.mmap_list) &&
+		       list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
 	spin_unlock(&dev_priv->mm.active_list_lock);
@@ -4624,17 +4637,15 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	kfree(obj->driver_private);
 }
 
-/** Unbinds all inactive objects. */
 static int
-i915_gem_evict_from_inactive_list(struct drm_device *dev)
+i915_gem_evict_from_list(struct drm_device *dev,
+			 struct list_head *list)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	while (!list_empty(&dev_priv->mm.inactive_list)) {
+	while (!list_empty(list)) {
 		struct drm_gem_object *obj;
 		int ret;
 
-		obj = list_first_entry(&dev_priv->mm.inactive_list,
+		obj = list_first_entry(list,
 				       struct drm_i915_gem_object,
 				       list)->obj;
 
@@ -4648,6 +4659,23 @@ i915_gem_evict_from_inactive_list(struct drm_device *dev)
 	return 0;
 }
 
+/** Unbinds all inactive objects. */
+static int
+i915_gem_evict_from_inactive_list(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	return i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+}
+
+static int
+i915_gem_evict_from_mmap_list(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	return i915_gem_evict_from_list(dev, &dev_priv->mm.mmap_list);
+}
+
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -4674,6 +4702,12 @@ i915_gem_idle(struct drm_device *dev)
 			mutex_unlock(&dev->struct_mutex);
 			return ret;
 		}
+
+		ret = i915_gem_evict_from_mmap_list(dev);
+		if (ret) {
+			mutex_unlock(&dev->struct_mutex);
+			return ret;
+		}
 	}
 
 	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
@@ -5010,6 +5044,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+	BUG_ON(!list_empty(&dev_priv->mm.mmap_list));
 	BUG_ON(!list_empty(&dev_priv->mm.request_list));
 	mutex_unlock(&dev->struct_mutex);
 
@@ -5053,6 +5088,7 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.mmap_list);
 	INIT_LIST_HEAD(&dev_priv->mm.request_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
@@ -5308,6 +5344,22 @@ i915_gpu_is_active(struct drm_device *dev)
 }
 
 static int
+i915_move_mmap_onto_inactive_list(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (list_empty(&dev_priv->mm.mmap_list))
+		return 0;
+
+	do {
+		list_move_tail(dev_priv->mm.mmap_list.next,
+			       &dev_priv->mm.inactive_list);
+	} while (!list_empty(&dev_priv->mm.mmap_list));
+
+	return 1;
+}
+
+static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
 	drm_i915_private_t *dev_priv, *next_dev;
@@ -5416,6 +5468,9 @@ rescan:
 				active++;
 			}
 
+			if (i915_move_mmap_onto_inactive_list(dev))
+				active++;
+
 			spin_lock(&shrink_list_lock);
 			mutex_unlock(&dev->struct_mutex);
 		}
-- 
1.7.1

* Re: [PATCH] drm/i915: Protect mmaped buffers from casual eviction.
  2010-05-11 15:55 [PATCH] drm/i915: Protect mmaped buffers from casual eviction Chris Wilson
@ 2010-05-11 16:38 ` Eric Anholt
  2010-05-11 22:57   ` Chris Wilson
  0 siblings, 1 reply; 3+ messages in thread
From: Eric Anholt @ 2010-05-11 16:38 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On Tue, 11 May 2010 16:55:27 +0100, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> Keep buffers that are in use by the CPU, having been mmapped and moved
> to the CPU or GTT domain since their last rendering, on a separate
> inactive list. This prevents the first-pass eviction process from
> unbinding one of these buffers. Those buffers are still evicted as
> normal during evict-everything, so that the memory can be recovered
> under high memory pressure or a forced idle.
> 
> References:
> 
>   Bug 20152 - [G45/GM965 UXA] cannot view JPG in firefox when running UXA
>   https://bugs.freedesktop.org/show_bug.cgi?id=20152
> 
>   Bug 24369 - Hang when scrolling firefox page with window in front
>   https://bugs.freedesktop.org/show_bug.cgi?id=24369
> 
>   Bug 15911 -  Intermittent X crash (freeze)
>   https://bugzilla.kernel.org/show_bug.cgi?id=15911

Couldn't this be handled more easily by just moving to the tail of
inactive at the points where you would move to the tail of mmap?
Inactive already means "obj_priv->gtt_space && !obj_priv->active".

* Re: [PATCH] drm/i915: Protect mmaped buffers from casual eviction.
  2010-05-11 16:38 ` Eric Anholt
@ 2010-05-11 22:57   ` Chris Wilson
  0 siblings, 0 replies; 3+ messages in thread
From: Chris Wilson @ 2010-05-11 22:57 UTC (permalink / raw)
  To: Eric Anholt, intel-gfx

On Tue, 11 May 2010 09:38:36 -0700, Eric Anholt <eric@anholt.net> wrote:
> Couldn't this be handled more easily by just moving to the tail of
> inactive at the points where you would move to the tail of mmap?
> Inactive already means "obj_priv->gtt_space && !obj_priv->active".

The real issue is that the inactive list is no longer evicted in LRU
order, otherwise just moving to the end of the inactive list would be
ideal. In benchmarks it is faster to evict an appropriately sized
object than to iterate over the inactive list until enough contiguous
space has been freed. The consequence is that the page-fault-of-doom is
reintroduced unless some measure is taken to avoid it. I don't have any
figures to suggest what the average size of the mmap_list will be, but
since an object stays on the list only until it is used or until
evict-everything, the list should remain quite short. As our drivers
improve, the frequency with which we have to mmap buffers should
decrease as well...
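
To sketch the difference between the two eviction strategies (an
illustrative simplification, with freed and min_size as stand-in
locals, not the actual eviction code):

	/* Strict LRU: unbind from the head of the inactive list until
	 * enough space has been recovered.  Fair to mmapped buffers,
	 * but may unbind several objects per eviction.
	 */
	while (freed < min_size &&
	       !list_empty(&dev_priv->mm.inactive_list)) {
		obj = list_first_entry(&dev_priv->mm.inactive_list,
				       struct drm_i915_gem_object,
				       list)->obj;
		freed += obj->size;
		i915_gem_object_unbind(obj);
	}

	/* Size match: unbind the first object that is large enough by
	 * itself.  Faster in benchmarks, but no longer LRU, so a hot
	 * mmapped buffer can be the unlucky victim: the
	 * page-fault-of-doom.
	 */
	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
		if (obj_priv->obj->size >= min_size) {
			i915_gem_object_unbind(obj_priv->obj);
			break;
		}
	}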
-ickle

-- 
Chris Wilson, Intel Open Source Technology Centre
