From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 2/2] drm/i915: Shrink the GEM kmem_caches upon idling
Date: Fri, 19 Jan 2018 15:23:56 +0000
Message-ID: <20180119152356.17025-2-chris@chris-wilson.co.uk>
In-Reply-To: <20180119152356.17025-1-chris@chris-wilson.co.uk>

When we finally decide the GPU is idle, that is a good time to shrink
our kmem_caches.

v3: Defer the shrink until an RCU grace period after we idle.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
---
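The shrink is guarded by i915->gt.epoch (added in patch 1/2, and assumed
here to advance every time the GPU is woken): the idle handler samples
it, and both deferred stages re-check it, so any intervening wakeup
cheaply aborts the shrink. A minimal userspace analogue of that
generation-count guard, as a sketch only (hypothetical names; the kernel
code uses READ_ONCE() rather than C11 atomics):

#include <stdatomic.h>
#include <stdio.h>

/* Generation counter standing in for i915->gt.epoch. */
static atomic_uint epoch;

/* Each deferred stage revalidates the snapshot taken when we idled and
 * abandons the purely opportunistic shrink if the GPU woke meanwhile. */
static void stage(unsigned int snapshot, const char *name)
{
	if (snapshot != atomic_load(&epoch))
		printf("%s: woken since idling, skip the shrink\n", name);
	else
		printf("%s: still idle, carry on\n", name);
}

int main(void)
{
	unsigned int snap = atomic_load(&epoch);	/* idle handler */

	stage(snap, "__sleep_rcu");	/* after the RCU grace period */
	atomic_fetch_add(&epoch, 1);	/* the GPU wakes up here */
	stage(snap, "__sleep_work");	/* on the ordered wq: now skipped */
	return 0;
}
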
 drivers/gpu/drm/i915/i915_gem.c | 65 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 65 insertions(+)
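The bounce through i915->wq matters because that workqueue is ordered
(allocated with alloc_ordered_workqueue()): items execute one at a time
in queueing order, so the shrink work queued last cannot overtake frees
already sitting on the queue. A sketch of that property with
hypothetical names, not taken from this patch:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *wq;
static struct work_struct free_work, shrink_work;

static void do_free(struct work_struct *work)
{
	/* stand-in for frees deferred onto the queue earlier */
}

static void do_shrink(struct work_struct *work)
{
	/* by ordering, do_free() has completed before we run */
}

static int __init ordered_example_init(void)
{
	wq = alloc_ordered_workqueue("ordered-example", 0);
	if (!wq)
		return -ENOMEM;

	INIT_WORK(&free_work, do_free);
	INIT_WORK(&shrink_work, do_shrink);

	queue_work(wq, &free_work);
	queue_work(wq, &shrink_work);	/* runs strictly after do_free() */
	return 0;
}

static void __exit ordered_example_exit(void)
{
	destroy_workqueue(wq);	/* flushes both items first */
}

module_init(ordered_example_init);
module_exit(ordered_example_exit);
MODULE_LICENSE("GPL");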

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 7f0684ccc724..6a8fbcae835b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3341,12 +3341,59 @@ new_requests_since_last_retire(const struct drm_i915_private *i915)
 		work_pending(&i915->gt.idle_work.work));
 }
 
+static void shrink_caches(struct drm_i915_private *i915)
+{
+	/*
+	 * kmem_cache_shrink() discards empty slabs and reorders partially
+	 * filled slabs to prioritise allocating from the mostly full slabs,
+	 * with the aim of reducing fragmentation.
+	 */
+	kmem_cache_shrink(i915->priorities);
+	kmem_cache_shrink(i915->dependencies);
+	kmem_cache_shrink(i915->requests);
+	kmem_cache_shrink(i915->luts);
+	kmem_cache_shrink(i915->vmas);
+	kmem_cache_shrink(i915->objects);
+}
+
+struct sleep_rcu_work {
+	struct drm_i915_private *i915;
+	struct rcu_head rcu;
+	struct work_struct work;
+	u32 epoch;
+};
+
+static void __sleep_work(struct work_struct *work)
+{
+	struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
+	struct drm_i915_private *i915 = s->i915;
+	u32 epoch = s->epoch;
+
+	kfree(s);
+	if (epoch == READ_ONCE(i915->gt.epoch))
+		shrink_caches(i915);
+}
+
+static void __sleep_rcu(struct rcu_head *rcu)
+{
+	struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
+	struct drm_i915_private *i915 = s->i915;
+
+	if (s->epoch == READ_ONCE(i915->gt.epoch)) {
+		INIT_WORK(&s->work, __sleep_work);
+		queue_work(i915->wq, &s->work);
+	} else {
+		kfree(s);
+	}
+}
+
 static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
 	struct drm_i915_private *dev_priv =
 		container_of(work, typeof(*dev_priv), gt.idle_work.work);
 	bool rearm_hangcheck;
+	u32 epoch = 0;
 	ktime_t end;
 
 	if (!READ_ONCE(dev_priv->gt.awake))
@@ -3406,6 +3453,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
 	GEM_BUG_ON(!dev_priv->gt.awake);
 	dev_priv->gt.awake = false;
 	rearm_hangcheck = false;
+	epoch = dev_priv->gt.epoch;
 
 	if (INTEL_GEN(dev_priv) >= 6)
 		gen6_rps_idle(dev_priv);
@@ -3421,6 +3469,23 @@ i915_gem_idle_work_handler(struct work_struct *work)
 		GEM_BUG_ON(!dev_priv->gt.awake);
 		i915_queue_hangcheck(dev_priv);
 	}
+
+	/*
+	 * When we are idle, it is an opportune time to reap our caches.
+	 * However, we have many objects that utilise RCU and the ordered
+	 * i915->wq that this work is executing on. To try and flush any
+	 * pending frees now we are idle, we first wait for an RCU grace
+	 * period, and then queue a task (that will run last on the wq) to
+	 * shrink and re-optimize the caches.
+	 */
+	if (epoch == READ_ONCE(dev_priv->gt.epoch)) {
+		struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
+		if (s) {
+			s->i915 = dev_priv;
+			s->epoch = epoch;
+			call_rcu(&s->rcu, __sleep_rcu);
+		}
+	}
 }
 
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
-- 
2.15.1
