From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 02/10] drm/i915: Move i915_gem_retire_work_handler
Date: Mon, 15 Jan 2018 21:24:47 +0000	[thread overview]
Message-ID: <20180115212455.24046-3-chris@chris-wilson.co.uk> (raw)
In-Reply-To: <20180115212455.24046-1-chris@chris-wilson.co.uk>

In preparation for the next patch, move i915_gem_retire_work_handler()
later in i915_gem.c to avoid needing a forward declaration.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c | 228 ++++++++++++++++++++--------------------
 1 file changed, 114 insertions(+), 114 deletions(-)
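
(Aside for the notes area below the cut, not part of the commit message:
in C a function must be declared before it can be called by name, so a
definition that sits after its caller in the file forces a forward
declaration; reordering the definitions is the alternative this patch
takes.  The toy program below uses made-up names, nothing from this
series, and only sketches that general pattern:)

#include <stdio.h>

/* Define the helper first ... */
static void helper(void)
{
	printf("helper\n");
}

/* ... so its caller, placed later in the file, needs no forward
 * declaration of helper().  If the order were reversed, a line such as
 * "static void helper(void);" would have to precede caller().
 */
static void caller(void)
{
	helper();
}

int main(void)
{
	caller();
	return 0;
}

(With the definition order reversed and no forward declaration, most
compilers will warn or error about an implicit declaration; keeping
definitions ahead of their callers, as the code motion here does, avoids
that churn.)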

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 87937c4f9dff..a8840a514377 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3310,120 +3310,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	return true;
 }
 
-static void
-i915_gem_retire_work_handler(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.retire_work.work);
-	struct drm_device *dev = &dev_priv->drm;
-
-	/* Come back later if the device is busy... */
-	if (mutex_trylock(&dev->struct_mutex)) {
-		i915_gem_retire_requests(dev_priv);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	/* Keep the retire handler running until we are finally idle.
-	 * We do not need to do this test under locking as in the worst-case
-	 * we queue the retire worker once too often.
-	 */
-	if (READ_ONCE(dev_priv->gt.awake)) {
-		i915_queue_hangcheck(dev_priv);
-		queue_delayed_work(dev_priv->wq,
-				   &dev_priv->gt.retire_work,
-				   round_jiffies_up_relative(HZ));
-	}
-}
-
-static inline bool
-new_requests_since_last_retire(const struct drm_i915_private *i915)
-{
-	return (READ_ONCE(i915->gt.active_requests) ||
-		work_pending(&i915->gt.idle_work.work));
-}
-
-static void
-i915_gem_idle_work_handler(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.idle_work.work);
-	bool rearm_hangcheck;
-	ktime_t end;
-
-	if (!READ_ONCE(dev_priv->gt.awake))
-		return;
-
-	/*
-	 * Wait for last execlists context complete, but bail out in case a
-	 * new request is submitted.
-	 */
-	end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
-	do {
-		if (new_requests_since_last_retire(dev_priv))
-			return;
-
-		if (intel_engines_are_idle(dev_priv))
-			break;
-
-		usleep_range(100, 500);
-	} while (ktime_before(ktime_get(), end));
-
-	rearm_hangcheck =
-		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-
-	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
-		/* Currently busy, come back later */
-		mod_delayed_work(dev_priv->wq,
-				 &dev_priv->gt.idle_work,
-				 msecs_to_jiffies(50));
-		goto out_rearm;
-	}
-
-	/*
-	 * New request retired after this work handler started, extend active
-	 * period until next instance of the work.
-	 */
-	if (new_requests_since_last_retire(dev_priv))
-		goto out_unlock;
-
-	/*
-	 * Be paranoid and flush a concurrent interrupt to make sure
-	 * we don't reactivate any irq tasklets after parking.
-	 *
-	 * FIXME: Note that even though we have waited for execlists to be idle,
-	 * there may still be an in-flight interrupt even though the CSB
-	 * is now empty. synchronize_irq() makes sure that a residual interrupt
-	 * is completed before we continue, but it doesn't prevent the HW from
-	 * raising a spurious interrupt later. To complete the shield we should
-	 * coordinate disabling the CS irq with flushing the interrupts.
-	 */
-	synchronize_irq(dev_priv->drm.irq);
-
-	intel_engines_park(dev_priv);
-	i915_gem_timelines_park(dev_priv);
-
-	i915_pmu_gt_parked(dev_priv);
-
-	GEM_BUG_ON(!dev_priv->gt.awake);
-	dev_priv->gt.awake = false;
-	rearm_hangcheck = false;
-
-	if (INTEL_GEN(dev_priv) >= 6)
-		gen6_rps_idle(dev_priv);
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
-
-	intel_runtime_pm_put(dev_priv);
-out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-out_rearm:
-	if (rearm_hangcheck) {
-		GEM_BUG_ON(!dev_priv->gt.awake);
-		i915_queue_hangcheck(dev_priv);
-	}
-}
-
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
 	struct drm_i915_private *i915 = to_i915(gem->dev);
@@ -4798,6 +4684,120 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	}
 }
 
+static void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), gt.retire_work.work);
+	struct drm_device *dev = &dev_priv->drm;
+
+	/* Come back later if the device is busy... */
+	if (mutex_trylock(&dev->struct_mutex)) {
+		i915_gem_retire_requests(dev_priv);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	/* Keep the retire handler running until we are finally idle.
+	 * We do not need to do this test under locking as in the worst-case
+	 * we queue the retire worker once too often.
+	 */
+	if (READ_ONCE(dev_priv->gt.awake)) {
+		i915_queue_hangcheck(dev_priv);
+		queue_delayed_work(dev_priv->wq,
+				   &dev_priv->gt.retire_work,
+				   round_jiffies_up_relative(HZ));
+	}
+}
+
+static inline bool
+new_requests_since_last_retire(const struct drm_i915_private *i915)
+{
+	return (READ_ONCE(i915->gt.active_requests) ||
+		work_pending(&i915->gt.idle_work.work));
+}
+
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), gt.idle_work.work);
+	bool rearm_hangcheck;
+	ktime_t end;
+
+	if (!READ_ONCE(dev_priv->gt.awake))
+		return;
+
+	/*
+	 * Wait for last execlists context complete, but bail out in case a
+	 * new request is submitted.
+	 */
+	end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
+	do {
+		if (new_requests_since_last_retire(dev_priv))
+			return;
+
+		if (intel_engines_are_idle(dev_priv))
+			break;
+
+		usleep_range(100, 500);
+	} while (ktime_before(ktime_get(), end));
+
+	rearm_hangcheck =
+		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+
+	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
+		/* Currently busy, come back later */
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->gt.idle_work,
+				 msecs_to_jiffies(50));
+		goto out_rearm;
+	}
+
+	/*
+	 * New request retired after this work handler started, extend active
+	 * period until next instance of the work.
+	 */
+	if (new_requests_since_last_retire(dev_priv))
+		goto out_unlock;
+
+	/*
+	 * Be paranoid and flush a concurrent interrupt to make sure
+	 * we don't reactivate any irq tasklets after parking.
+	 *
+	 * FIXME: Note that even though we have waited for execlists to be idle,
+	 * there may still be an in-flight interrupt even though the CSB
+	 * is now empty. synchronize_irq() makes sure that a residual interrupt
+	 * is completed before we continue, but it doesn't prevent the HW from
+	 * raising a spurious interrupt later. To complete the shield we should
+	 * coordinate disabling the CS irq with flushing the interrupts.
+	 */
+	synchronize_irq(dev_priv->drm.irq);
+
+	intel_engines_park(dev_priv);
+	i915_gem_timelines_park(dev_priv);
+
+	i915_pmu_gt_parked(dev_priv);
+
+	GEM_BUG_ON(!dev_priv->gt.awake);
+	dev_priv->gt.awake = false;
+	rearm_hangcheck = false;
+
+	if (INTEL_GEN(dev_priv) >= 6)
+		gen6_rps_idle(dev_priv);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
+
+	intel_runtime_pm_put(dev_priv);
+out_unlock:
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+out_rearm:
+	if (rearm_hangcheck) {
+		GEM_BUG_ON(!dev_priv->gt.awake);
+		i915_queue_hangcheck(dev_priv);
+	}
+}
+
 int i915_gem_suspend(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-- 
2.15.1
