From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 06/19] drm/i915: Move i915_gem_retire_work_handler
Date: Tue,  2 Jan 2018 15:12:22 +0000
Message-ID: <20180102151235.3949-6-chris@chris-wilson.co.uk>
In-Reply-To: <20180102151235.3949-1-chris@chris-wilson.co.uk>

In preparation for the next patch, move i915_gem_retire_work_handler()
later in i915_gem.c to avoid the need for a forward declaration.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c | 228 ++++++++++++++++++++--------------------
 1 file changed, 114 insertions(+), 114 deletions(-)
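
As a quick illustration of the motivation, below is a minimal, self-contained
C sketch of the forward-declaration problem that reordering definitions
avoids. The names park_engines() and idle_handler() are hypothetical and do
not come from i915 or from this series:

  /*
   * Hypothetical example: because idle_handler() appears before the
   * definition of park_engines(), a forward declaration is required.
   */
  static void park_engines(void);

  static void idle_handler(void)
  {
          park_engines();
  }

  static void park_engines(void)
  {
          /* parking work would go here */
  }

  /*
   * Reordering the two definitions so that the callee comes first lets
   * the forward declaration be dropped entirely.
   */

Moving i915_gem_retire_work_handler() and i915_gem_idle_work_handler() later
in i915_gem.c serves the same purpose for the ordering the next patch will
rely on.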

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4808011264ae..b3c2258ebadc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3310,120 +3310,6 @@ bool i915_gem_unset_wedged(struct drm_i915_private *i915)
 	return true;
 }
 
-static void
-i915_gem_retire_work_handler(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.retire_work.work);
-	struct drm_device *dev = &dev_priv->drm;
-
-	/* Come back later if the device is busy... */
-	if (mutex_trylock(&dev->struct_mutex)) {
-		i915_gem_retire_requests(dev_priv);
-		mutex_unlock(&dev->struct_mutex);
-	}
-
-	/* Keep the retire handler running until we are finally idle.
-	 * We do not need to do this test under locking as in the worst-case
-	 * we queue the retire worker once too often.
-	 */
-	if (READ_ONCE(dev_priv->gt.awake)) {
-		i915_queue_hangcheck(dev_priv);
-		queue_delayed_work(dev_priv->wq,
-				   &dev_priv->gt.retire_work,
-				   round_jiffies_up_relative(HZ));
-	}
-}
-
-static inline bool
-new_requests_since_last_retire(const struct drm_i915_private *i915)
-{
-	return (READ_ONCE(i915->gt.active_requests) ||
-		work_pending(&i915->gt.idle_work.work));
-}
-
-static void
-i915_gem_idle_work_handler(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work, typeof(*dev_priv), gt.idle_work.work);
-	bool rearm_hangcheck;
-	ktime_t end;
-
-	if (!READ_ONCE(dev_priv->gt.awake))
-		return;
-
-	/*
-	 * Wait for last execlists context complete, but bail out in case a
-	 * new request is submitted.
-	 */
-	end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
-	do {
-		if (new_requests_since_last_retire(dev_priv))
-			return;
-
-		if (intel_engines_are_idle(dev_priv))
-			break;
-
-		usleep_range(100, 500);
-	} while (ktime_before(ktime_get(), end));
-
-	rearm_hangcheck =
-		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-
-	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
-		/* Currently busy, come back later */
-		mod_delayed_work(dev_priv->wq,
-				 &dev_priv->gt.idle_work,
-				 msecs_to_jiffies(50));
-		goto out_rearm;
-	}
-
-	/*
-	 * New request retired after this work handler started, extend active
-	 * period until next instance of the work.
-	 */
-	if (new_requests_since_last_retire(dev_priv))
-		goto out_unlock;
-
-	/*
-	 * Be paranoid and flush a concurrent interrupt to make sure
-	 * we don't reactivate any irq tasklets after parking.
-	 *
-	 * FIXME: Note that even though we have waited for execlists to be idle,
-	 * there may still be an in-flight interrupt even though the CSB
-	 * is now empty. synchronize_irq() makes sure that a residual interrupt
-	 * is completed before we continue, but it doesn't prevent the HW from
-	 * raising a spurious interrupt later. To complete the shield we should
-	 * coordinate disabling the CS irq with flushing the interrupts.
-	 */
-	synchronize_irq(dev_priv->drm.irq);
-
-	intel_engines_park(dev_priv);
-	i915_gem_timelines_park(dev_priv);
-
-	i915_pmu_gt_parked(dev_priv);
-
-	GEM_BUG_ON(!dev_priv->gt.awake);
-	dev_priv->gt.awake = false;
-	rearm_hangcheck = false;
-
-	if (INTEL_GEN(dev_priv) >= 6)
-		gen6_rps_idle(dev_priv);
-
-	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
-
-	intel_runtime_pm_put(dev_priv);
-out_unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-
-out_rearm:
-	if (rearm_hangcheck) {
-		GEM_BUG_ON(!dev_priv->gt.awake);
-		i915_queue_hangcheck(dev_priv);
-	}
-}
-
 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
 {
 	struct drm_i915_private *i915 = to_i915(gem->dev);
@@ -4798,6 +4684,120 @@ void i915_gem_sanitize(struct drm_i915_private *i915)
 	}
 }
 
+static void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), gt.retire_work.work);
+	struct drm_device *dev = &dev_priv->drm;
+
+	/* Come back later if the device is busy... */
+	if (mutex_trylock(&dev->struct_mutex)) {
+		i915_gem_retire_requests(dev_priv);
+		mutex_unlock(&dev->struct_mutex);
+	}
+
+	/* Keep the retire handler running until we are finally idle.
+	 * We do not need to do this test under locking as in the worst-case
+	 * we queue the retire worker once too often.
+	 */
+	if (READ_ONCE(dev_priv->gt.awake)) {
+		i915_queue_hangcheck(dev_priv);
+		queue_delayed_work(dev_priv->wq,
+				   &dev_priv->gt.retire_work,
+				   round_jiffies_up_relative(HZ));
+	}
+}
+
+static inline bool
+new_requests_since_last_retire(const struct drm_i915_private *i915)
+{
+	return (READ_ONCE(i915->gt.active_requests) ||
+		work_pending(&i915->gt.idle_work.work));
+}
+
+static void
+i915_gem_idle_work_handler(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), gt.idle_work.work);
+	bool rearm_hangcheck;
+	ktime_t end;
+
+	if (!READ_ONCE(dev_priv->gt.awake))
+		return;
+
+	/*
+	 * Wait for last execlists context complete, but bail out in case a
+	 * new request is submitted.
+	 */
+	end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
+	do {
+		if (new_requests_since_last_retire(dev_priv))
+			return;
+
+		if (intel_engines_are_idle(dev_priv))
+			break;
+
+		usleep_range(100, 500);
+	} while (ktime_before(ktime_get(), end));
+
+	rearm_hangcheck =
+		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+
+	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
+		/* Currently busy, come back later */
+		mod_delayed_work(dev_priv->wq,
+				 &dev_priv->gt.idle_work,
+				 msecs_to_jiffies(50));
+		goto out_rearm;
+	}
+
+	/*
+	 * New request retired after this work handler started, extend active
+	 * period until next instance of the work.
+	 */
+	if (new_requests_since_last_retire(dev_priv))
+		goto out_unlock;
+
+	/*
+	 * Be paranoid and flush a concurrent interrupt to make sure
+	 * we don't reactivate any irq tasklets after parking.
+	 *
+	 * FIXME: Note that even though we have waited for execlists to be idle,
+	 * there may still be an in-flight interrupt even though the CSB
+	 * is now empty. synchronize_irq() makes sure that a residual interrupt
+	 * is completed before we continue, but it doesn't prevent the HW from
+	 * raising a spurious interrupt later. To complete the shield we should
+	 * coordinate disabling the CS irq with flushing the interrupts.
+	 */
+	synchronize_irq(dev_priv->drm.irq);
+
+	intel_engines_park(dev_priv);
+	i915_gem_timelines_park(dev_priv);
+
+	i915_pmu_gt_parked(dev_priv);
+
+	GEM_BUG_ON(!dev_priv->gt.awake);
+	dev_priv->gt.awake = false;
+	rearm_hangcheck = false;
+
+	if (INTEL_GEN(dev_priv) >= 6)
+		gen6_rps_idle(dev_priv);
+
+	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
+
+	intel_runtime_pm_put(dev_priv);
+out_unlock:
+	mutex_unlock(&dev_priv->drm.struct_mutex);
+
+out_rearm:
+	if (rearm_hangcheck) {
+		GEM_BUG_ON(!dev_priv->gt.awake);
+		i915_queue_hangcheck(dev_priv);
+	}
+}
+
 int i915_gem_suspend(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = &dev_priv->drm;
-- 
2.15.1
