From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 022/262] drm/i915/preemption: Select timeout when scheduling
Date: Thu, 17 May 2018 07:03:38 +0100
Message-ID: <20180517060738.19193-22-chris@chris-wilson.co.uk>
In-Reply-To: <20180517060738.19193-1-chris@chris-wilson.co.uk>

The choice of preemption timeout is determined by the context from which
we trigger the preemption; as such, allow the caller to specify the
desired timeout.

Effectively, the other choice would be to use the shortest timeout along
the dependency chain. However, given that we would have already
triggered preemption for the dependency chain, we can assume that no
preemption along that chain is more important than the current request;
ergo we need only consider the current timeout. Realising this, we can
pass control of the preemption timeout to the caller for greater
flexibility.
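
For illustration only (not part of this patch), a minimal sketch of how a
caller might pick its own timeout and hand it to ->schedule(); the helper
name and the timeout_ns parameter are hypothetical placeholders, and the
locking mirrors the existing call sites touched below:

	static void schedule_with_timeout(struct i915_request *rq,
					  const struct i915_sched_attr *attr,
					  unsigned int timeout_ns)
	{
		struct intel_engine_cs *engine = rq->engine;

		local_bh_disable();
		rcu_read_lock(); /* RCU serialisation for set-wedged protection */
		if (engine->schedule)
			/* 0 means "no forced preemption timeout" */
			engine->schedule(rq, attr, timeout_ns);
		rcu_read_unlock();
		local_bh_enable(); /* kick the tasklets if queues were reprioritised */
	}

Callers that have no meaningful per-context value simply pass 0, as the
updated call sites below do.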

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c            |   2 +-
 drivers/gpu/drm/i915/i915_request.c        |   2 +-
 drivers/gpu/drm/i915/intel_lrc.c           |   5 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h    |   6 +-
 drivers/gpu/drm/i915/selftests/intel_lrc.c | 110 ++++++++++++++++++++-
 5 files changed, 118 insertions(+), 7 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fa09837d0569..0ec4c7a7c237 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -581,7 +581,7 @@ static void __fence_set_priority(struct dma_fence *fence,
 	local_bh_disable();
 	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
 	if (engine->schedule)
-		engine->schedule(rq, attr);
+		engine->schedule(rq, attr, 0);
 	rcu_read_unlock();
 	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
 }
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index fc499bcbd105..c3893312cb9d 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1113,7 +1113,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	local_bh_disable();
 	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
 	if (engine->schedule)
-		engine->schedule(request, &request->gem_context->sched);
+		engine->schedule(request, &request->gem_context->sched, 0);
 	rcu_read_unlock();
 	i915_sw_fence_commit(&request->submit);
 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index b6f369d06bb8..c5795b9944b1 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1290,7 +1290,8 @@ sched_lock_engine(struct i915_sched_node *node, struct intel_engine_cs *locked)
 }
 
 static void execlists_schedule(struct i915_request *request,
-			       const struct i915_sched_attr *attr)
+			       const struct i915_sched_attr *attr,
+			       unsigned int timeout)
 {
 	struct i915_priolist *uninitialized_var(pl);
 	struct intel_engine_cs *engine, *last;
@@ -1393,7 +1394,7 @@ static void execlists_schedule(struct i915_request *request,
 
 		if (prio > engine->execlists.queue_priority &&
 		    i915_sw_fence_done(&sched_to_request(node)->submit)) {
-			__update_queue(engine, prio, 0);
+			__update_queue(engine, prio, timeout);
 			tasklet_hi_schedule(&engine->execlists.tasklet);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 495cc2628fa9..769ac73004c4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -478,14 +478,16 @@ struct intel_engine_cs {
 	 */
 	void		(*submit_request)(struct i915_request *rq);
 
-	/* Call when the priority on a request has changed and it and its
+	/*
+	 * Call when the priority on a request has changed and it and its
 	 * dependencies may need rescheduling. Note the request itself may
 	 * not be ready to run!
 	 *
 	 * Called under the struct_mutex.
 	 */
 	void		(*schedule)(struct i915_request *request,
-				    const struct i915_sched_attr *attr);
+				    const struct i915_sched_attr *attr,
+				    unsigned int timeout);
 
 	/*
 	 * Cancel all requests on the hardware, or queued for execution.
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 40c1f6951444..7efa52e514ab 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -406,7 +406,7 @@ static int live_late_preempt(void *arg)
 		}
 
 		attr.priority = I915_PRIORITY_MAX;
-		engine->schedule(rq, &attr);
+		engine->schedule(rq, &attr, 0);
 
 		if (!wait_for_spinner(&spin_hi, rq)) {
 			pr_err("High priority context failed to preempt the low priority context\n");
@@ -618,6 +618,113 @@ static int live_preempt_reset(void *arg)
 	return err;
 }
 
+static int live_late_preempt_timeout(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct i915_gem_context *ctx_hi, *ctx_lo;
+	struct spinner spin_hi, spin_lo;
+	struct intel_engine_cs *engine;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+		return 0;
+
+	mutex_lock(&i915->drm.struct_mutex);
+
+	if (spinner_init(&spin_hi, i915))
+		goto err_unlock;
+
+	if (spinner_init(&spin_lo, i915))
+		goto err_spin_hi;
+
+	ctx_hi = kernel_context(i915);
+	if (!ctx_hi)
+		goto err_spin_lo;
+
+	ctx_lo = kernel_context(i915);
+	if (!ctx_lo)
+		goto err_ctx_hi;
+
+	for_each_engine(engine, i915, id) {
+		struct i915_request *rq;
+
+		rq = spinner_create_request(&spin_lo, ctx_lo, engine, MI_NOOP);
+		if (IS_ERR(rq)) {
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (!wait_for_spinner(&spin_lo, rq)) {
+			pr_err("First context failed to start\n");
+			goto err_wedged;
+		}
+
+		rq = spinner_create_request(&spin_hi, ctx_hi, engine, MI_NOOP);
+		if (IS_ERR(rq)) {
+			spinner_end(&spin_lo);
+			err = PTR_ERR(rq);
+			goto err_ctx_lo;
+		}
+
+		i915_request_add(rq);
+		if (wait_for_spinner(&spin_hi, rq)) {
+			pr_err("Second context overtook first?\n");
+			goto err_wedged;
+		}
+
+		GEM_TRACE("%s rescheduling (no timeout)\n", engine->name);
+		engine->schedule(rq, &(struct i915_sched_attr){
+				 .priority = 1,
+				 }, 0);
+
+		if (wait_for_spinner(&spin_hi, rq)) {
+			pr_err("High priority context overtook first without an arbitration point?\n");
+			goto err_wedged;
+		}
+
+		GEM_TRACE("%s rescheduling (with timeout)\n", engine->name);
+		engine->schedule(rq, &(struct i915_sched_attr){
+				 .priority = 2,
+				 }, 10 * 1000 /* 10us */);
+
+		if (!wait_for_spinner(&spin_hi, rq)) {
+			pr_err("High priority context failed to force itself in front of the low priority context\n");
+			GEM_TRACE_DUMP();
+			goto err_wedged;
+		}
+
+		spinner_end(&spin_hi);
+		spinner_end(&spin_lo);
+		if (igt_flush_test(i915, I915_WAIT_LOCKED)) {
+			err = -EIO;
+			goto err_ctx_lo;
+		}
+	}
+
+	err = 0;
+err_ctx_lo:
+	kernel_context_close(ctx_lo);
+err_ctx_hi:
+	kernel_context_close(ctx_hi);
+err_spin_lo:
+	spinner_fini(&spin_lo);
+err_spin_hi:
+	spinner_fini(&spin_hi);
+err_unlock:
+	igt_flush_test(i915, I915_WAIT_LOCKED);
+	mutex_unlock(&i915->drm.struct_mutex);
+	return err;
+
+err_wedged:
+	spinner_end(&spin_hi);
+	spinner_end(&spin_lo);
+	i915_gem_set_wedged(i915);
+	err = -EIO;
+	goto err_ctx_lo;
+}
+
 int intel_execlists_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
@@ -626,6 +733,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_late_preempt),
 		SUBTEST(live_preempt_timeout),
 		SUBTEST(live_preempt_reset),
+		SUBTEST(live_late_preempt_timeout),
 	};
 
 	if (!HAS_EXECLISTS(i915))
-- 
2.17.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
