* [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context
@ 2020-03-27 9:24 Chris Wilson
2020-03-27 9:27 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context (rev3) Patchwork
0 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2020-03-27 9:24 UTC (permalink / raw)
To: intel-gfx
In what seems remarkably similar to the w/a required to not reload an
idle context with HEAD==TAIL, it appears we must prevent the HW from
switching to an idle context in ELSP[1], while simultaneously trying to
preempt the HW to run another context and a continuation of the idle
context (which is no longer idle).
process_csb: vecs0: cs-irq head=0, tail=1
process_csb: vecs0: csb[1]: status=0x00000882:0x00000020
trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
process_csb: vecs0: cs-irq head=1, tail=2
process_csb: vecs0: csb[2]: status=0x00000814:0x00000040
trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
process_csb: vecs0: cs-irq head=2, tail=5
process_csb: vecs0: csb[3]: status=0x00000812:0x00000020
trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
process_csb: vecs0: csb[4]: status=0x00000814:0x00000060
trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
process_csb: vecs0: csb[5]: status=0x00000818:0x00000020
trace_ports: vecs0: completed { 8c0:32, 0:0 }
process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, ctl:00000000, mode:00000200}
process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, hwsp:30},
process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
process_csb: GEM_BUG_ON("context completed before request")
Fortunately, we just so happen to have a semaphore in place to prevent
the ring HEAD from proceeding past the end of a request that we can use
to fix the HEAD in position as we reprogram ELSP.
Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
drivers/gpu/drm/i915/gt/intel_lrc.c | 75 +++++++++++++++--------------
1 file changed, 39 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b12355048501..26ece9390954 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request **ports, int count)
memset_p((void **)ports, NULL, count);
}
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
@@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
execlists->queue_priority_hint);
record_preemption(execlists);
- /*
- * Don't let the RING_HEAD advance past the breadcrumb
- * as we unwind (and until we resubmit) so that we do
- * not accidentally tell it to go backwards.
- */
- ring_set_paused(engine, 1);
-
/*
* Note that we have not stopped the GPU at this point,
* so we are unwinding the incomplete requests as they
@@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last->sched.attr.priority,
execlists->queue_priority_hint);
- ring_set_paused(engine, 1);
defer_active(engine);
/*
@@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* of timeslices, our queue might be.
*/
start_timeslice(engine);
- return;
+ return false;
}
}
}
@@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
if (last && !can_merge_rq(last, rq)) {
+ /* leave this for another sibling */
spin_unlock(&ve->base.active.lock);
start_timeslice(engine);
- return; /* leave this for another sibling */
+ return false;
}
ENGINE_TRACE(engine,
@@ -2193,32 +2186,31 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* interrupt for secondary ports).
*/
execlists->queue_priority_hint = queue_prio(execlists);
+ if (!submit)
+ return false;
- if (submit) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- execlists->switch_priority_hint =
- switch_prio(engine, *execlists->pending);
-
- /*
- * Skip if we ended up with exactly the same set of requests,
- * e.g. trying to timeslice a pair of ordered contexts
- */
- if (!memcmp(active, execlists->pending,
- (port - execlists->pending + 1) * sizeof(*port))) {
- do
- execlists_schedule_out(fetch_and_zero(port));
- while (port-- != execlists->pending);
+ *port = execlists_schedule_in(last, port - execlists->pending);
+ execlists->switch_priority_hint =
+ switch_prio(engine, *execlists->pending);
- goto skip_submit;
- }
- clear_ports(port + 1, last_port - port);
+ /*
+ * Skip if we ended up with exactly the same set of requests,
+ * e.g. trying to timeslice a pair of ordered contexts
+ */
+ if (!memcmp(active, execlists->pending,
+ (port - execlists->pending + 1) * sizeof(*port))) {
+ do
+ execlists_schedule_out(fetch_and_zero(port));
+ while (port-- != execlists->pending);
- execlists_submit_ports(engine);
- set_preempt_timeout(engine, *active);
- } else {
-skip_submit:
- ring_set_paused(engine, 0);
+ return false;
}
+ clear_ports(port + 1, last_port - port);
+
+ execlists_submit_ports(engine);
+ set_preempt_timeout(engine, *active);
+ tasklet_hi_schedule(&execlists->active); /* paper over lost interrupt */
+ return true;
}
static void
@@ -2478,7 +2470,16 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
lockdep_assert_held(&engine->active.lock);
if (!READ_ONCE(engine->execlists.pending[0])) {
rcu_read_lock(); /* protect peeking at execlists->active */
- execlists_dequeue(engine);
+
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
+ if (!execlists_dequeue(engine))
+ ring_set_paused(engine, 0);
+
rcu_read_unlock();
}
}
@@ -2816,8 +2817,7 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
ring_set_paused(engine, 1); /* Freeze the current request in place */
if (execlists_capture(engine))
intel_engine_reset(engine, msg);
- else
- ring_set_paused(engine, 0);
+ ring_set_paused(engine, 0);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(bit, lock);
@@ -4203,6 +4203,9 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
{
+ *cs++ = MI_ARB_CHECK;
+ *cs++ = MI_NOOP;
+
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [Intel-gfx] ✗ Fi.CI.BUILD: failure for drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context (rev3)
2020-03-27 9:24 [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context Chris Wilson
@ 2020-03-27 9:27 ` Patchwork
0 siblings, 0 replies; 8+ messages in thread
From: Patchwork @ 2020-03-27 9:27 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context (rev3)
URL : https://patchwork.freedesktop.org/series/75130/
State : failure
== Summary ==
CALL scripts/checksyscalls.sh
CALL scripts/atomic/check-atomics.sh
DESCEND objtool
CHK include/generated/compile.h
CC [M] drivers/gpu/drm/i915/gt/intel_lrc.o
drivers/gpu/drm/i915/gt/intel_lrc.c: In function ‘execlists_dequeue’:
drivers/gpu/drm/i915/gt/intel_lrc.c:2212:22: error: passing argument 1 of ‘tasklet_hi_schedule’ from incompatible pointer type [-Werror=incompatible-pointer-types]
tasklet_hi_schedule(&execlists->active); /* paper over lost interrupt */
^
In file included from drivers/gpu/drm/i915/gt/intel_lrc.c:134:0:
./include/linux/interrupt.h:658:20: note: expected ‘struct tasklet_struct *’ but argument is of type ‘struct i915_request * const**’
static inline void tasklet_hi_schedule(struct tasklet_struct *t)
^~~~~~~~~~~~~~~~~~~
cc1: all warnings being treated as errors
scripts/Makefile.build:267: recipe for target 'drivers/gpu/drm/i915/gt/intel_lrc.o' failed
make[4]: *** [drivers/gpu/drm/i915/gt/intel_lrc.o] Error 1
scripts/Makefile.build:505: recipe for target 'drivers/gpu/drm/i915' failed
make[3]: *** [drivers/gpu/drm/i915] Error 2
scripts/Makefile.build:505: recipe for target 'drivers/gpu/drm' failed
make[2]: *** [drivers/gpu/drm] Error 2
scripts/Makefile.build:505: recipe for target 'drivers/gpu' failed
make[1]: *** [drivers/gpu] Error 2
Makefile:1683: recipe for target 'drivers' failed
make: *** [drivers] Error 2
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context
2020-03-27 20:19 ` Mika Kuoppala
@ 2020-03-27 20:32 ` Chris Wilson
0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-03-27 20:32 UTC (permalink / raw)
To: Mika Kuoppala, Tvrtko Ursulin, intel-gfx
Quoting Mika Kuoppala (2020-03-27 20:19:15)
> Chris Wilson <chris@chris-wilson.co.uk> writes:
>
> > Quoting Tvrtko Ursulin (2020-03-27 15:59:45)
> >>
> >> On 27/03/2020 11:26, Chris Wilson wrote:
> >> > In what seems remarkably similar to the w/a required to not reload an
> >> > idle context with HEAD==TAIL, it appears we must prevent the HW from
> >> > switching to an idle context in ELSP[1], while simultaneously trying to
> >> > preempt the HW to run another context and a continuation of the idle
> >> > context (which is no longer idle).
> >> >
> >> > process_csb: vecs0: cs-irq head=0, tail=1
> >> > process_csb: vecs0: csb[1]: status=0x00000882:0x00000020
> >> > trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
> >> > trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
> >> > trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
> >> > process_csb: vecs0: cs-irq head=1, tail=2
> >> > process_csb: vecs0: csb[2]: status=0x00000814:0x00000040
> >> > trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
> >> > process_csb: vecs0: cs-irq head=2, tail=5
> >> > process_csb: vecs0: csb[3]: status=0x00000812:0x00000020
> >> > trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
> >> > trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
> >> > process_csb: vecs0: csb[4]: status=0x00000814:0x00000060
> >> > trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
> >> > process_csb: vecs0: csb[5]: status=0x00000818:0x00000020
> >> > trace_ports: vecs0: completed { 8c0:32, 0:0 }
> >> > process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, ctl:00000000, mode:00000200}
> >> > process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, hwsp:30},
> >> > process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
> >> > process_csb: GEM_BUG_ON("context completed before request")
> >> >
> >> > Fortunately, we just so happen to have a semaphore in place to prevent
> >> > the ring HEAD from proceeding past the end of a request that we can use
> >> > to fix the HEAD in position as we reprogram ELSP.
> >> >
> >> > Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
> >> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> >> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> >> > ---
> >> > drivers/gpu/drm/i915/gt/intel_gt_irq.c | 2 +
> >> > drivers/gpu/drm/i915/gt/intel_lrc.c | 72 +++++++++++++-------------
> >> > 2 files changed, 38 insertions(+), 36 deletions(-)
> >> >
> >> > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
> >> > index f0e7fd95165a..be3817d99908 100644
> >> > --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
> >> > +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
> >> > @@ -24,6 +24,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
> >> > {
> >> > bool tasklet = false;
> >> >
> >> > + ENGINE_TRACE(engine, "iir: %04x\n", iir);
> >> > +
> >> > if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
> >> > u32 eir;
> >> >
> >> > diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> >> > index b12355048501..53650b452bc9 100644
> >> > --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> >> > +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> >> > @@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request **ports, int count)
> >> > memset_p((void **)ports, NULL, count);
> >> > }
> >> >
> >> > -static void execlists_dequeue(struct intel_engine_cs *engine)
> >> > +static bool execlists_dequeue(struct intel_engine_cs *engine)
> >> > {
> >> > struct intel_engine_execlists * const execlists = &engine->execlists;
> >> > struct i915_request **port = execlists->pending;
> >> > @@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> >> > execlists->queue_priority_hint);
> >> > record_preemption(execlists);
> >> >
> >> > - /*
> >> > - * Don't let the RING_HEAD advance past the breadcrumb
> >> > - * as we unwind (and until we resubmit) so that we do
> >> > - * not accidentally tell it to go backwards.
> >> > - */
> >> > - ring_set_paused(engine, 1);
> >> > -
> >> > /*
> >> > * Note that we have not stopped the GPU at this point,
> >> > * so we are unwinding the incomplete requests as they
> >> > @@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> >> > last->sched.attr.priority,
> >> > execlists->queue_priority_hint);
> >> >
> >> > - ring_set_paused(engine, 1);
> >> > defer_active(engine);
> >> >
> >> > /*
> >> > @@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> >> > * of timeslices, our queue might be.
> >> > */
> >> > start_timeslice(engine);
> >> > - return;
> >> > + return false;
> >> > }
> >> > }
> >> > }
> >> > @@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> >> > }
> >> >
> >> > if (last && !can_merge_rq(last, rq)) {
> >> > + /* leave this for another sibling */
> >> > spin_unlock(&ve->base.active.lock);
> >> > start_timeslice(engine);
> >> > - return; /* leave this for another sibling */
> >> > + return false;
> >> > }
> >> >
> >> > ENGINE_TRACE(engine,
> >> > @@ -2193,32 +2186,31 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> >> > * interrupt for secondary ports).
> >> > */
> >> > execlists->queue_priority_hint = queue_prio(execlists);
> >> > + if (!submit)
> >> > + return false;
> >> >
> >> > - if (submit) {
> >> > - *port = execlists_schedule_in(last, port - execlists->pending);
> >> > - execlists->switch_priority_hint =
> >> > - switch_prio(engine, *execlists->pending);
> >> > + *port = execlists_schedule_in(last, port - execlists->pending);
> >> > + execlists->switch_priority_hint =
> >> > + switch_prio(engine, *execlists->pending);
> >> >
> >> > - /*
> >> > - * Skip if we ended up with exactly the same set of requests,
> >> > - * e.g. trying to timeslice a pair of ordered contexts
> >> > - */
> >> > - if (!memcmp(active, execlists->pending,
> >> > - (port - execlists->pending + 1) * sizeof(*port))) {
> >> > - do
> >> > - execlists_schedule_out(fetch_and_zero(port));
> >> > - while (port-- != execlists->pending);
> >> > -
> >> > - goto skip_submit;
> >> > - }
> >> > - clear_ports(port + 1, last_port - port);
> >> > + /*
> >> > + * Skip if we ended up with exactly the same set of requests,
> >> > + * e.g. trying to timeslice a pair of ordered contexts
> >> > + */
> >> > + if (!memcmp(active, execlists->pending,
> >> > + (port - execlists->pending + 1) * sizeof(*port))) {
> >> > + do
> >> > + execlists_schedule_out(fetch_and_zero(port));
> >> > + while (port-- != execlists->pending);
> >> >
> >> > - execlists_submit_ports(engine);
> >> > - set_preempt_timeout(engine, *active);
> >> > - } else {
> >> > -skip_submit:
> >> > - ring_set_paused(engine, 0);
> >> > + return false;
> >> > }
> >> > + clear_ports(port + 1, last_port - port);
> >> > +
> >> > + execlists_submit_ports(engine);
> >> > + set_preempt_timeout(engine, *active);
> >> > + tasklet_hi_schedule(&execlists->tasklet); /* lost interrupt */
> >> > + return true;
> >> > }
> >> >
> >> > static void
> >> > @@ -2478,7 +2470,16 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
> >> > lockdep_assert_held(&engine->active.lock);
> >> > if (!READ_ONCE(engine->execlists.pending[0])) {
> >> > rcu_read_lock(); /* protect peeking at execlists->active */
> >> > - execlists_dequeue(engine);
> >> > +
> >> > + /*
> >> > + * Don't let the RING_HEAD advance past the breadcrumb
> >> > + * as we unwind (and until we resubmit) so that we do
> >> > + * not accidentally tell it to go backwards.
> >> > + */
> >> > + ring_set_paused(engine, 1);
> >> > + if (!execlists_dequeue(engine))
> >> > + ring_set_paused(engine, 0);
> >> > +
> >> > rcu_read_unlock();
> >> > }
> >> > }
> >> > @@ -2816,8 +2817,7 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
> >> > ring_set_paused(engine, 1); /* Freeze the current request in place */
> >> > if (execlists_capture(engine))
> >> > intel_engine_reset(engine, msg);
> >> > - else
> >> > - ring_set_paused(engine, 0);
> >> > + ring_set_paused(engine, 0);
> >> >
> >> > tasklet_enable(&engine->execlists.tasklet);
> >> > clear_and_wake_up_bit(bit, lock);
> >> >
> >>
> >> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> >
> > The problem appears to be that if we lite-restore into a context inside
> > the semaphore, it doesn't yield an arbitration point and we do not raise
> > a CSB event.
>
> Trying to make sense of it all...
>
> we do not raise == the hardware does not raise?
HW.
> So if it is about lite restoring, we can't workaround by always making
> sure elsp[1] head != tail?
No. The context we enter into ELSP[1] is still executing on the GPU and
hits HEAD==TAIL after we write the register, but before the GPU
processes the register.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context
2020-03-27 16:47 ` Chris Wilson
@ 2020-03-27 20:19 ` Mika Kuoppala
2020-03-27 20:32 ` Chris Wilson
0 siblings, 1 reply; 8+ messages in thread
From: Mika Kuoppala @ 2020-03-27 20:19 UTC (permalink / raw)
To: Chris Wilson, Tvrtko Ursulin, intel-gfx
Chris Wilson <chris@chris-wilson.co.uk> writes:
> Quoting Tvrtko Ursulin (2020-03-27 15:59:45)
>>
>> On 27/03/2020 11:26, Chris Wilson wrote:
>> > In what seems remarkably similar to the w/a required to not reload an
>> > idle context with HEAD==TAIL, it appears we must prevent the HW from
>> > switching to an idle context in ELSP[1], while simultaneously trying to
>> > preempt the HW to run another context and a continuation of the idle
>> > context (which is no longer idle).
>> >
>> > process_csb: vecs0: cs-irq head=0, tail=1
>> > process_csb: vecs0: csb[1]: status=0x00000882:0x00000020
>> > trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
>> > trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
>> > trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
>> > process_csb: vecs0: cs-irq head=1, tail=2
>> > process_csb: vecs0: csb[2]: status=0x00000814:0x00000040
>> > trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
>> > process_csb: vecs0: cs-irq head=2, tail=5
>> > process_csb: vecs0: csb[3]: status=0x00000812:0x00000020
>> > trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
>> > trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
>> > process_csb: vecs0: csb[4]: status=0x00000814:0x00000060
>> > trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
>> > process_csb: vecs0: csb[5]: status=0x00000818:0x00000020
>> > trace_ports: vecs0: completed { 8c0:32, 0:0 }
>> > process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, ctl:00000000, mode:00000200}
>> > process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, hwsp:30},
>> > process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
>> > process_csb: GEM_BUG_ON("context completed before request")
>> >
>> > Fortunately, we just so happen to have a semaphore in place to prevent
>> > the ring HEAD from proceeding past the end of a request that we can use
>> > to fix the HEAD in position as we reprogram ELSP.
>> >
>> > Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
>> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
>> > ---
>> > drivers/gpu/drm/i915/gt/intel_gt_irq.c | 2 +
>> > drivers/gpu/drm/i915/gt/intel_lrc.c | 72 +++++++++++++-------------
>> > 2 files changed, 38 insertions(+), 36 deletions(-)
>> >
>> > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
>> > index f0e7fd95165a..be3817d99908 100644
>> > --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
>> > +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
>> > @@ -24,6 +24,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
>> > {
>> > bool tasklet = false;
>> >
>> > + ENGINE_TRACE(engine, "iir: %04x\n", iir);
>> > +
>> > if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
>> > u32 eir;
>> >
>> > diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
>> > index b12355048501..53650b452bc9 100644
>> > --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
>> > +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
>> > @@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request **ports, int count)
>> > memset_p((void **)ports, NULL, count);
>> > }
>> >
>> > -static void execlists_dequeue(struct intel_engine_cs *engine)
>> > +static bool execlists_dequeue(struct intel_engine_cs *engine)
>> > {
>> > struct intel_engine_execlists * const execlists = &engine->execlists;
>> > struct i915_request **port = execlists->pending;
>> > @@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>> > execlists->queue_priority_hint);
>> > record_preemption(execlists);
>> >
>> > - /*
>> > - * Don't let the RING_HEAD advance past the breadcrumb
>> > - * as we unwind (and until we resubmit) so that we do
>> > - * not accidentally tell it to go backwards.
>> > - */
>> > - ring_set_paused(engine, 1);
>> > -
>> > /*
>> > * Note that we have not stopped the GPU at this point,
>> > * so we are unwinding the incomplete requests as they
>> > @@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>> > last->sched.attr.priority,
>> > execlists->queue_priority_hint);
>> >
>> > - ring_set_paused(engine, 1);
>> > defer_active(engine);
>> >
>> > /*
>> > @@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>> > * of timeslices, our queue might be.
>> > */
>> > start_timeslice(engine);
>> > - return;
>> > + return false;
>> > }
>> > }
>> > }
>> > @@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>> > }
>> >
>> > if (last && !can_merge_rq(last, rq)) {
>> > + /* leave this for another sibling */
>> > spin_unlock(&ve->base.active.lock);
>> > start_timeslice(engine);
>> > - return; /* leave this for another sibling */
>> > + return false;
>> > }
>> >
>> > ENGINE_TRACE(engine,
>> > @@ -2193,32 +2186,31 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>> > * interrupt for secondary ports).
>> > */
>> > execlists->queue_priority_hint = queue_prio(execlists);
>> > + if (!submit)
>> > + return false;
>> >
>> > - if (submit) {
>> > - *port = execlists_schedule_in(last, port - execlists->pending);
>> > - execlists->switch_priority_hint =
>> > - switch_prio(engine, *execlists->pending);
>> > + *port = execlists_schedule_in(last, port - execlists->pending);
>> > + execlists->switch_priority_hint =
>> > + switch_prio(engine, *execlists->pending);
>> >
>> > - /*
>> > - * Skip if we ended up with exactly the same set of requests,
>> > - * e.g. trying to timeslice a pair of ordered contexts
>> > - */
>> > - if (!memcmp(active, execlists->pending,
>> > - (port - execlists->pending + 1) * sizeof(*port))) {
>> > - do
>> > - execlists_schedule_out(fetch_and_zero(port));
>> > - while (port-- != execlists->pending);
>> > -
>> > - goto skip_submit;
>> > - }
>> > - clear_ports(port + 1, last_port - port);
>> > + /*
>> > + * Skip if we ended up with exactly the same set of requests,
>> > + * e.g. trying to timeslice a pair of ordered contexts
>> > + */
>> > + if (!memcmp(active, execlists->pending,
>> > + (port - execlists->pending + 1) * sizeof(*port))) {
>> > + do
>> > + execlists_schedule_out(fetch_and_zero(port));
>> > + while (port-- != execlists->pending);
>> >
>> > - execlists_submit_ports(engine);
>> > - set_preempt_timeout(engine, *active);
>> > - } else {
>> > -skip_submit:
>> > - ring_set_paused(engine, 0);
>> > + return false;
>> > }
>> > + clear_ports(port + 1, last_port - port);
>> > +
>> > + execlists_submit_ports(engine);
>> > + set_preempt_timeout(engine, *active);
>> > + tasklet_hi_schedule(&execlists->tasklet); /* lost interrupt */
>> > + return true;
>> > }
>> >
>> > static void
>> > @@ -2478,7 +2470,16 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
>> > lockdep_assert_held(&engine->active.lock);
>> > if (!READ_ONCE(engine->execlists.pending[0])) {
>> > rcu_read_lock(); /* protect peeking at execlists->active */
>> > - execlists_dequeue(engine);
>> > +
>> > + /*
>> > + * Don't let the RING_HEAD advance past the breadcrumb
>> > + * as we unwind (and until we resubmit) so that we do
>> > + * not accidentally tell it to go backwards.
>> > + */
>> > + ring_set_paused(engine, 1);
>> > + if (!execlists_dequeue(engine))
>> > + ring_set_paused(engine, 0);
>> > +
>> > rcu_read_unlock();
>> > }
>> > }
>> > @@ -2816,8 +2817,7 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
>> > ring_set_paused(engine, 1); /* Freeze the current request in place */
>> > if (execlists_capture(engine))
>> > intel_engine_reset(engine, msg);
>> > - else
>> > - ring_set_paused(engine, 0);
>> > + ring_set_paused(engine, 0);
>> >
>> > tasklet_enable(&engine->execlists.tasklet);
>> > clear_and_wake_up_bit(bit, lock);
>> >
>>
>> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>
> The problem appears to be that if we lite-restore into a context inside
> the semaphore, it doesn't yield an arbitration point and we do not raise
> a CSB event.
Trying to make sense of it all...
we do not raise == the hardware does not raise?
So if it is about lite restoring, we can't workaround by always making
sure elsp[1] head != tail?
-Mika
>
> Out of the frying pan and into the fire.
> -Chris
> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context
2020-03-27 15:59 ` Tvrtko Ursulin
@ 2020-03-27 16:47 ` Chris Wilson
2020-03-27 20:19 ` Mika Kuoppala
0 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2020-03-27 16:47 UTC (permalink / raw)
To: Tvrtko Ursulin, intel-gfx
Quoting Tvrtko Ursulin (2020-03-27 15:59:45)
>
> On 27/03/2020 11:26, Chris Wilson wrote:
> > In what seems remarkably similar to the w/a required to not reload an
> > idle context with HEAD==TAIL, it appears we must prevent the HW from
> > switching to an idle context in ELSP[1], while simultaneously trying to
> > preempt the HW to run another context and a continuation of the idle
> > context (which is no longer idle).
> >
> > process_csb: vecs0: cs-irq head=0, tail=1
> > process_csb: vecs0: csb[1]: status=0x00000882:0x00000020
> > trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
> > trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
> > trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
> > process_csb: vecs0: cs-irq head=1, tail=2
> > process_csb: vecs0: csb[2]: status=0x00000814:0x00000040
> > trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
> > process_csb: vecs0: cs-irq head=2, tail=5
> > process_csb: vecs0: csb[3]: status=0x00000812:0x00000020
> > trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
> > trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
> > process_csb: vecs0: csb[4]: status=0x00000814:0x00000060
> > trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
> > process_csb: vecs0: csb[5]: status=0x00000818:0x00000020
> > trace_ports: vecs0: completed { 8c0:32, 0:0 }
> > process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, ctl:00000000, mode:00000200}
> > process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, hwsp:30},
> > process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
> > process_csb: GEM_BUG_ON("context completed before request")
> >
> > Fortunately, we just so happen to have a semaphore in place to prevent
> > the ring HEAD from proceeding past the end of a request that we can use
> > to fix the HEAD in position as we reprogram ELSP.
> >
> > Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> > ---
> > drivers/gpu/drm/i915/gt/intel_gt_irq.c | 2 +
> > drivers/gpu/drm/i915/gt/intel_lrc.c | 72 +++++++++++++-------------
> > 2 files changed, 38 insertions(+), 36 deletions(-)
> >
> > diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
> > index f0e7fd95165a..be3817d99908 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
> > @@ -24,6 +24,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
> > {
> > bool tasklet = false;
> >
> > + ENGINE_TRACE(engine, "iir: %04x\n", iir);
> > +
> > if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
> > u32 eir;
> >
> > diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > index b12355048501..53650b452bc9 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > @@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request **ports, int count)
> > memset_p((void **)ports, NULL, count);
> > }
> >
> > -static void execlists_dequeue(struct intel_engine_cs *engine)
> > +static bool execlists_dequeue(struct intel_engine_cs *engine)
> > {
> > struct intel_engine_execlists * const execlists = &engine->execlists;
> > struct i915_request **port = execlists->pending;
> > @@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> > execlists->queue_priority_hint);
> > record_preemption(execlists);
> >
> > - /*
> > - * Don't let the RING_HEAD advance past the breadcrumb
> > - * as we unwind (and until we resubmit) so that we do
> > - * not accidentally tell it to go backwards.
> > - */
> > - ring_set_paused(engine, 1);
> > -
> > /*
> > * Note that we have not stopped the GPU at this point,
> > * so we are unwinding the incomplete requests as they
> > @@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> > last->sched.attr.priority,
> > execlists->queue_priority_hint);
> >
> > - ring_set_paused(engine, 1);
> > defer_active(engine);
> >
> > /*
> > @@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> > * of timeslices, our queue might be.
> > */
> > start_timeslice(engine);
> > - return;
> > + return false;
> > }
> > }
> > }
> > @@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> > }
> >
> > if (last && !can_merge_rq(last, rq)) {
> > + /* leave this for another sibling */
> > spin_unlock(&ve->base.active.lock);
> > start_timeslice(engine);
> > - return; /* leave this for another sibling */
> > + return false;
> > }
> >
> > ENGINE_TRACE(engine,
> > @@ -2193,32 +2186,31 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> > * interrupt for secondary ports).
> > */
> > execlists->queue_priority_hint = queue_prio(execlists);
> > + if (!submit)
> > + return false;
> >
> > - if (submit) {
> > - *port = execlists_schedule_in(last, port - execlists->pending);
> > - execlists->switch_priority_hint =
> > - switch_prio(engine, *execlists->pending);
> > + *port = execlists_schedule_in(last, port - execlists->pending);
> > + execlists->switch_priority_hint =
> > + switch_prio(engine, *execlists->pending);
> >
> > - /*
> > - * Skip if we ended up with exactly the same set of requests,
> > - * e.g. trying to timeslice a pair of ordered contexts
> > - */
> > - if (!memcmp(active, execlists->pending,
> > - (port - execlists->pending + 1) * sizeof(*port))) {
> > - do
> > - execlists_schedule_out(fetch_and_zero(port));
> > - while (port-- != execlists->pending);
> > -
> > - goto skip_submit;
> > - }
> > - clear_ports(port + 1, last_port - port);
> > + /*
> > + * Skip if we ended up with exactly the same set of requests,
> > + * e.g. trying to timeslice a pair of ordered contexts
> > + */
> > + if (!memcmp(active, execlists->pending,
> > + (port - execlists->pending + 1) * sizeof(*port))) {
> > + do
> > + execlists_schedule_out(fetch_and_zero(port));
> > + while (port-- != execlists->pending);
> >
> > - execlists_submit_ports(engine);
> > - set_preempt_timeout(engine, *active);
> > - } else {
> > -skip_submit:
> > - ring_set_paused(engine, 0);
> > + return false;
> > }
> > + clear_ports(port + 1, last_port - port);
> > +
> > + execlists_submit_ports(engine);
> > + set_preempt_timeout(engine, *active);
> > + tasklet_hi_schedule(&execlists->tasklet); /* lost interrupt */
> > + return true;
> > }
> >
> > static void
> > @@ -2478,7 +2470,16 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
> > lockdep_assert_held(&engine->active.lock);
> > if (!READ_ONCE(engine->execlists.pending[0])) {
> > rcu_read_lock(); /* protect peeking at execlists->active */
> > - execlists_dequeue(engine);
> > +
> > + /*
> > + * Don't let the RING_HEAD advance past the breadcrumb
> > + * as we unwind (and until we resubmit) so that we do
> > + * not accidentally tell it to go backwards.
> > + */
> > + ring_set_paused(engine, 1);
> > + if (!execlists_dequeue(engine))
> > + ring_set_paused(engine, 0);
> > +
> > rcu_read_unlock();
> > }
> > }
> > @@ -2816,8 +2817,7 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
> > ring_set_paused(engine, 1); /* Freeze the current request in place */
> > if (execlists_capture(engine))
> > intel_engine_reset(engine, msg);
> > - else
> > - ring_set_paused(engine, 0);
> > + ring_set_paused(engine, 0);
> >
> > tasklet_enable(&engine->execlists.tasklet);
> > clear_and_wake_up_bit(bit, lock);
> >
>
> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
The problem appears to be that if we lite-restore into a context inside
the semaphore, it doesn't yield an arbitration point and we do not raise
a CSB event.
Out of the frying pan and into the fire.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 8+ messages in thread
* Re: [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context
2020-03-27 11:26 Chris Wilson
@ 2020-03-27 15:59 ` Tvrtko Ursulin
2020-03-27 16:47 ` Chris Wilson
0 siblings, 1 reply; 8+ messages in thread
From: Tvrtko Ursulin @ 2020-03-27 15:59 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
On 27/03/2020 11:26, Chris Wilson wrote:
> In what seems remarkably similar to the w/a required to not reload an
> idle context with HEAD==TAIL, it appears we must prevent the HW from
> switching to an idle context in ELSP[1], while simultaneously trying to
> preempt the HW to run another context and a continuation of the idle
> context (which is no longer idle).
>
> process_csb: vecs0: cs-irq head=0, tail=1
> process_csb: vecs0: csb[1]: status=0x00000882:0x00000020
> trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
> trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
> trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
> process_csb: vecs0: cs-irq head=1, tail=2
> process_csb: vecs0: csb[2]: status=0x00000814:0x00000040
> trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
> process_csb: vecs0: cs-irq head=2, tail=5
> process_csb: vecs0: csb[3]: status=0x00000812:0x00000020
> trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
> trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
> process_csb: vecs0: csb[4]: status=0x00000814:0x00000060
> trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
> process_csb: vecs0: csb[5]: status=0x00000818:0x00000020
> trace_ports: vecs0: completed { 8c0:32, 0:0 }
> process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, ctl:00000000, mode:00000200}
> process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, hwsp:30},
> process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
> process_csb: GEM_BUG_ON("context completed before request")
>
> Fortunately, we just so happen to have a semaphore in place to prevent
> the ring HEAD from proceeding past the end of a request that we can use
> to fix the HEAD in position as we reprogram ELSP.
>
> Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> ---
> drivers/gpu/drm/i915/gt/intel_gt_irq.c | 2 +
> drivers/gpu/drm/i915/gt/intel_lrc.c | 72 +++++++++++++-------------
> 2 files changed, 38 insertions(+), 36 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
> index f0e7fd95165a..be3817d99908 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
> @@ -24,6 +24,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
> {
> bool tasklet = false;
>
> + ENGINE_TRACE(engine, "iir: %04x\n", iir);
> +
> if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
> u32 eir;
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index b12355048501..53650b452bc9 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request **ports, int count)
> memset_p((void **)ports, NULL, count);
> }
>
> -static void execlists_dequeue(struct intel_engine_cs *engine)
> +static bool execlists_dequeue(struct intel_engine_cs *engine)
> {
> struct intel_engine_execlists * const execlists = &engine->execlists;
> struct i915_request **port = execlists->pending;
> @@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> execlists->queue_priority_hint);
> record_preemption(execlists);
>
> - /*
> - * Don't let the RING_HEAD advance past the breadcrumb
> - * as we unwind (and until we resubmit) so that we do
> - * not accidentally tell it to go backwards.
> - */
> - ring_set_paused(engine, 1);
> -
> /*
> * Note that we have not stopped the GPU at this point,
> * so we are unwinding the incomplete requests as they
> @@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> last->sched.attr.priority,
> execlists->queue_priority_hint);
>
> - ring_set_paused(engine, 1);
> defer_active(engine);
>
> /*
> @@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> * of timeslices, our queue might be.
> */
> start_timeslice(engine);
> - return;
> + return false;
> }
> }
> }
> @@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> }
>
> if (last && !can_merge_rq(last, rq)) {
> + /* leave this for another sibling */
> spin_unlock(&ve->base.active.lock);
> start_timeslice(engine);
> - return; /* leave this for another sibling */
> + return false;
> }
>
> ENGINE_TRACE(engine,
> @@ -2193,32 +2186,31 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> * interrupt for secondary ports).
> */
> execlists->queue_priority_hint = queue_prio(execlists);
> + if (!submit)
> + return false;
>
> - if (submit) {
> - *port = execlists_schedule_in(last, port - execlists->pending);
> - execlists->switch_priority_hint =
> - switch_prio(engine, *execlists->pending);
> + *port = execlists_schedule_in(last, port - execlists->pending);
> + execlists->switch_priority_hint =
> + switch_prio(engine, *execlists->pending);
>
> - /*
> - * Skip if we ended up with exactly the same set of requests,
> - * e.g. trying to timeslice a pair of ordered contexts
> - */
> - if (!memcmp(active, execlists->pending,
> - (port - execlists->pending + 1) * sizeof(*port))) {
> - do
> - execlists_schedule_out(fetch_and_zero(port));
> - while (port-- != execlists->pending);
> -
> - goto skip_submit;
> - }
> - clear_ports(port + 1, last_port - port);
> + /*
> + * Skip if we ended up with exactly the same set of requests,
> + * e.g. trying to timeslice a pair of ordered contexts
> + */
> + if (!memcmp(active, execlists->pending,
> + (port - execlists->pending + 1) * sizeof(*port))) {
> + do
> + execlists_schedule_out(fetch_and_zero(port));
> + while (port-- != execlists->pending);
>
> - execlists_submit_ports(engine);
> - set_preempt_timeout(engine, *active);
> - } else {
> -skip_submit:
> - ring_set_paused(engine, 0);
> + return false;
> }
> + clear_ports(port + 1, last_port - port);
> +
> + execlists_submit_ports(engine);
> + set_preempt_timeout(engine, *active);
> + tasklet_hi_schedule(&execlists->tasklet); /* lost interrupt */
> + return true;
> }
>
> static void
> @@ -2478,7 +2470,16 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
> lockdep_assert_held(&engine->active.lock);
> if (!READ_ONCE(engine->execlists.pending[0])) {
> rcu_read_lock(); /* protect peeking at execlists->active */
> - execlists_dequeue(engine);
> +
> + /*
> + * Don't let the RING_HEAD advance past the breadcrumb
> + * as we unwind (and until we resubmit) so that we do
> + * not accidentally tell it to go backwards.
> + */
> + ring_set_paused(engine, 1);
> + if (!execlists_dequeue(engine))
> + ring_set_paused(engine, 0);
> +
> rcu_read_unlock();
> }
> }
> @@ -2816,8 +2817,7 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
> ring_set_paused(engine, 1); /* Freeze the current request in place */
> if (execlists_capture(engine))
> intel_engine_reset(engine, msg);
> - else
> - ring_set_paused(engine, 0);
> + ring_set_paused(engine, 0);
>
> tasklet_enable(&engine->execlists.tasklet);
> clear_and_wake_up_bit(bit, lock);
>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 8+ messages in thread
* [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context
@ 2020-03-27 11:26 Chris Wilson
2020-03-27 15:59 ` Tvrtko Ursulin
0 siblings, 1 reply; 8+ messages in thread
From: Chris Wilson @ 2020-03-27 11:26 UTC (permalink / raw)
To: intel-gfx
In what seems remarkably similar to the w/a required to not reload an
idle context with HEAD==TAIL, it appears we must prevent the HW from
switching to an idle context in ELSP[1], while simultaneously trying to
preempt the HW to run another context and a continuation of the idle
context (which is no longer idle).
process_csb: vecs0: cs-irq head=0, tail=1
process_csb: vecs0: csb[1]: status=0x00000882:0x00000020
trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
process_csb: vecs0: cs-irq head=1, tail=2
process_csb: vecs0: csb[2]: status=0x00000814:0x00000040
trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
process_csb: vecs0: cs-irq head=2, tail=5
process_csb: vecs0: csb[3]: status=0x00000812:0x00000020
trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
process_csb: vecs0: csb[4]: status=0x00000814:0x00000060
trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
process_csb: vecs0: csb[5]: status=0x00000818:0x00000020
trace_ports: vecs0: completed { 8c0:32, 0:0 }
process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, ctl:00000000, mode:00000200}
process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, hwsp:30},
process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
process_csb: GEM_BUG_ON("context completed before request")
Fortunately, we just so happen to have a semaphore in place to prevent
the ring HEAD from proceeding past the end of a request that we can use
to fix the HEAD in position as we reprogram ELSP.
Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
drivers/gpu/drm/i915/gt/intel_gt_irq.c | 2 +
drivers/gpu/drm/i915/gt/intel_lrc.c | 72 +++++++++++++-------------
2 files changed, 38 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f0e7fd95165a..be3817d99908 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -24,6 +24,8 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
bool tasklet = false;
+ ENGINE_TRACE(engine, "iir: %04x\n", iir);
+
if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
u32 eir;
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b12355048501..53650b452bc9 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request **ports, int count)
memset_p((void **)ports, NULL, count);
}
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
@@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
execlists->queue_priority_hint);
record_preemption(execlists);
- /*
- * Don't let the RING_HEAD advance past the breadcrumb
- * as we unwind (and until we resubmit) so that we do
- * not accidentally tell it to go backwards.
- */
- ring_set_paused(engine, 1);
-
/*
* Note that we have not stopped the GPU at this point,
* so we are unwinding the incomplete requests as they
@@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last->sched.attr.priority,
execlists->queue_priority_hint);
- ring_set_paused(engine, 1);
defer_active(engine);
/*
@@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* of timeslices, our queue might be.
*/
start_timeslice(engine);
- return;
+ return false;
}
}
}
@@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
if (last && !can_merge_rq(last, rq)) {
+ /* leave this for another sibling */
spin_unlock(&ve->base.active.lock);
start_timeslice(engine);
- return; /* leave this for another sibling */
+ return false;
}
ENGINE_TRACE(engine,
@@ -2193,32 +2186,31 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* interrupt for secondary ports).
*/
execlists->queue_priority_hint = queue_prio(execlists);
+ if (!submit)
+ return false;
- if (submit) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- execlists->switch_priority_hint =
- switch_prio(engine, *execlists->pending);
+ *port = execlists_schedule_in(last, port - execlists->pending);
+ execlists->switch_priority_hint =
+ switch_prio(engine, *execlists->pending);
- /*
- * Skip if we ended up with exactly the same set of requests,
- * e.g. trying to timeslice a pair of ordered contexts
- */
- if (!memcmp(active, execlists->pending,
- (port - execlists->pending + 1) * sizeof(*port))) {
- do
- execlists_schedule_out(fetch_and_zero(port));
- while (port-- != execlists->pending);
-
- goto skip_submit;
- }
- clear_ports(port + 1, last_port - port);
+ /*
+ * Skip if we ended up with exactly the same set of requests,
+ * e.g. trying to timeslice a pair of ordered contexts
+ */
+ if (!memcmp(active, execlists->pending,
+ (port - execlists->pending + 1) * sizeof(*port))) {
+ do
+ execlists_schedule_out(fetch_and_zero(port));
+ while (port-- != execlists->pending);
- execlists_submit_ports(engine);
- set_preempt_timeout(engine, *active);
- } else {
-skip_submit:
- ring_set_paused(engine, 0);
+ return false;
}
+ clear_ports(port + 1, last_port - port);
+
+ execlists_submit_ports(engine);
+ set_preempt_timeout(engine, *active);
+ tasklet_hi_schedule(&execlists->tasklet); /* lost interrupt */
+ return true;
}
static void
@@ -2478,7 +2470,16 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
lockdep_assert_held(&engine->active.lock);
if (!READ_ONCE(engine->execlists.pending[0])) {
rcu_read_lock(); /* protect peeking at execlists->active */
- execlists_dequeue(engine);
+
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
+ if (!execlists_dequeue(engine))
+ ring_set_paused(engine, 0);
+
rcu_read_unlock();
}
}
@@ -2816,8 +2817,7 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
ring_set_paused(engine, 1); /* Freeze the current request in place */
if (execlists_capture(engine))
intel_engine_reset(engine, msg);
- else
- ring_set_paused(engine, 0);
+ ring_set_paused(engine, 0);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(bit, lock);
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
* [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context
@ 2020-03-27 9:29 Chris Wilson
0 siblings, 0 replies; 8+ messages in thread
From: Chris Wilson @ 2020-03-27 9:29 UTC (permalink / raw)
To: intel-gfx
In what seems remarkably similar to the w/a required to not reload an
idle context with HEAD==TAIL, it appears we must prevent the HW from
switching to an idle context in ELSP[1], while simultaneously trying to
preempt the HW to run another context and a continuation of the idle
context (which is no longer idle).
process_csb: vecs0: cs-irq head=0, tail=1
process_csb: vecs0: csb[1]: status=0x00000882:0x00000020
trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
trace_ports: vecs0: promote { 8b2:32!, 8c0:30 }
trace_ports: vecs0: submit { 8b8:32, 8c0:32 }
process_csb: vecs0: cs-irq head=1, tail=2
process_csb: vecs0: csb[2]: status=0x00000814:0x00000040
trace_ports: vecs0: completed { 8b2:32!, 8c0:30 }
process_csb: vecs0: cs-irq head=2, tail=5
process_csb: vecs0: csb[3]: status=0x00000812:0x00000020
trace_ports: vecs0: preempted { 8c0:30!, 0:0 }
trace_ports: vecs0: promote { 8b8:32!, 8c0:32 }
process_csb: vecs0: csb[4]: status=0x00000814:0x00000060
trace_ports: vecs0: completed { 8b8:32!, 8c0:32 }
process_csb: vecs0: csb[5]: status=0x00000818:0x00000020
trace_ports: vecs0: completed { 8c0:32, 0:0 }
process_csb: vecs0: ring:{start:0x00021000, head:03f8, tail:03f8, ctl:00000000, mode:00000200}
process_csb: vecs0: rq:{start:00021000, head:03c0, tail:0400, seqno:8c0:32, hwsp:30},
process_csb: vecs0: ctx:{start:00021000, head:03f8, tail:03f8},
process_csb: GEM_BUG_ON("context completed before request")
Fortunately, we just so happen to have a semaphore in place to prevent
the ring HEAD from proceeding past the end of a request that we can use
to fix the HEAD in position as we reprogram ELSP.
Closes: https://gitlab.freedesktop.org/drm/intel/issues/1501
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
drivers/gpu/drm/i915/gt/intel_lrc.c | 75 +++++++++++++++--------------
1 file changed, 39 insertions(+), 36 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index b12355048501..26acc4e63a1b 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1854,7 +1854,7 @@ static inline void clear_ports(struct i915_request **ports, int count)
memset_p((void **)ports, NULL, count);
}
-static void execlists_dequeue(struct intel_engine_cs *engine)
+static bool execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
@@ -1928,13 +1928,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
execlists->queue_priority_hint);
record_preemption(execlists);
- /*
- * Don't let the RING_HEAD advance past the breadcrumb
- * as we unwind (and until we resubmit) so that we do
- * not accidentally tell it to go backwards.
- */
- ring_set_paused(engine, 1);
-
/*
* Note that we have not stopped the GPU at this point,
* so we are unwinding the incomplete requests as they
@@ -1954,7 +1947,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
last->sched.attr.priority,
execlists->queue_priority_hint);
- ring_set_paused(engine, 1);
defer_active(engine);
/*
@@ -1988,7 +1980,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* of timeslices, our queue might be.
*/
start_timeslice(engine);
- return;
+ return false;
}
}
}
@@ -2021,9 +2013,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
if (last && !can_merge_rq(last, rq)) {
+ /* leave this for another sibling */
spin_unlock(&ve->base.active.lock);
start_timeslice(engine);
- return; /* leave this for another sibling */
+ return false;
}
ENGINE_TRACE(engine,
@@ -2193,32 +2186,31 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* interrupt for secondary ports).
*/
execlists->queue_priority_hint = queue_prio(execlists);
+ if (!submit)
+ return false;
- if (submit) {
- *port = execlists_schedule_in(last, port - execlists->pending);
- execlists->switch_priority_hint =
- switch_prio(engine, *execlists->pending);
-
- /*
- * Skip if we ended up with exactly the same set of requests,
- * e.g. trying to timeslice a pair of ordered contexts
- */
- if (!memcmp(active, execlists->pending,
- (port - execlists->pending + 1) * sizeof(*port))) {
- do
- execlists_schedule_out(fetch_and_zero(port));
- while (port-- != execlists->pending);
+ *port = execlists_schedule_in(last, port - execlists->pending);
+ execlists->switch_priority_hint =
+ switch_prio(engine, *execlists->pending);
- goto skip_submit;
- }
- clear_ports(port + 1, last_port - port);
+ /*
+ * Skip if we ended up with exactly the same set of requests,
+ * e.g. trying to timeslice a pair of ordered contexts
+ */
+ if (!memcmp(active, execlists->pending,
+ (port - execlists->pending + 1) * sizeof(*port))) {
+ do
+ execlists_schedule_out(fetch_and_zero(port));
+ while (port-- != execlists->pending);
- execlists_submit_ports(engine);
- set_preempt_timeout(engine, *active);
- } else {
-skip_submit:
- ring_set_paused(engine, 0);
+ return false;
}
+ clear_ports(port + 1, last_port - port);
+
+ execlists_submit_ports(engine);
+ set_preempt_timeout(engine, *active);
+ tasklet_hi_schedule(&execlists->tasklet); /* lost interrupt */
+ return true;
}
static void
@@ -2478,7 +2470,16 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
lockdep_assert_held(&engine->active.lock);
if (!READ_ONCE(engine->execlists.pending[0])) {
rcu_read_lock(); /* protect peeking at execlists->active */
- execlists_dequeue(engine);
+
+ /*
+ * Don't let the RING_HEAD advance past the breadcrumb
+ * as we unwind (and until we resubmit) so that we do
+ * not accidentally tell it to go backwards.
+ */
+ ring_set_paused(engine, 1);
+ if (!execlists_dequeue(engine))
+ ring_set_paused(engine, 0);
+
rcu_read_unlock();
}
}
@@ -2816,8 +2817,7 @@ static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
ring_set_paused(engine, 1); /* Freeze the current request in place */
if (execlists_capture(engine))
intel_engine_reset(engine, msg);
- else
- ring_set_paused(engine, 0);
+ ring_set_paused(engine, 0);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(bit, lock);
@@ -4203,6 +4203,9 @@ static u32 *gen8_emit_wa_tail(struct i915_request *request, u32 *cs)
static u32 *emit_preempt_busywait(struct i915_request *request, u32 *cs)
{
+ *cs++ = MI_ARB_CHECK;
+ *cs++ = MI_NOOP;
+
*cs++ = MI_SEMAPHORE_WAIT |
MI_SEMAPHORE_GLOBAL_GTT |
MI_SEMAPHORE_POLL |
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 8+ messages in thread
end of thread, other threads:[~2020-03-27 20:33 UTC | newest]
Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-03-27 9:24 [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context Chris Wilson
2020-03-27 9:27 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context (rev3) Patchwork
2020-03-27 9:29 [Intel-gfx] [CI] drm/i915/execlists: Prevent GPU death on ELSP[1] promotion to idle context Chris Wilson
2020-03-27 11:26 Chris Wilson
2020-03-27 15:59 ` Tvrtko Ursulin
2020-03-27 16:47 ` Chris Wilson
2020-03-27 20:19 ` Mika Kuoppala
2020-03-27 20:32 ` Chris Wilson
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.