From: Chris Wilson <chris@chris-wilson.co.uk>
To: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>,
	intel-gfx@lists.freedesktop.org
Subject: Re: [Intel-gfx] [PATCH 26/57] drm/i915: Move finding the current active request to the scheduler
Date: Thu, 04 Feb 2021 14:59:26 +0000
Message-ID: <161245076627.3075.9732775786725541361@build.alporthouse.com>
In-Reply-To: <79b782ae-dbe5-2d3c-7093-c1f9364eba55@linux.intel.com>

Quoting Tvrtko Ursulin (2021-02-04 14:30:18)
> 
> On 01/02/2021 08:56, Chris Wilson wrote:
> > Since finding the currently active request starts by walking the
> > scheduler lists under the scheduler lock, move the routine to the
> > scheduler.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> >   drivers/gpu/drm/i915/gt/intel_engine.h        |  3 -
> >   drivers/gpu/drm/i915/gt/intel_engine_cs.c     | 71 ++--------------
> >   .../drm/i915/gt/intel_execlists_submission.c  | 83 ++++++++++++++++++-
> >   drivers/gpu/drm/i915/i915_gpu_error.c         | 18 ++--
> >   drivers/gpu/drm/i915/i915_gpu_error.h         |  4 +-
> >   drivers/gpu/drm/i915/i915_request.c           | 71 +---------------
> >   drivers/gpu/drm/i915/i915_request.h           |  8 ++
> >   drivers/gpu/drm/i915/i915_scheduler.c         | 50 +++++++++++
> >   drivers/gpu/drm/i915/i915_scheduler_types.h   |  4 +
> >   9 files changed, 162 insertions(+), 150 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
> > index 52bba16c62e8..c530839627bb 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
> > @@ -230,9 +230,6 @@ void intel_engine_dump(struct intel_engine_cs *engine,
> >   ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine,
> >                                  ktime_t *now);
> >   
> > -struct i915_request *
> > -intel_engine_find_active_request(struct intel_engine_cs *engine);
> > -
> >   u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
> >   
> >   void intel_engine_init_active(struct intel_engine_cs *engine,
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> > index b5b957283f2c..5751a529b2df 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> > @@ -1277,7 +1277,7 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
> >       }
> >   }
> >   
> > -static struct intel_timeline *get_timeline(struct i915_request *rq)
> > +static struct intel_timeline *get_timeline(const struct i915_request *rq)
> >   {
> >       struct intel_timeline *tl;
> >   
> > @@ -1505,7 +1505,8 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
> >       }
> >   }
> >   
> > -static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
> > +static void
> > +print_request_ring(struct drm_printer *m, const struct i915_request *rq)
> >   {
> >       void *ring;
> >       int size;
> > @@ -1590,7 +1591,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
> >   {
> >       struct i915_gpu_error * const error = &engine->i915->gpu_error;
> >       struct i915_sched *se = intel_engine_get_scheduler(engine);
> > -     struct i915_request *rq;
> > +     const struct i915_request *rq;
> >       intel_wakeref_t wakeref;
> >       unsigned long flags;
> >       ktime_t dummy;
> > @@ -1631,8 +1632,9 @@ void intel_engine_dump(struct intel_engine_cs *engine,
> >   
> >       drm_printf(m, "\tRequests:\n");
> >   
> > +     rcu_read_lock();
> >       spin_lock_irqsave(&se->lock, flags);
> > -     rq = intel_engine_find_active_request(engine);
> > +     rq = se->active_request(se);
> 
> Wrap with i915_sched_find_active_request perhaps?

Ok.
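
Something like (a sketch only; the final name and placement are still
to be decided):

static inline const struct i915_request *
i915_sched_find_active_request(struct i915_sched *se)
{
	lockdep_assert_held(&se->lock);

	/* The backend provides its own notion of the active request */
	return se->active_request(se);
}

and the caller in intel_engine_dump() then becomes
rq = i915_sched_find_active_request(se).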

> >       if (rq) {
> >               struct intel_timeline *tl = get_timeline(rq);
> >   
> > @@ -1664,6 +1666,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
> >       }
> >       drm_printf(m, "\tOn hold?: %lu\n", list_count(&se->hold));
> >       spin_unlock_irqrestore(&se->lock, flags);
> > +     rcu_read_unlock();
> >   
> >       drm_printf(m, "\tMMIO base:  0x%08x\n", engine->mmio_base);
> >       wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
> > @@ -1712,66 +1715,6 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
> >       return ktime_add(total, start);
> >   }
> >   
> > -static bool match_ring(struct i915_request *rq)
> > -{
> > -     u32 ring = ENGINE_READ(rq->engine, RING_START);
> > -
> > -     return ring == i915_ggtt_offset(rq->ring->vma);
> > -}
> > -
> > -struct i915_request *
> > -intel_engine_find_active_request(struct intel_engine_cs *engine)
> > -{
> > -     struct i915_sched *se = intel_engine_get_scheduler(engine);
> > -     struct i915_request *request, *active = NULL;
> > -
> > -     /*
> > -      * We are called by the error capture, reset and to dump engine
> > -      * state at random points in time. In particular, note that neither is
> > -      * crucially ordered with an interrupt. After a hang, the GPU is dead
> > -      * and we assume that no more writes can happen (we waited long enough
> > -      * for all writes that were in transaction to be flushed) - adding an
> > -      * extra delay for a recent interrupt is pointless. Hence, we do
> > -      * not need an engine->irq_seqno_barrier() before the seqno reads.
> > -      * At all other times, we must assume the GPU is still running, but
> > -      * we only care about the snapshot of this moment.
> > -      */
> > -     lockdep_assert_held(&se->lock);
> > -
> > -     rcu_read_lock();
> > -     request = execlists_active(&engine->execlists);
> > -     if (request) {
> > -             struct intel_timeline *tl = request->context->timeline;
> > -
> > -             list_for_each_entry_from_reverse(request, &tl->requests, link) {
> > -                     if (__i915_request_is_complete(request))
> > -                             break;
> > -
> > -                     active = request;
> > -             }
> > -     }
> > -     rcu_read_unlock();
> > -     if (active)
> > -             return active;
> > -
> > -     list_for_each_entry(request, &se->requests, sched.link) {
> > -             if (__i915_request_is_complete(request))
> > -                     continue;
> > -
> > -             if (!__i915_request_has_started(request))
> > -                     continue;
> > -
> > -             /* More than one preemptible request may match! */
> > -             if (!match_ring(request))
> > -                     continue;
> > -
> > -             active = request;
> > -             break;
> > -     }
> > -
> > -     return active;
> > -}
> > -
> >   #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> >   #include "mock_engine.c"
> >   #include "selftest_engine.c"
> > diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> > index 33c1a833df20..8b848adb65b7 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> > @@ -2336,7 +2336,7 @@ static void sanitize_hwsp(struct intel_engine_cs *engine)
> >   
> >   static void execlists_sanitize(struct intel_engine_cs *engine)
> >   {
> > -     GEM_BUG_ON(execlists_active(&engine->execlists));
> > +     GEM_BUG_ON(*engine->execlists.active);
> >   
> >       /*
> >        * Poison residual state on resume, in case the suspend didn't!
> > @@ -2755,6 +2755,85 @@ static void execlists_park(struct intel_engine_cs *engine)
> >       cancel_timer(&engine->execlists.preempt);
> >   }
> >   
> > +static const struct i915_request *
> > +execlists_active_request(struct i915_sched *se)
> > +{
> > +     struct intel_engine_cs *engine =
> > +             container_of(se, typeof(*engine), sched);
> > +     struct i915_request *rq;
> > +
> > +     rq = execlists_active(&engine->execlists);
> > +     if (rq)
> > +             rq = active_request(rq->context->timeline, rq);
> 
> Now which active_request() is this?

The execlists search function that walks back along the active
context's timeline.
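
Roughly, as it exists locally in intel_execlists_submission.c (a
sketch from memory):

static struct i915_request *
active_request(struct intel_timeline *tl, struct i915_request *rq)
{
	struct i915_request *active = rq;

	/* Walk back along the timeline for the oldest incomplete request */
	list_for_each_entry_from_reverse(rq, &tl->requests, link) {
		if (__i915_request_is_complete(rq))
			break;

		active = rq;
	}

	return active;
}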

> The one local to 
> intel_execlist_submission.c and not the common part extracted to 
> i915_scheduler.c? What is the latter used for then?

For the ringbuffer backend. The GuC does still try to use it, but it's
not accurate for the GuC.

It ends up being called i915_sched_default_active_request().
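
For reference, the default is more or less the old list walk (a
sketch, assuming the se->requests list and the match_ring() moved in
the diff below):

static const struct i915_request *
i915_sched_default_active_request(struct i915_sched *se)
{
	struct i915_request *rq, *active = NULL;

	/* Search the in-flight list for the oldest incomplete request */
	list_for_each_entry(rq, &se->requests, sched.link) {
		if (__i915_request_is_complete(rq))
			continue;

		if (!__i915_request_has_started(rq))
			continue;

		/* More than one preemptible request may match! */
		if (!match_ring(rq))
			continue;

		active = rq;
		break;
	}

	return active;
}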

> > +static inline bool i915_request_is_executing(const struct i915_request *rq)
> > +{
> > +     if (i915_request_is_active(rq))
> > +             return true;
> > +
> > +     return i915_request_get_scheduler(rq)->is_executing(rq);
> 
> Isn't the active flag a superset of the "is executing" check, in which 
> case how does this make sense? I could understand:
> 
> if !active
>         return false;
> 
> return ->is_executing

is_active() is not always accurate. When set, we know we have passed the
request to the HW and currently believe it is on the HW. When not set,
we may have removed it from the HW queue, but the HW may not yet have
preempted away from it.

        /*
         * Even if we have unwound the request, it may still be on
         * the GPU (preempt-to-busy). If that request is inside an
         * unpreemptible critical section, it will not be removed. Some
         * GPU functions may even be stuck waiting for the paired request
         * (__await_execution) to be submitted and cannot be preempted
         * until the bond is executing.
         *
         * As we know that there are always preemption points between
         * requests, we know that only the currently executing request
         * may be still active even though we have cleared the flag.
         * However, we can't rely on our tracking of ELSP[0] to know
         * which request is currently active and so may be stuck, as
         * the tracking may be an event behind. Instead assume that
         * if the context is still inflight, then it is still active
         * even if the active flag has been cleared.
         *
         * To further complicate matters, if there is a pending promotion, the
         * HW may either perform a context switch to the second inflight set
         * of execlists, or it may switch to the pending set. In the latter
         * case, it may send the ACK and we process the event, copying
         * pending[] over the top of inflight[], _overwriting_ our *active.
         * Since this implies the HW is arbitrating and not stuck in *active,
         * we do not worry about complete accuracy, but we do require no
         * read/write tearing of the pointer [the read of the pointer must be
         * valid, even as the array is being overwritten, for which we require
         * the writes to avoid tearing].
         *
         * Note that the read of *execlists->active may race with the promotion
         * of execlists->pending[] to execlists->inflight[], overwriting
         * the value at *execlists->active. This is fine. The promotion implies
         * that we received an ACK from the HW, and so the context is not
         * stuck -- if we do not see ourselves in *active, the inflight status
         * is valid. If instead we see ourselves being copied into *active,
         * we are inflight and may signal the callback.
         */

That's the bit we moved to the backend and hid behind is_executing().

For the inline,

/* Double check with the backend in case the request is still on the HW */

Or something.
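
For the execlists backend, the hook then reduces to scanning the
inflight ports for our context, something like (a sketch only; exact
form TBD):

static bool execlists_is_executing(const struct i915_request *rq)
{
	struct i915_request * const *port;

	/*
	 * Reading *active may race with the promotion of pending[] over
	 * inflight[]; that is fine so long as the pointer writes do not
	 * tear, as described above.
	 */
	for (port = READ_ONCE(rq->engine->execlists.active); *port; port++) {
		if ((*port)->context == rq->context)
			return true;
	}

	return false;
}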

> >   #endif /* I915_REQUEST_H */
> > diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
> > index 620db6430a10..cb27bcb7a1f6 100644
> > --- a/drivers/gpu/drm/i915/i915_scheduler.c
> > +++ b/drivers/gpu/drm/i915/i915_scheduler.c
> > @@ -91,6 +91,54 @@ static void i915_sched_init_ipi(struct i915_sched_ipi *ipi)
> >       ipi->list = NULL;
> >   }
> >   
> > +static bool match_ring(struct i915_request *rq)
> > +{
> > +     const struct intel_engine_cs *engine = rq->engine;
> > +     const struct intel_ring *ring = rq->ring;
> > +
> > +     return ENGINE_READ(engine, RING_START) == i915_ggtt_offset(ring->vma);
> > +}
> 
> Ouchy ouch. I'll look the other way. :)

Yeah, the good old legacy ringbuffer search function... and we still
have no better routine for the GuC.
-Chris
