From: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
To: Chris Wilson <chris@chris-wilson.co.uk>, intel-gfx@lists.freedesktop.org
Cc: thomas.hellstrom@intel.com
Subject: Re: [Intel-gfx] [PATCH 18/41] drm/i915: Move tasklet from execlists to sched
Date: Wed, 27 Jan 2021 14:10:55 +0000 [thread overview]
Message-ID: <bab1b4e7-7487-a057-3514-c26e3afc9350@linux.intel.com> (raw)
In-Reply-To: <20210125140136.10494-18-chris@chris-wilson.co.uk>
+ Matt to check on how this fits with GuC. This patch and a few before
it in this series.
The split between physical and scheduling engine (i915_sched_engine)
makes sense to me. Gut feeling says it should work for GuC as well, in
principle.
A small comment or two below:
On 25/01/2021 14:01, Chris Wilson wrote:
> Move the scheduling tasklists out of the execlists backend into the
> per-engine scheduling bookkeeping.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> drivers/gpu/drm/i915/gt/intel_engine.h | 14 ----
> drivers/gpu/drm/i915/gt/intel_engine_cs.c | 11 ++--
> drivers/gpu/drm/i915/gt/intel_engine_types.h | 5 --
> .../drm/i915/gt/intel_execlists_submission.c | 65 +++++++++----------
> drivers/gpu/drm/i915/gt/intel_gt_irq.c | 2 +-
> drivers/gpu/drm/i915/gt/selftest_execlists.c | 16 ++---
> drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 2 +-
> drivers/gpu/drm/i915/gt/selftest_lrc.c | 6 +-
> drivers/gpu/drm/i915/gt/selftest_reset.c | 2 +-
> .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 18 ++---
> drivers/gpu/drm/i915/i915_scheduler.c | 14 ++--
> drivers/gpu/drm/i915/i915_scheduler.h | 20 ++++++
> drivers/gpu/drm/i915/i915_scheduler_types.h | 6 ++
> .../gpu/drm/i915/selftests/i915_scheduler.c | 16 ++---
> 14 files changed, 99 insertions(+), 98 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
> index 20974415e7d8..801ae54cf60d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
> @@ -122,20 +122,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
> return active;
> }
>
> -static inline void
> -execlists_active_lock_bh(struct intel_engine_execlists *execlists)
> -{
> - local_bh_disable(); /* prevent local softirq and lock recursion */
> - tasklet_lock(&execlists->tasklet);
> -}
> -
> -static inline void
> -execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
> -{
> - tasklet_unlock(&execlists->tasklet);
> - local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
> -}
> -
> static inline u32
> intel_read_status_page(const struct intel_engine_cs *engine, int reg)
> {
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> index ef225da35399..cdd07aeada05 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> @@ -902,7 +902,6 @@ int intel_engines_init(struct intel_gt *gt)
> void intel_engine_cleanup_common(struct intel_engine_cs *engine)
> {
> i915_sched_fini_engine(&engine->active);
> - tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
>
> intel_breadcrumbs_free(engine->breadcrumbs);
>
> @@ -1187,7 +1186,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
>
> void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
> {
> - struct tasklet_struct *t = &engine->execlists.tasklet;
> + struct tasklet_struct *t = &engine->active.tasklet;
>
> if (!t->func)
> return;
> @@ -1454,8 +1453,8 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
>
> drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
> yesno(test_bit(TASKLET_STATE_SCHED,
> - &engine->execlists.tasklet.state)),
> - enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
> + &engine->active.tasklet.state)),
> + enableddisabled(!atomic_read(&engine->active.tasklet.count)),
> repr_timer(&engine->execlists.preempt),
> repr_timer(&engine->execlists.timer));
>
> @@ -1479,7 +1478,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
> idx, hws[idx * 2], hws[idx * 2 + 1]);
> }
>
> - execlists_active_lock_bh(execlists);
> + i915_sched_lock_bh(&engine->active);
> rcu_read_lock();
> for (port = execlists->active; (rq = *port); port++) {
> char hdr[160];
> @@ -1510,7 +1509,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
> i915_request_show(m, rq, hdr, 0);
> }
> rcu_read_unlock();
> - execlists_active_unlock_bh(execlists);
> + i915_sched_unlock_bh(&engine->active);
> } else if (INTEL_GEN(dev_priv) > 6) {
> drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
> ENGINE_READ(engine, RING_PP_DIR_BASE));
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index c46d70b7e484..76d561c2c6aa 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -138,11 +138,6 @@ struct st_preempt_hang {
> * driver and the hardware state for execlist mode of submission.
> */
> struct intel_engine_execlists {
> - /**
> - * @tasklet: softirq tasklet for bottom handler
> - */
> - struct tasklet_struct tasklet;
> -
> /**
> * @timer: kick the current context if its timeslice expires
> */
> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> index 756ac388a4a8..1103c8a00af1 100644
> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> @@ -513,7 +513,7 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
> resubmit_virtual_request(rq, ve);
>
> if (READ_ONCE(ve->request))
> - tasklet_hi_schedule(&ve->base.execlists.tasklet);
> + i915_sched_kick(&ve->base.active);
i915_sched_ or i915_sched_engine_ ?
> }
>
> static void __execlists_schedule_out(struct i915_request * const rq,
> @@ -679,10 +679,9 @@ trace_ports(const struct intel_engine_execlists *execlists,
> dump_port(p1, sizeof(p1), ", ", ports[1]));
> }
>
> -static bool
> -reset_in_progress(const struct intel_engine_execlists *execlists)
> +static bool reset_in_progress(const struct intel_engine_cs *engine)
> {
> - return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
> + return unlikely(!__tasklet_is_enabled(&engine->active.tasklet));
> }
>
> static __maybe_unused noinline bool
> @@ -699,7 +698,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
> trace_ports(execlists, msg, execlists->pending);
>
> /* We may be messing around with the lists during reset, lalala */
> - if (reset_in_progress(execlists))
> + if (reset_in_progress(engine))
> return true;
>
> if (!execlists->pending[0]) {
> @@ -1084,7 +1083,7 @@ static void start_timeslice(struct intel_engine_cs *engine)
> * its timeslice, so recheck.
> */
> if (!timer_pending(&el->timer))
> - tasklet_hi_schedule(&el->tasklet);
> + i915_sched_kick(&engine->active);
> return;
> }
>
> @@ -1664,8 +1663,8 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
> * access. Either we are inside the tasklet, or the tasklet is disabled
> * and we assume that is only inside the reset paths and so serialised.
> */
> - GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
> - !reset_in_progress(execlists));
> + GEM_BUG_ON(!tasklet_is_locked(&engine->active.tasklet) &&
> + !reset_in_progress(engine));
> GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine));
>
> /*
> @@ -2077,13 +2076,13 @@ static noinline void execlists_reset(struct intel_engine_cs *engine)
> ENGINE_TRACE(engine, "reset for %s\n", msg);
>
> /* Mark this tasklet as disabled to avoid waiting for it to complete */
> - tasklet_disable_nosync(&engine->execlists.tasklet);
> + tasklet_disable_nosync(&engine->active.tasklet);
>
> ring_set_paused(engine, 1); /* Freeze the current request in place */
> execlists_capture(engine);
> intel_engine_reset(engine, msg);
>
> - tasklet_enable(&engine->execlists.tasklet);
> + tasklet_enable(&engine->active.tasklet);
Maybe all access to the tasklet from the backend should go via
i915_sched_ helpers to complete the separation? And perhaps with some
generic naming, in case we don't want to advertise that it is a tasklet
but rather some higher-level concept — something like
schedule_enable/disable, I don't know...
It also depends on how this plugs into the GuC.
Just this really, code itself looks clean enough.
Regards,
Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2021-01-27 14:11 UTC|newest]
Thread overview: 90+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-01-25 14:00 [Intel-gfx] [PATCH 01/41] drm/i915/selftests: Check for engine-reset errors in the middle of workarounds Chris Wilson
2021-01-25 14:00 ` [Intel-gfx] [PATCH 02/41] drm/i915/gt: Move the defer_request waiter active assertion Chris Wilson
2021-01-25 14:53 ` Tvrtko Ursulin
2021-01-25 14:00 ` [Intel-gfx] [PATCH 03/41] drm/i915: Replace engine->schedule() with a known request operation Chris Wilson
2021-01-25 15:14 ` Tvrtko Ursulin
2021-01-25 14:00 ` [Intel-gfx] [PATCH 04/41] drm/i915: Teach the i915_dependency to use a double-lock Chris Wilson
2021-01-25 15:34 ` Tvrtko Ursulin
2021-01-25 21:37 ` Chris Wilson
2021-01-26 9:40 ` Tvrtko Ursulin
2021-01-25 14:01 ` [Intel-gfx] [PATCH 05/41] drm/i915: Restructure priority inheritance Chris Wilson
2021-01-26 11:12 ` Tvrtko Ursulin
2021-01-26 11:30 ` Chris Wilson
2021-01-26 11:40 ` Tvrtko Ursulin
2021-01-26 11:55 ` Chris Wilson
2021-01-26 13:15 ` Tvrtko Ursulin
2021-01-26 13:24 ` Chris Wilson
2021-01-26 13:45 ` Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 06/41] drm/i915/selftests: Measure set-priority duration Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 07/41] drm/i915/selftests: Exercise priority inheritance around an engine loop Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 08/41] drm/i915: Improve DFS for priority inheritance Chris Wilson
2021-01-26 16:22 ` Tvrtko Ursulin
2021-01-26 16:26 ` Chris Wilson
2021-01-26 16:42 ` Tvrtko Ursulin
2021-01-26 16:51 ` Tvrtko Ursulin
2021-01-26 16:51 ` Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 09/41] drm/i915/selftests: Exercise relative mmio paths to non-privileged registers Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 10/41] drm/i915/selftests: Exercise cross-process context isolation Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 11/41] drm/i915: Extract request submission from execlists Chris Wilson
2021-01-26 16:28 ` Tvrtko Ursulin
2021-01-25 14:01 ` [Intel-gfx] [PATCH 12/41] drm/i915: Extract request rewinding " Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 13/41] drm/i915: Extract request suspension from the execlists Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 14/41] drm/i915: Extract the ability to defer and rerun a request later Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 15/41] drm/i915: Fix the iterative dfs for defering requests Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 16/41] drm/i915: Move common active lists from engine to i915_scheduler Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 17/41] drm/i915: Move scheduler queue Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 18/41] drm/i915: Move tasklet from execlists to sched Chris Wilson
2021-01-27 14:10 ` Tvrtko Ursulin [this message]
2021-01-27 14:24 ` Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 19/41] drm/i915/gt: Show scheduler queues when dumping state Chris Wilson
2021-01-27 14:13 ` Tvrtko Ursulin
2021-01-27 14:35 ` Chris Wilson
2021-01-27 14:50 ` Tvrtko Ursulin
2021-01-27 14:55 ` Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 20/41] drm/i915: Replace priolist rbtree with a skiplist Chris Wilson
2021-01-27 15:10 ` Tvrtko Ursulin
2021-01-27 15:33 ` Chris Wilson
2021-01-27 15:44 ` Chris Wilson
2021-01-27 15:58 ` Tvrtko Ursulin
2021-01-28 9:50 ` Chris Wilson
2021-01-28 15:56 ` Tvrtko Ursulin
2021-01-28 16:26 ` Chris Wilson
2021-01-28 16:42 ` Tvrtko Ursulin
2021-01-28 22:20 ` Chris Wilson
2021-01-28 22:44 ` Chris Wilson
2021-01-29 9:24 ` Tvrtko Ursulin
2021-01-29 9:37 ` Tvrtko Ursulin
2021-01-29 10:26 ` Chris Wilson
2021-01-28 22:56 ` Matthew Brost
2021-01-29 10:30 ` Chris Wilson
2021-01-29 17:01 ` Matthew Brost
2021-01-29 10:22 ` Tvrtko Ursulin
2021-01-25 14:01 ` [Intel-gfx] [PATCH 21/41] drm/i915: Wrap cmpxchg64 with try_cmpxchg64() helper Chris Wilson
2021-01-27 15:28 ` Tvrtko Ursulin
2021-01-25 14:01 ` [Intel-gfx] [PATCH 22/41] drm/i915: Fair low-latency scheduling Chris Wilson
2021-01-28 11:35 ` Tvrtko Ursulin
2021-01-28 12:32 ` Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 23/41] drm/i915/gt: Specify a deadline for the heartbeat Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 24/41] drm/i915: Extend the priority boosting for the display with a deadline Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 25/41] drm/i915/gt: Support virtual engine queues Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 26/41] drm/i915: Move saturated workload detection back to the context Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 27/41] drm/i915: Bump default timeslicing quantum to 5ms Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 28/41] drm/i915/gt: Wrap intel_timeline.has_initial_breadcrumb Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 29/41] drm/i915/gt: Track timeline GGTT offset separately from subpage offset Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 30/41] drm/i915/gt: Add timeline "mode" Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 31/41] drm/i915/gt: Use indices for writing into relative timelines Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 32/41] drm/i915/selftests: Exercise relative timeline modes Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 33/41] drm/i915/gt: Use ppHWSP for unshared non-semaphore related timelines Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 34/41] Restore "drm/i915: drop engine_pin/unpin_breadcrumbs_irq" Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 35/41] drm/i915/gt: Couple tasklet scheduling for all CS interrupts Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 36/41] drm/i915/gt: Support creation of 'internal' rings Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 37/41] drm/i915/gt: Use client timeline address for seqno writes Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 38/41] drm/i915/gt: Infrastructure for ring scheduling Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 39/41] drm/i915/gt: Implement ring scheduler for gen4-7 Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 40/41] drm/i915/gt: Enable ring scheduling for gen5-7 Chris Wilson
2021-01-25 14:01 ` [Intel-gfx] [PATCH 41/41] drm/i915: Support secure dispatch on gen6/gen7 Chris Wilson
2021-01-25 14:40 ` [Intel-gfx] [PATCH 01/41] drm/i915/selftests: Check for engine-reset errors in the middle of workarounds Tvrtko Ursulin
2021-01-25 17:08 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/41] " Patchwork
2021-01-25 17:10 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2021-01-25 17:38 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2021-01-25 22:45 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=bab1b4e7-7487-a057-3514-c26e3afc9350@linux.intel.com \
--to=tvrtko.ursulin@linux.intel.com \
--cc=chris@chris-wilson.co.uk \
--cc=intel-gfx@lists.freedesktop.org \
--cc=thomas.hellstrom@intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).