* [Intel-gfx] [CI 1/5] drm/i915: Wrap access to intel_engine.active
From: Chris Wilson @ 2021-02-06 1:20 UTC
To: intel-gfx
As we are about to shuffle the lists around to consolidate them into new
control objects, reduce the later code churn by wrapping access to the
scheduler lists ahead of time.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
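As an illustration of the call-site pattern this converts to (a minimal
sketch, not part of the diff; the function name is hypothetical, everything
else appears verbatim in the hunks below):

static void example_cancel_requests(struct intel_engine_cs *engine)
{
	struct i915_sched *se = intel_engine_get_scheduler(engine);
	struct i915_request *rq;
	unsigned long flags;

	/* was: spin_lock_irqsave(&engine->active.lock, flags); */
	spin_lock_irqsave(&se->lock, flags);

	/* was: list_for_each_entry(rq, &engine->active.requests, ...) */
	list_for_each_entry(rq, &se->requests, sched.link)
		i915_request_put(i915_request_mark_eio(rq));

	spin_unlock_irqrestore(&se->lock, flags);
}

Since intel_engine_get_scheduler() is a trivial inline returning
&engine->active, the wrapper compiles away; a later patch can retarget
every caller by changing that one return statement.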
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 17 +++---
drivers/gpu/drm/i915/gt/intel_engine_types.h | 11 +++-
.../drm/i915/gt/intel_execlists_submission.c | 58 +++++++++++--------
.../gpu/drm/i915/gt/intel_ring_submission.c | 14 +++--
drivers/gpu/drm/i915/gt/mock_engine.c | 7 ++-
drivers/gpu/drm/i915/gt/selftest_execlists.c | 6 +-
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 20 ++++---
drivers/gpu/drm/i915/i915_gpu_error.c | 5 +-
drivers/gpu/drm/i915/i915_request.c | 23 +++-----
drivers/gpu/drm/i915/i915_request.h | 8 ++-
drivers/gpu/drm/i915/i915_scheduler.c | 47 ++++++++-------
drivers/gpu/drm/i915/i915_scheduler_types.h | 4 +-
.../gpu/drm/i915/selftests/i915_scheduler.c | 19 +++---
13 files changed, 141 insertions(+), 98 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index daadada6de0b..b8ff82c442b8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -732,6 +732,7 @@ struct measure_breadcrumb {
static int measure_breadcrumb_dw(struct intel_context *ce)
{
struct intel_engine_cs *engine = ce->engine;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct measure_breadcrumb *frame;
int dw;
@@ -754,11 +755,11 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
frame->rq.ring = &frame->ring;
mutex_lock(&ce->timeline->mutex);
- spin_lock_irq(&engine->active.lock);
+ spin_lock_irq(&se->lock);
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
- spin_unlock_irq(&engine->active.lock);
+ spin_unlock_irq(&se->lock);
mutex_unlock(&ce->timeline->mutex);
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
@@ -1623,6 +1624,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
const char *header, ...)
{
struct i915_gpu_error * const error = &engine->i915->gpu_error;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *rq;
intel_wakeref_t wakeref;
unsigned long flags;
@@ -1664,7 +1666,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
drm_printf(m, "\tRequests:\n");
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq) {
struct intel_timeline *tl = get_timeline(rq);
@@ -1695,8 +1697,8 @@ void intel_engine_dump(struct intel_engine_cs *engine,
hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
}
}
- drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ drm_printf(m, "\tOn hold?: %lu\n", list_count(&se->hold));
+ spin_unlock_irqrestore(&se->lock, flags);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm);
@@ -1755,6 +1757,7 @@ static bool match_ring(struct i915_request *rq)
struct i915_request *
intel_engine_find_active_request(struct intel_engine_cs *engine)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *request, *active = NULL;
/*
@@ -1768,7 +1771,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* At all other times, we must assume the GPU is still running, but
* we only care about the snapshot of this moment.
*/
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&se->lock);
rcu_read_lock();
request = execlists_active(&engine->execlists);
@@ -1786,7 +1789,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
if (active)
return active;
- list_for_each_entry(request, &engine->active.requests, sched.link) {
+ list_for_each_entry(request, &se->requests, sched.link) {
if (__i915_request_is_complete(request))
continue;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 7159f9575e65..13b3c83f74fc 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -329,7 +329,7 @@ struct intel_engine_cs {
struct intel_sseu sseu;
- struct {
+ struct i915_sched {
spinlock_t lock;
struct list_head requests;
struct list_head hold; /* ready requests, but on hold */
@@ -615,5 +615,12 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
(slice_) += ((subslice_) == 0)) \
for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \
(instdone_has_subslice(dev_priv_, sseu_, slice_, \
- subslice_)))
+ subslice_)))
+
+static inline struct i915_sched *
+intel_engine_get_scheduler(struct intel_engine_cs *engine)
+{
+ return &engine->active;
+}
+
#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 3a01b66939a0..36bdb963852e 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -294,6 +294,7 @@ static int virtual_prio(const struct intel_engine_execlists *el)
static bool need_preempt(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
+ const struct i915_sched *se = &engine->active;
int last_prio;
if (!intel_engine_has_semaphores(engine))
@@ -325,7 +326,7 @@ static bool need_preempt(const struct intel_engine_cs *engine,
* Check against the first request in ELSP[1], it will, thanks to the
* power of PI, be the highest priority of that context.
*/
- if (!list_is_last(&rq->sched.link, &engine->active.requests) &&
+ if (!list_is_last(&rq->sched.link, &se->requests) &&
rq_prio(list_next_entry(rq, sched.link)) > last_prio)
return true;
@@ -477,15 +478,15 @@ static void execlists_schedule_in(struct i915_request *rq, int idx)
static void
resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve)
{
- struct intel_engine_cs *engine = rq->engine;
+ struct i915_sched *se = i915_request_get_scheduler(rq);
- spin_lock_irq(&engine->active.lock);
+ spin_lock_irq(&se->lock);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
WRITE_ONCE(rq->engine, &ve->base);
ve->base.submit_request(rq);
- spin_unlock_irq(&engine->active.lock);
+ spin_unlock_irq(&se->lock);
}
static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
@@ -1019,6 +1020,8 @@ timeslice_yield(const struct intel_engine_execlists *el,
static bool needs_timeslice(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
+ const struct i915_sched *se = &engine->active;
+
if (!intel_engine_has_timeslices(engine))
return false;
@@ -1031,7 +1034,7 @@ static bool needs_timeslice(const struct intel_engine_cs *engine,
return false;
/* If ELSP[1] is occupied, always check to see if worth slicing */
- if (!list_is_last_rcu(&rq->sched.link, &engine->active.requests)) {
+ if (!list_is_last_rcu(&rq->sched.link, &se->requests)) {
ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
return true;
}
@@ -1134,6 +1137,7 @@ static bool completed(const struct i915_request *rq)
static void execlists_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request **port = execlists->pending;
struct i915_request ** const last_port = port + execlists->port_mask;
struct i915_request *last, * const *active;
@@ -1163,7 +1167,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* and context switches) submission.
*/
- spin_lock(&engine->active.lock);
+ spin_lock(&se->lock);
/*
* If the queue is higher priority than the last
@@ -1263,7 +1267,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
- spin_unlock(&engine->active.lock);
+ spin_unlock(&se->lock);
return;
}
}
@@ -1289,7 +1293,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
- spin_unlock(&engine->active.lock);
+ spin_unlock(&se->lock);
return; /* leave this for another sibling */
}
@@ -1450,7 +1454,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* interrupt for secondary ports).
*/
execlists->queue_priority_hint = queue_prio(execlists);
- spin_unlock(&engine->active.lock);
+ spin_unlock(&se->lock);
/*
* We can skip poking the HW if we ended up with exactly the same set
@@ -2634,6 +2638,7 @@ static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled)
static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
unsigned long flags;
ENGINE_TRACE(engine, "\n");
@@ -2643,9 +2648,9 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
/* Push back any incomplete requests for replay after the reset. */
rcu_read_lock();
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
__i915_sched_rewind_requests(engine);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
rcu_read_unlock();
}
@@ -2661,6 +2666,7 @@ static void nop_submission_tasklet(struct tasklet_struct *t)
static void execlists_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
@@ -2684,10 +2690,10 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
execlists_reset_csb(engine, true);
rcu_read_lock();
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link)
+ list_for_each_entry(rq, &se->requests, sched.link)
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(engine);
@@ -2707,7 +2713,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
}
/* On-hold requests will be flushed to timeline upon their release */
- list_for_each_entry(rq, &engine->active.hold, sched.link)
+ list_for_each_entry(rq, &se->hold, sched.link)
i915_request_put(i915_request_mark_eio(rq));
/* Cancel all attached virtual engines */
@@ -2741,7 +2747,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
execlists->tasklet.callback = nop_submission_tasklet;
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
rcu_read_unlock();
}
@@ -2985,6 +2991,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
{
struct virtual_engine *ve =
container_of(wrk, typeof(*ve), rcu.work);
+ struct i915_sched *se = intel_engine_get_scheduler(&ve->base);
unsigned int n;
GEM_BUG_ON(ve->context.inflight);
@@ -2993,7 +3000,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
if (unlikely(ve->request)) {
struct i915_request *old;
- spin_lock_irq(&ve->base.active.lock);
+ spin_lock_irq(&se->lock);
old = fetch_and_zero(&ve->request);
if (old) {
@@ -3002,7 +3009,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
i915_request_put(old);
}
- spin_unlock_irq(&ve->base.active.lock);
+ spin_unlock_irq(&se->lock);
}
/*
@@ -3188,6 +3195,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
for (n = 0; n < ve->num_siblings; n++) {
struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]);
+ struct i915_sched *se = intel_engine_get_scheduler(sibling);
struct ve_node * const node = &ve->nodes[sibling->id];
struct rb_node **parent, *rb;
bool first;
@@ -3195,7 +3203,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
if (!READ_ONCE(ve->request))
break; /* already handled by a sibling's tasklet */
- spin_lock_irq(&sibling->active.lock);
+ spin_lock_irq(&se->lock);
if (unlikely(!(mask & sibling->mask))) {
if (!RB_EMPTY_NODE(&node->rb)) {
@@ -3248,7 +3256,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
tasklet_hi_schedule(&sibling->execlists.tasklet);
unlock_engine:
- spin_unlock_irq(&sibling->active.lock);
+ spin_unlock_irq(&se->lock);
if (intel_context_inflight(&ve->context))
break;
@@ -3258,6 +3266,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
static void virtual_submit_request(struct i915_request *rq)
{
struct virtual_engine *ve = to_virtual_engine(rq->engine);
+ struct i915_sched *se = intel_engine_get_scheduler(&ve->base);
unsigned long flags;
ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n",
@@ -3266,7 +3275,7 @@ static void virtual_submit_request(struct i915_request *rq)
GEM_BUG_ON(ve->base.submit_request != virtual_submit_request);
- spin_lock_irqsave(&ve->base.active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
/* By the time we resubmit a request, it may be completed */
if (__i915_request_is_complete(rq)) {
@@ -3289,7 +3298,7 @@ static void virtual_submit_request(struct i915_request *rq)
tasklet_hi_schedule(&ve->base.execlists.tasklet);
unlock:
- spin_unlock_irqrestore(&ve->base.active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
static struct ve_bond *
@@ -3540,16 +3549,17 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
unsigned int max)
{
const struct intel_engine_execlists *execlists = &engine->execlists;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *rq, *last;
unsigned long flags;
unsigned int count;
struct rb_node *rb;
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
last = NULL;
count = 0;
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
+ list_for_each_entry(rq, &se->requests, sched.link) {
if (count++ < max - 1)
show_request(m, rq, "\t\t", 0);
else
@@ -3612,7 +3622,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\t", 0);
}
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index aa9cfb4dcbca..68dcb8a1eb8c 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -323,14 +323,15 @@ static void reset_prepare(struct intel_engine_cs *engine)
static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *pos, *rq;
unsigned long flags;
u32 head;
rq = NULL;
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
rcu_read_lock();
- list_for_each_entry(pos, &engine->active.requests, sched.link) {
+ list_for_each_entry(pos, &se->requests, sched.link) {
if (!__i915_request_is_complete(pos)) {
rq = pos;
break;
@@ -385,7 +386,7 @@ static void reset_rewind(struct intel_engine_cs *engine, bool stalled)
}
engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
static void reset_finish(struct intel_engine_cs *engine)
@@ -394,19 +395,20 @@ static void reset_finish(struct intel_engine_cs *engine)
static void reset_cancel(struct intel_engine_cs *engine)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *request;
unsigned long flags;
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &engine->active.requests, sched.link)
+ list_for_each_entry(request, &se->requests, sched.link)
i915_request_put(i915_request_mark_eio(request));
intel_engine_signal_breadcrumbs(engine);
/* Remaining _unready_ requests will be nop'ed when submitted */
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
static void i9xx_submit_request(struct i915_request *request)
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index cf1269e74998..b4d26d3bf39f 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -230,15 +230,16 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
{
struct mock_engine *mock =
container_of(engine, typeof(*mock), base);
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *rq;
unsigned long flags;
del_timer_sync(&mock->hw_delay);
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
/* Mark all submitted requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link)
+ list_for_each_entry(rq, &se->requests, sched.link)
i915_request_put(i915_request_mark_eio(rq));
intel_engine_signal_breadcrumbs(engine);
@@ -251,7 +252,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
}
INIT_LIST_HEAD(&mock->hw_queue);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
static void mock_reset_finish(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 7bab147e4421..5266532c16db 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -4529,6 +4529,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
struct intel_context *ve;
struct igt_spinner spin;
struct i915_request *rq;
+ struct i915_sched *se;
unsigned int n;
int err = 0;
@@ -4565,6 +4566,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
engine = rq->engine;
GEM_BUG_ON(engine == ve->engine);
+ se = intel_engine_get_scheduler(engine);
/* Take ownership of the reset and tasklet */
local_bh_disable();
@@ -4581,9 +4583,9 @@ static int reset_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Fake a preemption event; failed of course */
- spin_lock_irq(&engine->active.lock);
+ spin_lock_irq(&se->lock);
__i915_sched_rewind_requests(engine);
- spin_unlock_irq(&engine->active.lock);
+ spin_unlock_irq(&se->lock);
GEM_BUG_ON(rq->engine != engine);
/* Reset the engine while keeping our active request on hold */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 186c8601689e..4ecbb26391b7 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -182,6 +182,7 @@ static void schedule_out(struct i915_request *rq)
static void __guc_dequeue(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request **first = execlists->inflight;
struct i915_request ** const last_port = first + execlists->port_mask;
struct i915_request *last = first[0];
@@ -189,7 +190,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
bool submit = false;
struct rb_node *rb;
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&se->lock);
if (last) {
if (*++first)
@@ -242,11 +243,12 @@ static void guc_submission_tasklet(struct tasklet_struct *t)
{
struct intel_engine_cs * const engine =
from_tasklet(engine, t, execlists.tasklet);
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port, *rq;
unsigned long flags;
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
for (port = execlists->inflight; (rq = *port); port++) {
if (!i915_request_completed(rq))
@@ -262,7 +264,7 @@ static void guc_submission_tasklet(struct tasklet_struct *t)
__guc_dequeue(engine);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
@@ -315,10 +317,11 @@ static void guc_reset_state(struct intel_context *ce,
static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *rq;
unsigned long flags;
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
/* Push back any incomplete requests for replay after the reset. */
rq = __i915_sched_rewind_requests(engine);
@@ -332,12 +335,13 @@ static void guc_reset_rewind(struct intel_engine_cs *engine, bool stalled)
guc_reset_state(rq->context, engine, rq->head, stalled);
out_unlock:
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
static void guc_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
@@ -358,10 +362,10 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
* submission's irq state, we also wish to remind ourselves that
* it is irq state.)
*/
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
/* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &engine->active.requests, sched.link) {
+ list_for_each_entry(rq, &se->requests, sched.link) {
i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -386,7 +390,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
static void guc_reset_finish(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 45e7fc31c013..f8c50195b330 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1433,6 +1433,7 @@ static struct intel_engine_coredump *
capture_engine(struct intel_engine_cs *engine,
struct i915_vma_compress *compress)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct intel_engine_capture_vma *capture = NULL;
struct intel_engine_coredump *ee;
struct i915_request *rq;
@@ -1442,12 +1443,12 @@ capture_engine(struct intel_engine_cs *engine,
if (!ee)
return NULL;
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
rq = intel_engine_find_active_request(engine);
if (rq)
capture = intel_engine_coredump_add_request(ee, rq,
ATOMIC_MAYFAIL);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
if (!capture) {
kfree(ee);
return NULL;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 916e74fbab6c..947e4fad7cf0 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -533,12 +533,12 @@ struct i915_request *i915_request_mark_eio(struct i915_request *rq)
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
bool result = false;
RQ_TRACE(request, "\n");
- GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&se->lock);
/*
* With the advent of preempt-to-busy, we frequently encounter
@@ -595,7 +595,7 @@ bool __i915_request_submit(struct i915_request *request)
result = true;
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
- list_move_tail(&request->sched.link, &engine->active.requests);
+ list_move_tail(&request->sched.link, &se->requests);
active:
clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
@@ -621,30 +621,25 @@ bool __i915_request_submit(struct i915_request *request)
void i915_request_submit(struct i915_request *request)
{
- struct intel_engine_cs *engine = request->engine;
+ struct i915_sched *se = i915_request_get_scheduler(request);
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
__i915_request_submit(request);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
void __i915_request_unsubmit(struct i915_request *request)
{
- struct intel_engine_cs *engine = request->engine;
-
/*
* Only unwind in reverse order, required so that the per-context list
* is kept in seqno/ring order.
*/
RQ_TRACE(request, "\n");
- GEM_BUG_ON(!irqs_disabled());
- lockdep_assert_held(&engine->active.lock);
-
/*
* Before we remove this breadcrumb from the signal list, we have
* to ensure that a concurrent dma_fence_enable_signaling() does not
@@ -672,15 +667,15 @@ void __i915_request_unsubmit(struct i915_request *request)
void i915_request_unsubmit(struct i915_request *request)
{
- struct intel_engine_cs *engine = request->engine;
+ struct i915_sched *se = i915_request_get_scheduler(request);
unsigned long flags;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
__i915_request_unsubmit(request);
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
}
static int __i915_sw_fence_call
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 9ce074ffc1dd..e320edd718f3 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -589,6 +589,12 @@ static inline void i915_request_clear_hold(struct i915_request *rq)
clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
}
+static inline struct i915_sched *
+i915_request_get_scheduler(const struct i915_request *rq)
+{
+ return intel_engine_get_scheduler(rq->engine);
+}
+
static inline struct intel_timeline *
i915_request_timeline(const struct i915_request *rq)
{
@@ -613,7 +619,7 @@ i915_request_active_timeline(const struct i915_request *rq)
* this submission.
*/
return rcu_dereference_protected(rq->timeline,
- lockdep_is_held(&rq->engine->active.lock));
+ lockdep_is_held(&i915_request_get_scheduler(rq)->lock));
}
static inline bool i915_request_use_scheduler(const struct i915_request *rq)
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 641141f3ce10..034a186017ae 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -183,11 +183,12 @@ static struct list_head *
lookup_priolist(struct intel_engine_cs *engine, int prio)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_priolist *p;
struct rb_node **parent, *rb;
bool first = true;
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&se->lock);
assert_priolists(execlists);
if (unlikely(execlists->no_priolist))
@@ -466,10 +467,11 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
void __i915_sched_defer_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct list_head *pl;
LIST_HEAD(list);
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&se->lock);
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
/*
@@ -561,26 +563,27 @@ static bool hold_request(const struct i915_request *rq)
return result;
}
-static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+static bool ancestor_on_hold(const struct i915_sched *se,
const struct i915_request *rq)
{
GEM_BUG_ON(i915_request_on_hold(rq));
- return unlikely(!list_empty(&engine->active.hold)) && hold_request(rq);
+ return unlikely(!list_empty(&se->hold)) && hold_request(rq);
}
void i915_request_enqueue(struct i915_request *rq)
{
struct intel_engine_cs *engine = rq->engine;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
unsigned long flags;
bool kick = false;
/* Will be called from irq-context when using foreign fences. */
- spin_lock_irqsave(&engine->active.lock, flags);
+ spin_lock_irqsave(&se->lock, flags);
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
- if (unlikely(ancestor_on_hold(engine, rq))) {
+ if (unlikely(ancestor_on_hold(se, rq))) {
RQ_TRACE(rq, "ancestor on hold\n");
- list_add_tail(&rq->sched.link, &engine->active.hold);
+ list_add_tail(&rq->sched.link, &se->hold);
i915_request_set_hold(rq);
} else {
queue_request(engine, rq);
@@ -591,7 +594,7 @@ void i915_request_enqueue(struct i915_request *rq)
}
GEM_BUG_ON(list_empty(&rq->sched.link));
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&se->lock, flags);
if (kick)
tasklet_hi_schedule(&engine->execlists.tasklet);
}
@@ -599,15 +602,14 @@ void i915_request_enqueue(struct i915_request *rq)
struct i915_request *
__i915_sched_rewind_requests(struct intel_engine_cs *engine)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request *rq, *rn, *active = NULL;
struct list_head *pl;
int prio = I915_PRIORITY_INVALID;
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&se->lock);
- list_for_each_entry_safe_reverse(rq, rn,
- &engine->active.requests,
- sched.link) {
+ list_for_each_entry_safe_reverse(rq, rn, &se->requests, sched.link) {
if (__i915_request_is_complete(rq)) {
list_del_init(&rq->sched.link);
continue;
@@ -640,9 +642,10 @@ __i915_sched_rewind_requests(struct intel_engine_cs *engine)
bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
LIST_HEAD(list);
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&se->lock);
GEM_BUG_ON(rq->engine != engine);
if (__i915_request_is_complete(rq)) /* too late! */
@@ -666,7 +669,7 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
if (i915_request_is_active(rq))
__i915_request_unsubmit(rq);
- list_move_tail(&rq->sched.link, &engine->active.hold);
+ list_move_tail(&rq->sched.link, &se->hold);
clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
i915_request_set_hold(rq);
RQ_TRACE(rq, "on hold\n");
@@ -697,7 +700,7 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
} while (rq);
- GEM_BUG_ON(list_empty(&engine->active.hold));
+ GEM_BUG_ON(list_empty(&se->hold));
return true;
}
@@ -705,14 +708,15 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
bool i915_sched_suspend_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
bool result;
if (i915_request_on_hold(rq))
return false;
- spin_lock_irq(&engine->active.lock);
+ spin_lock_irq(&se->lock);
result = __i915_sched_suspend_request(engine, rq);
- spin_unlock_irq(&engine->active.lock);
+ spin_unlock_irq(&se->lock);
return result;
}
@@ -720,9 +724,10 @@ bool i915_sched_suspend_request(struct intel_engine_cs *engine,
void __i915_sched_resume_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
LIST_HEAD(list);
- lockdep_assert_held(&engine->active.lock);
+ lockdep_assert_held(&se->lock);
if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
engine->execlists.queue_priority_hint = rq_prio(rq);
@@ -785,9 +790,11 @@ void __i915_sched_resume_request(struct intel_engine_cs *engine,
void i915_sched_resume_request(struct intel_engine_cs *engine,
struct i915_request *rq)
{
- spin_lock_irq(&engine->active.lock);
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
+
+ spin_lock_irq(&se->lock);
__i915_sched_resume_request(engine, rq);
- spin_unlock_irq(&engine->active.lock);
+ spin_unlock_irq(&se->lock);
}
void i915_sched_node_init(struct i915_sched_node *node)
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 28138c3fcc81..f2b0ac3a05a5 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -68,12 +68,12 @@ struct i915_sched_node {
struct list_head signalers_list; /* those before us, we depend upon */
struct list_head waiters_list; /* those after us, they depend upon us */
- struct list_head link; /* guarded by engine->active.lock */
+ struct list_head link; /* guarded by i915_sched.lock */
struct i915_sched_stack {
/* Branch memoization used during depth-first search */
struct i915_request *prev;
struct list_head *pos;
- } dfs; /* guarded by engine->active.lock */
+ } dfs; /* guarded by i915_sched.lock */
struct i915_sched_attr attr;
unsigned long flags;
#define I915_SCHED_HAS_EXTERNAL_CHAIN BIT(0)
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 298c31ee550c..8c53c613decf 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -77,7 +77,8 @@ static int all_engines(struct drm_i915_private *i915,
return 0;
}
-static bool check_context_order(struct intel_engine_cs *engine)
+static bool check_context_order(struct i915_sched *se,
+ struct intel_engine_cs *engine)
{
u64 last_seqno, last_context;
unsigned long count;
@@ -86,7 +87,7 @@ static bool check_context_order(struct intel_engine_cs *engine)
int last_prio;
/* We expect the execution order to follow ascending fence-context */
- spin_lock_irq(&engine->active.lock);
+ spin_lock_irq(&se->lock);
count = 0;
last_context = 0;
@@ -119,7 +120,7 @@ static bool check_context_order(struct intel_engine_cs *engine)
}
result = true;
out_unlock:
- spin_unlock_irq(&engine->active.lock);
+ spin_unlock_irq(&se->lock);
return result;
}
@@ -128,6 +129,7 @@ static int __single_chain(struct intel_engine_cs *engine, unsigned long length,
bool (*fn)(struct i915_request *rq,
unsigned long v, unsigned long e))
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct intel_context *ce;
struct igt_spinner spin;
struct i915_request *rq;
@@ -173,7 +175,7 @@ static int __single_chain(struct intel_engine_cs *engine, unsigned long length,
intel_engine_flush_submission(engine);
execlists_active_lock_bh(&engine->execlists);
- if (fn(rq, count, count - 1) && !check_context_order(engine))
+ if (fn(rq, count, count - 1) && !check_context_order(se, engine))
err = -EINVAL;
execlists_active_unlock_bh(&engine->execlists);
@@ -191,6 +193,7 @@ static int __wide_chain(struct intel_engine_cs *engine, unsigned long width,
bool (*fn)(struct i915_request *rq,
unsigned long v, unsigned long e))
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct intel_context **ce;
struct i915_request **rq;
struct igt_spinner spin;
@@ -257,7 +260,7 @@ static int __wide_chain(struct intel_engine_cs *engine, unsigned long width,
intel_engine_flush_submission(engine);
execlists_active_lock_bh(&engine->execlists);
- if (fn(rq[i - 1], i, count) && !check_context_order(engine))
+ if (fn(rq[i - 1], i, count) && !check_context_order(se, engine))
err = -EINVAL;
execlists_active_unlock_bh(&engine->execlists);
@@ -279,6 +282,7 @@ static int __inv_chain(struct intel_engine_cs *engine, unsigned long width,
bool (*fn)(struct i915_request *rq,
unsigned long v, unsigned long e))
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct intel_context **ce;
struct i915_request **rq;
struct igt_spinner spin;
@@ -345,7 +349,7 @@ static int __inv_chain(struct intel_engine_cs *engine, unsigned long width,
intel_engine_flush_submission(engine);
execlists_active_lock_bh(&engine->execlists);
- if (fn(rq[i - 1], i, count) && !check_context_order(engine))
+ if (fn(rq[i - 1], i, count) && !check_context_order(se, engine))
err = -EINVAL;
execlists_active_unlock_bh(&engine->execlists);
@@ -367,6 +371,7 @@ static int __sparse_chain(struct intel_engine_cs *engine, unsigned long width,
bool (*fn)(struct i915_request *rq,
unsigned long v, unsigned long e))
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct intel_context **ce;
struct i915_request **rq;
struct igt_spinner spin;
@@ -450,7 +455,7 @@ static int __sparse_chain(struct intel_engine_cs *engine, unsigned long width,
intel_engine_flush_submission(engine);
execlists_active_lock_bh(&engine->execlists);
- if (fn(rq[i - 1], i, count) && !check_context_order(engine))
+ if (fn(rq[i - 1], i, count) && !check_context_order(se, engine))
err = -EINVAL;
execlists_active_unlock_bh(&engine->execlists);
--
2.20.1
* [Intel-gfx] [CI 2/5] drm/i915: Move common active lists from engine to i915_scheduler
From: Chris Wilson @ 2021-02-06 1:20 UTC
To: intel-gfx
Extract the scheduler lists into a dedicated structure rather than leaving
them sprawled across struct intel_engine_cs. Also transfer responsibility
for tracing scheduler events from ENGINE_TRACE() to SCHED_TRACE().
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
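For reference, the bring-up sequence each backend now uses, and the trace
helper that replaces ENGINE_TRACE() for scheduler events (a sketch assembled
from the hunks below; both calls appear verbatim in the diff):

	i915_sched_init(&engine->sched,
			engine->i915->drm.dev,	/* device name for SCHED_TRACE */
			engine->name,		/* pretty name for SCHED_TRACE */
			engine->mask,		/* available scheduling channels */
			ENGINE_PHYSICAL);	/* lockdep subclass for se->lock */

	SCHED_TRACE(&engine->sched, "queue request " RQ_FMT "\n", RQ_ARG(rq));

Virtual and mock engines make the same call with ENGINE_VIRTUAL and
ENGINE_MOCK respectively, so all three backends share one initialiser.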
drivers/gpu/drm/i915/gem/i915_gem_context.c | 8 +--
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 22 ++----
drivers/gpu/drm/i915/gt/intel_engine_types.h | 10 +--
.../drm/i915/gt/intel_execlists_submission.c | 27 +++++---
drivers/gpu/drm/i915/gt/mock_engine.c | 7 +-
drivers/gpu/drm/i915/i915_request.c | 8 +--
drivers/gpu/drm/i915/i915_request.h | 8 ++-
drivers/gpu/drm/i915/i915_scheduler.c | 68 +++++++++++++------
drivers/gpu/drm/i915/i915_scheduler.h | 13 +++-
drivers/gpu/drm/i915/i915_scheduler_types.h | 31 +++++++--
.../gpu/drm/i915/selftests/i915_scheduler.c | 1 +
11 files changed, 133 insertions(+), 70 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index ecacfae8412d..ca37d93ef5e7 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -422,11 +422,11 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
* check that we have acquired the lock on the final engine.
*/
locked = READ_ONCE(rq->engine);
- spin_lock_irq(&locked->active.lock);
+ spin_lock_irq(&locked->sched.lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
- spin_unlock(&locked->active.lock);
+ spin_unlock(&locked->sched.lock);
locked = engine;
- spin_lock(&locked->active.lock);
+ spin_lock(&locked->sched.lock);
}
if (i915_request_is_active(rq)) {
@@ -435,7 +435,7 @@ __active_engine(struct i915_request *rq, struct intel_engine_cs **active)
ret = true;
}
- spin_unlock_irq(&locked->active.lock);
+ spin_unlock_irq(&locked->sched.lock);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index b8ff82c442b8..9fbb2e924522 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -582,8 +582,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine)
execlists->queue_priority_hint = INT_MIN;
execlists->queue = RB_ROOT_CACHED;
-
- i915_sched_init_ipi(&execlists->ipi);
}
static void cleanup_status_page(struct intel_engine_cs *engine)
@@ -699,7 +697,12 @@ static int engine_setup_common(struct intel_engine_cs *engine)
goto err_status;
}
- intel_engine_init_active(engine, ENGINE_PHYSICAL);
+ i915_sched_init(&engine->sched,
+ engine->i915->drm.dev,
+ engine->name,
+ engine->mask,
+ ENGINE_PHYSICAL);
+
intel_engine_init_execlists(engine);
intel_engine_init_cmd_parser(engine);
intel_engine_init__pm(engine);
@@ -768,17 +771,6 @@ static int measure_breadcrumb_dw(struct intel_context *ce)
return dw;
}
-void
-intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
-{
- INIT_LIST_HEAD(&engine->active.requests);
- INIT_LIST_HEAD(&engine->active.hold);
-
- spin_lock_init(&engine->active.lock);
- lockdep_set_subclass(&engine->active.lock, subclass);
- mark_lock_used_irq(&engine->active.lock);
-}
-
static struct intel_context *
create_pinned_context(struct intel_engine_cs *engine,
unsigned int hwsp,
@@ -926,7 +918,7 @@ int intel_engines_init(struct intel_gt *gt)
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
- GEM_BUG_ON(!list_empty(&engine->active.requests));
+ GEM_BUG_ON(!list_empty(&engine->sched.requests));
tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
intel_breadcrumbs_free(engine->breadcrumbs);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 13b3c83f74fc..5a28113c9a98 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -258,8 +258,6 @@ struct intel_engine_execlists {
struct rb_root_cached queue;
struct rb_root_cached virtual;
- struct i915_sched_ipi ipi;
-
/**
* @csb_write: control register for Context Switch buffer
*
@@ -329,11 +327,7 @@ struct intel_engine_cs {
struct intel_sseu sseu;
- struct i915_sched {
- spinlock_t lock;
- struct list_head requests;
- struct list_head hold; /* ready requests, but on hold */
- } active;
+ struct i915_sched sched;
/* keep a request in reserve for a [pm] barrier under oom */
struct i915_request *request_pool;
@@ -620,7 +614,7 @@ intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine)
static inline struct i915_sched *
intel_engine_get_scheduler(struct intel_engine_cs *engine)
{
- return &engine->active;
+ return &engine->sched;
}
#endif /* __INTEL_ENGINE_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 36bdb963852e..1b65df24cacd 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -294,7 +294,7 @@ static int virtual_prio(const struct intel_engine_execlists *el)
static bool need_preempt(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
- const struct i915_sched *se = &engine->active;
+ const struct i915_sched *se = &engine->sched;
int last_prio;
if (!intel_engine_has_semaphores(engine))
@@ -1020,7 +1020,7 @@ timeslice_yield(const struct intel_engine_execlists *el,
static bool needs_timeslice(const struct intel_engine_cs *engine,
const struct i915_request *rq)
{
- const struct i915_sched *se = &engine->active;
+ const struct i915_sched *se = &engine->sched;
if (!intel_engine_has_timeslices(engine))
return false;
@@ -1277,7 +1277,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
while ((ve = first_virtual_engine(engine))) {
struct i915_request *rq;
- spin_lock(&ve->base.active.lock);
+ spin_lock(&ve->base.sched.lock);
rq = ve->request;
if (unlikely(!virtual_matches(ve, rq, engine)))
@@ -1287,12 +1287,12 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(rq->context != &ve->context);
if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
- spin_unlock(&ve->base.active.lock);
+ spin_unlock(&ve->base.sched.lock);
break;
}
if (last && !can_merge_rq(last, rq)) {
- spin_unlock(&ve->base.active.lock);
+ spin_unlock(&ve->base.sched.lock);
spin_unlock(&se->lock);
return; /* leave this for another sibling */
}
@@ -1339,7 +1339,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
i915_request_put(rq);
unlock:
- spin_unlock(&ve->base.active.lock);
+ spin_unlock(&ve->base.sched.lock);
/*
* Hmm, we have a bunch of virtual engine requests,
@@ -2724,7 +2724,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
- spin_lock(&ve->base.active.lock);
+ spin_lock(&ve->base.sched.lock);
rq = fetch_and_zero(&ve->request);
if (rq) {
if (i915_request_mark_eio(rq)) {
@@ -2736,7 +2736,7 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
ve->base.execlists.queue_priority_hint = INT_MIN;
}
- spin_unlock(&ve->base.active.lock);
+ spin_unlock(&ve->base.sched.lock);
}
/* Remaining _unready_ requests will be nop'ed when submitted */
@@ -3029,13 +3029,13 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
if (RB_EMPTY_NODE(node))
continue;
- spin_lock_irq(&sibling->active.lock);
+ spin_lock_irq(&sibling->sched.lock);
/* Detachment is lazily performed in the execlists tasklet */
if (!RB_EMPTY_NODE(node))
rb_erase_cached(node, &sibling->execlists.virtual);
- spin_unlock_irq(&sibling->active.lock);
+ spin_unlock_irq(&sibling->sched.lock);
}
GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
@@ -3382,7 +3382,6 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
snprintf(ve->base.name, sizeof(ve->base.name), "virtual");
- intel_engine_init_active(&ve->base, ENGINE_VIRTUAL);
intel_engine_init_execlists(&ve->base);
ve->base.cops = &virtual_context_ops;
@@ -3468,6 +3467,12 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
ve->base.flags |= I915_ENGINE_IS_VIRTUAL;
+ i915_sched_init(&ve->base.sched,
+ ve->base.i915->drm.dev,
+ ve->base.name,
+ ve->base.mask,
+ ENGINE_VIRTUAL);
+
virtual_engine_initial_hint(ve);
return &ve->context;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index b4d26d3bf39f..8b1c2727d25c 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -328,7 +328,12 @@ int mock_engine_init(struct intel_engine_cs *engine)
{
struct intel_context *ce;
- intel_engine_init_active(engine, ENGINE_MOCK);
+ i915_sched_init(&engine->sched,
+ engine->i915->drm.dev,
+ engine->name,
+ engine->mask,
+ ENGINE_MOCK);
+
intel_engine_init_execlists(engine);
intel_engine_init__pm(engine);
intel_engine_init_retire(engine);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 947e4fad7cf0..d736c1aae6e5 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -255,10 +255,10 @@ static void remove_from_engine(struct i915_request *rq)
* check that the rq still belongs to the newly locked engine.
*/
locked = READ_ONCE(rq->engine);
- spin_lock_irq(&locked->active.lock);
+ spin_lock_irq(&locked->sched.lock);
while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
- spin_unlock(&locked->active.lock);
- spin_lock(&engine->active.lock);
+ spin_unlock(&locked->sched.lock);
+ spin_lock(&engine->sched.lock);
locked = engine;
}
list_del_init(&rq->sched.link);
@@ -269,7 +269,7 @@ static void remove_from_engine(struct i915_request *rq)
/* Prevent further __await_execution() registering a cb, then flush */
set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
- spin_unlock_irq(&locked->active.lock);
+ spin_unlock_irq(&locked->sched.lock);
__notify_execute_cb_imm(rq);
}
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index e320edd718f3..3a5d6bdcd8dd 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -51,11 +51,13 @@ struct i915_capture_list {
struct i915_vma *vma;
};
+#define RQ_FMT "%llx:%lld"
+#define RQ_ARG(rq) (rq) ? (rq)->fence.context : 0, (rq) ? (rq)->fence.seqno : 0
+
#define RQ_TRACE(rq, fmt, ...) do { \
const struct i915_request *rq__ = (rq); \
- ENGINE_TRACE(rq__->engine, "fence %llx:%lld, current %d " fmt, \
- rq__->fence.context, rq__->fence.seqno, \
- hwsp_seqno(rq__), ##__VA_ARGS__); \
+ ENGINE_TRACE(rq__->engine, "fence " RQ_FMT ", current %d " fmt, \
+ RQ_ARG(rq__), hwsp_seqno(rq__), ##__VA_ARGS__); \
} while (0)
enum {
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 034a186017ae..aec99142f712 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -13,6 +13,7 @@
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
+#include "i915_utils.h"
static struct i915_global_scheduler {
struct i915_global base;
@@ -30,11 +31,11 @@ static struct i915_global_scheduler {
struct i915_request * const rq__ = (rq); \
struct intel_engine_cs *engine__ = READ_ONCE(rq__->engine); \
\
- spin_lock_irqsave(&engine__->active.lock, (flags)); \
+ spin_lock_irqsave(&engine__->sched.lock, (flags)); \
while (engine__ != READ_ONCE((rq__)->engine)) { \
- spin_unlock(&engine__->active.lock); \
+ spin_unlock(&engine__->sched.lock); \
engine__ = READ_ONCE(rq__->engine); \
- spin_lock(&engine__->active.lock); \
+ spin_lock(&engine__->sched.lock); \
} \
\
engine__; \
@@ -105,16 +106,37 @@ static void ipi_schedule(struct work_struct *wrk)
} while (rq);
}
-void i915_sched_init_ipi(struct i915_sched_ipi *ipi)
+static void init_ipi(struct i915_sched_ipi *ipi)
{
INIT_WORK(&ipi->work, ipi_schedule);
ipi->list = NULL;
}
+void i915_sched_init(struct i915_sched *se,
+ struct device *dev,
+ const char *name,
+ unsigned long mask,
+ unsigned int subclass)
+{
+ spin_lock_init(&se->lock);
+ lockdep_set_subclass(&se->lock, subclass);
+ mark_lock_used_irq(&se->lock);
+
+ se->dbg.dev = dev;
+ se->dbg.name = name;
+
+ se->mask = mask;
+
+ INIT_LIST_HEAD(&se->requests);
+ INIT_LIST_HEAD(&se->hold);
+
+ init_ipi(&se->ipi);
+}
+
static void __ipi_add(struct i915_request *rq)
{
#define STUB ((struct i915_request *)1)
- struct intel_engine_cs *engine = READ_ONCE(rq->engine);
+ struct i915_sched *se = i915_request_get_scheduler(rq);
struct i915_request *first;
if (!i915_request_get_rcu(rq))
@@ -134,13 +156,13 @@ static void __ipi_add(struct i915_request *rq)
}
/* Carefully insert ourselves into the head of the llist */
- first = READ_ONCE(engine->execlists.ipi.list);
+ first = READ_ONCE(se->ipi.list);
do {
rq->sched.ipi_link = ptr_pack_bits(first, 1, 1);
- } while (!try_cmpxchg(&engine->execlists.ipi.list, &first, rq));
+ } while (!try_cmpxchg(&se->ipi.list, &first, rq));
if (!first)
- queue_work(system_unbound_wq, &engine->execlists.ipi.work);
+ queue_work(system_unbound_wq, &se->ipi.work);
}
static const struct i915_request *
@@ -303,12 +325,11 @@ static void kick_submission(struct intel_engine_cs *engine,
if (inflight->context == rq->context)
return;
- ENGINE_TRACE(engine,
- "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n",
- prio,
- rq->fence.context, rq->fence.seqno,
- inflight->fence.context, inflight->fence.seqno,
- inflight->sched.attr.priority);
+ SCHED_TRACE(&engine->sched,
+ "bumping queue-priority-hint:%d for rq:" RQ_FMT ", inflight:" RQ_FMT " prio %d\n",
+ prio,
+ RQ_ARG(rq), RQ_ARG(inflight),
+ inflight->sched.attr.priority);
engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
@@ -333,6 +354,9 @@ static void __i915_request_set_priority(struct i915_request *rq, int prio)
struct list_head *pos = &rq->sched.signalers_list;
struct list_head *plist;
+ SCHED_TRACE(&engine->sched, "PI for " RQ_FMT ", prio:%d\n",
+ RQ_ARG(rq), prio);
+
plist = lookup_priolist(engine, prio);
/*
@@ -461,7 +485,7 @@ void i915_request_set_priority(struct i915_request *rq, int prio)
GEM_BUG_ON(rq_prio(rq) != prio);
unlock:
- spin_unlock_irqrestore(&engine->active.lock, flags);
+ spin_unlock_irqrestore(&engine->sched.lock, flags);
}
void __i915_sched_defer_request(struct intel_engine_cs *engine,
@@ -471,6 +495,8 @@ void __i915_sched_defer_request(struct intel_engine_cs *engine,
struct list_head *pl;
LIST_HEAD(list);
+ SCHED_TRACE(se, "defer request " RQ_FMT "\n", RQ_ARG(rq));
+
lockdep_assert_held(&se->lock);
GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
@@ -577,6 +603,8 @@ void i915_request_enqueue(struct i915_request *rq)
unsigned long flags;
bool kick = false;
+ SCHED_TRACE(se, "queue request " RQ_FMT "\n", RQ_ARG(rq));
+
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&se->lock, flags);
GEM_BUG_ON(test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags));
@@ -636,6 +664,10 @@ __i915_sched_rewind_requests(struct intel_engine_cs *engine)
active = rq;
}
+ SCHED_TRACE(se,
+ "rewind requests, active request " RQ_FMT "\n",
+ RQ_ARG(active));
+
return active;
}
@@ -654,8 +686,7 @@ bool __i915_sched_suspend_request(struct intel_engine_cs *engine,
if (i915_request_on_hold(rq))
return false;
- ENGINE_TRACE(engine, "suspending request %llx:%lld\n",
- rq->fence.context, rq->fence.seqno);
+ SCHED_TRACE(se, "suspending request " RQ_FMT "\n", RQ_ARG(rq));
/*
* Transfer this request onto the hold queue to prevent it
@@ -737,8 +768,7 @@ void __i915_sched_resume_request(struct intel_engine_cs *engine,
if (!i915_request_on_hold(rq))
return;
- ENGINE_TRACE(engine, "resuming request %llx:%lld\n",
- rq->fence.context, rq->fence.seqno);
+ SCHED_TRACE(se, "resuming request " RQ_FMT "\n", RQ_ARG(rq));
/*
* Move this request back to the priority queue, and all of its
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 00ce0a9d519d..ebd93ae303b4 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -16,6 +16,13 @@
struct drm_printer;
struct intel_engine_cs;
+#define SCHED_TRACE(se, fmt, ...) do { \
+ const struct i915_sched *se__ __maybe_unused = (se); \
+ GEM_TRACE("%s sched:%s: " fmt, \
+ dev_name(se__->dbg.dev), se__->dbg.name, \
+ ##__VA_ARGS__); \
+} while (0)
+
#define priolist_for_each_request(it, plist) \
list_for_each_entry(it, &(plist)->requests, sched.link)
@@ -36,7 +43,11 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
void i915_sched_node_retire(struct i915_sched_node *node);
-void i915_sched_init_ipi(struct i915_sched_ipi *ipi);
+void i915_sched_init(struct i915_sched *se,
+ struct device *dev,
+ const char *name,
+ unsigned long mask,
+ unsigned int subclass);
void i915_request_set_priority(struct i915_request *request, int prio);
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index f2b0ac3a05a5..b7ee122d4f28 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -14,10 +14,33 @@
struct i915_request;
-/* Inter-engine scheduling delegation */
-struct i915_sched_ipi {
- struct i915_request *list;
- struct work_struct work;
+/**
+ * struct i915_sched - funnels requests towards hardware
+ *
+ * The struct i915_sched captures all the requests as they become ready
+ * to execute (on waking the i915_request.submit fence), puts them into
+ * a queue where they may be reordered according to priority, and then
+ * wakes the backend tasklet to feed the queue to HW.
+ */
+struct i915_sched {
+ spinlock_t lock; /* protects the scheduling lists and queue */
+
+ unsigned long mask; /* available scheduling channels */
+
+ struct list_head requests; /* active request, on HW */
+ struct list_head hold; /* ready requests, but on hold */
+
+ /* Inter-engine scheduling delegate */
+ struct i915_sched_ipi {
+ struct i915_request *list;
+ struct work_struct work;
+ } ipi;
+
+ /* Pretty device names for debug messages */
+ struct {
+ struct device *dev;
+ const char *name;
+ } dbg;
};
struct i915_sched_attr {
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 8c53c613decf..88ebe92bed2d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -878,6 +878,7 @@ int i915_scheduler_perf_selftests(struct drm_i915_private *i915)
#define T(t) { #t, sizeof(struct t) }
T(i915_dependency),
T(i915_priolist),
+ T(i915_sched),
T(i915_sched_attr),
T(i915_sched_node),
T(i915_request),
--
2.20.1
* [Intel-gfx] [CI 3/5] drm/i915: Move scheduler queue
2021-02-06 1:20 [Intel-gfx] [CI 1/5] drm/i915: Wrap access to intel_engine.active Chris Wilson
2021-02-06 1:20 ` [Intel-gfx] [CI 2/5] drm/i915: Move common active lists from engine to i915_scheduler Chris Wilson
@ 2021-02-06 1:20 ` Chris Wilson
2021-02-06 1:20 ` [Intel-gfx] [CI 4/5] drm/i915: Move tasklet from execlists to sched Chris Wilson
` (5 subsequent siblings)
7 siblings, 0 replies; 9+ messages in thread
From: Chris Wilson @ 2021-02-06 1:20 UTC (permalink / raw)
To: intel-gfx
Extract the scheduling queue from "execlists" into the per-engine
scheduling structs, for reuse by other backends.
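For reference, the relocated queue keeps struct i915_priolist nodes sorted
with the highest priority first, and backends drain it with the pattern
repeated in the hunks below. A minimal sketch under se->lock, with
submit_to_hw() standing in for the backend's port filling:

	struct rb_node *rb;

	while ((rb = rb_first_cached(&se->queue))) {
		struct i915_priolist *p = to_priolist(rb);
		struct i915_request *rq, *rn;

		priolist_for_each_request_consume(rq, rn, p)
			submit_to_hw(rq);	/* placeholder */

		rb_erase_cached(&p->node, &se->queue);
		i915_priolist_free(p);
	}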
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
.../gpu/drm/i915/gem/i915_gem_context_types.h | 2 +-
drivers/gpu/drm/i915/gem/i915_gem_wait.c | 1 +
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 7 +-
drivers/gpu/drm/i915/gt/intel_engine_pm.c | 3 +-
drivers/gpu/drm/i915/gt/intel_engine_types.h | 14 ---
.../drm/i915/gt/intel_execlists_submission.c | 54 ++++-------
.../gpu/drm/i915/gt/intel_ring_submission.c | 9 +-
drivers/gpu/drm/i915/gt/mock_engine.c | 7 +-
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 32 ++-----
drivers/gpu/drm/i915/i915_drv.h | 1 -
drivers/gpu/drm/i915/i915_request.h | 2 +-
drivers/gpu/drm/i915/i915_scheduler.c | 90 ++++++++++++++-----
drivers/gpu/drm/i915/i915_scheduler.h | 17 ++++
drivers/gpu/drm/i915/i915_scheduler_types.h | 34 +++++++
.../gpu/drm/i915/selftests/i915_scheduler.c | 13 ++-
15 files changed, 159 insertions(+), 127 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 085f6a3735e8..d5bc75508048 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -19,7 +19,7 @@
#include "gt/intel_context_types.h"
-#include "i915_scheduler.h"
+#include "i915_scheduler_types.h"
#include "i915_sw_fence.h"
struct pid;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_wait.c b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
index d79bf16083bd..4d1897c347b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_wait.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_wait.c
@@ -13,6 +13,7 @@
#include "dma_resv_utils.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
+#include "i915_scheduler.h"
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 9fbb2e924522..2ce65189d5e2 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -581,7 +581,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine)
memset(execlists->inflight, 0, sizeof(execlists->inflight));
execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
}
static void cleanup_status_page(struct intel_engine_cs *engine)
@@ -918,7 +917,7 @@ int intel_engines_init(struct intel_gt *gt)
*/
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
- GEM_BUG_ON(!list_empty(&engine->sched.requests));
+ i915_sched_fini(intel_engine_get_scheduler(engine));
tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
intel_breadcrumbs_free(engine->breadcrumbs);
@@ -1232,6 +1231,8 @@ void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
*/
bool intel_engine_is_idle(struct intel_engine_cs *engine)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
+
/* More white lies, if wedged, hw state is inconsistent */
if (intel_gt_is_wedged(engine->gt))
return true;
@@ -1244,7 +1245,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
intel_engine_flush_submission(engine);
/* ELSP is empty, but there are ready requests? E.g. after reset */
- if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root))
+ if (!i915_sched_is_idle(se))
return false;
/* Ring stopped? */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 6372d7826bc9..3510c9236334 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -4,6 +4,7 @@
*/
#include "i915_drv.h"
+#include "i915_scheduler.h"
#include "intel_breadcrumbs.h"
#include "intel_context.h"
@@ -276,7 +277,7 @@ static int __engine_park(struct intel_wakeref *wf)
if (engine->park)
engine->park(engine);
- engine->execlists.no_priolist = false;
+ i915_sched_park(intel_engine_get_scheduler(engine));
/* While gt calls i915_vma_parked(), we have to break the lock cycle */
intel_gt_pm_put_async(engine->gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 5a28113c9a98..9ea38b01121a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -153,11 +153,6 @@ struct intel_engine_execlists {
*/
struct timer_list preempt;
- /**
- * @default_priolist: priority list for I915_PRIORITY_NORMAL
- */
- struct i915_priolist default_priolist;
-
/**
* @ccid: identifier for contexts submitted to this engine
*/
@@ -192,11 +187,6 @@ struct intel_engine_execlists {
*/
u32 reset_ccid;
- /**
- * @no_priolist: priority lists disabled
- */
- bool no_priolist;
-
/**
* @submit_reg: gen-specific execlist submission register
* set to the ExecList Submission Port (elsp) register pre-Gen11 and to
@@ -252,10 +242,6 @@ struct intel_engine_execlists {
*/
int queue_priority_hint;
- /**
- * @queue: queue of requests, in priority lists
- */
- struct rb_root_cached queue;
struct rb_root_cached virtual;
/**
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 1b65df24cacd..12e20a8eb5d5 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -273,11 +273,11 @@ static int effective_prio(const struct i915_request *rq)
return prio;
}
-static int queue_prio(const struct intel_engine_execlists *execlists)
+static int queue_prio(const struct i915_sched *se)
{
struct rb_node *rb;
- rb = rb_first_cached(&execlists->queue);
+ rb = rb_first_cached(&se->queue);
if (!rb)
return INT_MIN;
@@ -341,7 +341,7 @@ static bool need_preempt(const struct intel_engine_cs *engine,
* context, its priority would not exceed ELSP[0] aka last_prio.
*/
return max(virtual_prio(&engine->execlists),
- queue_prio(&engine->execlists)) > last_prio;
+ queue_prio(se)) > last_prio;
}
__maybe_unused static bool
@@ -1034,13 +1034,13 @@ static bool needs_timeslice(const struct intel_engine_cs *engine,
return false;
/* If ELSP[1] is occupied, always check to see if worth slicing */
- if (!list_is_last_rcu(&rq->sched.link, &se->requests)) {
+ if (!i915_sched_is_last_request(se, rq)) {
ENGINE_TRACE(engine, "timeslice required for second inflight context\n");
return true;
}
/* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */
- if (!RB_EMPTY_ROOT(&engine->execlists.queue.rb_root)) {
+ if (!i915_sched_is_idle(se)) {
ENGINE_TRACE(engine, "timeslice required for queue\n");
return true;
}
@@ -1286,7 +1286,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(rq->engine != &ve->base);
GEM_BUG_ON(rq->context != &ve->context);
- if (unlikely(rq_prio(rq) < queue_prio(execlists))) {
+ if (unlikely(rq_prio(rq) < queue_prio(se))) {
spin_unlock(&ve->base.sched.lock);
break;
}
@@ -1352,7 +1352,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
break;
}
- while ((rb = rb_first_cached(&execlists->queue))) {
+ while ((rb = rb_first_cached(&se->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -1431,7 +1431,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
}
}
- rb_erase_cached(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &se->queue);
i915_priolist_free(p);
}
done:
@@ -1453,7 +1453,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* request triggering preemption on the next dequeue (or subsequent
* interrupt for secondary ports).
*/
- execlists->queue_priority_hint = queue_prio(execlists);
+ execlists->queue_priority_hint = queue_prio(se);
spin_unlock(&se->lock);
/*
@@ -2667,7 +2667,6 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_sched *se = intel_engine_get_scheduler(engine);
- struct i915_request *rq, *rn;
struct rb_node *rb;
unsigned long flags;
@@ -2692,34 +2691,13 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
rcu_read_lock();
spin_lock_irqsave(&se->lock, flags);
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &se->requests, sched.link)
- i915_request_put(i915_request_mark_eio(rq));
- intel_engine_signal_breadcrumbs(engine);
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
-
- priolist_for_each_request_consume(rq, rn, p) {
- if (i915_request_mark_eio(rq)) {
- __i915_request_submit(rq);
- i915_request_put(rq);
- }
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
-
- /* On-hold requests will be flushed to timeline upon their release */
- list_for_each_entry(rq, &se->hold, sched.link)
- i915_request_put(i915_request_mark_eio(rq));
+ __i915_sched_cancel_queue(se);
/* Cancel all attached virtual engines */
while ((rb = rb_first_cached(&execlists->virtual))) {
struct virtual_engine *ve =
rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
+ struct i915_request *rq;
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
@@ -2739,16 +2717,16 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
spin_unlock(&ve->base.sched.lock);
}
- /* Remaining _unready_ requests will be nop'ed when submitted */
-
execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
+ se->queue = RB_ROOT_CACHED;
GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
execlists->tasklet.callback = nop_submission_tasklet;
spin_unlock_irqrestore(&se->lock, flags);
rcu_read_unlock();
+
+ intel_engine_signal_breadcrumbs(engine);
}
static void execlists_reset_finish(struct intel_engine_cs *engine)
@@ -2984,7 +2962,7 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
static struct list_head *virtual_queue(struct virtual_engine *ve)
{
- return &ve->base.execlists.default_priolist.requests;
+ return &ve->base.sched.default_priolist.requests;
}
static void rcu_virtual_context_destroy(struct work_struct *wrk)
@@ -3585,7 +3563,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
last = NULL;
count = 0;
- for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ for (rb = rb_first_cached(&se->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
priolist_for_each_request(rq, p) {
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 68dcb8a1eb8c..4a7d3420cc9d 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -396,19 +396,14 @@ static void reset_finish(struct intel_engine_cs *engine)
static void reset_cancel(struct intel_engine_cs *engine)
{
struct i915_sched *se = intel_engine_get_scheduler(engine);
- struct i915_request *request;
unsigned long flags;
spin_lock_irqsave(&se->lock, flags);
- /* Mark all submitted requests as skipped. */
- list_for_each_entry(request, &se->requests, sched.link)
- i915_request_put(i915_request_mark_eio(request));
- intel_engine_signal_breadcrumbs(engine);
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
+ __i915_sched_cancel_queue(se);
spin_unlock_irqrestore(&se->lock, flags);
+ intel_engine_signal_breadcrumbs(engine);
}
static void i9xx_submit_request(struct i915_request *request)
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 8b1c2727d25c..2081deed94b7 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -238,10 +238,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
spin_lock_irqsave(&se->lock, flags);
- /* Mark all submitted requests as skipped. */
- list_for_each_entry(rq, &se->requests, sched.link)
- i915_request_put(i915_request_mark_eio(rq));
- intel_engine_signal_breadcrumbs(engine);
+ __i915_sched_cancel_queue(se);
/* Cancel and submit all pending requests. */
list_for_each_entry(rq, &mock->hw_queue, mock.link) {
@@ -252,6 +249,8 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
}
INIT_LIST_HEAD(&mock->hw_queue);
+ intel_engine_signal_breadcrumbs(engine);
+
spin_unlock_irqrestore(&se->lock, flags);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 4ecbb26391b7..7b1780d1652f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -205,7 +205,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
* event.
*/
port = first;
- while ((rb = rb_first_cached(&execlists->queue))) {
+ while ((rb = rb_first_cached(&se->queue))) {
struct i915_priolist *p = to_priolist(rb);
struct i915_request *rq, *rn;
@@ -225,7 +225,7 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
last = rq;
}
- rb_erase_cached(&p->node, &execlists->queue);
+ rb_erase_cached(&p->node, &se->queue);
i915_priolist_free(p);
}
done:
@@ -342,8 +342,6 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
{
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_sched *se = intel_engine_get_scheduler(engine);
- struct i915_request *rq, *rn;
- struct rb_node *rb;
unsigned long flags;
ENGINE_TRACE(engine, "\n");
@@ -364,33 +362,13 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
*/
spin_lock_irqsave(&se->lock, flags);
- /* Mark all executing requests as skipped. */
- list_for_each_entry(rq, &se->requests, sched.link) {
- i915_request_set_error_once(rq, -EIO);
- i915_request_mark_complete(rq);
- }
-
- /* Flush the queued requests to the timeline list (for retiring). */
- while ((rb = rb_first_cached(&execlists->queue))) {
- struct i915_priolist *p = to_priolist(rb);
-
- priolist_for_each_request_consume(rq, rn, p) {
- list_del_init(&rq->sched.link);
- __i915_request_submit(rq);
- dma_fence_set_error(&rq->fence, -EIO);
- i915_request_mark_complete(rq);
- }
-
- rb_erase_cached(&p->node, &execlists->queue);
- i915_priolist_free(p);
- }
-
- /* Remaining _unready_ requests will be nop'ed when submitted */
+ __i915_sched_cancel_queue(se);
execlists->queue_priority_hint = INT_MIN;
- execlists->queue = RB_ROOT_CACHED;
+ se->queue = RB_ROOT_CACHED;
spin_unlock_irqrestore(&se->lock, flags);
+ intel_engine_signal_breadcrumbs(engine);
}
static void guc_reset_finish(struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index faf7eafdad45..cee180ca7f5a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -99,7 +99,6 @@
#include "i915_gpu_error.h"
#include "i915_perf_types.h"
#include "i915_request.h"
-#include "i915_scheduler.h"
#include "gt/intel_timeline.h"
#include "i915_vma.h"
#include "i915_irq.h"
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 3a5d6bdcd8dd..c41582b96b46 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -35,7 +35,7 @@
#include "gt/intel_timeline_types.h"
#include "i915_gem.h"
-#include "i915_scheduler.h"
+#include "i915_scheduler_types.h"
#include "i915_selftest.h"
#include "i915_sw_fence.h"
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index aec99142f712..4d281e990a86 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -129,10 +129,24 @@ void i915_sched_init(struct i915_sched *se,
INIT_LIST_HEAD(&se->requests);
INIT_LIST_HEAD(&se->hold);
+ se->queue = RB_ROOT_CACHED;
init_ipi(&se->ipi);
}
+void i915_sched_park(struct i915_sched *se)
+{
+ GEM_BUG_ON(!i915_sched_is_idle(se));
+ se->no_priolist = false;
+}
+
+void i915_sched_fini(struct i915_sched *se)
+{
+ GEM_BUG_ON(!list_empty(&se->requests));
+
+ i915_sched_park(se);
+}
+
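The intended lifecycle, matching the call sites converted above, is init at
engine setup, park whenever the engine idles, and fini at teardown. A sketch
of the pairing (the init arguments are an assumption based on the prototype
in i915_scheduler.h; the subclass is the lockdep nesting level):

	i915_sched_init(se, engine->i915->drm.dev, engine->name,
			engine->mask, 0 /* lockdep subclass */);
	...
	i915_sched_park(se);	/* from __engine_park(); queue must be idle */
	...
	i915_sched_fini(se);	/* from intel_engine_cleanup_common() */
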
static void __ipi_add(struct i915_request *rq)
{
#define STUB ((struct i915_request *)1)
@@ -181,7 +195,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
return rb_entry(rb, struct i915_priolist, node);
}
-static void assert_priolists(struct intel_engine_execlists * const execlists)
+static void assert_priolists(struct i915_sched * const se)
{
struct rb_node *rb;
long last_prio;
@@ -189,11 +203,11 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
return;
- GEM_BUG_ON(rb_first_cached(&execlists->queue) !=
- rb_first(&execlists->queue.rb_root));
+ GEM_BUG_ON(rb_first_cached(&se->queue) !=
+ rb_first(&se->queue.rb_root));
last_prio = INT_MAX;
- for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
+ for (rb = rb_first_cached(&se->queue); rb; rb = rb_next(rb)) {
const struct i915_priolist *p = to_priolist(rb);
GEM_BUG_ON(p->priority > last_prio);
@@ -202,24 +216,22 @@ static void assert_priolists(struct intel_engine_execlists * const execlists)
}
static struct list_head *
-lookup_priolist(struct intel_engine_cs *engine, int prio)
+lookup_priolist(struct i915_sched *se, int prio)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
- struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_priolist *p;
struct rb_node **parent, *rb;
bool first = true;
lockdep_assert_held(&se->lock);
- assert_priolists(execlists);
+ assert_priolists(se);
- if (unlikely(execlists->no_priolist))
+ if (unlikely(se->no_priolist))
prio = I915_PRIORITY_NORMAL;
find_priolist:
/* most positive priority is scheduled first, equal priorities fifo */
rb = NULL;
- parent = &execlists->queue.rb_root.rb_node;
+ parent = &se->queue.rb_root.rb_node;
while (*parent) {
rb = *parent;
p = to_priolist(rb);
@@ -234,7 +246,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
}
if (prio == I915_PRIORITY_NORMAL) {
- p = &execlists->default_priolist;
+ p = &se->default_priolist;
} else {
p = kmem_cache_alloc(global.slab_priorities, GFP_ATOMIC);
/* Convert an allocation failure to a priority bump */
@@ -249,7 +261,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
* requests, so if userspace lied about their
* dependencies that reordering may be visible.
*/
- execlists->no_priolist = true;
+ se->no_priolist = true;
goto find_priolist;
}
}
@@ -258,7 +270,7 @@ lookup_priolist(struct intel_engine_cs *engine, int prio)
INIT_LIST_HEAD(&p->requests);
rb_link_node(&p->node, rb, parent);
- rb_insert_color_cached(&p->node, &execlists->queue, first);
+ rb_insert_color_cached(&p->node, &se->queue, first);
return &p->requests;
}
@@ -351,13 +363,14 @@ static void ipi_priority(struct i915_request *rq, int prio)
static void __i915_request_set_priority(struct i915_request *rq, int prio)
{
struct intel_engine_cs *engine = rq->engine;
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct list_head *pos = &rq->sched.signalers_list;
struct list_head *plist;
SCHED_TRACE(&engine->sched, "PI for " RQ_FMT ", prio:%d\n",
RQ_ARG(rq), prio);
- plist = lookup_priolist(engine, prio);
+ plist = lookup_priolist(se, prio);
/*
* Recursively bump all dependent priorities to match the new request.
@@ -505,7 +518,7 @@ void __i915_sched_defer_request(struct intel_engine_cs *engine,
* to those that are waiting upon it. So we traverse its chain of
* waiters and move any that are earlier than the request to after it.
*/
- pl = lookup_priolist(engine, rq_prio(rq));
+ pl = lookup_priolist(se, rq_prio(rq));
do {
struct i915_dependency *p;
@@ -543,11 +556,10 @@ void __i915_sched_defer_request(struct intel_engine_cs *engine,
} while (rq);
}
-static void queue_request(struct intel_engine_cs *engine,
- struct i915_request *rq)
+static void queue_request(struct i915_sched *se, struct i915_request *rq)
{
GEM_BUG_ON(!list_empty(&rq->sched.link));
- list_add_tail(&rq->sched.link, lookup_priolist(engine, rq_prio(rq)));
+ list_add_tail(&rq->sched.link, lookup_priolist(se, rq_prio(rq)));
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
@@ -614,9 +626,9 @@ void i915_request_enqueue(struct i915_request *rq)
list_add_tail(&rq->sched.link, &se->hold);
i915_request_set_hold(rq);
} else {
- queue_request(engine, rq);
+ queue_request(se, rq);
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(i915_sched_is_idle(se));
kick = submit_queue(engine, rq);
}
@@ -648,9 +660,9 @@ __i915_sched_rewind_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID);
if (rq_prio(rq) != prio) {
prio = rq_prio(rq);
- pl = lookup_priolist(engine, prio);
+ pl = lookup_priolist(se, prio);
}
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(i915_sched_is_idle(se));
list_move(&rq->sched.link, pl);
set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
@@ -785,7 +797,7 @@ void __i915_sched_resume_request(struct intel_engine_cs *engine,
i915_request_clear_hold(rq);
list_del_init(&rq->sched.link);
- queue_request(engine, rq);
+ queue_request(se, rq);
/* Also release any children on this engine that are ready */
for_each_waiter(p, rq) {
@@ -827,6 +839,38 @@ void i915_sched_resume_request(struct intel_engine_cs *engine,
spin_unlock_irq(&se->lock);
}
+void __i915_sched_cancel_queue(struct i915_sched *se)
+{
+ struct i915_request *rq, *rn;
+ struct rb_node *rb;
+
+ lockdep_assert_held(&se->lock);
+
+ /* Mark all executing requests as skipped. */
+ list_for_each_entry(rq, &se->requests, sched.link)
+ i915_request_put(i915_request_mark_eio(rq));
+
+ /* Flush the queued requests to the timeline list (for retiring). */
+ while ((rb = rb_first_cached(&se->queue))) {
+ struct i915_priolist *p = to_priolist(rb);
+
+ priolist_for_each_request_consume(rq, rn, p) {
+ i915_request_put(i915_request_mark_eio(rq));
+ __i915_request_submit(rq);
+ }
+
+ rb_erase_cached(&p->node, &se->queue);
+ i915_priolist_free(p);
+ }
+ GEM_BUG_ON(!i915_sched_is_idle(se));
+
+ /* On-hold requests will be flushed to timeline upon their release */
+ list_for_each_entry(rq, &se->hold, sched.link)
+ i915_request_put(i915_request_mark_eio(rq));
+
+ /* Remaining _unready_ requests will be nop'ed when submitted */
+}
+
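__i915_sched_cancel_queue() expects se->lock to be held (see the lockdep
assert), and each converted reset path wraps it the same way, signalling
breadcrumbs once the lock is dropped. A sketch of a caller, mirroring
reset_cancel() in the ring backend above:

	spin_lock_irqsave(&se->lock, flags);
	__i915_sched_cancel_queue(se);
	spin_unlock_irqrestore(&se->lock, flags);

	intel_engine_signal_breadcrumbs(engine);
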
void i915_sched_node_init(struct i915_sched_node *node)
{
spin_lock_init(&node->lock);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index ebd93ae303b4..6b80df7feb78 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -12,6 +12,7 @@
#include <linux/kernel.h>
#include "i915_scheduler_types.h"
+#include "i915_request.h"
struct drm_printer;
struct intel_engine_cs;
@@ -48,6 +49,8 @@ void i915_sched_init(struct i915_sched *se,
const char *name,
unsigned long mask,
unsigned int subclass);
+void i915_sched_park(struct i915_sched *se);
+void i915_sched_fini(struct i915_sched *se);
void i915_request_set_priority(struct i915_request *request, int prio);
@@ -68,6 +71,8 @@ bool i915_sched_suspend_request(struct intel_engine_cs *engine,
void i915_sched_resume_request(struct intel_engine_cs *engine,
struct i915_request *rq);
+void __i915_sched_cancel_queue(struct i915_sched *se);
+
void __i915_priolist_free(struct i915_priolist *p);
static inline void i915_priolist_free(struct i915_priolist *p)
{
@@ -75,6 +80,18 @@ static inline void i915_priolist_free(struct i915_priolist *p)
__i915_priolist_free(p);
}
+static inline bool i915_sched_is_idle(const struct i915_sched *se)
+{
+ return RB_EMPTY_ROOT(&se->queue.rb_root);
+}
+
+static inline bool
+i915_sched_is_last_request(const struct i915_sched *se,
+ const struct i915_request *rq)
+{
+ return list_is_last_rcu(&rq->sched.link, &se->requests);
+}
+
void i915_request_show_with_schedule(struct drm_printer *m,
const struct i915_request *rq,
const char *prefix,
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index b7ee122d4f28..2b34d9882835 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -30,12 +30,46 @@ struct i915_sched {
struct list_head requests; /* active requests, on HW */
struct list_head hold; /* ready requests, but on hold */
+ /**
+ * @queue: queue of requests, in priority lists
+ *
+ * During request construction, we build a list of fence dependencies
+ * that must be completed before the fence is executed. Then when the
+ * request is committed, it waits for all of those fences before it is
+ * submitted to the scheduler.
+ *
+ * The scheduler only sees requests that are ready to be executed.
+ * However, the number that we may execute at any one time may be
+ * limited, and so we store them in the @queue. This queue is sorted
+ * in execution order, such that when the backend may submit more
+ * requests to the HW, it can fill the HW submission ports from the
+ * head of the queue. It also allows the backends to inspect the head
+ * of the queue against the currently active requests to see if
+ * we need to preempt the current execution in order to run higher
+ * priority requests from the queue.
+ *
+ * In the simplest cases where the HW can consume everything, the
+ * @queue is only used to transfer requests from the scheduler
+ * frontend to the backend.
+ */
+ struct rb_root_cached queue;
+
/* Inter-engine scheduling delegate */
struct i915_sched_ipi {
struct i915_request *list;
struct work_struct work;
} ipi;
+ /**
+ * @default_priolist: priority list for I915_PRIORITY_NORMAL
+ */
+ struct i915_priolist default_priolist;
+
+ /**
+ * @no_priolist: priority lists disabled
+ */
+ bool no_priolist;
+
/* Pretty device names for debug messages */
struct {
struct device *dev;
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 88ebe92bed2d..956a9af6f7e5 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -77,8 +77,7 @@ static int all_engines(struct drm_i915_private *i915,
return 0;
}
-static bool check_context_order(struct i915_sched *se,
- struct intel_engine_cs *engine)
+static bool check_context_order(struct i915_sched *se)
{
u64 last_seqno, last_context;
unsigned long count;
@@ -93,7 +92,7 @@ static bool check_context_order(struct i915_sched *se,
last_context = 0;
last_seqno = 0;
last_prio = 0;
- for (rb = rb_first_cached(&engine->execlists.queue); rb; rb = rb_next(rb)) {
+ for (rb = rb_first_cached(&se->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
struct i915_request *rq;
@@ -175,7 +174,7 @@ static int __single_chain(struct intel_engine_cs *engine, unsigned long length,
intel_engine_flush_submission(engine);
execlists_active_lock_bh(&engine->execlists);
- if (fn(rq, count, count - 1) && !check_context_order(se, engine))
+ if (fn(rq, count, count - 1) && !check_context_order(se))
err = -EINVAL;
execlists_active_unlock_bh(&engine->execlists);
@@ -260,7 +259,7 @@ static int __wide_chain(struct intel_engine_cs *engine, unsigned long width,
intel_engine_flush_submission(engine);
execlists_active_lock_bh(&engine->execlists);
- if (fn(rq[i - 1], i, count) && !check_context_order(se, engine))
+ if (fn(rq[i - 1], i, count) && !check_context_order(se))
err = -EINVAL;
execlists_active_unlock_bh(&engine->execlists);
@@ -349,7 +348,7 @@ static int __inv_chain(struct intel_engine_cs *engine, unsigned long width,
intel_engine_flush_submission(engine);
execlists_active_lock_bh(&engine->execlists);
- if (fn(rq[i - 1], i, count) && !check_context_order(se, engine))
+ if (fn(rq[i - 1], i, count) && !check_context_order(se))
err = -EINVAL;
execlists_active_unlock_bh(&engine->execlists);
@@ -455,7 +454,7 @@ static int __sparse_chain(struct intel_engine_cs *engine, unsigned long width,
intel_engine_flush_submission(engine);
execlists_active_lock_bh(&engine->execlists);
- if (fn(rq[i - 1], i, count) && !check_context_order(se, engine))
+ if (fn(rq[i - 1], i, count) && !check_context_order(se))
err = -EINVAL;
execlists_active_unlock_bh(&engine->execlists);
--
2.20.1
* [Intel-gfx] [CI 4/5] drm/i915: Move tasklet from execlists to sched
2021-02-06 1:20 [Intel-gfx] [CI 1/5] drm/i915: Wrap access to intel_engine.active Chris Wilson
2021-02-06 1:20 ` [Intel-gfx] [CI 2/5] drm/i915: Move common active lists from engine to i915_scheduler Chris Wilson
2021-02-06 1:20 ` [Intel-gfx] [CI 3/5] drm/i915: Move scheduler queue Chris Wilson
@ 2021-02-06 1:20 ` Chris Wilson
2021-02-06 1:20 ` [Intel-gfx] [CI 5/5] drm/i915/gt: Only kick the scheduler on timeslice/preemption change Chris Wilson
` (4 subsequent siblings)
7 siblings, 0 replies; 9+ messages in thread
From: Chris Wilson @ 2021-02-06 1:20 UTC (permalink / raw)
To: intel-gfx
Move the scheduling tasklet out of the execlists backend into the
per-engine scheduling bookkeeping.
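The conversion for callers is mechanical: direct pokes at
engine->execlists.tasklet become calls to the scheduler helpers added in
intel_engine.h below. For example (sketch):

	/* before */
	tasklet_hi_schedule(&engine->execlists.tasklet);
	intel_engine_flush_submission(engine);

	/* after */
	intel_engine_kick_scheduler(engine);
	intel_engine_flush_scheduler(engine);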
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/gt/intel_engine.h | 33 +++-----
drivers/gpu/drm/i915/gt/intel_engine_cs.c | 33 ++------
.../gpu/drm/i915/gt/intel_engine_heartbeat.c | 2 +-
drivers/gpu/drm/i915/gt/intel_engine_pm.c | 2 +-
drivers/gpu/drm/i915/gt/intel_engine_types.h | 5 --
.../drm/i915/gt/intel_execlists_submission.c | 84 +++++++------------
drivers/gpu/drm/i915/gt/intel_gt_requests.c | 2 +-
drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 2 +-
drivers/gpu/drm/i915/gt/selftest_execlists.c | 49 +++++------
drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 3 +-
drivers/gpu/drm/i915/gt/selftest_lrc.c | 13 +--
drivers/gpu/drm/i915/gt/selftest_reset.c | 3 +-
.../gpu/drm/i915/gt/uc/intel_guc_submission.c | 25 ++----
drivers/gpu/drm/i915/i915_request.c | 2 +-
drivers/gpu/drm/i915/i915_scheduler.c | 45 +++++++++-
drivers/gpu/drm/i915/i915_scheduler.h | 46 ++++++++++
drivers/gpu/drm/i915/i915_scheduler_types.h | 9 ++
drivers/gpu/drm/i915/selftests/i915_request.c | 10 +--
.../gpu/drm/i915/selftests/i915_scheduler.c | 24 +++---
drivers/gpu/drm/i915/selftests/igt_spinner.c | 2 +-
20 files changed, 213 insertions(+), 181 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index cc2df80eb449..52bba16c62e8 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -12,6 +12,7 @@
#include "i915_pmu.h"
#include "i915_reg.h"
#include "i915_request.h"
+#include "i915_scheduler.h"
#include "i915_selftest.h"
#include "intel_engine_types.h"
#include "intel_gt_types.h"
@@ -123,20 +124,6 @@ execlists_active(const struct intel_engine_execlists *execlists)
return active;
}
-static inline void
-execlists_active_lock_bh(struct intel_engine_execlists *execlists)
-{
- local_bh_disable(); /* prevent local softirq and lock recursion */
- tasklet_lock(&execlists->tasklet);
-}
-
-static inline void
-execlists_active_unlock_bh(struct intel_engine_execlists *execlists)
-{
- tasklet_unlock(&execlists->tasklet);
- local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
-}
-
static inline u32
intel_read_status_page(const struct intel_engine_cs *engine, int reg)
{
@@ -231,12 +218,6 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine,
bool intel_engines_are_idle(struct intel_gt *gt);
bool intel_engine_is_idle(struct intel_engine_cs *engine);
-void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync);
-static inline void intel_engine_flush_submission(struct intel_engine_cs *engine)
-{
- __intel_engine_flush_submission(engine, true);
-}
-
void intel_engines_reset_default_submission(struct intel_gt *gt);
bool intel_engine_can_store_dword(struct intel_engine_cs *engine);
@@ -283,4 +264,16 @@ intel_engine_has_heartbeat(const struct intel_engine_cs *engine)
return READ_ONCE(engine->props.heartbeat_interval_ms);
}
+static inline void
+intel_engine_kick_scheduler(struct intel_engine_cs *engine)
+{
+ i915_sched_kick(intel_engine_get_scheduler(engine));
+}
+
+static inline void
+intel_engine_flush_scheduler(struct intel_engine_cs *engine)
+{
+ i915_sched_flush(intel_engine_get_scheduler(engine));
+}
+
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 2ce65189d5e2..3b299339fb62 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -918,7 +918,6 @@ int intel_engines_init(struct intel_gt *gt)
void intel_engine_cleanup_common(struct intel_engine_cs *engine)
{
i915_sched_fini(intel_engine_get_scheduler(engine));
- tasklet_kill(&engine->execlists.tasklet); /* flush the callback */
intel_breadcrumbs_free(engine->breadcrumbs);
@@ -1201,27 +1200,6 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
return idle;
}
-void __intel_engine_flush_submission(struct intel_engine_cs *engine, bool sync)
-{
- struct tasklet_struct *t = &engine->execlists.tasklet;
-
- if (!t->callback)
- return;
-
- local_bh_disable();
- if (tasklet_trylock(t)) {
- /* Must wait for any GPU reset in progress. */
- if (__tasklet_is_enabled(t))
- t->callback(t);
- tasklet_unlock(t);
- }
- local_bh_enable();
-
- /* Synchronise and wait for the tasklet on another CPU */
- if (sync)
- tasklet_unlock_wait(t);
-}
-
/**
* intel_engine_is_idle() - Report if the engine has finished process all work
* @engine: the intel_engine_cs
@@ -1242,7 +1220,7 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine)
/* Waiting to drain ELSP? */
synchronize_hardirq(to_pci_dev(engine->i915->drm.dev)->irq);
- intel_engine_flush_submission(engine);
+ i915_sched_flush(se);
/* ELSP is empty, but there are ready requests? E.g. after reset */
if (!i915_sched_is_idle(se))
@@ -1457,6 +1435,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
if (intel_engine_uses_guc(engine)) {
/* nothing to print yet */
} else if (HAS_EXECLISTS(dev_priv)) {
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct i915_request * const *port, *rq;
const u32 *hws =
&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX];
@@ -1466,8 +1445,8 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n",
yesno(test_bit(TASKLET_STATE_SCHED,
- &engine->execlists.tasklet.state)),
- enableddisabled(!atomic_read(&engine->execlists.tasklet.count)),
+ &se->tasklet.state)),
+ enableddisabled(!atomic_read(&se->tasklet.count)),
repr_timer(&engine->execlists.preempt),
repr_timer(&engine->execlists.timer));
@@ -1491,7 +1470,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
idx, hws[idx * 2], hws[idx * 2 + 1]);
}
- execlists_active_lock_bh(execlists);
+ i915_sched_lock_bh(se);
rcu_read_lock();
for (port = execlists->active; (rq = *port); port++) {
char hdr[160];
@@ -1522,7 +1501,7 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
i915_request_show(m, rq, hdr, 0);
}
rcu_read_unlock();
- execlists_active_unlock_bh(execlists);
+ i915_sched_unlock_bh(se);
} else if (INTEL_GEN(dev_priv) > 6) {
drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n",
ENGINE_READ(engine, RING_PP_DIR_BASE));
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 93741a65924a..b6dbd1150ba9 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -88,7 +88,7 @@ static void heartbeat(struct work_struct *wrk)
unsigned long serial;
/* Just in case everything has gone horribly wrong, give it a kick */
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
rq = engine->heartbeat.systole;
if (rq && i915_request_completed(rq)) {
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 3510c9236334..27d9d17b35cb 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -53,7 +53,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
/* Flush all pending HW writes before we touch the context */
while (unlikely(intel_context_inflight(ce)))
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
/* First poison the image to verify we never fully trust it */
dbg_poison_ce(ce);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 9ea38b01121a..d5f917462f0e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -138,11 +138,6 @@ struct st_preempt_hang {
* driver and the hardware state for execlist mode of submission.
*/
struct intel_engine_execlists {
- /**
- * @tasklet: softirq tasklet for bottom handler
- */
- struct tasklet_struct tasklet;
-
/**
* @timer: kick the current context if its timeslice expires
*/
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 12e20a8eb5d5..f8dca5f2f9b2 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -515,7 +515,7 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
resubmit_virtual_request(rq, ve);
if (READ_ONCE(ve->request))
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
+ intel_engine_kick_scheduler(&ve->base);
}
static void __execlists_schedule_out(struct i915_request * const rq,
@@ -681,12 +681,6 @@ trace_ports(const struct intel_engine_execlists *execlists,
dump_port(p1, sizeof(p1), ", ", ports[1]));
}
-static bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
- return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
static __maybe_unused noinline bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
const char *msg)
@@ -701,7 +695,7 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
trace_ports(execlists, msg, execlists->pending);
/* We may be messing around with the lists during reset, lalala */
- if (reset_in_progress(execlists))
+ if (__i915_sched_tasklet_is_disabled(intel_engine_get_scheduler(engine)))
return true;
if (!execlists->pending[0]) {
@@ -1088,7 +1082,7 @@ static void start_timeslice(struct intel_engine_cs *engine)
* its timeslice, so recheck.
*/
if (!timer_pending(&el->timer))
- tasklet_hi_schedule(&el->tasklet);
+ intel_engine_kick_scheduler(engine);
return;
}
@@ -1664,14 +1658,6 @@ process_csb(struct intel_engine_cs *engine, struct i915_request **inactive)
struct i915_request **prev;
u8 head, tail;
- /*
- * As we modify our execlists state tracking we require exclusive
- * access. Either we are inside the tasklet, or the tasklet is disabled
- * and we assume that is only inside the reset paths and so serialised.
- */
- GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) &&
- !reset_in_progress(execlists));
-
/*
* Note that csb_write, csb_status may be either in HWSP or mmio.
* When reading from the csb_write mmio register, we have to be
@@ -2067,6 +2053,7 @@ static void execlists_capture(struct intel_engine_cs *engine)
static noinline void execlists_reset(struct intel_engine_cs *engine)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
unsigned long eir = fetch_and_zero(&engine->execlists.error_interrupt);
@@ -2090,13 +2077,13 @@ static noinline void execlists_reset(struct intel_engine_cs *engine)
ENGINE_TRACE(engine, "reset for %s\n", msg);
/* Mark this tasklet as disabled to avoid waiting for it to complete */
- tasklet_disable_nosync(&engine->execlists.tasklet);
+ tasklet_disable_nosync(&se->tasklet);
ring_set_paused(engine, 1); /* Freeze the current request in place */
execlists_capture(engine);
intel_engine_reset(engine, msg);
- tasklet_enable(&engine->execlists.tasklet);
+ tasklet_enable(&se->tasklet);
clear_and_wake_up_bit(bit, lock);
}
@@ -2120,10 +2107,16 @@ static bool preempt_timeout(const struct intel_engine_cs *const engine)
static void execlists_submission_tasklet(struct tasklet_struct *t)
{
struct intel_engine_cs * const engine =
- from_tasklet(engine, t, execlists.tasklet);
+ from_tasklet(engine, t, sched.tasklet);
struct i915_request *post[2 * EXECLIST_MAX_PORTS];
struct i915_request **inactive;
+ /*
+ * As we modify our execlists state tracking we require exclusive
+ * access. Either we are inside the tasklet, or the tasklet is disabled
+ * and we assume that is only inside the reset paths and so serialised.
+ */
+
rcu_read_lock();
inactive = process_csb(engine, post);
GEM_BUG_ON(inactive - post > ARRAY_SIZE(post));
@@ -2181,13 +2174,15 @@ static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir)
intel_engine_signal_breadcrumbs(engine);
if (tasklet)
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_kick_scheduler(engine);
}
static void __execlists_kick(struct intel_engine_execlists *execlists)
{
- /* Kick the tasklet for some interrupt coalescing and reset handling */
- tasklet_hi_schedule(&execlists->tasklet);
+ struct intel_engine_cs *engine =
+ container_of(execlists, typeof(*engine), execlists);
+
+ intel_engine_kick_scheduler(engine);
}
#define execlists_kick(t, member) \
@@ -2490,11 +2485,6 @@ static int execlists_resume(struct intel_engine_cs *engine)
static void execlists_reset_prepare(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- ENGINE_TRACE(engine, "depth<-%d\n",
- atomic_read(&execlists->tasklet.count));
-
/*
* Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
@@ -2504,8 +2494,7 @@ static void execlists_reset_prepare(struct intel_engine_cs *engine)
* Turning off the execlists->tasklet until the reset is over
* prevents the race.
*/
- __tasklet_disable_sync_once(&execlists->tasklet);
- GEM_BUG_ON(!reset_in_progress(execlists));
+ i915_sched_disable_tasklet(intel_engine_get_scheduler(engine));
/*
* We stop engines, otherwise we might get failed reset and a
@@ -2657,7 +2646,7 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
static void nop_submission_tasklet(struct tasklet_struct *t)
{
struct intel_engine_cs * const engine =
- from_tasklet(engine, t, execlists.tasklet);
+ from_tasklet(engine, t, sched.tasklet);
/* The driver is wedged; don't process any more events. */
WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
@@ -2720,8 +2709,8 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
execlists->queue_priority_hint = INT_MIN;
se->queue = RB_ROOT_CACHED;
- GEM_BUG_ON(__tasklet_is_enabled(&execlists->tasklet));
- execlists->tasklet.callback = nop_submission_tasklet;
+ GEM_BUG_ON(__tasklet_is_enabled(&se->tasklet));
+ se->tasklet.callback = nop_submission_tasklet;
spin_unlock_irqrestore(&se->lock, flags);
rcu_read_unlock();
@@ -2731,8 +2720,6 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
static void execlists_reset_finish(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
/*
* After a GPU reset, we may have requests to replay. Do so now while
* we still have the forcewake to be sure that the GPU is not allowed
@@ -2743,14 +2730,8 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
* reset as the next level of recovery, and as a final resort we
* will declare the device wedged.
*/
- GEM_BUG_ON(!reset_in_progress(execlists));
- /* And kick in case we missed a new request submission. */
- if (__tasklet_enable(&execlists->tasklet))
- __execlists_kick(execlists);
-
- ENGINE_TRACE(engine, "depth->%d\n",
- atomic_read(&execlists->tasklet.count));
+ i915_sched_enable_tasklet(intel_engine_get_scheduler(engine));
}
static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
@@ -2783,7 +2764,7 @@ static bool can_preempt(struct intel_engine_cs *engine)
static void execlists_set_default_submission(struct intel_engine_cs *engine)
{
engine->submit_request = i915_request_enqueue;
- engine->execlists.tasklet.callback = execlists_submission_tasklet;
+ engine->sched.tasklet.callback = execlists_submission_tasklet;
}
static void execlists_shutdown(struct intel_engine_cs *engine)
@@ -2791,7 +2772,6 @@ static void execlists_shutdown(struct intel_engine_cs *engine)
/* Synchronise with residual timers and any softirq they raise */
del_timer_sync(&engine->execlists.timer);
del_timer_sync(&engine->execlists.preempt);
- tasklet_kill(&engine->execlists.tasklet);
}
static void execlists_release(struct intel_engine_cs *engine)
@@ -2908,7 +2888,7 @@ static void init_execlists(struct intel_engine_cs *engine)
struct intel_uncore *uncore = engine->uncore;
u32 base = engine->mmio_base;
- tasklet_setup(&engine->execlists.tasklet, execlists_submission_tasklet);
+ tasklet_setup(&engine->sched.tasklet, execlists_submission_tasklet);
timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
@@ -2997,7 +2977,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
* rbtrees as in the case it is running in parallel, it may reinsert
* the rb_node into a sibling.
*/
- tasklet_kill(&ve->base.execlists.tasklet);
+ i915_sched_kill_tasklet(se);
/* Decouple ourselves from the siblings, no more access allowed. */
for (n = 0; n < ve->num_siblings; n++) {
@@ -3015,7 +2995,7 @@ static void rcu_virtual_context_destroy(struct work_struct *wrk)
spin_unlock_irq(&sibling->sched.lock);
}
- GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.execlists.tasklet));
+ GEM_BUG_ON(__tasklet_is_scheduled(&se->tasklet));
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
lrc_fini(&ve->context);
@@ -3160,7 +3140,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
static void virtual_submission_tasklet(struct tasklet_struct *t)
{
struct virtual_engine * const ve =
- from_tasklet(ve, t, base.execlists.tasklet);
+ from_tasklet(ve, t, base.sched.tasklet);
const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
@@ -3231,7 +3211,7 @@ static void virtual_submission_tasklet(struct tasklet_struct *t)
GEM_BUG_ON(RB_EMPTY_NODE(&node->rb));
node->prio = prio;
if (first && prio > sibling->execlists.queue_priority_hint)
- tasklet_hi_schedule(&sibling->execlists.tasklet);
+ i915_sched_kick(se);
unlock_engine:
spin_unlock_irq(&se->lock);
@@ -3273,7 +3253,7 @@ static void virtual_submit_request(struct i915_request *rq)
GEM_BUG_ON(!list_empty(virtual_queue(ve)));
list_move_tail(&rq->sched.link, virtual_queue(ve));
- tasklet_hi_schedule(&ve->base.execlists.tasklet);
+ intel_engine_kick_scheduler(&ve->base);
unlock:
spin_unlock_irqrestore(&se->lock, flags);
@@ -3370,7 +3350,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
INIT_LIST_HEAD(virtual_queue(ve));
ve->base.execlists.queue_priority_hint = INT_MIN;
- tasklet_setup(&ve->base.execlists.tasklet, virtual_submission_tasklet);
+ tasklet_setup(&ve->base.sched.tasklet, virtual_submission_tasklet);
intel_context_init(&ve->context, &ve->base);
@@ -3398,7 +3378,7 @@ intel_execlists_create_virtual(struct intel_engine_cs **siblings,
* layering if we handle cloning of the requests and
* submitting a copy into each backend.
*/
- if (sibling->execlists.tasklet.callback !=
+ if (sibling->sched.tasklet.callback !=
execlists_submission_tasklet) {
err = -ENODEV;
goto err_put;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 14c7b18090f3..36ec97f79174 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -43,7 +43,7 @@ static bool flush_submission(struct intel_gt *gt, long timeout)
return false;
for_each_engine(engine, gt, id) {
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
/* Flush the background retirement and idle barriers */
flush_work(&engine->retire_work);
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
index 2c898622bdfb..e0b502209a78 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c
@@ -103,7 +103,7 @@ static int __measure_timestamps(struct intel_context *ce,
intel_ring_advance(rq, cs);
i915_request_get(rq);
i915_request_add(rq);
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
/* Wait for the request to start executing, that then waits for us */
while (READ_ONCE(sema[2]) == 0)
diff --git a/drivers/gpu/drm/i915/gt/selftest_execlists.c b/drivers/gpu/drm/i915/gt/selftest_execlists.c
index 5266532c16db..f625c29023ea 100644
--- a/drivers/gpu/drm/i915/gt/selftest_execlists.c
+++ b/drivers/gpu/drm/i915/gt/selftest_execlists.c
@@ -43,7 +43,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
unsigned long timeout)
{
/* Ignore our own attempts to suppress excess tasklets */
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_kick_scheduler(engine);
timeout += jiffies;
do {
@@ -53,7 +53,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
return 0;
/* Wait until the HW has acknowledged the submission (or err) */
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
@@ -72,7 +72,7 @@ static int wait_for_reset(struct intel_engine_cs *engine,
do {
cond_resched();
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
if (READ_ONCE(engine->execlists.pending[0]))
continue;
@@ -288,7 +288,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
i915_request_put(rq[0]);
err_ce:
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
igt_spinner_end(&spin);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
if (IS_ERR_OR_NULL(ce[n]))
@@ -409,10 +409,10 @@ static int live_unlite_ring(void *arg)
}
i915_request_add(tmp);
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
n++;
}
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
pr_debug("%s: Filled ring with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
engine->name, n,
ce[0]->ring->size,
@@ -449,7 +449,7 @@ static int live_unlite_ring(void *arg)
ce[1]->ring->tail, ce[1]->ring->emit);
err_ce:
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
igt_spinner_end(&spin);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
if (IS_ERR_OR_NULL(ce[n]))
@@ -568,6 +568,7 @@ static int live_hold_reset(void *arg)
return -ENOMEM;
for_each_engine(engine, gt, id) {
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
struct intel_context *ce;
struct i915_request *rq;
@@ -602,9 +603,9 @@ static int live_hold_reset(void *arg)
err = -EBUSY;
goto out;
}
- tasklet_disable(&engine->execlists.tasklet);
+ tasklet_disable(&se->tasklet);
- engine->execlists.tasklet.callback(&engine->execlists.tasklet);
+ se->tasklet.callback(&se->tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
i915_request_get(rq);
@@ -614,7 +615,7 @@ static int live_hold_reset(void *arg)
__intel_engine_reset_bh(engine, NULL);
GEM_BUG_ON(rq->fence.error != -EIO);
- tasklet_enable(&engine->execlists.tasklet);
+ tasklet_enable(&se->tasklet);
clear_and_wake_up_bit(I915_RESET_ENGINE + id,
>->reset.flags);
local_bh_enable();
@@ -762,7 +763,7 @@ static int live_error_interrupt(void *arg)
}
/* Kick the tasklet to process the error */
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
if (client[i]->fence.error != p->error[i]) {
pr_err("%s: %s request (%s) with wrong error code: %d\n",
engine->name,
@@ -1176,8 +1177,8 @@ static int live_timeslice_rewind(void *arg)
while (i915_request_is_active(rq[A2])) { /* semaphore yield! */
/* Wait for the timeslice to kick in */
del_timer(&engine->execlists.timer);
- tasklet_hi_schedule(&engine->execlists.tasklet);
- intel_engine_flush_submission(engine);
+ intel_engine_kick_scheduler(engine);
+ intel_engine_flush_scheduler(engine);
}
/* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
GEM_BUG_ON(!i915_request_is_active(rq[A1]));
@@ -1350,7 +1351,7 @@ static int live_timeslice_queue(void *arg)
/* Wait until we ack the release_queue and start timeslicing */
do {
cond_resched();
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
} while (READ_ONCE(engine->execlists.pending[0]));
/* Timeslice every jiffy, so within 2 we should signal */
@@ -2320,9 +2321,9 @@ static int __cancel_fail(struct live_preempt_cancel *arg)
/* force preempt reset [failure] */
while (!engine->execlists.pending[0])
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
del_timer_sync(&engine->execlists.preempt);
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
cancel_reset_timeout(engine);
@@ -2826,10 +2827,10 @@ static int __live_preempt_ring(struct intel_engine_cs *engine,
}
i915_request_add(tmp);
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
n++;
}
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
pr_debug("%s: Filled %d with %d nop tails {size:%x, tail:%x, emit:%x, rq.tail:%x}\n",
engine->name, queue_sz, n,
ce[0]->ring->size,
@@ -2863,7 +2864,7 @@ static int __live_preempt_ring(struct intel_engine_cs *engine,
ce[1]->ring->tail, ce[1]->ring->emit);
err_ce:
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
igt_spinner_end(spin);
for (n = 0; n < ARRAY_SIZE(ce); n++) {
if (IS_ERR_OR_NULL(ce[n]))
@@ -3398,7 +3399,7 @@ static int live_preempt_timeout(void *arg)
i915_request_get(rq);
i915_request_add(rq);
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
engine->props.preempt_timeout_ms = saved_timeout;
if (i915_request_wait(rq, 0, HZ / 10) < 0) {
@@ -4438,7 +4439,7 @@ static int bond_virtual_engine(struct intel_gt *gt,
}
}
onstack_fence_fini(&fence);
- intel_engine_flush_submission(master);
+ intel_engine_flush_scheduler(master);
igt_spinner_end(&spin);
if (i915_request_wait(rq[0], 0, HZ / 10) < 0) {
@@ -4577,9 +4578,9 @@ static int reset_virtual_engine(struct intel_gt *gt,
err = -EBUSY;
goto out_heartbeat;
}
- tasklet_disable(&engine->execlists.tasklet);
+ tasklet_disable(&se->tasklet);
- engine->execlists.tasklet.callback(&engine->execlists.tasklet);
+ se->tasklet.callback(&se->tasklet);
GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
/* Fake a preemption event; failed of course */
@@ -4596,7 +4597,7 @@ static int reset_virtual_engine(struct intel_gt *gt,
GEM_BUG_ON(rq->fence.error != -EIO);
/* Release our grasp on the engine, letting CS flow again */
- tasklet_enable(&engine->execlists.tasklet);
+ tasklet_enable(&se->tasklet);
clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, >->reset.flags);
local_bh_enable();
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 8cad102922e7..cdb0ceff3be1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1701,7 +1701,8 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine,
const struct igt_atomic_section *p,
const char *mode)
{
- struct tasklet_struct * const t = &engine->execlists.tasklet;
+ struct tasklet_struct * const t =
+ &intel_engine_get_scheduler(engine)->tasklet;
int err;
GEM_TRACE("i915_reset_engine(%s:%s) under %s\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index e97adf1b7729..279091e41b41 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -49,7 +49,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
unsigned long timeout)
{
/* Ignore our own attempts to suppress excess tasklets */
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_kick_scheduler(engine);
timeout += jiffies;
do {
@@ -59,7 +59,7 @@ static int wait_for_submit(struct intel_engine_cs *engine,
return 0;
/* Wait until the HW has acknowledged the submission (or err) */
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
if (!READ_ONCE(engine->execlists.pending[0]) && is_active(rq))
return 0;
@@ -417,7 +417,7 @@ static int __live_lrc_state(struct intel_engine_cs *engine,
if (err)
goto err_rq;
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
expected[RING_TAIL_IDX] = ce->ring->tail;
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
@@ -1852,17 +1852,18 @@ static int live_lrc_indirect_ctx_bb(void *arg)
static void garbage_reset(struct intel_engine_cs *engine,
struct i915_request *rq)
{
+ struct i915_sched *se = intel_engine_get_scheduler(engine);
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
local_bh_disable();
if (!test_and_set_bit(bit, lock)) {
- tasklet_disable(&engine->execlists.tasklet);
+ tasklet_disable(&se->tasklet);
if (!rq->fence.error)
__intel_engine_reset_bh(engine, NULL);
- tasklet_enable(&engine->execlists.tasklet);
+ tasklet_enable(&se->tasklet);
clear_and_wake_up_bit(bit, lock);
}
local_bh_enable();
@@ -1923,7 +1924,7 @@ static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
intel_context_set_banned(ce);
garbage_reset(engine, hang);
- intel_engine_flush_submission(engine);
+ intel_engine_flush_scheduler(engine);
if (!hang->fence.error) {
i915_request_put(hang);
pr_err("%s: corrupted context was not reset\n",
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 8784257ec808..08594309a96d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -321,7 +321,8 @@ static int igt_atomic_engine_reset(void *arg)
goto out_unlock;
for_each_engine(engine, gt, id) {
- struct tasklet_struct *t = &engine->execlists.tasklet;
+ struct tasklet_struct *t =
+ &intel_engine_get_scheduler(engine)->tasklet;
if (t->func)
tasklet_disable(t);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 7b1780d1652f..cf99715e194d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -241,9 +241,9 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
static void guc_submission_tasklet(struct tasklet_struct *t)
{
+ struct i915_sched *se = from_tasklet(se, t, tasklet);
struct intel_engine_cs * const engine =
- from_tasklet(engine, t, execlists.tasklet);
- struct i915_sched *se = intel_engine_get_scheduler(engine);
+ container_of(se, typeof(*engine), sched);
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port, *rq;
unsigned long flags;
@@ -271,16 +271,12 @@ static void cs_irq_handler(struct intel_engine_cs *engine, u16 iir)
{
if (iir & GT_RENDER_USER_INTERRUPT) {
intel_engine_signal_breadcrumbs(engine);
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_kick_scheduler(engine);
}
}
static void guc_reset_prepare(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- ENGINE_TRACE(engine, "\n");
-
/*
* Prevent request submission to the hardware until we have
* completed the reset in i915_gem_reset_finish(). If a request
@@ -290,7 +286,7 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
* Turning off the execlists->tasklet until the reset is over
* prevents the race.
*/
- __tasklet_disable_sync_once(&execlists->tasklet);
+ i915_sched_disable_tasklet(intel_engine_get_scheduler(engine));
}
static void guc_reset_state(struct intel_context *ce,
@@ -373,14 +369,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
static void guc_reset_finish(struct intel_engine_cs *engine)
{
- struct intel_engine_execlists * const execlists = &engine->execlists;
-
- if (__tasklet_enable(&execlists->tasklet))
- /* And kick in case we missed a new request submission. */
- tasklet_hi_schedule(&execlists->tasklet);
-
- ENGINE_TRACE(engine, "depth->%d\n",
- atomic_read(&execlists->tasklet.count));
+ i915_sched_enable_tasklet(intel_engine_get_scheduler(engine));
}
/*
@@ -576,8 +565,6 @@ static void guc_release(struct intel_engine_cs *engine)
{
engine->sanitize = NULL; /* no longer in control, nothing to sanitize */
- tasklet_kill(&engine->execlists.tasklet);
-
intel_engine_cleanup_common(engine);
lrc_fini_wa_ctx(engine);
}
@@ -654,7 +641,7 @@ int intel_guc_submission_setup(struct intel_engine_cs *engine)
*/
GEM_BUG_ON(INTEL_GEN(i915) < 11);
- tasklet_setup(&engine->execlists.tasklet, guc_submission_tasklet);
+ tasklet_setup(&engine->sched.tasklet, guc_submission_tasklet);
guc_default_vfuncs(engine);
guc_default_irqs(engine);
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index d736c1aae6e5..1b52dcaa023d 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -1847,7 +1847,7 @@ long i915_request_wait(struct i915_request *rq,
* for unhappy HW.
*/
if (i915_request_is_ready(rq))
- __intel_engine_flush_submission(rq->engine, false);
+ __i915_sched_flush(i915_request_get_scheduler(rq), false);
for (;;) {
set_current_state(state);
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index 4d281e990a86..ba308e937109 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -144,6 +144,7 @@ void i915_sched_fini(struct i915_sched *se)
{
GEM_BUG_ON(!list_empty(&se->requests));
+ tasklet_kill(&se->tasklet); /* flush the callback */
i915_sched_park(se);
}
@@ -345,7 +346,7 @@ static void kick_submission(struct intel_engine_cs *engine,
engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ intel_engine_kick_scheduler(engine);
}
static void ipi_priority(struct i915_request *rq, int prio)
@@ -636,7 +637,7 @@ void i915_request_enqueue(struct i915_request *rq)
GEM_BUG_ON(list_empty(&rq->sched.link));
spin_unlock_irqrestore(&se->lock, flags);
if (kick)
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ i915_sched_kick(se);
}
struct i915_request *
@@ -774,7 +775,7 @@ void __i915_sched_resume_request(struct intel_engine_cs *engine,
if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
engine->execlists.queue_priority_hint = rq_prio(rq);
- tasklet_hi_schedule(&engine->execlists.tasklet);
+ i915_sched_kick(se);
}
if (!i915_request_on_hold(rq))
@@ -1002,6 +1003,44 @@ void i915_sched_node_retire(struct i915_sched_node *node)
}
}
+void i915_sched_disable_tasklet(struct i915_sched *se)
+{
+ __tasklet_disable_sync_once(&se->tasklet);
+ GEM_BUG_ON(!__i915_sched_tasklet_is_disabled(se));
+ SCHED_TRACE(se, "disable:%d\n", atomic_read(&se->tasklet.count));
+}
+
+void i915_sched_enable_tasklet(struct i915_sched *se)
+{
+ SCHED_TRACE(se, "enable:%d\n", atomic_read(&se->tasklet.count));
+ GEM_BUG_ON(!__i915_sched_tasklet_is_disabled(se));
+
+ /* And kick in case we missed a new request submission. */
+ if (__tasklet_enable(&se->tasklet))
+ i915_sched_kick(se);
+}
+
+void __i915_sched_flush(struct i915_sched *se, bool sync)
+{
+ struct tasklet_struct *t = &se->tasklet;
+
+ if (!t->callback)
+ return;
+
+ local_bh_disable();
+ if (tasklet_trylock(t)) {
+ /* Must wait for any GPU reset in progress. */
+ if (__tasklet_is_enabled(t))
+ t->callback(t);
+ tasklet_unlock(t);
+ }
+ local_bh_enable();
+
+ /* Synchronise and wait for the tasklet on another CPU */
+ if (sync)
+ tasklet_unlock_wait(t);
+}
+
void i915_request_show_with_schedule(struct drm_printer *m,
const struct i915_request *rq,
const char *prefix,
diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
index 6b80df7feb78..1803fc37bada 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.h
+++ b/drivers/gpu/drm/i915/i915_scheduler.h
@@ -92,6 +92,52 @@ i915_sched_is_last_request(const struct i915_sched *se,
return list_is_last_rcu(&rq->sched.link, &se->requests);
}
+static inline void
+i915_sched_lock_bh(struct i915_sched *se)
+{
+ local_bh_disable(); /* prevent local softirq and lock recursion */
+ tasklet_lock(&se->tasklet);
+}
+
+static inline void
+i915_sched_unlock_bh(struct i915_sched *se)
+{
+ tasklet_unlock(&se->tasklet);
+ local_bh_enable(); /* restore softirq, and kick ksoftirqd! */
+}
+
+/*
+ * Control execution of the submission backend. While this does not immediately
+ * stop the HW, it does prevent us from propagating any more requests to it.
+ * Typically used around reset.
+ */
+void i915_sched_disable_tasklet(struct i915_sched *se);
+void i915_sched_enable_tasklet(struct i915_sched *se);
+
+static inline bool __i915_sched_tasklet_is_disabled(const struct i915_sched *se)
+{
+ return unlikely(!__tasklet_is_enabled(&se->tasklet));
+}
+
+static inline void i915_sched_kill_tasklet(struct i915_sched *se)
+{
+ tasklet_kill(&se->tasklet);
+}
+
+/* Schedule execution of the scheduler's bottom-half, the submission backend */
+static inline void i915_sched_kick(struct i915_sched *se)
+{
+ /* Kick the tasklet for some interrupt coalescing and reset handling */
+ tasklet_hi_schedule(&se->tasklet);
+}
+
+/* Immediately execute the scheduler's bottom-half, and wait for completion */
+void __i915_sched_flush(struct i915_sched *se, bool sync);
+static inline void i915_sched_flush(struct i915_sched *se)
+{
+ __i915_sched_flush(se, true);
+}
+
void i915_request_show_with_schedule(struct drm_printer *m,
const struct i915_request *rq,
const char *prefix,
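For reference, the intended pairing of the disable/enable controls introduced above, as exercised by guc_reset_prepare() and guc_reset_finish() earlier in this patch, is roughly the following. This is a minimal sketch with an illustrative function name, not code from the series; the actual reset work is elided:

static void example_engine_reset(struct intel_engine_cs *engine)
{
	struct i915_sched *se = intel_engine_get_scheduler(engine);

	/* Stop the bottom half from feeding more requests to the HW */
	i915_sched_disable_tasklet(se);

	/* ... perform the engine reset here ... */

	/* Re-enable the bottom half; kicks it if a submission was missed */
	i915_sched_enable_tasklet(se);
}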
diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index 2b34d9882835..3e2e47298bc6 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -7,6 +7,7 @@
#ifndef _I915_SCHEDULER_TYPES_H_
#define _I915_SCHEDULER_TYPES_H_
+#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/workqueue.h>
@@ -54,6 +55,14 @@ struct i915_sched {
*/
struct rb_root_cached queue;
+ /**
+ * @tasklet: softirq tasklet for bottom half
+ *
+ * The tasklet is responsible for transferring the priority queue
+ * to HW, and for handling responses from HW.
+ */
+ struct tasklet_struct tasklet;
+
/* Inter-engine scheduling delegate */
struct i915_sched_ipi {
struct i915_request *list;
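To make the tasklet ownership concrete: a submission backend binds its bottom half to sched.tasklet at setup time and recovers its context inside the callback, following the pattern of guc_submission_tasklet() and intel_guc_submission_setup() above. A sketch with illustrative names (from_tasklet(), container_of() and tasklet_setup() are the real kernel helpers; the example_* functions are not part of the series):

static void example_submission_tasklet(struct tasklet_struct *t)
{
	struct i915_sched *se = from_tasklet(se, t, tasklet);
	struct intel_engine_cs *engine =
		container_of(se, typeof(*engine), sched);

	/* ... drain se->queue into the engine's hardware ports ... */
}

static int example_submission_setup(struct intel_engine_cs *engine)
{
	tasklet_setup(&engine->sched.tasklet, example_submission_tasklet);
	return 0;
}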
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index d2a678a2497e..39c619bccb74 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -1517,7 +1517,7 @@ static int switch_to_kernel_sync(struct intel_context *ce, int err)
i915_request_put(rq);
while (!err && !intel_engine_is_idle(ce->engine))
- intel_engine_flush_submission(ce->engine);
+ intel_engine_flush_scheduler(ce->engine);
return err;
}
@@ -1902,7 +1902,7 @@ static int measure_inter_request(struct intel_context *ce)
return -ENOMEM;
}
- intel_engine_flush_submission(ce->engine);
+ intel_engine_flush_scheduler(ce->engine);
for (i = 1; i <= ARRAY_SIZE(elapsed); i++) {
struct i915_request *rq;
u32 *cs;
@@ -1934,7 +1934,7 @@ static int measure_inter_request(struct intel_context *ce)
i915_request_add(rq);
}
i915_sw_fence_commit(submit);
- intel_engine_flush_submission(ce->engine);
+ intel_engine_flush_scheduler(ce->engine);
heap_fence_put(submit);
semaphore_set(sema, 1);
@@ -2030,7 +2030,7 @@ static int measure_context_switch(struct intel_context *ce)
}
}
i915_request_put(fence);
- intel_engine_flush_submission(ce->engine);
+ intel_engine_flush_scheduler(ce->engine);
semaphore_set(sema, 1);
err = intel_gt_wait_for_idle(ce->engine->gt, HZ / 2);
@@ -2221,7 +2221,7 @@ static int measure_completion(struct intel_context *ce)
dma_fence_add_callback(&rq->fence, &cb.base, signal_cb);
i915_request_add(rq);
- intel_engine_flush_submission(ce->engine);
+ intel_engine_flush_scheduler(ce->engine);
if (wait_for(READ_ONCE(sema[i]) == -1, 50)) {
err = -EIO;
goto err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_scheduler.c b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
index 956a9af6f7e5..f54bdbeaa48b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/selftests/i915_scheduler.c
@@ -171,12 +171,12 @@ static int __single_chain(struct intel_engine_cs *engine, unsigned long length,
i915_request_add(rq);
count++;
}
- intel_engine_flush_submission(engine);
+ i915_sched_flush(se);
- execlists_active_lock_bh(&engine->execlists);
+ i915_sched_lock_bh(se);
if (fn(rq, count, count - 1) && !check_context_order(se))
err = -EINVAL;
- execlists_active_unlock_bh(&engine->execlists);
+ i915_sched_unlock_bh(se);
igt_spinner_end(&spin);
err_context:
@@ -256,12 +256,12 @@ static int __wide_chain(struct intel_engine_cs *engine, unsigned long width,
}
i915_request_add(rq[i]);
}
- intel_engine_flush_submission(engine);
+ i915_sched_flush(se);
- execlists_active_lock_bh(&engine->execlists);
+ i915_sched_lock_bh(se);
if (fn(rq[i - 1], i, count) && !check_context_order(se))
err = -EINVAL;
- execlists_active_unlock_bh(&engine->execlists);
+ i915_sched_unlock_bh(se);
igt_spinner_end(&spin);
err_free:
@@ -345,12 +345,12 @@ static int __inv_chain(struct intel_engine_cs *engine, unsigned long width,
}
i915_request_add(rq[i]);
}
- intel_engine_flush_submission(engine);
+ i915_sched_flush(se);
- execlists_active_lock_bh(&engine->execlists);
+ i915_sched_lock_bh(se);
if (fn(rq[i - 1], i, count) && !check_context_order(se))
err = -EINVAL;
- execlists_active_unlock_bh(&engine->execlists);
+ i915_sched_unlock_bh(se);
igt_spinner_end(&spin);
err_free:
@@ -451,12 +451,12 @@ static int __sparse_chain(struct intel_engine_cs *engine, unsigned long width,
if (err)
break;
}
- intel_engine_flush_submission(engine);
+ i915_sched_flush(se);
- execlists_active_lock_bh(&engine->execlists);
+ i915_sched_lock_bh(se);
if (fn(rq[i - 1], i, count) && !check_context_order(se))
err = -EINVAL;
- execlists_active_unlock_bh(&engine->execlists);
+ i915_sched_unlock_bh(se);
igt_spinner_end(&spin);
err_free:
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index 83f6e5f31fb3..0e6c1ea0082a 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -221,7 +221,7 @@ void igt_spinner_fini(struct igt_spinner *spin)
bool igt_wait_for_spinner(struct igt_spinner *spin, struct i915_request *rq)
{
if (i915_request_is_ready(rq))
- intel_engine_flush_submission(rq->engine);
+ __i915_sched_flush(i915_request_get_scheduler(rq), false);
return !(wait_for_us(i915_seqno_passed(hws_seqno(spin, rq),
rq->fence.seqno),
--
2.20.1
* [Intel-gfx] [CI 5/5] drm/i915/gt: Only kick the scheduler on timeslice/preemption change
2021-02-06 1:20 [Intel-gfx] [CI 1/5] drm/i915: Wrap access to intel_engine.active Chris Wilson
` (2 preceding siblings ...)
2021-02-06 1:20 ` [Intel-gfx] [CI 4/5] drm/i915: Move tasklet from execlists to sched Chris Wilson
@ 2021-02-06 1:20 ` Chris Wilson
2021-02-06 2:40 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active Patchwork
` (3 subsequent siblings)
7 siblings, 0 replies; 9+ messages in thread
From: Chris Wilson @ 2021-02-06 1:20 UTC (permalink / raw)
To: intel-gfx
Kick the scheduler to allow it to see the timeslice duration change,
don't peek into execlists.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
drivers/gpu/drm/i915/gt/sysfs_engines.c | 8 ++------
1 file changed, 2 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index 57ef5383dd4e..70506f43d6be 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -222,9 +222,7 @@ timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
return -EINVAL;
WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
-
- if (execlists_active(&engine->execlists))
- set_timer_ms(&engine->execlists.timer, duration);
+ intel_engine_kick_scheduler(engine);
return count;
}
@@ -326,9 +324,7 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
return -EINVAL;
WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
-
- if (READ_ONCE(engine->execlists.pending[0]))
- set_timer_ms(&engine->execlists.preempt, timeout);
+ intel_engine_kick_scheduler(engine);
return count;
}
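The intel_engine_kick_scheduler() helper used above is not shown in this excerpt of the series; given the i915_sched_kick() and intel_engine_get_scheduler() helpers from patch 4/5, it presumably reduces to a thin wrapper along these lines (a sketch, not the verbatim definition):

static inline void intel_engine_kick_scheduler(struct intel_engine_cs *engine)
{
	i915_sched_kick(intel_engine_get_scheduler(engine));
}

Either way, the point of the patch stands: rather than poking the execlists timers directly, the sysfs store kicks the scheduler's tasklet, which re-samples the updated engine->props values on its next run.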
--
2.20.1
* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active
2021-02-06 1:20 [Intel-gfx] [CI 1/5] drm/i915: Wrap access to intel_engine.active Chris Wilson
` (3 preceding siblings ...)
2021-02-06 1:20 ` [Intel-gfx] [CI 5/5] drm/i915/gt: Only kick the scheduler on timeslice/preemption change Chris Wilson
@ 2021-02-06 2:40 ` Patchwork
2021-02-06 2:41 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
` (2 subsequent siblings)
7 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2021-02-06 2:40 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active
URL : https://patchwork.freedesktop.org/series/86797/
State : warning
== Summary ==
$ dim checkpatch origin/drm-tip
ced9b3f47b92 drm/i915: Wrap access to intel_engine.active
69d8dcdc66c1 drm/i915: Move common active lists from engine to i915_scheduler
-:295: ERROR:COMPLEX_MACRO: Macros with complex values should be enclosed in parentheses
#295: FILE: drivers/gpu/drm/i915/i915_request.h:55:
+#define RQ_ARG(rq) (rq) ? (rq)->fence.context : 0, (rq) ? (rq)->fence.seqno : 0
-:295: CHECK:MACRO_ARG_REUSE: Macro argument reuse 'rq' - possible side-effects?
#295: FILE: drivers/gpu/drm/i915/i915_request.h:55:
+#define RQ_ARG(rq) (rq) ? (rq)->fence.context : 0, (rq) ? (rq)->fence.seqno : 0
total: 1 errors, 0 warnings, 1 checks, 465 lines checked
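For context on the COMPLEX_MACRO report: RQ_ARG() deliberately expands to two comma-separated values so that a single macro can feed two printf-style conversions, and such a macro cannot be wrapped in an extra pair of parentheses without collapsing it into a single comma expression and breaking the vararg expansion; the MACRO_ARG_REUSE check is likewise inherent to the NULL guard. A hypothetical use (RQ_FMT is an assumed companion macro, not quoted from the series):

/* RQ_FMT is assumed here for illustration, not shown in this excerpt */
#define RQ_FMT "%llx:%lld"

pr_debug("rq " RQ_FMT "\n", RQ_ARG(rq));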
c4249043eb53 drm/i915: Move scheduler queue
f993a982c9ad drm/i915: Move tasklet from execlists to sched
fe5dd19adf50 drm/i915/gt: Only kick the scheduler on timeslice/preemption change
* [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active
2021-02-06 1:20 [Intel-gfx] [CI 1/5] drm/i915: Wrap access to intel_engine.active Chris Wilson
` (4 preceding siblings ...)
2021-02-06 2:40 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active Patchwork
@ 2021-02-06 2:41 ` Patchwork
2021-02-06 3:10 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2021-02-06 15:59 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
7 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2021-02-06 2:41 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active
URL : https://patchwork.freedesktop.org/series/86797/
State : warning
== Summary ==
$ dim sparse --fast origin/drm-tip
Sparse version: v0.6.2
Fast mode used, each commit won't be checked separately.
-
+drivers/gpu/drm/i915/gt/intel_reset.c:1323:5: warning: context imbalance in 'intel_gt_reset_trylock' - different lock contexts for basic block
+drivers/gpu/drm/i915/gvt/mmio.c:295:23: warning: memcpy with byte count of 279040
+drivers/gpu/drm/i915/i915_perf.c:1450:15: warning: memset with byte count of 16777216
+drivers/gpu/drm/i915/i915_perf.c:1504:15: warning: memset with byte count of 16777216
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_read16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_read32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_read64' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_read8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_write16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_write32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'fwtable_write8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_read16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_read32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_read64' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_read8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_write16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_write32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen11_fwtable_write8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen12_fwtable_read16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen12_fwtable_read32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen12_fwtable_read64' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen12_fwtable_read8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen12_fwtable_write16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen12_fwtable_write32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen12_fwtable_write8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen6_read16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen6_read32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen6_read64' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen6_read8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen6_write16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen6_write32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen6_write8' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen8_write16' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen8_write32' - different lock contexts for basic block
+./include/linux/spinlock.h:409:9: warning: context imbalance in 'gen8_write8' - different lock contexts for basic block
* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active
2021-02-06 1:20 [Intel-gfx] [CI 1/5] drm/i915: Wrap access to intel_engine.active Chris Wilson
` (5 preceding siblings ...)
2021-02-06 2:41 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
@ 2021-02-06 3:10 ` Patchwork
2021-02-06 15:59 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
7 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2021-02-06 3:10 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active
URL : https://patchwork.freedesktop.org/series/86797/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_9741 -> Patchwork_19616
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/index.html
Known issues
------------
Here are the changes found in Patchwork_19616 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_flink_basic@bad-flink:
- fi-tgl-y: [PASS][1] -> [DMESG-WARN][2] ([i915#402])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/fi-tgl-y/igt@gem_flink_basic@bad-flink.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/fi-tgl-y/igt@gem_flink_basic@bad-flink.html
* igt@i915_selftest@live@execlists:
- fi-bsw-nick: [PASS][3] -> [INCOMPLETE][4] ([i915#2940])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/fi-bsw-nick/igt@i915_selftest@live@execlists.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/fi-bsw-nick/igt@i915_selftest@live@execlists.html
#### Possible fixes ####
* igt@gem_mmap_gtt@basic:
- fi-tgl-y: [DMESG-WARN][5] ([i915#402]) -> [PASS][6]
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/fi-tgl-y/igt@gem_mmap_gtt@basic.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/fi-tgl-y/igt@gem_mmap_gtt@basic.html
* igt@i915_module_load@reload:
- fi-tgl-u2: [FAIL][7] -> [PASS][8]
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/fi-tgl-u2/igt@i915_module_load@reload.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/fi-tgl-u2/igt@i915_module_load@reload.html
[i915#2940]: https://gitlab.freedesktop.org/drm/intel/issues/2940
[i915#402]: https://gitlab.freedesktop.org/drm/intel/issues/402
Participating hosts (44 -> 38)
------------------------------
Missing (6): fi-jsl-1 fi-ilk-m540 fi-hsw-4200u fi-bsw-cyan fi-ctg-p8600 fi-bdw-samus
Build changes
-------------
* Linux: CI_DRM_9741 -> Patchwork_19616
CI-20190529: 20190529
CI_DRM_9741: 439ad4a70b3d6663de543ee56478d64b16cca1cf @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_5994: 53268fc5e5dde45a16e7185023a42296e3599e6e @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
Patchwork_19616: fe5dd19adf5026f38936cca144d34133e1a84daa @ git://anongit.freedesktop.org/gfx-ci/linux
== Linux commits ==
fe5dd19adf50 drm/i915/gt: Only kick the scheduler on timeslice/preemption change
f993a982c9ad drm/i915: Move tasklet from execlists to sched
c4249043eb53 drm/i915: Move scheduler queue
69d8dcdc66c1 drm/i915: Move common active lists from engine to i915_scheduler
ced9b3f47b92 drm/i915: Wrap access to intel_engine.active
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/index.html
* [Intel-gfx] ✓ Fi.CI.IGT: success for series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active
2021-02-06 1:20 [Intel-gfx] [CI 1/5] drm/i915: Wrap access to intel_engine.active Chris Wilson
` (6 preceding siblings ...)
2021-02-06 3:10 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
@ 2021-02-06 15:59 ` Patchwork
7 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2021-02-06 15:59 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: series starting with [CI,1/5] drm/i915: Wrap access to intel_engine.active
URL : https://patchwork.freedesktop.org/series/86797/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_9741_full -> Patchwork_19616_full
====================================================
Summary
-------
**SUCCESS**
No regressions found.
Known issues
------------
Here are the changes found in Patchwork_19616_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_ctx_persistence@engines-hostile-preempt:
- shard-hsw: NOTRUN -> [SKIP][1] ([fdo#109271] / [i915#1099])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-hsw2/igt@gem_ctx_persistence@engines-hostile-preempt.html
* igt@gem_exec_fair@basic-none-vip@rcs0:
- shard-kbl: [PASS][2] -> [FAIL][3] ([i915#2842])
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl6/igt@gem_exec_fair@basic-none-vip@rcs0.html
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl6/igt@gem_exec_fair@basic-none-vip@rcs0.html
* igt@gem_exec_fair@basic-pace-share@rcs0:
- shard-tglb: [PASS][4] -> [FAIL][5] ([i915#2842]) +1 similar issue
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-tglb2/igt@gem_exec_fair@basic-pace-share@rcs0.html
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-tglb5/igt@gem_exec_fair@basic-pace-share@rcs0.html
* igt@gem_exec_reloc@basic-many-active@vcs1:
- shard-iclb: NOTRUN -> [FAIL][6] ([i915#2389])
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb4/igt@gem_exec_reloc@basic-many-active@vcs1.html
* igt@gem_exec_whisper@basic-forked:
- shard-glk: [PASS][7] -> [DMESG-WARN][8] ([i915#118] / [i915#95])
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-glk3/igt@gem_exec_whisper@basic-forked.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-glk3/igt@gem_exec_whisper@basic-forked.html
* igt@gem_render_copy@x-tiled-to-vebox-yf-tiled:
- shard-kbl: NOTRUN -> [SKIP][9] ([fdo#109271]) +135 similar issues
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl2/igt@gem_render_copy@x-tiled-to-vebox-yf-tiled.html
* igt@gem_userptr_blits@process-exit-mmap-busy@uc:
- shard-skl: NOTRUN -> [SKIP][10] ([fdo#109271] / [i915#1699]) +3 similar issues
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl10/igt@gem_userptr_blits@process-exit-mmap-busy@uc.html
* igt@gem_userptr_blits@process-exit-mmap@gtt:
- shard-kbl: NOTRUN -> [SKIP][11] ([fdo#109271] / [i915#1699]) +3 similar issues
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl2/igt@gem_userptr_blits@process-exit-mmap@gtt.html
* igt@gen7_exec_parse@basic-rejected:
- shard-iclb: NOTRUN -> [SKIP][12] ([fdo#109289])
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb2/igt@gen7_exec_parse@basic-rejected.html
* igt@i915_pm_dc@dc3co-vpb-simulation:
- shard-hsw: NOTRUN -> [SKIP][13] ([fdo#109271]) +19 similar issues
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-hsw2/igt@i915_pm_dc@dc3co-vpb-simulation.html
- shard-kbl: NOTRUN -> [SKIP][14] ([fdo#109271] / [i915#658])
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl2/igt@i915_pm_dc@dc3co-vpb-simulation.html
* igt@kms_busy@basic-modeset-pipe-d:
- shard-hsw: NOTRUN -> [SKIP][15] ([fdo#109271] / [i915#533]) +2 similar issues
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-hsw2/igt@kms_busy@basic-modeset-pipe-d.html
* igt@kms_ccs@pipe-c-bad-aux-stride:
- shard-skl: NOTRUN -> [SKIP][16] ([fdo#109271] / [fdo#111304])
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl4/igt@kms_ccs@pipe-c-bad-aux-stride.html
* igt@kms_chamelium@dp-edid-read:
- shard-apl: NOTRUN -> [SKIP][17] ([fdo#109271] / [fdo#111827])
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl6/igt@kms_chamelium@dp-edid-read.html
* igt@kms_chamelium@hdmi-hpd-for-each-pipe:
- shard-hsw: NOTRUN -> [SKIP][18] ([fdo#109271] / [fdo#111827])
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-hsw2/igt@kms_chamelium@hdmi-hpd-for-each-pipe.html
* igt@kms_chamelium@vga-hpd-after-suspend:
- shard-skl: NOTRUN -> [SKIP][19] ([fdo#109271] / [fdo#111827]) +6 similar issues
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl10/igt@kms_chamelium@vga-hpd-after-suspend.html
* igt@kms_color@pipe-d-ctm-0-5:
- shard-skl: NOTRUN -> [SKIP][20] ([fdo#109271]) +81 similar issues
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl10/igt@kms_color@pipe-d-ctm-0-5.html
* igt@kms_color_chamelium@pipe-a-ctm-0-75:
- shard-kbl: NOTRUN -> [SKIP][21] ([fdo#109271] / [fdo#111827]) +12 similar issues
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl1/igt@kms_color_chamelium@pipe-a-ctm-0-75.html
* igt@kms_content_protection@atomic-dpms:
- shard-apl: NOTRUN -> [TIMEOUT][22] ([i915#1319])
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl6/igt@kms_content_protection@atomic-dpms.html
* igt@kms_content_protection@legacy:
- shard-kbl: NOTRUN -> [TIMEOUT][23] ([i915#1319])
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl1/igt@kms_content_protection@legacy.html
* igt@kms_cursor_crc@pipe-b-cursor-256x85-random:
- shard-skl: [PASS][24] -> [FAIL][25] ([i915#54]) +2 similar issues
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-skl6/igt@kms_cursor_crc@pipe-b-cursor-256x85-random.html
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl7/igt@kms_cursor_crc@pipe-b-cursor-256x85-random.html
* igt@kms_cursor_crc@pipe-c-cursor-256x256-onscreen:
- shard-skl: NOTRUN -> [FAIL][26] ([i915#54])
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl4/igt@kms_cursor_crc@pipe-c-cursor-256x256-onscreen.html
* igt@kms_cursor_legacy@flip-vs-cursor-atomic:
- shard-tglb: [PASS][27] -> [FAIL][28] ([i915#2346]) +1 similar issue
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-tglb7/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-tglb8/igt@kms_cursor_legacy@flip-vs-cursor-atomic.html
* igt@kms_fbcon_fbt@fbc-suspend:
- shard-apl: [PASS][29] -> [INCOMPLETE][30] ([i915#180] / [i915#1982])
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl1/igt@kms_fbcon_fbt@fbc-suspend.html
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl3/igt@kms_fbcon_fbt@fbc-suspend.html
* igt@kms_flip@flip-vs-expired-vblank-interruptible@a-edp1:
- shard-tglb: [PASS][31] -> [FAIL][32] ([i915#2598])
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-tglb3/igt@kms_flip@flip-vs-expired-vblank-interruptible@a-edp1.html
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-tglb8/igt@kms_flip@flip-vs-expired-vblank-interruptible@a-edp1.html
* igt@kms_flip@flip-vs-expired-vblank@b-edp1:
- shard-skl: [PASS][33] -> [FAIL][34] ([i915#79])
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-skl9/igt@kms_flip@flip-vs-expired-vblank@b-edp1.html
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl2/igt@kms_flip@flip-vs-expired-vblank@b-edp1.html
* igt@kms_flip@plain-flip-fb-recreate@a-edp1:
- shard-skl: NOTRUN -> [FAIL][35] ([i915#2122])
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl10/igt@kms_flip@plain-flip-fb-recreate@a-edp1.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs:
- shard-kbl: NOTRUN -> [FAIL][36] ([i915#2641])
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl1/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-32bpp-ytileccs.html
* igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile:
- shard-skl: NOTRUN -> [SKIP][37] ([fdo#109271] / [i915#2642])
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl10/igt@kms_flip_scaled_crc@flip-32bpp-ytile-to-64bpp-ytile.html
* igt@kms_hdr@bpc-switch-suspend:
- shard-kbl: [PASS][38] -> [DMESG-WARN][39] ([i915#180]) +3 similar issues
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl2/igt@kms_hdr@bpc-switch-suspend.html
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl4/igt@kms_hdr@bpc-switch-suspend.html
* igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b:
- shard-apl: [PASS][40] -> [DMESG-WARN][41] ([i915#180]) +1 similar issue
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl4/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b.html
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl1/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b.html
* igt@kms_plane_alpha_blend@pipe-c-alpha-transparent-fb:
- shard-skl: NOTRUN -> [FAIL][42] ([i915#265])
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl10/igt@kms_plane_alpha_blend@pipe-c-alpha-transparent-fb.html
* igt@kms_plane_alpha_blend@pipe-c-constant-alpha-max:
- shard-kbl: NOTRUN -> [FAIL][43] ([fdo#108145] / [i915#265])
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl2/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-max.html
* igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-4:
- shard-iclb: NOTRUN -> [SKIP][44] ([i915#2920])
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb2/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-4.html
* igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-5:
- shard-skl: NOTRUN -> [SKIP][45] ([fdo#109271] / [i915#658])
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl4/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-5.html
* igt@kms_psr@psr2_suspend:
- shard-iclb: [PASS][46] -> [SKIP][47] ([fdo#109441])
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb2/igt@kms_psr@psr2_suspend.html
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb3/igt@kms_psr@psr2_suspend.html
* igt@kms_sysfs_edid_timing:
- shard-kbl: NOTRUN -> [FAIL][48] ([IGT#2])
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl2/igt@kms_sysfs_edid_timing.html
* igt@kms_vblank@pipe-d-query-forked-hang:
- shard-iclb: NOTRUN -> [SKIP][49] ([fdo#109278])
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb2/igt@kms_vblank@pipe-d-query-forked-hang.html
* igt@kms_vblank@pipe-d-wait-idle:
- shard-kbl: NOTRUN -> [SKIP][50] ([fdo#109271] / [i915#533]) +1 similar issue
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl1/igt@kms_vblank@pipe-d-wait-idle.html
* igt@kms_writeback@writeback-fb-id:
- shard-skl: NOTRUN -> [SKIP][51] ([fdo#109271] / [i915#2437])
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl4/igt@kms_writeback@writeback-fb-id.html
* igt@prime_nv_api@i915_nv_double_import:
- shard-apl: NOTRUN -> [SKIP][52] ([fdo#109271]) +5 similar issues
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl6/igt@prime_nv_api@i915_nv_double_import.html
* igt@sysfs_heartbeat_interval@mixed@rcs0:
- shard-skl: [PASS][53] -> [FAIL][54] ([i915#1731])
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-skl2/igt@sysfs_heartbeat_interval@mixed@rcs0.html
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl4/igt@sysfs_heartbeat_interval@mixed@rcs0.html
* igt@sysfs_timeslice_duration@timeout@rcs0:
- shard-skl: [PASS][55] -> [FAIL][56] ([i915#2825])
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-skl9/igt@sysfs_timeslice_duration@timeout@rcs0.html
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl2/igt@sysfs_timeslice_duration@timeout@rcs0.html
#### Possible fixes ####
* igt@gem_ctx_isolation@preservation-s3@vcs0:
- shard-skl: [INCOMPLETE][57] ([i915#198]) -> [PASS][58]
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-skl2/igt@gem_ctx_isolation@preservation-s3@vcs0.html
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl4/igt@gem_ctx_isolation@preservation-s3@vcs0.html
* igt@gem_eio@in-flight-suspend:
- shard-kbl: [DMESG-WARN][59] ([i915#1037] / [i915#180]) -> [PASS][60]
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl4/igt@gem_eio@in-flight-suspend.html
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl2/igt@gem_eio@in-flight-suspend.html
* igt@gem_exec_fair@basic-deadline:
- shard-kbl: [FAIL][61] ([i915#2846]) -> [PASS][62]
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl3/igt@gem_exec_fair@basic-deadline.html
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl2/igt@gem_exec_fair@basic-deadline.html
* igt@gem_exec_fair@basic-none-share@rcs0:
- shard-iclb: [FAIL][63] ([i915#2842]) -> [PASS][64]
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb4/igt@gem_exec_fair@basic-none-share@rcs0.html
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb3/igt@gem_exec_fair@basic-none-share@rcs0.html
* igt@gem_exec_fair@basic-pace@vcs0:
- shard-kbl: [SKIP][65] ([fdo#109271]) -> [PASS][66]
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl1/igt@gem_exec_fair@basic-pace@vcs0.html
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl4/igt@gem_exec_fair@basic-pace@vcs0.html
- shard-glk: [FAIL][67] ([i915#2842]) -> [PASS][68]
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-glk2/igt@gem_exec_fair@basic-pace@vcs0.html
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-glk9/igt@gem_exec_fair@basic-pace@vcs0.html
* igt@gem_exec_fair@basic-pace@vcs1:
- shard-tglb: [FAIL][69] ([i915#2842]) -> [PASS][70]
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-tglb6/igt@gem_exec_fair@basic-pace@vcs1.html
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-tglb1/igt@gem_exec_fair@basic-pace@vcs1.html
* igt@gem_exec_fair@basic-pace@vecs0:
- shard-kbl: [FAIL][71] ([i915#2842]) -> [PASS][72]
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl1/igt@gem_exec_fair@basic-pace@vecs0.html
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl4/igt@gem_exec_fair@basic-pace@vecs0.html
* igt@gem_exec_schedule@u-fairslice@vcs1:
- shard-iclb: [DMESG-WARN][73] ([i915#2803]) -> [PASS][74]
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb2/igt@gem_exec_schedule@u-fairslice@vcs1.html
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb2/igt@gem_exec_schedule@u-fairslice@vcs1.html
* igt@gem_softpin@noreloc-s3:
- shard-kbl: [INCOMPLETE][75] ([i915#180] / [i915#2405]) -> [PASS][76]
[75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl4/igt@gem_softpin@noreloc-s3.html
[76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl2/igt@gem_softpin@noreloc-s3.html
* igt@i915_pm_dc@dc6-psr:
- shard-iclb: [FAIL][77] ([i915#454]) -> [PASS][78]
[77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb8/igt@i915_pm_dc@dc6-psr.html
[78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb7/igt@i915_pm_dc@dc6-psr.html
* igt@i915_pm_rc6_residency@rc6-fence:
- shard-hsw: [WARN][79] ([i915#1519]) -> [PASS][80]
[79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-hsw2/igt@i915_pm_rc6_residency@rc6-fence.html
[80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-hsw7/igt@i915_pm_rc6_residency@rc6-fence.html
* igt@i915_suspend@debugfs-reader:
- shard-apl: [DMESG-WARN][81] ([i915#180]) -> [PASS][82]
[81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl1/igt@i915_suspend@debugfs-reader.html
[82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl6/igt@i915_suspend@debugfs-reader.html
* igt@i915_suspend@fence-restore-untiled:
- shard-kbl: [DMESG-WARN][83] ([i915#180]) -> [PASS][84] +1 similar issue
[83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl4/igt@i915_suspend@fence-restore-untiled.html
[84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl3/igt@i915_suspend@fence-restore-untiled.html
* igt@kms_cursor_crc@pipe-a-cursor-256x85-offscreen:
- shard-skl: [FAIL][85] ([i915#54]) -> [PASS][86] +2 similar issues
[85]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-skl7/igt@kms_cursor_crc@pipe-a-cursor-256x85-offscreen.html
[86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl2/igt@kms_cursor_crc@pipe-a-cursor-256x85-offscreen.html
* igt@kms_cursor_legacy@2x-long-cursor-vs-flip-legacy:
- shard-hsw: [FAIL][87] ([i915#96]) -> [PASS][88]
[87]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-hsw1/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-legacy.html
[88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-hsw7/igt@kms_cursor_legacy@2x-long-cursor-vs-flip-legacy.html
* igt@kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size:
- shard-hsw: [FAIL][89] ([i915#2370]) -> [PASS][90]
[89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-hsw1/igt@kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size.html
[90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-hsw6/igt@kms_cursor_legacy@cursor-vs-flip-atomic-transitions-varying-size.html
* igt@kms_cursor_legacy@flip-vs-cursor-busy-crc-legacy:
- shard-skl: [FAIL][91] ([i915#2346]) -> [PASS][92]
[91]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-skl4/igt@kms_cursor_legacy@flip-vs-cursor-busy-crc-legacy.html
[92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl10/igt@kms_cursor_legacy@flip-vs-cursor-busy-crc-legacy.html
* igt@kms_flip@plain-flip-fb-recreate-interruptible@c-edp1:
- shard-skl: [FAIL][93] ([i915#2122]) -> [PASS][94] +2 similar issues
[93]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-skl6/igt@kms_flip@plain-flip-fb-recreate-interruptible@c-edp1.html
[94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl8/igt@kms_flip@plain-flip-fb-recreate-interruptible@c-edp1.html
* igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes:
- shard-kbl: [DMESG-WARN][95] ([i915#180] / [i915#533]) -> [PASS][96]
[95]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl4/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
[96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl1/igt@kms_plane@plane-panning-bottom-right-suspend-pipe-a-planes.html
* igt@kms_plane_alpha_blend@pipe-a-coverage-7efc:
- shard-skl: [FAIL][97] ([fdo#108145] / [i915#265]) -> [PASS][98]
[97]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-skl1/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
[98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-skl6/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
* igt@kms_psr2_su@frontbuffer:
- shard-iclb: [SKIP][99] ([fdo#109642] / [fdo#111068] / [i915#658]) -> [PASS][100]
[99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb7/igt@kms_psr2_su@frontbuffer.html
[100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb2/igt@kms_psr2_su@frontbuffer.html
* igt@kms_psr@psr2_primary_page_flip:
- shard-iclb: [SKIP][101] ([fdo#109441]) -> [PASS][102] +1 similar issue
[101]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb7/igt@kms_psr@psr2_primary_page_flip.html
[102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb2/igt@kms_psr@psr2_primary_page_flip.html
* igt@sysfs_clients@recycle:
- shard-iclb: [FAIL][103] ([i915#3028]) -> [PASS][104]
[103]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb4/igt@sysfs_clients@recycle.html
[104]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb6/igt@sysfs_clients@recycle.html
* {igt@sysfs_clients@recycle-many}:
- shard-glk: [FAIL][105] ([i915#3028]) -> [PASS][106] +1 similar issue
[105]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-glk2/igt@sysfs_clients@recycle-many.html
[106]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-glk7/igt@sysfs_clients@recycle-many.html
* igt@sysfs_clients@sema-10@vcs0:
- shard-apl: [SKIP][107] ([fdo#109271] / [i915#3026]) -> [PASS][108] +2 similar issues
[107]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl8/igt@sysfs_clients@sema-10@vcs0.html
[108]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl1/igt@sysfs_clients@sema-10@vcs0.html
#### Warnings ####
* igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-3:
- shard-iclb: [SKIP][109] ([i915#658]) -> [SKIP][110] ([i915#2920]) +1 similar issue
[109]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb7/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-3.html
[110]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb2/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-3.html
* igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-5:
- shard-iclb: [SKIP][111] ([i915#2920]) -> [SKIP][112] ([i915#658])
[111]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb2/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-5.html
[112]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb1/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-5.html
* igt@runner@aborted:
- shard-kbl: ([FAIL][113], [FAIL][114], [FAIL][115], [FAIL][116], [FAIL][117], [FAIL][118], [FAIL][119], [FAIL][120], [FAIL][121], [FAIL][122], [FAIL][123], [FAIL][124]) ([i915#1436] / [i915#1814] / [i915#2283] / [i915#2295] / [i915#2505] / [i915#3002]) -> ([FAIL][125], [FAIL][126], [FAIL][127], [FAIL][128], [FAIL][129], [FAIL][130], [FAIL][131], [FAIL][132], [FAIL][133]) ([i915#1814] / [i915#2283] / [i915#2295] / [i915#2505] / [i915#3002])
[113]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl7/igt@runner@aborted.html
[114]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl4/igt@runner@aborted.html
[115]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl6/igt@runner@aborted.html
[116]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl7/igt@runner@aborted.html
[117]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl4/igt@runner@aborted.html
[118]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl4/igt@runner@aborted.html
[119]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl4/igt@runner@aborted.html
[120]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl7/igt@runner@aborted.html
[121]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl4/igt@runner@aborted.html
[122]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl2/igt@runner@aborted.html
[123]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl2/igt@runner@aborted.html
[124]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-kbl1/igt@runner@aborted.html
[125]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl7/igt@runner@aborted.html
[126]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl6/igt@runner@aborted.html
[127]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl4/igt@runner@aborted.html
[128]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl4/igt@runner@aborted.html
[129]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl1/igt@runner@aborted.html
[130]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl4/igt@runner@aborted.html
[131]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl4/igt@runner@aborted.html
[132]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl6/igt@runner@aborted.html
[133]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-kbl7/igt@runner@aborted.html
- shard-iclb: ([FAIL][134], [FAIL][135], [FAIL][136], [FAIL][137], [FAIL][138]) ([i915#2283] / [i915#2295] / [i915#2426] / [i915#2724] / [i915#3002]) -> ([FAIL][139], [FAIL][140], [FAIL][141], [FAIL][142]) ([i915#2283] / [i915#2295] / [i915#2724] / [i915#3002])
[134]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb7/igt@runner@aborted.html
[135]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb1/igt@runner@aborted.html
[136]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb2/igt@runner@aborted.html
[137]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb4/igt@runner@aborted.html
[138]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-iclb8/igt@runner@aborted.html
[139]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb5/igt@runner@aborted.html
[140]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb2/igt@runner@aborted.html
[141]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb2/igt@runner@aborted.html
[142]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-iclb3/igt@runner@aborted.html
- shard-apl: ([FAIL][143], [FAIL][144], [FAIL][145], [FAIL][146], [FAIL][147], [FAIL][148], [FAIL][149]) ([i915#2283] / [i915#2295] / [i915#3002]) -> ([FAIL][150], [FAIL][151], [FAIL][152], [FAIL][153], [FAIL][154], [FAIL][155], [FAIL][156], [FAIL][157]) ([fdo#109271] / [i915#1814] / [i915#2283] / [i915#2295] / [i915#3002])
[143]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl1/igt@runner@aborted.html
[144]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl6/igt@runner@aborted.html
[145]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl6/igt@runner@aborted.html
[146]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl2/igt@runner@aborted.html
[147]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl6/igt@runner@aborted.html
[148]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl6/igt@runner@aborted.html
[149]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_9741/shard-apl1/igt@runner@aborted.html
[150]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl4/igt@runner@aborted.html
[151]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl6/igt@runner@aborted.html
[152]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl8/igt@runner@aborted.html
[153]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl3/igt@runner@aborted.html
[154]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl1/igt@runner@aborted.html
[155]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl6/igt@runner@aborted.html
[156]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl3/igt@runner@aborted.html
[157]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/shard-apl6/igt@runner@aborted.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[IGT#2]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/2
[fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
[fdo#109289]: https://bugs.freedesktop.org/show_bug.cgi?id=109289
[fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
[fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
[fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
[fdo#111304]: https://bugs.freedesktop.org/show_bug.cgi?id=111304
[fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
[i915#1037]: https://gitlab.freedesktop.org/drm/intel/issues/1037
[i915#1099]: https://gitlab.freedesktop.org/drm/intel/issues/1099
[i915#118]: https://gitlab.freedesktop.org/drm/intel/issues/118
[i915#1319]: https://gitlab.freedesktop.org/drm/intel/issues/1319
[i915#1436]: https://gitlab.freedesktop.org/drm/intel/issues/1436
[i915#1519]: https://gitlab.freedesktop.org/drm/intel/issues/1519
[i915#1699]: https://gitlab.freedesktop.org/drm/intel/issues/1699
[i915#1731]: https://gitlab.freedesktop.org/drm/intel/issues/1731
[i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
[i915#1814]: https://gitlab.freedesktop.org/drm/intel/issues/1814
[i915#198]: https://gitlab.freedesktop.org/drm/intel/issues/198
[i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
[i915#2122]: https://gitlab.freedesktop.org/drm/intel/issues/2122
[i915#2283]: https://gitlab.freedesktop.org/drm/intel/issues/2283
[i915#2295]: https://gitlab.freedesktop.org/drm/intel/issues/2295
[i915#2346]: https://gitlab.freedesktop.org/drm/intel/issues/2346
[i915#2370]: https://gitlab.freedesktop.org/drm/intel/issues/2370
[i915#2389]: https://gitlab.freedesktop.org/drm/intel/issues/2389
[i915#2405]: https://gitlab.freedesktop.org/drm/intel/issues/2405
[i915#2426]: https://gitlab.freedesktop.org/drm/intel/issues/2426
[i915#2437]: https://gitlab.freedesktop.org/drm/intel/issues/2437
[i915#2505]: https://gitlab.freedesktop.org/drm/intel/issues/2505
[i915#2598]: https://gitlab.freedesktop.org/drm/intel/issues/2598
[i915#2641]: https://gitlab.freedesktop.org/drm/intel/issues/2641
[i915#2642]: https://gitlab.freedesktop.org/drm/intel/issues/2642
[i915#2724]: https://gitlab.freedesktop.org/drm/intel/issues/2724
[i915#2803]: https://gitlab.freedesktop.org/drm/intel/issues/2803
[i915#2825]: https://gitlab.freedesktop.org/drm/intel/issues/2825
[i915#2842]: https://gitlab.freedesktop.org/drm/intel/issues/2842
[i915#2846]: https://gitlab.freedesktop.org/drm/intel/issues/2846
[i915#2920]: https://gitlab.freedesktop.org/drm/intel/issues/2920
[i915#3002]: https://gitlab.freedesktop.org/drm/intel/issues/3002
[i915#3026]: https://gitlab.freedesktop.org/drm/intel/issues/3026
[i915#3028]: https://gitlab.freedesktop.org/drm/intel/issues/3028
[i915#454]: https://gitlab.freedesktop.org/drm/intel/issues/454
[i915#533]: https://gitlab.freedesktop.org/drm/intel/issues/533
[i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
[i915#658]: https://gitlab.freedesktop.org/drm/intel/issues/658
[i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
[i915#95]: https://gitlab.freedesktop.org/drm/intel/issues/95
[i915#96]: https://gitlab.freedesktop.org/drm/intel/issues/96
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_19616/index.html