* [PATCH 0/4] Fix up request cancel
@ 2022-01-24 15:01 Matthew Brost
  2022-01-24 15:01 ` [PATCH 1/4] drm/i915: Add request cancel low level trace point Matthew Brost
                   ` (3 more replies)
  0 siblings, 4 replies; 15+ messages in thread
From: Matthew Brost @ 2022-01-24 15:01 UTC (permalink / raw)
  To: intel-gfx, dri-devel
  Cc: daniele.ceraolospurio, john.c.harrison, tvrtko.ursulin

Fix request cancellation + add request cancel low level trace point.

v2:
  - Update cancel reset selftest preemption timeout value to zero
  - Fix bug in execlists cancel code

Signed-off-by: Matthew Brost <matthew.brost@intel.com>

Matthew Brost (4):
  drm/i915: Add request cancel low level trace point
  drm/i915/guc: Cancel requests immediately
  drm/i915/execlists: Fix execlists request cancellation corner case
  drm/i915/selftests: Set preemption timeout to zero in cancel reset
    test

 drivers/gpu/drm/i915/gt/intel_context.h       |  1 +
 drivers/gpu/drm/i915/gt/intel_context_types.h |  5 ++
 .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 23 ++++++++--
 .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  1 +
 .../drm/i915/gt/intel_execlists_submission.c  | 18 +++++---
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 46 +++++++++++--------
 drivers/gpu/drm/i915/i915_request.h           |  6 +++
 drivers/gpu/drm/i915/i915_trace.h             | 10 ++++
 drivers/gpu/drm/i915/selftests/i915_request.c |  7 +--
 9 files changed, 84 insertions(+), 33 deletions(-)

-- 
2.34.1



* [PATCH 1/4] drm/i915: Add request cancel low level trace point
  2022-01-24 15:01 [PATCH 0/4] Fix up request cancel Matthew Brost
@ 2022-01-24 15:01 ` Matthew Brost
  2022-01-25 12:27   ` [Intel-gfx] " Tvrtko Ursulin
  2022-01-24 15:01 ` [PATCH 2/4] drm/i915/guc: Cancel requests immediately Matthew Brost
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 15+ messages in thread
From: Matthew Brost @ 2022-01-24 15:01 UTC (permalink / raw)
  To: intel-gfx, dri-devel
  Cc: daniele.ceraolospurio, john.c.harrison, tvrtko.ursulin

Add request cancel trace point guarded by
CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
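For reference, a minimal userspace sketch of how the new event could be
consumed; the tracefs paths below are assumptions based on the usual
events/<system>/<event> layout, not something this patch adds:

/*
 * Hypothetical consumer, not part of this series: enable the new event
 * and dump the trace pipe. Assumes tracefs mounted at
 * /sys/kernel/tracing and CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS=y.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd;

	if (write_str("/sys/kernel/tracing/events/i915/i915_request_cancel/enable", "1"))
		return 1;

	fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (fd < 0)
		return 1;

	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);

	close(fd);
	return 0;
}
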
 drivers/gpu/drm/i915/gt/intel_context.h |  1 +
 drivers/gpu/drm/i915/i915_trace.h       | 10 ++++++++++
 2 files changed, 11 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index d8c74bbf9aae2..3aed4d77f116c 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -124,6 +124,7 @@ intel_context_is_pinned(struct intel_context *ce)
 static inline void intel_context_cancel_request(struct intel_context *ce,
 						struct i915_request *rq)
 {
+	trace_i915_request_cancel(rq);
 	GEM_BUG_ON(!ce->ops->cancel_request);
 	return ce->ops->cancel_request(ce, rq);
 }
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 37b5c9e9d260e..d0a11a8bb0ca3 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -324,6 +324,11 @@ DEFINE_EVENT(i915_request, i915_request_add,
 );
 
 #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
+DEFINE_EVENT(i915_request, i915_request_cancel,
+	     TP_PROTO(struct i915_request *rq),
+	     TP_ARGS(rq)
+);
+
 DEFINE_EVENT(i915_request, i915_request_guc_submit,
 	     TP_PROTO(struct i915_request *rq),
 	     TP_ARGS(rq)
@@ -497,6 +502,11 @@ DEFINE_EVENT(intel_context, intel_context_do_unpin,
 
 #else
 #if !defined(TRACE_HEADER_MULTI_READ)
+static inline void
+trace_i915_request_cancel(struct i915_request *rq)
+{
+}
+
 static inline void
 trace_i915_request_guc_submit(struct i915_request *rq)
 {
-- 
2.34.1



* [PATCH 2/4] drm/i915/guc: Cancel requests immediately
  2022-01-24 15:01 [PATCH 0/4] Fix up request cancel Matthew Brost
  2022-01-24 15:01 ` [PATCH 1/4] drm/i915: Add request cancel low level trace point Matthew Brost
@ 2022-01-24 15:01 ` Matthew Brost
  2022-01-26 18:58   ` John Harrison
  2022-01-24 15:01 ` [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case Matthew Brost
  2022-01-24 15:01 ` [PATCH 4/4] drm/i915/selftests: Set preemption timeout to zero in cancel reset test Matthew Brost
  3 siblings, 1 reply; 15+ messages in thread
From: Matthew Brost @ 2022-01-24 15:01 UTC (permalink / raw)
  To: intel-gfx, dri-devel
  Cc: daniele.ceraolospurio, john.c.harrison, tvrtko.ursulin

Change the preemption timeout to the smallest possible value (1 us) when
disabling scheduling to cancel a request, and restore it after
cancellation. This not only cancels the request as fast as possible, it
also fixes a bug where the preemption timeout is 0, which results in the
schedule disable hanging forever.

Reported-by: Jani Saarinen <jani.saarinen@intel.com>
Fixes: 62eaf0ae217d4 ("drm/i915/guc: Support request cancellation")
Link: https://gitlab.freedesktop.org/drm/intel/-/issues/4960
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
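As a side note, a standalone sketch of the override/restore ordering this
patch implements; the names, values and printfs are placeholders, not
i915 code:

/*
 * Toy model only: save the context's configured preemption timeout,
 * force the smallest legal value (1 us) around the schedule disable,
 * then restore the original on unblock.
 */
#include <stdio.h>

struct ctx_policy {
	unsigned int preempt_timeout_us;	/* value handed to the firmware */
};

static void sched_disable_for_cancel(struct ctx_policy *p)
{
	unsigned int saved = p->preempt_timeout_us;

	p->preempt_timeout_us = 1;	/* 1 us: forces a near-immediate preempt */
	printf("schedule disable issued with timeout %u us\n",
	       p->preempt_timeout_us);

	/* ... schedule disable completes, request is cancelled ... */

	p->preempt_timeout_us = saved;	/* restored when the context is unblocked */
	printf("timeout restored to %u us\n", p->preempt_timeout_us);
}

int main(void)
{
	/* example only: a 640 ms timeout expressed in microseconds */
	struct ctx_policy policy = { .preempt_timeout_us = 640 * 1000 };

	sched_disable_for_cancel(&policy);
	return 0;
}
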
 drivers/gpu/drm/i915/gt/intel_context_types.h |  5 ++
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 46 +++++++++++--------
 2 files changed, 31 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 30cd81ad8911a..730998823dbea 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -198,6 +198,11 @@ struct intel_context {
 		 * each priority bucket
 		 */
 		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
+		/**
+		 * @preemption_timeout: preemption timeout of the context, used
+		 * to restore this value after request cancellation
+		 */
+		u32 preemption_timeout;
 	} guc_state;
 
 	struct {
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 3918f1be114fa..966947c450253 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -2147,7 +2147,8 @@ static inline u32 get_children_join_value(struct intel_context *ce,
 	return __get_parent_scratch(ce)->join[child_index].semaphore;
 }
 
-static void guc_context_policy_init(struct intel_engine_cs *engine,
+static void guc_context_policy_init(struct intel_context *ce,
+				    struct intel_engine_cs *engine,
 				    struct guc_lrc_desc *desc)
 {
 	desc->policy_flags = 0;
@@ -2157,7 +2158,8 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
 
 	/* NB: For both of these, zero means disabled. */
 	desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
-	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+	ce->guc_state.preemption_timeout = engine->props.preempt_timeout_ms * 1000;
+	desc->preemption_timeout = ce->guc_state.preemption_timeout;
 }
 
 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
@@ -2193,7 +2195,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	desc->hw_context_desc = ce->lrc.lrca;
 	desc->priority = ce->guc_state.prio;
 	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
-	guc_context_policy_init(engine, desc);
+	guc_context_policy_init(ce, engine, desc);
 
 	/*
 	 * If context is a parent, we need to register a process descriptor
@@ -2226,7 +2228,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 			desc->hw_context_desc = child->lrc.lrca;
 			desc->priority = ce->guc_state.prio;
 			desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
-			guc_context_policy_init(engine, desc);
+			guc_context_policy_init(child, engine, desc);
 		}
 
 		clear_children_join_go_memory(ce);
@@ -2409,6 +2411,19 @@ static u16 prep_context_pending_disable(struct intel_context *ce)
 	return ce->guc_id.id;
 }
 
+static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
+						 u16 guc_id,
+						 u32 preemption_timeout)
+{
+	u32 action[] = {
+		INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
+		guc_id,
+		preemption_timeout
+	};
+
+	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
+}
+
 static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
 {
 	struct intel_guc *guc = ce_to_guc(ce);
@@ -2442,8 +2457,10 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
 
 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
-	with_intel_runtime_pm(runtime_pm, wakeref)
+	with_intel_runtime_pm(runtime_pm, wakeref) {
+		__guc_context_set_preemption_timeout(guc, guc_id, 1);
 		__guc_context_sched_disable(guc, ce, guc_id);
+	}
 
 	return &ce->guc_state.blocked;
 }
@@ -2492,8 +2509,10 @@ static void guc_context_unblock(struct intel_context *ce)
 
 	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
-	if (enable) {
-		with_intel_runtime_pm(runtime_pm, wakeref)
+	with_intel_runtime_pm(runtime_pm, wakeref) {
+		__guc_context_set_preemption_timeout(guc, ce->guc_id.id,
+						     ce->guc_state.preemption_timeout);
+		if (enable)
 			__guc_context_sched_enable(guc, ce);
 	}
 }
@@ -2521,19 +2540,6 @@ static void guc_context_cancel_request(struct intel_context *ce,
 	}
 }
 
-static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
-						 u16 guc_id,
-						 u32 preemption_timeout)
-{
-	u32 action[] = {
-		INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
-		guc_id,
-		preemption_timeout
-	};
-
-	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
-}
-
 static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
 {
 	struct intel_guc *guc = ce_to_guc(ce);
-- 
2.34.1



* [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case
  2022-01-24 15:01 [PATCH 0/4] Fix up request cancel Matthew Brost
  2022-01-24 15:01 ` [PATCH 1/4] drm/i915: Add request cancel low level trace point Matthew Brost
  2022-01-24 15:01 ` [PATCH 2/4] drm/i915/guc: Cancel requests immediately Matthew Brost
@ 2022-01-24 15:01 ` Matthew Brost
  2022-01-25 15:27   ` [Intel-gfx] " Tvrtko Ursulin
  2022-01-26 19:03   ` John Harrison
  2022-01-24 15:01 ` [PATCH 4/4] drm/i915/selftests: Set preemption timeout to zero in cancel reset test Matthew Brost
  3 siblings, 2 replies; 15+ messages in thread
From: Matthew Brost @ 2022-01-24 15:01 UTC (permalink / raw)
  To: intel-gfx, dri-devel
  Cc: daniele.ceraolospurio, john.c.harrison, tvrtko.ursulin

More than one request can be submitted to a single ELSP at a time if
multiple requests are ready to run on the same context. When a request
is canceled it is marked bad, an idle pulse (a high priority kernel
request) is sent to the engine, and the execlists scheduler sees that
the running request is bad and sets the preemption timeout to the
minimum value (1 ms). This fails to work if multiple requests are
combined on the ELSP, as only the most recent request is stored in the
execlists schedule (the request stored in the ELSP isn't marked bad,
thus the preemption timeout isn't set to the minimum value). If the
preempt timeout is configured to zero, the engine is permanently hung.
This is shown by an upcoming selftest.

To work around this, mark the idle pulse with a flag that forces a
preempt with the minimum value.

Fixes: 38b237eab2bc7 ("drm/i915: Individual request cancellation")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
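A standalone model of the timeout selection this patch changes may help
illustrate the corner case; the names loosely mirror the driver but the
snippet is a self-contained sketch, not i915 code:

/*
 * Toy model: previously only the last coalesced request was examined,
 * so a cancelled (bad) request earlier in the ELSP never shortened the
 * timer; with preempt_timeout_ms == 0 the timer then never fires at
 * all. The force_preempt flag carried by the pulse restores the
 * minimum timeout in that case.
 */
#include <stdbool.h>
#include <stdio.h>

struct toy_request {
	bool bad;	/* i.e. cancelled */
};

static unsigned long pick_preempt_timeout(const struct toy_request *last,
					   unsigned long preempt_timeout_ms,
					   bool force_preempt)
{
	if (!last)
		return 0;

	if (last->bad || force_preempt)
		return 1;	/* minimum, in ms */

	return preempt_timeout_ms;
}

int main(void)
{
	/* the cancelled request sits behind this one, so 'bad' is false */
	struct toy_request last = { .bad = false };

	printf("without forced pulse: %lu ms\n",
	       pick_preempt_timeout(&last, 0, false));	/* 0 == timer disabled */
	printf("with forced pulse:    %lu ms\n",
	       pick_preempt_timeout(&last, 0, true));
	return 0;
}
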
 .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 23 +++++++++++++++----
 .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  1 +
 .../drm/i915/gt/intel_execlists_submission.c  | 18 ++++++++++-----
 drivers/gpu/drm/i915/i915_request.h           |  6 +++++
 4 files changed, 38 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index a3698f611f457..efd1c719b4072 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -243,7 +243,8 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
 	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
 }
 
-static int __intel_engine_pulse(struct intel_engine_cs *engine)
+static int __intel_engine_pulse(struct intel_engine_cs *engine,
+				bool force_preempt)
 {
 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
 	struct intel_context *ce = engine->kernel_context;
@@ -258,6 +259,8 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
 		return PTR_ERR(rq);
 
 	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
+	if (force_preempt)
+		__set_bit(I915_FENCE_FLAG_FORCE_PREEMPT, &rq->fence.flags);
 
 	heartbeat_commit(rq, &attr);
 	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
@@ -299,7 +302,7 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
 
 		/* recheck current execution */
 		if (intel_engine_has_preemption(engine)) {
-			err = __intel_engine_pulse(engine);
+			err = __intel_engine_pulse(engine, false);
 			if (err)
 				set_heartbeat(engine, saved);
 		}
@@ -312,7 +315,8 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
 	return err;
 }
 
-int intel_engine_pulse(struct intel_engine_cs *engine)
+static int _intel_engine_pulse(struct intel_engine_cs *engine,
+			       bool force_preempt)
 {
 	struct intel_context *ce = engine->kernel_context;
 	int err;
@@ -325,7 +329,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 
 	err = -EINTR;
 	if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
-		err = __intel_engine_pulse(engine);
+		err = __intel_engine_pulse(engine, force_preempt);
 		mutex_unlock(&ce->timeline->mutex);
 	}
 
@@ -334,6 +338,17 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 	return err;
 }
 
+int intel_engine_pulse(struct intel_engine_cs *engine)
+{
+	return _intel_engine_pulse(engine, false);
+}
+
+
+int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine)
+{
+	return _intel_engine_pulse(engine, true);
+}
+
 int intel_engine_flush_barriers(struct intel_engine_cs *engine)
 {
 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
index 5da6d809a87a2..d9c8386754cb3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -21,6 +21,7 @@ void intel_gt_park_heartbeats(struct intel_gt *gt);
 void intel_gt_unpark_heartbeats(struct intel_gt *gt);
 
 int intel_engine_pulse(struct intel_engine_cs *engine);
+int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine);
 int intel_engine_flush_barriers(struct intel_engine_cs *engine);
 
 #endif /* INTEL_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index 960a9aaf4f3a3..f0c2024058731 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -1222,26 +1222,29 @@ static void record_preemption(struct intel_engine_execlists *execlists)
 }
 
 static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
-					    const struct i915_request *rq)
+					    const struct i915_request *rq,
+					    bool force_preempt)
 {
 	if (!rq)
 		return 0;
 
 	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
-	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
+	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq) ||
+		     force_preempt))
 		return 1;
 
 	return READ_ONCE(engine->props.preempt_timeout_ms);
 }
 
 static void set_preempt_timeout(struct intel_engine_cs *engine,
-				const struct i915_request *rq)
+				const struct i915_request *rq,
+				bool force_preempt)
 {
 	if (!intel_engine_has_preempt_reset(engine))
 		return;
 
 	set_timer_ms(&engine->execlists.preempt,
-		     active_preempt_timeout(engine, rq));
+		     active_preempt_timeout(engine, rq, force_preempt));
 }
 
 static bool completed(const struct i915_request *rq)
@@ -1584,12 +1587,15 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	    memcmp(active,
 		   execlists->pending,
 		   (port - execlists->pending) * sizeof(*port))) {
+		bool force_preempt = test_bit(I915_FENCE_FLAG_FORCE_PREEMPT,
+					      &last->fence.flags);
+
 		*port = NULL;
 		while (port-- != execlists->pending)
 			execlists_schedule_in(*port, port - execlists->pending);
 
 		WRITE_ONCE(execlists->yield, -1);
-		set_preempt_timeout(engine, *active);
+		set_preempt_timeout(engine, *active, force_preempt);
 		execlists_submit_ports(engine);
 	} else {
 		ring_set_paused(engine, 0);
@@ -2594,7 +2600,7 @@ static void execlists_context_cancel_request(struct intel_context *ce,
 
 	i915_request_active_engine(rq, &engine);
 
-	if (engine && intel_engine_pulse(engine))
+	if (engine && intel_engine_pulse_force_preempt(engine))
 		intel_gt_handle_error(engine->gt, engine->mask, 0,
 				      "request cancellation by %s",
 				      current->comm);
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 28b1f9db54875..7e6312233d4c7 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -170,6 +170,12 @@ enum {
 	 * fence (dma_fence_array) and i915 generated for parallel submission.
 	 */
 	I915_FENCE_FLAG_COMPOSITE,
+
+	/*
+	 * I915_FENCE_FLAG_FORCE_PREEMPT - Force preempt immediately regardless
+	 * of preempt timeout configuration
+	 */
+	I915_FENCE_FLAG_FORCE_PREEMPT,
 };
 
 /**
-- 
2.34.1



* [PATCH 4/4] drm/i915/selftests: Set preemption timeout to zero in cancel reset test
  2022-01-24 15:01 [PATCH 0/4] Fix up request cancel Matthew Brost
                   ` (2 preceding siblings ...)
  2022-01-24 15:01 ` [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case Matthew Brost
@ 2022-01-24 15:01 ` Matthew Brost
  3 siblings, 0 replies; 15+ messages in thread
From: Matthew Brost @ 2022-01-24 15:01 UTC (permalink / raw)
  To: intel-gfx, dri-devel
  Cc: daniele.ceraolospurio, john.c.harrison, tvrtko.ursulin

Set the preemption timeout to zero to prove that request cancellation
works with preemption disabled. This also seals a race between a
possible preemption and request cancellation.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/i915/selftests/i915_request.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 2a99dd7c2fe8a..e522e24129f9b 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -790,8 +790,9 @@ static int __cancel_completed(struct intel_engine_cs *engine)
  * wait for spinner to start, create a NOP request and submit it, cancel the
  * spinner, wait for spinner to complete and verify it failed with an error,
  * finally wait for NOP request to complete verify it succeeded without an
- * error. Preemption timeout also reduced / restored so test runs in a timely
- * maner.
+ * error. Preemption timeout also set to zero to ensure cancellation works with
+ * preemption disabled and to ensure the NOP request doesn't trigger a
+ * preemption on the spinner sealing a race between a preemption and the cancel.
  */
 static int __cancel_reset(struct drm_i915_private *i915,
 			  struct intel_engine_cs *engine)
@@ -807,7 +808,7 @@ static int __cancel_reset(struct drm_i915_private *i915,
 		return 0;
 
 	preempt_timeout_ms = engine->props.preempt_timeout_ms;
-	engine->props.preempt_timeout_ms = 100;
+	engine->props.preempt_timeout_ms = 0;
 
 	if (igt_spinner_init(&spin, engine->gt))
 		goto out_restore;
-- 
2.34.1



* Re: [Intel-gfx] [PATCH 1/4] drm/i915: Add request cancel low level trace point
  2022-01-24 15:01 ` [PATCH 1/4] drm/i915: Add request cancel low level trace point Matthew Brost
@ 2022-01-25 12:27   ` Tvrtko Ursulin
  2022-01-25 16:39     ` Matthew Brost
  0 siblings, 1 reply; 15+ messages in thread
From: Tvrtko Ursulin @ 2022-01-25 12:27 UTC (permalink / raw)
  To: Matthew Brost, intel-gfx, dri-devel


On 24/01/2022 15:01, Matthew Brost wrote:
> Add request cancel trace point guarded by
> CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINT.

Okay-ish I guess (there is a pr_notice at the only real caller, but I
suppose you want it for selftests? Oh yes, the why is missing from the
commit message), but I'd emit it from i915_request_cancel since that's
what the tracepoint is called, so it fits.

Regards,

Tvrtko

> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_context.h |  1 +
>   drivers/gpu/drm/i915/i915_trace.h       | 10 ++++++++++
>   2 files changed, 11 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
> index d8c74bbf9aae2..3aed4d77f116c 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context.h
> @@ -124,6 +124,7 @@ intel_context_is_pinned(struct intel_context *ce)
>   static inline void intel_context_cancel_request(struct intel_context *ce,
>   						struct i915_request *rq)
>   {
> +	trace_i915_request_cancel(rq);
>   	GEM_BUG_ON(!ce->ops->cancel_request);
>   	return ce->ops->cancel_request(ce, rq);
>   }
> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> index 37b5c9e9d260e..d0a11a8bb0ca3 100644
> --- a/drivers/gpu/drm/i915/i915_trace.h
> +++ b/drivers/gpu/drm/i915/i915_trace.h
> @@ -324,6 +324,11 @@ DEFINE_EVENT(i915_request, i915_request_add,
>   );
>   
>   #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
> +DEFINE_EVENT(i915_request, i915_request_cancel,
> +	     TP_PROTO(struct i915_request *rq),
> +	     TP_ARGS(rq)
> +);
> +
>   DEFINE_EVENT(i915_request, i915_request_guc_submit,
>   	     TP_PROTO(struct i915_request *rq),
>   	     TP_ARGS(rq)
> @@ -497,6 +502,11 @@ DEFINE_EVENT(intel_context, intel_context_do_unpin,
>   
>   #else
>   #if !defined(TRACE_HEADER_MULTI_READ)
> +static inline void
> +trace_i915_request_cancel(struct i915_request *rq)
> +{
> +}
> +
>   static inline void
>   trace_i915_request_guc_submit(struct i915_request *rq)
>   {
> 


* Re: [Intel-gfx] [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case
  2022-01-24 15:01 ` [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case Matthew Brost
@ 2022-01-25 15:27   ` Tvrtko Ursulin
  2022-01-25 16:32     ` Matthew Brost
  2022-01-26 19:03   ` John Harrison
  1 sibling, 1 reply; 15+ messages in thread
From: Tvrtko Ursulin @ 2022-01-25 15:27 UTC (permalink / raw)
  To: Matthew Brost, intel-gfx, dri-devel


On 24/01/2022 15:01, Matthew Brost wrote:
> More than 1 request can be submitted to a single ELSP at a time if
> multiple requests are ready run to on the same context. When a request
> is canceled it is marked bad, an idle pulse is triggered to the engine
> (high priority kernel request), the execlists scheduler sees that
> running request is bad and sets preemption timeout to minimum value (1
> ms). This fails to work if multiple requests are combined on the ELSP as
> only the most recent request is stored in the execlists schedule (the
> request stored in the ELSP isn't marked bad, thus preemption timeout
> isn't set to the minimum value). If the preempt timeout is configured to
> zero, the engine is permanently hung. This is shown by an upcoming
> selftest.
> 
> To work around this, mark the idle pulse with a flag to force a preempt
> with the minimum value.

A couple of quick questions first before I find time to dig deeper.

First about the "permanently hung" statement. How permanent? Does the 
heartbeat eventually resolve it and if not why not? Naive view is that 
missed heartbeats would identify the stuck non-preemptible request and 
then engine reset would skip over it.

If it does resolve, then the problem is only that request timeout works 
less well if someone set preempt timeout to zero? Which may not be as 
bad, since request timeout was never about any time guarantees.

Regards,

Tvrtko

> 
> Fixes: 38b237eab2bc7 ("drm/i915: Individual request cancellation")
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 23 +++++++++++++++----
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  1 +
>   .../drm/i915/gt/intel_execlists_submission.c  | 18 ++++++++++-----
>   drivers/gpu/drm/i915/i915_request.h           |  6 +++++
>   4 files changed, 38 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> index a3698f611f457..efd1c719b4072 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> @@ -243,7 +243,8 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
>   	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
>   }
>   
> -static int __intel_engine_pulse(struct intel_engine_cs *engine)
> +static int __intel_engine_pulse(struct intel_engine_cs *engine,
> +				bool force_preempt)
>   {
>   	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
>   	struct intel_context *ce = engine->kernel_context;
> @@ -258,6 +259,8 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
>   		return PTR_ERR(rq);
>   
>   	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
> +	if (force_preempt)
> +		__set_bit(I915_FENCE_FLAG_FORCE_PREEMPT, &rq->fence.flags);
>   
>   	heartbeat_commit(rq, &attr);
>   	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
> @@ -299,7 +302,7 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
>   
>   		/* recheck current execution */
>   		if (intel_engine_has_preemption(engine)) {
> -			err = __intel_engine_pulse(engine);
> +			err = __intel_engine_pulse(engine, false);
>   			if (err)
>   				set_heartbeat(engine, saved);
>   		}
> @@ -312,7 +315,8 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
>   	return err;
>   }
>   
> -int intel_engine_pulse(struct intel_engine_cs *engine)
> +static int _intel_engine_pulse(struct intel_engine_cs *engine,
> +			       bool force_preempt)
>   {
>   	struct intel_context *ce = engine->kernel_context;
>   	int err;
> @@ -325,7 +329,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
>   
>   	err = -EINTR;
>   	if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
> -		err = __intel_engine_pulse(engine);
> +		err = __intel_engine_pulse(engine, force_preempt);
>   		mutex_unlock(&ce->timeline->mutex);
>   	}
>   
> @@ -334,6 +338,17 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
>   	return err;
>   }
>   
> +int intel_engine_pulse(struct intel_engine_cs *engine)
> +{
> +	return _intel_engine_pulse(engine, false);
> +}
> +
> +
> +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine)
> +{
> +	return _intel_engine_pulse(engine, true);
> +}
> +
>   int intel_engine_flush_barriers(struct intel_engine_cs *engine)
>   {
>   	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> index 5da6d809a87a2..d9c8386754cb3 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> @@ -21,6 +21,7 @@ void intel_gt_park_heartbeats(struct intel_gt *gt);
>   void intel_gt_unpark_heartbeats(struct intel_gt *gt);
>   
>   int intel_engine_pulse(struct intel_engine_cs *engine);
> +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine);
>   int intel_engine_flush_barriers(struct intel_engine_cs *engine);
>   
>   #endif /* INTEL_ENGINE_HEARTBEAT_H */
> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> index 960a9aaf4f3a3..f0c2024058731 100644
> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> @@ -1222,26 +1222,29 @@ static void record_preemption(struct intel_engine_execlists *execlists)
>   }
>   
>   static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
> -					    const struct i915_request *rq)
> +					    const struct i915_request *rq,
> +					    bool force_preempt)
>   {
>   	if (!rq)
>   		return 0;
>   
>   	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
> -	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
> +	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq) ||
> +		     force_preempt))
>   		return 1;
>   
>   	return READ_ONCE(engine->props.preempt_timeout_ms);
>   }
>   
>   static void set_preempt_timeout(struct intel_engine_cs *engine,
> -				const struct i915_request *rq)
> +				const struct i915_request *rq,
> +				bool force_preempt)
>   {
>   	if (!intel_engine_has_preempt_reset(engine))
>   		return;
>   
>   	set_timer_ms(&engine->execlists.preempt,
> -		     active_preempt_timeout(engine, rq));
> +		     active_preempt_timeout(engine, rq, force_preempt));
>   }
>   
>   static bool completed(const struct i915_request *rq)
> @@ -1584,12 +1587,15 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   	    memcmp(active,
>   		   execlists->pending,
>   		   (port - execlists->pending) * sizeof(*port))) {
> +		bool force_preempt = test_bit(I915_FENCE_FLAG_FORCE_PREEMPT,
> +					      &last->fence.flags);
> +
>   		*port = NULL;
>   		while (port-- != execlists->pending)
>   			execlists_schedule_in(*port, port - execlists->pending);
>   
>   		WRITE_ONCE(execlists->yield, -1);
> -		set_preempt_timeout(engine, *active);
> +		set_preempt_timeout(engine, *active, force_preempt);
>   		execlists_submit_ports(engine);
>   	} else {
>   		ring_set_paused(engine, 0);
> @@ -2594,7 +2600,7 @@ static void execlists_context_cancel_request(struct intel_context *ce,
>   
>   	i915_request_active_engine(rq, &engine);
>   
> -	if (engine && intel_engine_pulse(engine))
> +	if (engine && intel_engine_pulse_force_preempt(engine))
>   		intel_gt_handle_error(engine->gt, engine->mask, 0,
>   				      "request cancellation by %s",
>   				      current->comm);
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 28b1f9db54875..7e6312233d4c7 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -170,6 +170,12 @@ enum {
>   	 * fence (dma_fence_array) and i915 generated for parallel submission.
>   	 */
>   	I915_FENCE_FLAG_COMPOSITE,
> +
> +	/*
> +	 * I915_FENCE_FLAG_FORCE_PREEMPT - Force preempt immediately regardless
> +	 * of preempt timeout configuration
> +	 */
> +	I915_FENCE_FLAG_FORCE_PREEMPT,
>   };
>   
>   /**
> 


* Re: [Intel-gfx] [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case
  2022-01-25 15:27   ` [Intel-gfx] " Tvrtko Ursulin
@ 2022-01-25 16:32     ` Matthew Brost
  2022-01-26 10:38       ` Tvrtko Ursulin
  0 siblings, 1 reply; 15+ messages in thread
From: Matthew Brost @ 2022-01-25 16:32 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx, dri-devel

On Tue, Jan 25, 2022 at 03:27:31PM +0000, Tvrtko Ursulin wrote:
> 
> On 24/01/2022 15:01, Matthew Brost wrote:
> > More than 1 request can be submitted to a single ELSP at a time if
> > multiple requests are ready run to on the same context. When a request
> > is canceled it is marked bad, an idle pulse is triggered to the engine
> > (high priority kernel request), the execlists scheduler sees that
> > running request is bad and sets preemption timeout to minimum value (1
> > ms). This fails to work if multiple requests are combined on the ELSP as
> > only the most recent request is stored in the execlists schedule (the
> > request stored in the ELSP isn't marked bad, thus preemption timeout
> > isn't set to the minimum value). If the preempt timeout is configured to
> > zero, the engine is permanently hung. This is shown by an upcoming
> > selftest.
> > 
> > To work around this, mark the idle pulse with a flag to force a preempt
> > with the minimum value.
> 
> A couple of quick questions first before I find time to dig deeper.
> 
> First about the "permanently hung" statement. How permanent? Does the
> heartbeat eventually resolve it and if not why not? Naive view is that
> missed heartbeats would identify the stuck non-preemptible request and then
> engine reset would skip over it.
> 

Yes, if the heartbeat is enabled then the heartbeat would eventually
recover the engine. It is not always enabled though...

> If it does resolve, then the problem is only that request timeout works less
> well if someone set preempt timeout to zero? Which may not be as bad, since
> request timeout was never about any time guarantees.
>

Yes, if the heartbeat is enabled the problem isn't as bad.

Matt

> Regards,
> 
> Tvrtko
> 
> > 
> > Fixes: 38b237eab2bc7 ("drm/i915: Individual request cancellation")
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >   .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 23 +++++++++++++++----
> >   .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  1 +
> >   .../drm/i915/gt/intel_execlists_submission.c  | 18 ++++++++++-----
> >   drivers/gpu/drm/i915/i915_request.h           |  6 +++++
> >   4 files changed, 38 insertions(+), 10 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> > index a3698f611f457..efd1c719b4072 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> > @@ -243,7 +243,8 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
> >   	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
> >   }
> > -static int __intel_engine_pulse(struct intel_engine_cs *engine)
> > +static int __intel_engine_pulse(struct intel_engine_cs *engine,
> > +				bool force_preempt)
> >   {
> >   	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
> >   	struct intel_context *ce = engine->kernel_context;
> > @@ -258,6 +259,8 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
> >   		return PTR_ERR(rq);
> >   	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
> > +	if (force_preempt)
> > +		__set_bit(I915_FENCE_FLAG_FORCE_PREEMPT, &rq->fence.flags);
> >   	heartbeat_commit(rq, &attr);
> >   	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
> > @@ -299,7 +302,7 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
> >   		/* recheck current execution */
> >   		if (intel_engine_has_preemption(engine)) {
> > -			err = __intel_engine_pulse(engine);
> > +			err = __intel_engine_pulse(engine, false);
> >   			if (err)
> >   				set_heartbeat(engine, saved);
> >   		}
> > @@ -312,7 +315,8 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
> >   	return err;
> >   }
> > -int intel_engine_pulse(struct intel_engine_cs *engine)
> > +static int _intel_engine_pulse(struct intel_engine_cs *engine,
> > +			       bool force_preempt)
> >   {
> >   	struct intel_context *ce = engine->kernel_context;
> >   	int err;
> > @@ -325,7 +329,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
> >   	err = -EINTR;
> >   	if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
> > -		err = __intel_engine_pulse(engine);
> > +		err = __intel_engine_pulse(engine, force_preempt);
> >   		mutex_unlock(&ce->timeline->mutex);
> >   	}
> > @@ -334,6 +338,17 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
> >   	return err;
> >   }
> > +int intel_engine_pulse(struct intel_engine_cs *engine)
> > +{
> > +	return _intel_engine_pulse(engine, false);
> > +}
> > +
> > +
> > +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine)
> > +{
> > +	return _intel_engine_pulse(engine, true);
> > +}
> > +
> >   int intel_engine_flush_barriers(struct intel_engine_cs *engine)
> >   {
> >   	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> > index 5da6d809a87a2..d9c8386754cb3 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> > @@ -21,6 +21,7 @@ void intel_gt_park_heartbeats(struct intel_gt *gt);
> >   void intel_gt_unpark_heartbeats(struct intel_gt *gt);
> >   int intel_engine_pulse(struct intel_engine_cs *engine);
> > +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine);
> >   int intel_engine_flush_barriers(struct intel_engine_cs *engine);
> >   #endif /* INTEL_ENGINE_HEARTBEAT_H */
> > diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> > index 960a9aaf4f3a3..f0c2024058731 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> > @@ -1222,26 +1222,29 @@ static void record_preemption(struct intel_engine_execlists *execlists)
> >   }
> >   static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
> > -					    const struct i915_request *rq)
> > +					    const struct i915_request *rq,
> > +					    bool force_preempt)
> >   {
> >   	if (!rq)
> >   		return 0;
> >   	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
> > -	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
> > +	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq) ||
> > +		     force_preempt))
> >   		return 1;
> >   	return READ_ONCE(engine->props.preempt_timeout_ms);
> >   }
> >   static void set_preempt_timeout(struct intel_engine_cs *engine,
> > -				const struct i915_request *rq)
> > +				const struct i915_request *rq,
> > +				bool force_preempt)
> >   {
> >   	if (!intel_engine_has_preempt_reset(engine))
> >   		return;
> >   	set_timer_ms(&engine->execlists.preempt,
> > -		     active_preempt_timeout(engine, rq));
> > +		     active_preempt_timeout(engine, rq, force_preempt));
> >   }
> >   static bool completed(const struct i915_request *rq)
> > @@ -1584,12 +1587,15 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> >   	    memcmp(active,
> >   		   execlists->pending,
> >   		   (port - execlists->pending) * sizeof(*port))) {
> > +		bool force_preempt = test_bit(I915_FENCE_FLAG_FORCE_PREEMPT,
> > +					      &last->fence.flags);
> > +
> >   		*port = NULL;
> >   		while (port-- != execlists->pending)
> >   			execlists_schedule_in(*port, port - execlists->pending);
> >   		WRITE_ONCE(execlists->yield, -1);
> > -		set_preempt_timeout(engine, *active);
> > +		set_preempt_timeout(engine, *active, force_preempt);
> >   		execlists_submit_ports(engine);
> >   	} else {
> >   		ring_set_paused(engine, 0);
> > @@ -2594,7 +2600,7 @@ static void execlists_context_cancel_request(struct intel_context *ce,
> >   	i915_request_active_engine(rq, &engine);
> > -	if (engine && intel_engine_pulse(engine))
> > +	if (engine && intel_engine_pulse_force_preempt(engine))
> >   		intel_gt_handle_error(engine->gt, engine->mask, 0,
> >   				      "request cancellation by %s",
> >   				      current->comm);
> > diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> > index 28b1f9db54875..7e6312233d4c7 100644
> > --- a/drivers/gpu/drm/i915/i915_request.h
> > +++ b/drivers/gpu/drm/i915/i915_request.h
> > @@ -170,6 +170,12 @@ enum {
> >   	 * fence (dma_fence_array) and i915 generated for parallel submission.
> >   	 */
> >   	I915_FENCE_FLAG_COMPOSITE,
> > +
> > +	/*
> > +	 * I915_FENCE_FLAG_FORCE_PREEMPT - Force preempt immediately regardless
> > +	 * of preempt timeout configuration
> > +	 */
> > +	I915_FENCE_FLAG_FORCE_PREEMPT,
> >   };
> >   /**
> > 


* Re: [Intel-gfx] [PATCH 1/4] drm/i915: Add request cancel low level trace point
  2022-01-25 12:27   ` [Intel-gfx] " Tvrtko Ursulin
@ 2022-01-25 16:39     ` Matthew Brost
  2022-01-26 10:29       ` Tvrtko Ursulin
  0 siblings, 1 reply; 15+ messages in thread
From: Matthew Brost @ 2022-01-25 16:39 UTC (permalink / raw)
  To: Tvrtko Ursulin; +Cc: intel-gfx, dri-devel

On Tue, Jan 25, 2022 at 12:27:43PM +0000, Tvrtko Ursulin wrote:
> 
> On 24/01/2022 15:01, Matthew Brost wrote:
> > Add request cancel trace point guarded by
> > CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINT.
> 
> Okay-ish I guess (There is pr_notice with the only real caller, but I
> suppose you want it for selftests? Oh yes, why is missing from the commit
> message.), but I'd emit it from i915_request_cancel since that's what the
> tracepoint is called so it fits.
> 

I had this tracepoint at one point but somehow it got lost during
upstreaming. I noticed it was missing while debugging the issue linked
below, so I brought it back in.

I generally use low level tracepoints for debug, so a pr_notice doesn't
really help there.

Link: https://gitlab.freedesktop.org/drm/intel/-/issues/4960

Matt

> Regards,
> 
> Tvrtko
> 
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gt/intel_context.h |  1 +
> >   drivers/gpu/drm/i915/i915_trace.h       | 10 ++++++++++
> >   2 files changed, 11 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
> > index d8c74bbf9aae2..3aed4d77f116c 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_context.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_context.h
> > @@ -124,6 +124,7 @@ intel_context_is_pinned(struct intel_context *ce)
> >   static inline void intel_context_cancel_request(struct intel_context *ce,
> >   						struct i915_request *rq)
> >   {
> > +	trace_i915_request_cancel(rq);
> >   	GEM_BUG_ON(!ce->ops->cancel_request);
> >   	return ce->ops->cancel_request(ce, rq);
> >   }
> > diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
> > index 37b5c9e9d260e..d0a11a8bb0ca3 100644
> > --- a/drivers/gpu/drm/i915/i915_trace.h
> > +++ b/drivers/gpu/drm/i915/i915_trace.h
> > @@ -324,6 +324,11 @@ DEFINE_EVENT(i915_request, i915_request_add,
> >   );
> >   #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
> > +DEFINE_EVENT(i915_request, i915_request_cancel,
> > +	     TP_PROTO(struct i915_request *rq),
> > +	     TP_ARGS(rq)
> > +);
> > +
> >   DEFINE_EVENT(i915_request, i915_request_guc_submit,
> >   	     TP_PROTO(struct i915_request *rq),
> >   	     TP_ARGS(rq)
> > @@ -497,6 +502,11 @@ DEFINE_EVENT(intel_context, intel_context_do_unpin,
> >   #else
> >   #if !defined(TRACE_HEADER_MULTI_READ)
> > +static inline void
> > +trace_i915_request_cancel(struct i915_request *rq)
> > +{
> > +}
> > +
> >   static inline void
> >   trace_i915_request_guc_submit(struct i915_request *rq)
> >   {
> > 


* Re: [Intel-gfx] [PATCH 1/4] drm/i915: Add request cancel low level trace point
  2022-01-25 16:39     ` Matthew Brost
@ 2022-01-26 10:29       ` Tvrtko Ursulin
  0 siblings, 0 replies; 15+ messages in thread
From: Tvrtko Ursulin @ 2022-01-26 10:29 UTC (permalink / raw)
  To: Matthew Brost; +Cc: intel-gfx, dri-devel


On 25/01/2022 16:39, Matthew Brost wrote:
> On Tue, Jan 25, 2022 at 12:27:43PM +0000, Tvrtko Ursulin wrote:
>>
>> On 24/01/2022 15:01, Matthew Brost wrote:
>>> Add request cancel trace point guarded by
>>> CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINT.
>>
>> Okay-ish I guess (There is pr_notice with the only real caller, but I
>> suppose you want it for selftests? Oh yes, why is missing from the commit
>> message.), but I'd emit it from i915_request_cancel since that's what the
>> tracepoint is called so it fits.
>>
> 
> I had this tracepoint at one point but somehow during the upstreaming it
> got lost. Noticed when debugging the below issue this tracepoint wasn't
> present, so I brought it back in.
> 
> I generally use low level tracepoints for debug, so a pr_notice doesn't
> really help there.
> 
> Link: https://gitlab.freedesktop.org/drm/intel/-/issues/4960

This was a GuC backend bug, right? And the log shows this:

<7> [275.431050] [drm:eb_lookup_vmas [i915]] EINVAL at eb_validate_vma:514
<5> [295.433920] Fence expiration time out i915-0000:03:00.0:kms_vblank[1038]:2!
<3> [332.736763] INFO: task kworker/2:1:55 blocked for more than 30 seconds.

So pr_notice worked. I am not opposed to the tracepoint, just put a
solid why in the commit message, and if the tracepoint is called
i915_request_cancel it should be emitted from i915_request_cancel().

Regards,

Tvrtko

> 
> Matt
> 
>> Regards,
>>
>> Tvrtko
>>
>>> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
>>> ---
>>>    drivers/gpu/drm/i915/gt/intel_context.h |  1 +
>>>    drivers/gpu/drm/i915/i915_trace.h       | 10 ++++++++++
>>>    2 files changed, 11 insertions(+)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
>>> index d8c74bbf9aae2..3aed4d77f116c 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_context.h
>>> +++ b/drivers/gpu/drm/i915/gt/intel_context.h
>>> @@ -124,6 +124,7 @@ intel_context_is_pinned(struct intel_context *ce)
>>>    static inline void intel_context_cancel_request(struct intel_context *ce,
>>>    						struct i915_request *rq)
>>>    {
>>> +	trace_i915_request_cancel(rq);
>>>    	GEM_BUG_ON(!ce->ops->cancel_request);
>>>    	return ce->ops->cancel_request(ce, rq);
>>>    }
>>> diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
>>> index 37b5c9e9d260e..d0a11a8bb0ca3 100644
>>> --- a/drivers/gpu/drm/i915/i915_trace.h
>>> +++ b/drivers/gpu/drm/i915/i915_trace.h
>>> @@ -324,6 +324,11 @@ DEFINE_EVENT(i915_request, i915_request_add,
>>>    );
>>>    #if defined(CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS)
>>> +DEFINE_EVENT(i915_request, i915_request_cancel,
>>> +	     TP_PROTO(struct i915_request *rq),
>>> +	     TP_ARGS(rq)
>>> +);
>>> +
>>>    DEFINE_EVENT(i915_request, i915_request_guc_submit,
>>>    	     TP_PROTO(struct i915_request *rq),
>>>    	     TP_ARGS(rq)
>>> @@ -497,6 +502,11 @@ DEFINE_EVENT(intel_context, intel_context_do_unpin,
>>>    #else
>>>    #if !defined(TRACE_HEADER_MULTI_READ)
>>> +static inline void
>>> +trace_i915_request_cancel(struct i915_request *rq)
>>> +{
>>> +}
>>> +
>>>    static inline void
>>>    trace_i915_request_guc_submit(struct i915_request *rq)
>>>    {
>>>


* Re: [Intel-gfx] [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case
  2022-01-25 16:32     ` Matthew Brost
@ 2022-01-26 10:38       ` Tvrtko Ursulin
  0 siblings, 0 replies; 15+ messages in thread
From: Tvrtko Ursulin @ 2022-01-26 10:38 UTC (permalink / raw)
  To: Matthew Brost; +Cc: intel-gfx, dri-devel


On 25/01/2022 16:32, Matthew Brost wrote:
> On Tue, Jan 25, 2022 at 03:27:31PM +0000, Tvrtko Ursulin wrote:
>>
>> On 24/01/2022 15:01, Matthew Brost wrote:
>>> More than 1 request can be submitted to a single ELSP at a time if
>>> multiple requests are ready run to on the same context. When a request
>>> is canceled it is marked bad, an idle pulse is triggered to the engine
>>> (high priority kernel request), the execlists scheduler sees that
>>> running request is bad and sets preemption timeout to minimum value (1
>>> ms). This fails to work if multiple requests are combined on the ELSP as
>>> only the most recent request is stored in the execlists schedule (the
>>> request stored in the ELSP isn't marked bad, thus preemption timeout
>>> isn't set to the minimum value). If the preempt timeout is configured to
>>> zero, the engine is permanently hung. This is shown by an upcoming
>>> selftest.
>>>
>>> To work around this, mark the idle pulse with a flag to force a preempt
>>> with the minimum value.
>>
>> A couple of quick questions first before I find time to dig deeper.
>>
>> First about the "permanently hung" statement. How permanent? Does the
>> heartbeat eventually resolve it and if not why not? Naive view is that
>> missed heartbeats would identify the stuck non-preemptible request and then
>> engine reset would skip over it.
>>
> 
> Yes, if the heartbeat is enabled then the heartbeat would eventually
> recover the engine. It is not always enabled though...
> 
>> If it does resolve, then the problem is only that request timeout works less
>> well if someone set preempt timeout to zero? Which may not be as bad, since
>> request timeout was never about any time guarantees.
>>
> 
> Yes, if the heartbeat is enabled the problem isn't as bad.

Good, so commit message needs some work to be accurate.

On the technical side of the patch it looks reasonable to me. And the
idea that the cancellation pulse is made special also sounds plausible.
The question is whether we want to add code to support this, considering
the opens I have:

1)
Do we care about request cancellation working for non-preemptible 
batches, *if* someone explicitly disabled both preemption timeout and 
heartbeats?

2)
Do we care to improve the responsiveness of request cancellation if only 
preempt timeout was disabled?

Conclusions here will also dictate whether a Fixes: tag is warranted. Best
to avoid hairy backports if we decide it is not really needed.

Also, in the next patch you change one selftest to only run with preempt 
timeout disabled. I think it makes sense to have this test run in the 
default config (preempt timeout enabled) to reflect the typical 
configuration. You may add a second pass with it disabled to exercise
the corner case, again depending on the conclusions to the above
questions.
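
Purely as an illustration of the two-pass idea (a standalone sketch, not
i915 code), something along these lines:

#include <stdio.h>

/* Stand-in for __cancel_reset(); the timeout values are examples. */
static int run_cancel_scenario(unsigned int preempt_timeout_ms)
{
	printf("cancel scenario with preempt_timeout_ms=%u\n",
	       preempt_timeout_ms);
	return 0;	/* pretend it passed */
}

int main(void)
{
	/* first pass: default-style timeout; second pass: preemption disabled */
	static const unsigned int timeouts_ms[] = { 640, 0 };
	unsigned int i;

	for (i = 0; i < sizeof(timeouts_ms) / sizeof(timeouts_ms[0]); i++)
		if (run_cancel_scenario(timeouts_ms[i]))
			return 1;

	return 0;
}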

Regards,

Tvrtko

> 
> Matt
> 
>> Regards,
>>
>> Tvrtko
>>
>>>
>>> Fixes: 38b237eab2bc7 ("drm/i915: Individual request cancellation")
>>> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
>>> ---
>>>    .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 23 +++++++++++++++----
>>>    .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  1 +
>>>    .../drm/i915/gt/intel_execlists_submission.c  | 18 ++++++++++-----
>>>    drivers/gpu/drm/i915/i915_request.h           |  6 +++++
>>>    4 files changed, 38 insertions(+), 10 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
>>> index a3698f611f457..efd1c719b4072 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
>>> @@ -243,7 +243,8 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
>>>    	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
>>>    }
>>> -static int __intel_engine_pulse(struct intel_engine_cs *engine)
>>> +static int __intel_engine_pulse(struct intel_engine_cs *engine,
>>> +				bool force_preempt)
>>>    {
>>>    	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
>>>    	struct intel_context *ce = engine->kernel_context;
>>> @@ -258,6 +259,8 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
>>>    		return PTR_ERR(rq);
>>>    	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
>>> +	if (force_preempt)
>>> +		__set_bit(I915_FENCE_FLAG_FORCE_PREEMPT, &rq->fence.flags);
>>>    	heartbeat_commit(rq, &attr);
>>>    	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
>>> @@ -299,7 +302,7 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
>>>    		/* recheck current execution */
>>>    		if (intel_engine_has_preemption(engine)) {
>>> -			err = __intel_engine_pulse(engine);
>>> +			err = __intel_engine_pulse(engine, false);
>>>    			if (err)
>>>    				set_heartbeat(engine, saved);
>>>    		}
>>> @@ -312,7 +315,8 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
>>>    	return err;
>>>    }
>>> -int intel_engine_pulse(struct intel_engine_cs *engine)
>>> +static int _intel_engine_pulse(struct intel_engine_cs *engine,
>>> +			       bool force_preempt)
>>>    {
>>>    	struct intel_context *ce = engine->kernel_context;
>>>    	int err;
>>> @@ -325,7 +329,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
>>>    	err = -EINTR;
>>>    	if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
>>> -		err = __intel_engine_pulse(engine);
>>> +		err = __intel_engine_pulse(engine, force_preempt);
>>>    		mutex_unlock(&ce->timeline->mutex);
>>>    	}
>>> @@ -334,6 +338,17 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
>>>    	return err;
>>>    }
>>> +int intel_engine_pulse(struct intel_engine_cs *engine)
>>> +{
>>> +	return _intel_engine_pulse(engine, false);
>>> +}
>>> +
>>> +
>>> +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine)
>>> +{
>>> +	return _intel_engine_pulse(engine, true);
>>> +}
>>> +
>>>    int intel_engine_flush_barriers(struct intel_engine_cs *engine)
>>>    {
>>>    	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
>>> index 5da6d809a87a2..d9c8386754cb3 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
>>> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
>>> @@ -21,6 +21,7 @@ void intel_gt_park_heartbeats(struct intel_gt *gt);
>>>    void intel_gt_unpark_heartbeats(struct intel_gt *gt);
>>>    int intel_engine_pulse(struct intel_engine_cs *engine);
>>> +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine);
>>>    int intel_engine_flush_barriers(struct intel_engine_cs *engine);
>>>    #endif /* INTEL_ENGINE_HEARTBEAT_H */
>>> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
>>> index 960a9aaf4f3a3..f0c2024058731 100644
>>> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
>>> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
>>> @@ -1222,26 +1222,29 @@ static void record_preemption(struct intel_engine_execlists *execlists)
>>>    }
>>>    static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
>>> -					    const struct i915_request *rq)
>>> +					    const struct i915_request *rq,
>>> +					    bool force_preempt)
>>>    {
>>>    	if (!rq)
>>>    		return 0;
>>>    	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
>>> -	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
>>> +	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq) ||
>>> +		     force_preempt))
>>>    		return 1;
>>>    	return READ_ONCE(engine->props.preempt_timeout_ms);
>>>    }
>>>    static void set_preempt_timeout(struct intel_engine_cs *engine,
>>> -				const struct i915_request *rq)
>>> +				const struct i915_request *rq,
>>> +				bool force_preempt)
>>>    {
>>>    	if (!intel_engine_has_preempt_reset(engine))
>>>    		return;
>>>    	set_timer_ms(&engine->execlists.preempt,
>>> -		     active_preempt_timeout(engine, rq));
>>> +		     active_preempt_timeout(engine, rq, force_preempt));
>>>    }
>>>    static bool completed(const struct i915_request *rq)
>>> @@ -1584,12 +1587,15 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>>>    	    memcmp(active,
>>>    		   execlists->pending,
>>>    		   (port - execlists->pending) * sizeof(*port))) {
>>> +		bool force_preempt = test_bit(I915_FENCE_FLAG_FORCE_PREEMPT,
>>> +					      &last->fence.flags);
>>> +
>>>    		*port = NULL;
>>>    		while (port-- != execlists->pending)
>>>    			execlists_schedule_in(*port, port - execlists->pending);
>>>    		WRITE_ONCE(execlists->yield, -1);
>>> -		set_preempt_timeout(engine, *active);
>>> +		set_preempt_timeout(engine, *active, force_preempt);
>>>    		execlists_submit_ports(engine);
>>>    	} else {
>>>    		ring_set_paused(engine, 0);
>>> @@ -2594,7 +2600,7 @@ static void execlists_context_cancel_request(struct intel_context *ce,
>>>    	i915_request_active_engine(rq, &engine);
>>> -	if (engine && intel_engine_pulse(engine))
>>> +	if (engine && intel_engine_pulse_force_preempt(engine))
>>>    		intel_gt_handle_error(engine->gt, engine->mask, 0,
>>>    				      "request cancellation by %s",
>>>    				      current->comm);
>>> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
>>> index 28b1f9db54875..7e6312233d4c7 100644
>>> --- a/drivers/gpu/drm/i915/i915_request.h
>>> +++ b/drivers/gpu/drm/i915/i915_request.h
>>> @@ -170,6 +170,12 @@ enum {
>>>    	 * fence (dma_fence_array) and i915 generated for parallel submission.
>>>    	 */
>>>    	I915_FENCE_FLAG_COMPOSITE,
>>> +
>>> +	/*
>>> +	 * I915_FENCE_FLAG_FORCE_PREEMPT - Force preempt immediately regardless
>>> +	 * of preempt timeout configuration
>>> +	 */
>>> +	I915_FENCE_FLAG_FORCE_PREEMPT,
>>>    };
>>>    /**
>>>

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 2/4] drm/i915/guc: Cancel requests immediately
  2022-01-24 15:01 ` [PATCH 2/4] drm/i915/guc: Cancel requests immediately Matthew Brost
@ 2022-01-26 18:58   ` John Harrison
  2022-01-26 20:12     ` Matthew Brost
  0 siblings, 1 reply; 15+ messages in thread
From: John Harrison @ 2022-01-26 18:58 UTC (permalink / raw)
  To: Matthew Brost, intel-gfx, dri-devel; +Cc: daniele.ceraolospurio, tvrtko.ursulin

On 1/24/2022 07:01, Matthew Brost wrote:
> Change the preemption timeout to the smallest possible value (1 us) when
> disabling scheduling to cancel a request and restore it after
> cancellation. This not only cancels the request as fast as possible, it
> fixes a bug where the preemption timeout is 0 which results in the
> schedule disable hanging forever.
Shouldn't there be an 'if' in the above statement? The pre-emption 
timeout is not normally zero.

>
> Reported-by: Jani Saarinen <jani.saarinen@intel.com>
> Fixes: 62eaf0ae217d4 ("drm/i915/guc: Support request cancellation")
> Link: https://gitlab.freedesktop.org/drm/intel/-/issues/4960
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_context_types.h |  5 ++
>   .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 46 +++++++++++--------
>   2 files changed, 31 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
> index 30cd81ad8911a..730998823dbea 100644
> --- a/drivers/gpu/drm/i915/gt/intel_context_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
> @@ -198,6 +198,11 @@ struct intel_context {
>   		 * each priority bucket
>   		 */
>   		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
> +		/**
> +		 * @preemption_timeout: preemption timeout of the context, used
> +		 * to restore this value after request cancellation
> +		 */
> +		u32 preemption_timeout;
>   	} guc_state;
>   
>   	struct {
> diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> index 3918f1be114fa..966947c450253 100644
> --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> @@ -2147,7 +2147,8 @@ static inline u32 get_children_join_value(struct intel_context *ce,
>   	return __get_parent_scratch(ce)->join[child_index].semaphore;
>   }
>   
> -static void guc_context_policy_init(struct intel_engine_cs *engine,
> +static void guc_context_policy_init(struct intel_context *ce,
> +				    struct intel_engine_cs *engine,
>   				    struct guc_lrc_desc *desc)
Shouldn't engine be before ce? The more general structure usually goes 
first.

John.

>   {
>   	desc->policy_flags = 0;
> @@ -2157,7 +2158,8 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
>   
>   	/* NB: For both of these, zero means disabled. */
>   	desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
> -	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
> +	ce->guc_state.preemption_timeout = engine->props.preempt_timeout_ms * 1000;
> +	desc->preemption_timeout = ce->guc_state.preemption_timeout;
>   }
>   
>   static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
> @@ -2193,7 +2195,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
>   	desc->hw_context_desc = ce->lrc.lrca;
>   	desc->priority = ce->guc_state.prio;
>   	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
> -	guc_context_policy_init(engine, desc);
> +	guc_context_policy_init(ce, engine, desc);
>   
>   	/*
>   	 * If context is a parent, we need to register a process descriptor
> @@ -2226,7 +2228,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
>   			desc->hw_context_desc = child->lrc.lrca;
>   			desc->priority = ce->guc_state.prio;
>   			desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
> -			guc_context_policy_init(engine, desc);
> +			guc_context_policy_init(child, engine, desc);
>   		}
>   
>   		clear_children_join_go_memory(ce);
> @@ -2409,6 +2411,19 @@ static u16 prep_context_pending_disable(struct intel_context *ce)
>   	return ce->guc_id.id;
>   }
>   
> +static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
> +						 u16 guc_id,
> +						 u32 preemption_timeout)
> +{
> +	u32 action[] = {
> +		INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
> +		guc_id,
> +		preemption_timeout
> +	};
> +
> +	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
> +}
> +
>   static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
>   {
>   	struct intel_guc *guc = ce_to_guc(ce);
> @@ -2442,8 +2457,10 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
>   
>   	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
>   
> -	with_intel_runtime_pm(runtime_pm, wakeref)
> +	with_intel_runtime_pm(runtime_pm, wakeref) {
> +		__guc_context_set_preemption_timeout(guc, guc_id, 1);
>   		__guc_context_sched_disable(guc, ce, guc_id);
> +	}
>   
>   	return &ce->guc_state.blocked;
>   }
> @@ -2492,8 +2509,10 @@ static void guc_context_unblock(struct intel_context *ce)
>   
>   	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
>   
> -	if (enable) {
> -		with_intel_runtime_pm(runtime_pm, wakeref)
> +	with_intel_runtime_pm(runtime_pm, wakeref) {
> +		__guc_context_set_preemption_timeout(guc, ce->guc_id.id,
> +						     ce->guc_state.preemption_timeout);
> +		if (enable)
>   			__guc_context_sched_enable(guc, ce);
>   	}
>   }
> @@ -2521,19 +2540,6 @@ static void guc_context_cancel_request(struct intel_context *ce,
>   	}
>   }
>   
> -static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
> -						 u16 guc_id,
> -						 u32 preemption_timeout)
> -{
> -	u32 action[] = {
> -		INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
> -		guc_id,
> -		preemption_timeout
> -	};
> -
> -	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
> -}
> -
>   static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
>   {
>   	struct intel_guc *guc = ce_to_guc(ce);
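
For reference, the block/unblock changes above boil down to the flow sketched
below. This is only a condensed sketch of the patch, not the actual driver
code: the wrapper name is made up, and the guc_state.lock, runtime PM wakeref
and corner-case handling from the real functions are omitted.

/* Sketch: cancelling a request on a GuC-backed context (condensed from the diff). */
static void guc_cancel_flow_sketch(struct intel_context *ce)
{
	struct intel_guc *guc = ce_to_guc(ce);
	u16 guc_id = ce->guc_id.id;

	/*
	 * guc_context_block(): drop the context's preemption timeout to the
	 * minimum (1 us) so the following schedule disable preempts the
	 * context immediately, even if sysfs configured the timeout to 0.
	 */
	__guc_context_set_preemption_timeout(guc, guc_id, 1);
	__guc_context_sched_disable(guc, ce, guc_id);

	/* ... the request is cancelled while scheduling is disabled ... */

	/*
	 * guc_context_unblock(): restore the value saved by
	 * guc_context_policy_init() and re-enable scheduling if needed.
	 */
	__guc_context_set_preemption_timeout(guc, guc_id,
					     ce->guc_state.preemption_timeout);
	__guc_context_sched_enable(guc, ce);
}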


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case
  2022-01-24 15:01 ` [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case Matthew Brost
  2022-01-25 15:27   ` [Intel-gfx] " Tvrtko Ursulin
@ 2022-01-26 19:03   ` John Harrison
  2022-01-26 20:10     ` Matthew Brost
  1 sibling, 1 reply; 15+ messages in thread
From: John Harrison @ 2022-01-26 19:03 UTC (permalink / raw)
  To: Matthew Brost, intel-gfx, dri-devel; +Cc: daniele.ceraolospurio, tvrtko.ursulin

On 1/24/2022 07:01, Matthew Brost wrote:
> More than one request can be submitted to a single ELSP at a time if
> multiple requests are ready to run on the same context. When a request
> is canceled it is marked bad, an idle pulse (a high priority kernel
> request) is sent to the engine, and the execlists scheduler sees that
> the running request is bad and sets the preemption timeout to the
> minimum value (1 ms). This fails to work if multiple requests are
> combined on the ELSP, as only the most recent request is stored in the
> execlists scheduler (the request stored in the ELSP isn't marked bad,
> thus the preemption timeout isn't set to the minimum value). If the
> preempt timeout is configured to zero, the engine is permanently hung.
> This is shown by an upcoming selftest.
>
> To work around this, mark the idle pulse with a flag to force a preempt
> with the minimum value.
>
> Fixes: 38b237eab2bc7 ("drm/i915: Individual request cancellation")
> Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> ---
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 23 +++++++++++++++----
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  1 +
>   .../drm/i915/gt/intel_execlists_submission.c  | 18 ++++++++++-----
>   drivers/gpu/drm/i915/i915_request.h           |  6 +++++
>   4 files changed, 38 insertions(+), 10 deletions(-)
>
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> index a3698f611f457..efd1c719b4072 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> @@ -243,7 +243,8 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
>   	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
>   }
>   
> -static int __intel_engine_pulse(struct intel_engine_cs *engine)
> +static int __intel_engine_pulse(struct intel_engine_cs *engine,
> +				bool force_preempt)
>   {
>   	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
>   	struct intel_context *ce = engine->kernel_context;
> @@ -258,6 +259,8 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
>   		return PTR_ERR(rq);
>   
>   	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
> +	if (force_preempt)
> +		__set_bit(I915_FENCE_FLAG_FORCE_PREEMPT, &rq->fence.flags);
>   
>   	heartbeat_commit(rq, &attr);
>   	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
> @@ -299,7 +302,7 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
>   
>   		/* recheck current execution */
>   		if (intel_engine_has_preemption(engine)) {
> -			err = __intel_engine_pulse(engine);
> +			err = __intel_engine_pulse(engine, false);
>   			if (err)
>   				set_heartbeat(engine, saved);
>   		}
> @@ -312,7 +315,8 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
>   	return err;
>   }
>   
> -int intel_engine_pulse(struct intel_engine_cs *engine)
> +static int _intel_engine_pulse(struct intel_engine_cs *engine,
> +			       bool force_preempt)
>   {
>   	struct intel_context *ce = engine->kernel_context;
>   	int err;
> @@ -325,7 +329,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
>   
>   	err = -EINTR;
>   	if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
> -		err = __intel_engine_pulse(engine);
> +		err = __intel_engine_pulse(engine, force_preempt);
>   		mutex_unlock(&ce->timeline->mutex);
>   	}
>   
> @@ -334,6 +338,17 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
>   	return err;
>   }
>   
> +int intel_engine_pulse(struct intel_engine_cs *engine)
> +{
> +	return _intel_engine_pulse(engine, false);
> +}
> +
> +
> +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine)
> +{
> +	return _intel_engine_pulse(engine, true);
> +}
> +
>   int intel_engine_flush_barriers(struct intel_engine_cs *engine)
>   {
>   	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> index 5da6d809a87a2..d9c8386754cb3 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> @@ -21,6 +21,7 @@ void intel_gt_park_heartbeats(struct intel_gt *gt);
>   void intel_gt_unpark_heartbeats(struct intel_gt *gt);
>   
>   int intel_engine_pulse(struct intel_engine_cs *engine);
> +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine);
>   int intel_engine_flush_barriers(struct intel_engine_cs *engine);
>   
>   #endif /* INTEL_ENGINE_HEARTBEAT_H */
> diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> index 960a9aaf4f3a3..f0c2024058731 100644
> --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> @@ -1222,26 +1222,29 @@ static void record_preemption(struct intel_engine_execlists *execlists)
>   }
>   
>   static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
> -					    const struct i915_request *rq)
> +					    const struct i915_request *rq,
> +					    bool force_preempt)
>   {
>   	if (!rq)
>   		return 0;
>   
>   	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
> -	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
> +	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq) ||
> +		     force_preempt))
>   		return 1;
>   
>   	return READ_ONCE(engine->props.preempt_timeout_ms);
>   }
>   
>   static void set_preempt_timeout(struct intel_engine_cs *engine,
> -				const struct i915_request *rq)
> +				const struct i915_request *rq,
> +				bool force_preempt)
>   {
>   	if (!intel_engine_has_preempt_reset(engine))
>   		return;
>   
>   	set_timer_ms(&engine->execlists.preempt,
> -		     active_preempt_timeout(engine, rq));
> +		     active_preempt_timeout(engine, rq, force_preempt));
>   }
>   
>   static bool completed(const struct i915_request *rq)
> @@ -1584,12 +1587,15 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   	    memcmp(active,
>   		   execlists->pending,
>   		   (port - execlists->pending) * sizeof(*port))) {
> +		bool force_preempt = test_bit(I915_FENCE_FLAG_FORCE_PREEMPT,
> +					      &last->fence.flags);
> +
>   		*port = NULL;
>   		while (port-- != execlists->pending)
>   			execlists_schedule_in(*port, port - execlists->pending);
>   
>   		WRITE_ONCE(execlists->yield, -1);
> -		set_preempt_timeout(engine, *active);
> +		set_preempt_timeout(engine, *active, force_preempt);
>   		execlists_submit_ports(engine);
>   	} else {
>   		ring_set_paused(engine, 0);
> @@ -2594,7 +2600,7 @@ static void execlists_context_cancel_request(struct intel_context *ce,
>   
>   	i915_request_active_engine(rq, &engine);
>   
> -	if (engine && intel_engine_pulse(engine))
> +	if (engine && intel_engine_pulse_force_preempt(engine))
>   		intel_gt_handle_error(engine->gt, engine->mask, 0,
>   				      "request cancellation by %s",
>   				      current->comm);
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 28b1f9db54875..7e6312233d4c7 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -170,6 +170,12 @@ enum {
>   	 * fence (dma_fence_array) and i915 generated for parallel submission.
>   	 */
>   	I915_FENCE_FLAG_COMPOSITE,
> +
> +	/*
> +	 * I915_FENCE_FLAG_FORCE_PREEMPT - Force preempt immediately regardless
> +	 * of preempt timeout configuration
> +	 */
> +	I915_FENCE_FLAG_FORCE_PREEMPT,
This would be execlists only? I'm a bit concerned about adding a global
flag that cannot be implemented on all current and future hardware.

John.

>   };
>   
>   /**
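
Condensing the timer decision from the hunks above into one place (a sketch
only; the real active_preempt_timeout() takes the engine and reads the sysfs
value via READ_ONCE(engine->props.preempt_timeout_ms)):

/* Sketch: which preemption timeout the execlists backend arms on submission. */
static unsigned long preempt_timeout_sketch(const struct i915_request *rq,
					    bool force_preempt,
					    unsigned long sysfs_timeout_ms)
{
	/* Nothing running: leave the preemption timer disarmed. */
	if (!rq)
		return 0;

	/*
	 * Banned contexts, bad (cancelled) requests and the new force-preempt
	 * pulse all get the minimum 1 ms, ignoring sysfs. Without the
	 * force_preempt hint, a coalesced ELSP submission where the request
	 * checked here isn't the cancelled one falls through to the sysfs
	 * value, and a value of 0 leaves the engine hung forever.
	 */
	if (intel_context_is_banned(rq->context) || bad_request(rq) ||
	    force_preempt)
		return 1;

	return sysfs_timeout_ms;
}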


^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case
  2022-01-26 19:03   ` John Harrison
@ 2022-01-26 20:10     ` Matthew Brost
  0 siblings, 0 replies; 15+ messages in thread
From: Matthew Brost @ 2022-01-26 20:10 UTC (permalink / raw)
  To: John Harrison; +Cc: intel-gfx, daniele.ceraolospurio, dri-devel, tvrtko.ursulin

On Wed, Jan 26, 2022 at 11:03:24AM -0800, John Harrison wrote:
> On 1/24/2022 07:01, Matthew Brost wrote:
> > More than one request can be submitted to a single ELSP at a time if
> > multiple requests are ready to run on the same context. When a request
> > is canceled it is marked bad, an idle pulse (a high priority kernel
> > request) is sent to the engine, and the execlists scheduler sees that
> > the running request is bad and sets the preemption timeout to the
> > minimum value (1 ms). This fails to work if multiple requests are
> > combined on the ELSP, as only the most recent request is stored in the
> > execlists scheduler (the request stored in the ELSP isn't marked bad,
> > thus the preemption timeout isn't set to the minimum value). If the
> > preempt timeout is configured to zero, the engine is permanently hung.
> > This is shown by an upcoming selftest.
> > 
> > To work around this, mark the idle pulse with a flag to force a preempt
> > with the minimum value.
> > 
> > Fixes: 38b237eab2bc7 ("drm/i915: Individual request cancellation")
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >   .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 23 +++++++++++++++----
> >   .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  1 +
> >   .../drm/i915/gt/intel_execlists_submission.c  | 18 ++++++++++-----
> >   drivers/gpu/drm/i915/i915_request.h           |  6 +++++
> >   4 files changed, 38 insertions(+), 10 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> > index a3698f611f457..efd1c719b4072 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> > @@ -243,7 +243,8 @@ void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
> >   	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
> >   }
> > -static int __intel_engine_pulse(struct intel_engine_cs *engine)
> > +static int __intel_engine_pulse(struct intel_engine_cs *engine,
> > +				bool force_preempt)
> >   {
> >   	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
> >   	struct intel_context *ce = engine->kernel_context;
> > @@ -258,6 +259,8 @@ static int __intel_engine_pulse(struct intel_engine_cs *engine)
> >   		return PTR_ERR(rq);
> >   	__set_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags);
> > +	if (force_preempt)
> > +		__set_bit(I915_FENCE_FLAG_FORCE_PREEMPT, &rq->fence.flags);
> >   	heartbeat_commit(rq, &attr);
> >   	GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
> > @@ -299,7 +302,7 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
> >   		/* recheck current execution */
> >   		if (intel_engine_has_preemption(engine)) {
> > -			err = __intel_engine_pulse(engine);
> > +			err = __intel_engine_pulse(engine, false);
> >   			if (err)
> >   				set_heartbeat(engine, saved);
> >   		}
> > @@ -312,7 +315,8 @@ int intel_engine_set_heartbeat(struct intel_engine_cs *engine,
> >   	return err;
> >   }
> > -int intel_engine_pulse(struct intel_engine_cs *engine)
> > +static int _intel_engine_pulse(struct intel_engine_cs *engine,
> > +			       bool force_preempt)
> >   {
> >   	struct intel_context *ce = engine->kernel_context;
> >   	int err;
> > @@ -325,7 +329,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
> >   	err = -EINTR;
> >   	if (!mutex_lock_interruptible(&ce->timeline->mutex)) {
> > -		err = __intel_engine_pulse(engine);
> > +		err = __intel_engine_pulse(engine, force_preempt);
> >   		mutex_unlock(&ce->timeline->mutex);
> >   	}
> > @@ -334,6 +338,17 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
> >   	return err;
> >   }
> > +int intel_engine_pulse(struct intel_engine_cs *engine)
> > +{
> > +	return _intel_engine_pulse(engine, false);
> > +}
> > +
> > +
> > +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine)
> > +{
> > +	return _intel_engine_pulse(engine, true);
> > +}
> > +
> >   int intel_engine_flush_barriers(struct intel_engine_cs *engine)
> >   {
> >   	struct i915_sched_attr attr = { .priority = I915_PRIORITY_MIN };
> > diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> > index 5da6d809a87a2..d9c8386754cb3 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> > @@ -21,6 +21,7 @@ void intel_gt_park_heartbeats(struct intel_gt *gt);
> >   void intel_gt_unpark_heartbeats(struct intel_gt *gt);
> >   int intel_engine_pulse(struct intel_engine_cs *engine);
> > +int intel_engine_pulse_force_preempt(struct intel_engine_cs *engine);
> >   int intel_engine_flush_barriers(struct intel_engine_cs *engine);
> >   #endif /* INTEL_ENGINE_HEARTBEAT_H */
> > diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> > index 960a9aaf4f3a3..f0c2024058731 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
> > @@ -1222,26 +1222,29 @@ static void record_preemption(struct intel_engine_execlists *execlists)
> >   }
> >   static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
> > -					    const struct i915_request *rq)
> > +					    const struct i915_request *rq,
> > +					    bool force_preempt)
> >   {
> >   	if (!rq)
> >   		return 0;
> >   	/* Force a fast reset for terminated contexts (ignoring sysfs!) */
> > -	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq)))
> > +	if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq) ||
> > +		     force_preempt))
> >   		return 1;
> >   	return READ_ONCE(engine->props.preempt_timeout_ms);
> >   }
> >   static void set_preempt_timeout(struct intel_engine_cs *engine,
> > -				const struct i915_request *rq)
> > +				const struct i915_request *rq,
> > +				bool force_preempt)
> >   {
> >   	if (!intel_engine_has_preempt_reset(engine))
> >   		return;
> >   	set_timer_ms(&engine->execlists.preempt,
> > -		     active_preempt_timeout(engine, rq));
> > +		     active_preempt_timeout(engine, rq, force_preempt));
> >   }
> >   static bool completed(const struct i915_request *rq)
> > @@ -1584,12 +1587,15 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
> >   	    memcmp(active,
> >   		   execlists->pending,
> >   		   (port - execlists->pending) * sizeof(*port))) {
> > +		bool force_preempt = test_bit(I915_FENCE_FLAG_FORCE_PREEMPT,
> > +					      &last->fence.flags);
> > +
> >   		*port = NULL;
> >   		while (port-- != execlists->pending)
> >   			execlists_schedule_in(*port, port - execlists->pending);
> >   		WRITE_ONCE(execlists->yield, -1);
> > -		set_preempt_timeout(engine, *active);
> > +		set_preempt_timeout(engine, *active, force_preempt);
> >   		execlists_submit_ports(engine);
> >   	} else {
> >   		ring_set_paused(engine, 0);
> > @@ -2594,7 +2600,7 @@ static void execlists_context_cancel_request(struct intel_context *ce,
> >   	i915_request_active_engine(rq, &engine);
> > -	if (engine && intel_engine_pulse(engine))
> > +	if (engine && intel_engine_pulse_force_preempt(engine))
> >   		intel_gt_handle_error(engine->gt, engine->mask, 0,
> >   				      "request cancellation by %s",
> >   				      current->comm);
> > diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> > index 28b1f9db54875..7e6312233d4c7 100644
> > --- a/drivers/gpu/drm/i915/i915_request.h
> > +++ b/drivers/gpu/drm/i915/i915_request.h
> > @@ -170,6 +170,12 @@ enum {
> >   	 * fence (dma_fence_array) and i915 generated for parallel submission.
> >   	 */
> >   	I915_FENCE_FLAG_COMPOSITE,
> > +
> > +	/*
> > +	 * I915_FENCE_FLAG_FORCE_PREEMPT - Force preempt immediately regardless
> > +	 * of preempt timeout configuration
> > +	 */
> > +	I915_FENCE_FLAG_FORCE_PREEMPT,
> This would be execlists only? I'm a bit concerned about adding a global flag
> that cannot be implemented on all current and future hardware.
> 

That ship has sailed... A lot of flags defined here are backend specific.

Matt

> John.
> 
> >   };
> >   /**
> 
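
The "backend specific" point can be seen directly in the series: the flag has
exactly one producer and one consumer, both in the hunks quoted above
(condensed here; not standalone code):

	/* Producer: the forced heartbeat pulse (intel_engine_heartbeat.c). */
	if (force_preempt)
		__set_bit(I915_FENCE_FLAG_FORCE_PREEMPT, &rq->fence.flags);

	/* Consumer: execlists_dequeue() (intel_execlists_submission.c). */
	bool force_preempt = test_bit(I915_FENCE_FLAG_FORCE_PREEMPT,
				      &last->fence.flags);
	set_preempt_timeout(engine, *active, force_preempt);

	/* The GuC backend in this series never reads the flag. */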

^ permalink raw reply	[flat|nested] 15+ messages in thread

* Re: [PATCH 2/4] drm/i915/guc: Cancel requests immediately
  2022-01-26 18:58   ` John Harrison
@ 2022-01-26 20:12     ` Matthew Brost
  0 siblings, 0 replies; 15+ messages in thread
From: Matthew Brost @ 2022-01-26 20:12 UTC (permalink / raw)
  To: John Harrison; +Cc: intel-gfx, daniele.ceraolospurio, dri-devel, tvrtko.ursulin

On Wed, Jan 26, 2022 at 10:58:46AM -0800, John Harrison wrote:
> On 1/24/2022 07:01, Matthew Brost wrote:
> > Change the preemption timeout to the smallest possible value (1 us) when
> > disabling scheduling to cancel a request and restore it after
> > cancellation. This not only cancels the request as fast as possible, it
> > fixes a bug where the preemption timeout is 0 which results in the
> > schedule disable hanging forever.
> Shouldn't there be an 'if' in the above statement? The pre-emption timeout
> is not normally zero.
>

Yes. Will reword.
 
> > 
> > Reported-by: Jani Saarinen <jani.saarinen@intel.com>
> > Fixes: 62eaf0ae217d4 ("drm/i915/guc: Support request cancellation")
> > Link: https://gitlab.freedesktop.org/drm/intel/-/issues/4960
> > Signed-off-by: Matthew Brost <matthew.brost@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gt/intel_context_types.h |  5 ++
> >   .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 46 +++++++++++--------
> >   2 files changed, 31 insertions(+), 20 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
> > index 30cd81ad8911a..730998823dbea 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_context_types.h
> > +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
> > @@ -198,6 +198,11 @@ struct intel_context {
> >   		 * each priority bucket
> >   		 */
> >   		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
> > +		/**
> > +		 * @preemption_timeout: preemption timeout of the context, used
> > +		 * to restore this value after request cancellation
> > +		 */
> > +		u32 preemption_timeout;
> >   	} guc_state;
> >   	struct {
> > diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > index 3918f1be114fa..966947c450253 100644
> > --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
> > @@ -2147,7 +2147,8 @@ static inline u32 get_children_join_value(struct intel_context *ce,
> >   	return __get_parent_scratch(ce)->join[child_index].semaphore;
> >   }
> > -static void guc_context_policy_init(struct intel_engine_cs *engine,
> > +static void guc_context_policy_init(struct intel_context *ce,
> > +				    struct intel_engine_cs *engine,
> >   				    struct guc_lrc_desc *desc)
> Shouldn't engine be before ce? The more general structure usually goes
> first.
> 

Sure. Will fix this in the next rev.

Matt

> John.
> 
> >   {
> >   	desc->policy_flags = 0;
> > @@ -2157,7 +2158,8 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
> >   	/* NB: For both of these, zero means disabled. */
> >   	desc->execution_quantum = engine->props.timeslice_duration_ms * 1000;
> > -	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
> > +	ce->guc_state.preemption_timeout = engine->props.preempt_timeout_ms * 1000;
> > +	desc->preemption_timeout = ce->guc_state.preemption_timeout;
> >   }
> >   static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
> > @@ -2193,7 +2195,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
> >   	desc->hw_context_desc = ce->lrc.lrca;
> >   	desc->priority = ce->guc_state.prio;
> >   	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
> > -	guc_context_policy_init(engine, desc);
> > +	guc_context_policy_init(ce, engine, desc);
> >   	/*
> >   	 * If context is a parent, we need to register a process descriptor
> > @@ -2226,7 +2228,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
> >   			desc->hw_context_desc = child->lrc.lrca;
> >   			desc->priority = ce->guc_state.prio;
> >   			desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
> > -			guc_context_policy_init(engine, desc);
> > +			guc_context_policy_init(child, engine, desc);
> >   		}
> >   		clear_children_join_go_memory(ce);
> > @@ -2409,6 +2411,19 @@ static u16 prep_context_pending_disable(struct intel_context *ce)
> >   	return ce->guc_id.id;
> >   }
> > +static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
> > +						 u16 guc_id,
> > +						 u32 preemption_timeout)
> > +{
> > +	u32 action[] = {
> > +		INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
> > +		guc_id,
> > +		preemption_timeout
> > +	};
> > +
> > +	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
> > +}
> > +
> >   static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
> >   {
> >   	struct intel_guc *guc = ce_to_guc(ce);
> > @@ -2442,8 +2457,10 @@ static struct i915_sw_fence *guc_context_block(struct intel_context *ce)
> >   	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> > -	with_intel_runtime_pm(runtime_pm, wakeref)
> > +	with_intel_runtime_pm(runtime_pm, wakeref) {
> > +		__guc_context_set_preemption_timeout(guc, guc_id, 1);
> >   		__guc_context_sched_disable(guc, ce, guc_id);
> > +	}
> >   	return &ce->guc_state.blocked;
> >   }
> > @@ -2492,8 +2509,10 @@ static void guc_context_unblock(struct intel_context *ce)
> >   	spin_unlock_irqrestore(&ce->guc_state.lock, flags);
> > -	if (enable) {
> > -		with_intel_runtime_pm(runtime_pm, wakeref)
> > +	with_intel_runtime_pm(runtime_pm, wakeref) {
> > +		__guc_context_set_preemption_timeout(guc, ce->guc_id.id,
> > +						     ce->guc_state.preemption_timeout);
> > +		if (enable)
> >   			__guc_context_sched_enable(guc, ce);
> >   	}
> >   }
> > @@ -2521,19 +2540,6 @@ static void guc_context_cancel_request(struct intel_context *ce,
> >   	}
> >   }
> > -static void __guc_context_set_preemption_timeout(struct intel_guc *guc,
> > -						 u16 guc_id,
> > -						 u32 preemption_timeout)
> > -{
> > -	u32 action[] = {
> > -		INTEL_GUC_ACTION_SET_CONTEXT_PREEMPTION_TIMEOUT,
> > -		guc_id,
> > -		preemption_timeout
> > -	};
> > -
> > -	intel_guc_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
> > -}
> > -
> >   static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
> >   {
> >   	struct intel_guc *guc = ce_to_guc(ce);
> 
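
The reordering John asks for above would presumably just swap the first two
parameters in the next revision, i.e. something like (a guess, not code posted
in this thread):

static void guc_context_policy_init(struct intel_engine_cs *engine,
				    struct intel_context *ce,
				    struct guc_lrc_desc *desc);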

^ permalink raw reply	[flat|nested] 15+ messages in thread

end of thread, other threads:[~2022-01-26 20:18 UTC | newest]

Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-01-24 15:01 [PATCH 0/4] Fix up request cancel Matthew Brost
2022-01-24 15:01 ` [PATCH 1/4] drm/i915: Add request cancel low level trace point Matthew Brost
2022-01-25 12:27   ` [Intel-gfx] " Tvrtko Ursulin
2022-01-25 16:39     ` Matthew Brost
2022-01-26 10:29       ` Tvrtko Ursulin
2022-01-24 15:01 ` [PATCH 2/4] drm/i915/guc: Cancel requests immediately Matthew Brost
2022-01-26 18:58   ` John Harrison
2022-01-26 20:12     ` Matthew Brost
2022-01-24 15:01 ` [PATCH 3/4] drm/i915/execlists: Fix execlists request cancellation corner case Matthew Brost
2022-01-25 15:27   ` [Intel-gfx] " Tvrtko Ursulin
2022-01-25 16:32     ` Matthew Brost
2022-01-26 10:38       ` Tvrtko Ursulin
2022-01-26 19:03   ` John Harrison
2022-01-26 20:10     ` Matthew Brost
2022-01-24 15:01 ` [PATCH 4/4] drm/i915/selftests: Set preemption timeout to zero in cancel reset test Matthew Brost
