From: Matthew Brost <matthew.brost@intel.com>
To: <intel-gfx@lists.freedesktop.org>, <dri-devel@lists.freedesktop.org>
Cc: <daniel.vetter@ffwll.ch>
Subject: [PATCH 23/27] drm/i915/guc: Move GuC priority fields in context under guc_active
Date: Wed, 18 Aug 2021 23:16:35 -0700
Message-ID: <20210819061639.21051-24-matthew.brost@intel.com>
In-Reply-To: <20210819061639.21051-1-matthew.brost@intel.com>

Move the GuC priority management fields in the context under the guc_active
struct, as this is where the lock that protects these fields lives. Also,
only set the guc_prio field once, during context init.
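
As a rough illustration of the resulting pattern (a hedged sketch rather than
the driver code itself: example_set_prio() is a made-up helper, and setting
CONTEXT_GUC_INIT directly at this spot is an assumption for the example), the
prio field is filled in once, lazily, on the first request allocated against
the context, and any later update is done with the protecting lock held:

	/* One-time init in the request-alloc path */
	if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags))) {
		guc_context_init(ce);			/* fills ce->guc_active.prio */
		set_bit(CONTEXT_GUC_INIT, &ce->flags);	/* assumed marker for "init done" */
	}

	/* Later changes take the lock that now sits next to the field */
	static void example_set_prio(struct intel_context *ce, u8 guc_prio)
	{
		unsigned long flags;

		spin_lock_irqsave(&ce->guc_active.lock, flags);
		ce->guc_active.prio = guc_prio;
		spin_unlock_irqrestore(&ce->guc_active.lock, flags);
	}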

Fixes: ee242ca704d3 ("drm/i915/guc: Implement GuC priority management")
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Cc: <stable@vger.kernel.org>
---
 drivers/gpu/drm/i915/gt/intel_context_types.h | 12 ++--
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 68 +++++++++++--------
 drivers/gpu/drm/i915/i915_trace.h             |  2 +-
 3 files changed, 45 insertions(+), 37 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 524a35a78bf4..9fb0480ccf3b 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -112,6 +112,7 @@ struct intel_context {
 #define CONTEXT_FORCE_SINGLE_SUBMISSION	7
 #define CONTEXT_NOPREEMPT		8
 #define CONTEXT_LRCA_DIRTY		9
+#define CONTEXT_GUC_INIT		10
 
 	struct {
 		u64 timeout_us;
@@ -178,6 +179,11 @@ struct intel_context {
 		spinlock_t lock;
 		/** requests: active requests on this context */
 		struct list_head requests;
+		/*
+		 * GuC priority management
+		 */
+		u8 prio;
+		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
 	} guc_active;
 
 	/* GuC LRC descriptor ID */
@@ -191,12 +197,6 @@ struct intel_context {
 	 */
 	struct list_head guc_id_link;
 
-	/*
-	 * GuC priority management
-	 */
-	u8 guc_prio;
-	u32 guc_prio_count[GUC_CLIENT_PRIORITY_NUM];
-
 #ifdef CONFIG_DRM_I915_SELFTEST
 	/**
 	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 3e90985b0c1b..bb90bedb1305 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -1369,8 +1369,6 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
 	desc->preemption_timeout = engine->props.preempt_timeout_ms * 1000;
 }
 
-static inline u8 map_i915_prio_to_guc_prio(int prio);
-
 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 {
 	struct intel_engine_cs *engine = ce->engine;
@@ -1378,8 +1376,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	struct intel_guc *guc = &engine->gt->uc.guc;
 	u32 desc_idx = ce->guc_id;
 	struct guc_lrc_desc *desc;
-	const struct i915_gem_context *ctx;
-	int prio = I915_CONTEXT_DEFAULT_PRIORITY;
 	bool context_registered;
 	intel_wakeref_t wakeref;
 	int ret = 0;
@@ -1396,12 +1392,6 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 
 	context_registered = lrc_desc_registered(guc, desc_idx);
 
-	rcu_read_lock();
-	ctx = rcu_dereference(ce->gem_context);
-	if (ctx)
-		prio = ctx->sched.priority;
-	rcu_read_unlock();
-
 	reset_lrc_desc(guc, desc_idx);
 	set_lrc_desc_registered(guc, desc_idx, ce);
 
@@ -1410,8 +1400,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	desc->engine_submit_mask = adjust_engine_mask(engine->class,
 						      engine->mask);
 	desc->hw_context_desc = ce->lrc.lrca;
-	ce->guc_prio = map_i915_prio_to_guc_prio(prio);
-	desc->priority = ce->guc_prio;
+	desc->priority = ce->guc_active.prio;
 	desc->context_flags = CONTEXT_REGISTRATION_FLAG_KMD;
 	guc_context_policy_init(engine, desc);
 
@@ -1813,10 +1802,10 @@ static inline void guc_lrc_desc_unpin(struct intel_context *ce)
 
 static void __guc_context_destroy(struct intel_context *ce)
 {
-	GEM_BUG_ON(ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
-		   ce->guc_prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
-		   ce->guc_prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
-		   ce->guc_prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
+	GEM_BUG_ON(ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_HIGH] ||
+		   ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_HIGH] ||
+		   ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_KMD_NORMAL] ||
+		   ce->guc_active.prio_count[GUC_CLIENT_PRIORITY_NORMAL]);
 	GEM_BUG_ON(ce->guc_state.number_committed_requests);
 
 	lrc_fini(ce);
@@ -1926,14 +1915,17 @@ static void guc_context_set_prio(struct intel_guc *guc,
 
 	GEM_BUG_ON(prio < GUC_CLIENT_PRIORITY_KMD_HIGH ||
 		   prio > GUC_CLIENT_PRIORITY_NORMAL);
+	lockdep_assert_held(&ce->guc_active.lock);
 
-	if (ce->guc_prio == prio || submission_disabled(guc) ||
-	    !context_registered(ce))
+	if (ce->guc_active.prio == prio || submission_disabled(guc) ||
+	    !context_registered(ce)) {
+		ce->guc_active.prio = prio;
 		return;
+	}
 
 	guc_submission_send_busy_loop(guc, action, ARRAY_SIZE(action), 0, true);
 
-	ce->guc_prio = prio;
+	ce->guc_active.prio = prio;
 	trace_intel_context_set_prio(ce);
 }
 
@@ -1953,24 +1945,24 @@ static inline void add_context_inflight_prio(struct intel_context *ce,
 					     u8 guc_prio)
 {
 	lockdep_assert_held(&ce->guc_active.lock);
-	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count));
 
-	++ce->guc_prio_count[guc_prio];
+	++ce->guc_active.prio_count[guc_prio];
 
 	/* Overflow protection */
-	GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+	GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]);
 }
 
 static inline void sub_context_inflight_prio(struct intel_context *ce,
 					     u8 guc_prio)
 {
 	lockdep_assert_held(&ce->guc_active.lock);
-	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_prio_count));
+	GEM_BUG_ON(guc_prio >= ARRAY_SIZE(ce->guc_active.prio_count));
 
 	/* Underflow protection */
-	GEM_WARN_ON(!ce->guc_prio_count[guc_prio]);
+	GEM_WARN_ON(!ce->guc_active.prio_count[guc_prio]);
 
-	--ce->guc_prio_count[guc_prio];
+	--ce->guc_active.prio_count[guc_prio];
 }
 
 static inline void update_context_prio(struct intel_context *ce)
@@ -1983,8 +1975,8 @@ static inline void update_context_prio(struct intel_context *ce)
 
 	lockdep_assert_held(&ce->guc_active.lock);
 
-	for (i = 0; i < ARRAY_SIZE(ce->guc_prio_count); ++i) {
-		if (ce->guc_prio_count[i]) {
+	for (i = 0; i < ARRAY_SIZE(ce->guc_active.prio_count); ++i) {
+		if (ce->guc_active.prio_count[i]) {
 			guc_context_set_prio(guc, ce, i);
 			break;
 		}
@@ -2123,6 +2115,20 @@ static bool context_needs_register(struct intel_context *ce, bool new_guc_id)
 		!submission_disabled(ce_to_guc(ce));
 }
 
+static void guc_context_init(struct intel_context *ce)
+{
+	const struct i915_gem_context *ctx;
+	int prio = I915_CONTEXT_DEFAULT_PRIORITY;
+
+	rcu_read_lock();
+	ctx = rcu_dereference(ce->gem_context);
+	if (ctx)
+		prio = ctx->sched.priority;
+	rcu_read_unlock();
+
+	ce->guc_active.prio = map_i915_prio_to_guc_prio(prio);
+}
+
 static int guc_request_alloc(struct i915_request *rq)
 {
 	struct intel_context *ce = rq->context;
@@ -2154,6 +2160,9 @@ static int guc_request_alloc(struct i915_request *rq)
 
 	rq->reserved_space -= GUC_REQUEST_SIZE;
 
+	if (unlikely(!test_bit(CONTEXT_GUC_INIT, &ce->flags)))
+		guc_context_init(ce);
+
 	/*
 	 * Call pin_guc_id here rather than in the pinning step as with
 	 * dma_resv, contexts can be repeatedly pinned / unpinned trashing the
@@ -3031,13 +3040,12 @@ static inline void guc_log_context_priority(struct drm_printer *p,
 {
 	int i;
 
-	drm_printf(p, "\t\tPriority: %d\n",
-		   ce->guc_prio);
+	drm_printf(p, "\t\tPriority: %d\n", ce->guc_active.prio);
 	drm_printf(p, "\t\tNumber Requests (lower index == higher priority)\n");
 	for (i = GUC_CLIENT_PRIORITY_KMD_HIGH;
 	     i < GUC_CLIENT_PRIORITY_NUM; ++i) {
 		drm_printf(p, "\t\tNumber requests in priority band[%d]: %d\n",
-			   i, ce->guc_prio_count[i]);
+			   i, ce->guc_active.prio_count[i]);
 	}
 	drm_printf(p, "\n");
 }
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 0a77eb2944b5..6f882e72ed11 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -910,7 +910,7 @@ DECLARE_EVENT_CLASS(intel_context,
 			   __entry->guc_id = ce->guc_id;
 			   __entry->pin_count = atomic_read(&ce->pin_count);
 			   __entry->sched_state = ce->guc_state.sched_state;
-			   __entry->guc_prio = ce->guc_prio;
+			   __entry->guc_prio = ce->guc_active.prio;
 			   ),
 
 		    TP_printk("guc_id=%d, pin_count=%d sched_state=0x%x, guc_prio=%u",
-- 
2.32.0

