From: Matthew Brost <matthew.brost@intel.com>
To: <intel-gfx@lists.freedesktop.org>, <dri-devel@lists.freedesktop.org>
Cc: matthew.brost@intel.com, tvrtko.ursulin@intel.com,
	daniele.ceraolospurio@intel.com, jason.ekstrand@intel.com,
	jon.bloomfield@intel.com, daniel.vetter@intel.com,
	john.c.harrison@intel.com
Subject: [RFC PATCH 90/97] drm/i915/guc: Non-static lrc descriptor registration buffer
Date: Thu,  6 May 2021 12:14:44 -0700	[thread overview]
Message-ID: <20210506191451.77768-91-matthew.brost@intel.com> (raw)
In-Reply-To: <20210506191451.77768-1-matthew.brost@intel.com>

Dynamically allocate space for lrc descriptor registration with the GuC
rather than using a large static buffer indexed by the guc_id. If no
space is available to register a context, fall back to the tasklet flow
control mechanism. Only allow 1/2 of the space to be allocated outside
the tasklet to prevent unready requests/contexts from consuming all of
the registration space.
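
For reference, a condensed sketch of the allocation policy implemented
by alloc_lrcd_reg_idx() in this patch (asserts and the might_sleep
annotation trimmed):

	static int alloc_lrcd_reg_idx(struct intel_guc *guc, bool tasklet)
	{
		/* GFP_ATOMIC in the tasklet, reclaim-friendly flags otherwise */
		gfp_t gfp = tasklet ? GFP_ATOMIC :
			GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
		int ret;

		/*
		 * Only the lower half of the ID space may be allocated
		 * outside the tasklet so unready requests cannot exhaust
		 * it; the tasklet (flow control) path may use all of it.
		 */
		ret = ida_simple_get(&guc->lrcd_reg.ida, 0,
				     tasklet ? guc->lrcd_reg.max_idx :
				     guc->lrcd_reg.max_idx / 2, gfp);

		/* No space: the caller falls back to the tasklet path */
		return ret < 0 ? -EBUSY : ret;
	}

A caller outside the tasklet that gets -EBUSY marks the context with
set_context_needs_register() so submission goes through the tasklet and
registration is retried once IDs are freed.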

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_context_types.h |   3 +
 drivers/gpu/drm/i915/gt/uc/intel_guc.h        |   9 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 198 +++++++++++++-----
 3 files changed, 150 insertions(+), 60 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index cd2ea5b98fc3..0d7173d3eabd 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -182,6 +182,9 @@ struct intel_context {
 	/* GuC scheduling state that does not require a lock. */
 	atomic_t guc_sched_state_no_lock;
 
+	/* GuC lrc descriptor registration buffer */
+	unsigned int guc_lrcd_reg_idx;
+
 	/* GuC lrc descriptor ID */
 	u16 guc_id;
 
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 96849a256be8..97bb262f8a13 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -68,8 +68,13 @@ struct intel_guc {
 	u32 ads_regset_size;
 	u32 ads_golden_ctxt_size;
 
-	struct i915_vma *lrc_desc_pool;
-	void *lrc_desc_pool_vaddr;
+	/* GuC LRC descriptor registration */
+	struct {
+		struct i915_vma *vma;
+		void *vaddr;
+		struct ida ida;
+		unsigned int max_idx;
+	} lrcd_reg;
 
 	/* guc_id to intel_context lookup */
 	struct xarray context_lookup;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 608b30907f4c..79caf9596084 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -437,65 +437,54 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 	return rb_entry(rb, struct i915_priolist, node);
 }
 
-static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, u32 index)
+static u32 __get_lrc_desc_offset(struct intel_guc *guc, int index)
 {
-	struct guc_lrc_desc *base = guc->lrc_desc_pool_vaddr;
-
+	GEM_BUG_ON(index >= guc->lrcd_reg.max_idx);
 	GEM_BUG_ON(index >= guc->max_guc_ids);
 
-	return &base[index];
+	return intel_guc_ggtt_offset(guc, guc->lrcd_reg.vma) +
+		(index * sizeof(struct guc_lrc_desc));
 }
 
-static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
+static struct guc_lrc_desc *__get_lrc_desc(struct intel_guc *guc, int index)
 {
-	struct intel_context *ce = xa_load(&guc->context_lookup, id);
+	struct guc_lrc_desc *desc;
 
-	GEM_BUG_ON(id >= guc->max_guc_ids);
+	GEM_BUG_ON(index >= guc->lrcd_reg.max_idx);
+	GEM_BUG_ON(index >= guc->max_guc_ids);
 
-	return ce;
+	desc = guc->lrcd_reg.vaddr;
+	desc = &desc[index];
+	memset(desc, 0, sizeof(*desc));
+
+	return desc;
 }
 
-static int guc_lrc_desc_pool_create(struct intel_guc *guc)
+static inline struct intel_context *__get_context(struct intel_guc *guc, u32 id)
 {
-	u32 size;
-	int ret;
-
-	size = PAGE_ALIGN(sizeof(struct guc_lrc_desc) * guc->max_guc_ids);
-	ret = intel_guc_allocate_and_map_vma(guc, size, &guc->lrc_desc_pool,
-					     (void **)&guc->lrc_desc_pool_vaddr);
-	if (ret)
-		return ret;
+	struct intel_context *ce = xa_load(&guc->context_lookup, id);
 
-	return 0;
-}
+	GEM_BUG_ON(id >= guc->max_guc_ids);
 
-static void guc_lrc_desc_pool_destroy(struct intel_guc *guc)
-{
-	guc->lrc_desc_pool_vaddr = NULL;
-	i915_vma_unpin_and_release(&guc->lrc_desc_pool, I915_VMA_RELEASE_MAP);
+	return ce;
 }
 
 static inline bool guc_submission_initialized(struct intel_guc *guc)
 {
-	return guc->lrc_desc_pool_vaddr != NULL;
+	return guc->lrcd_reg.max_idx != 0;
 }
 
-static inline void reset_lrc_desc(struct intel_guc *guc, u32 id)
+static inline void clr_lrc_desc_registered(struct intel_guc *guc, u32 id)
 {
-	if (likely(guc_submission_initialized(guc))) {
-		struct guc_lrc_desc *desc = __get_lrc_desc(guc, id);
-		unsigned long flags;
-
-		memset(desc, 0, sizeof(*desc));
+	unsigned long flags;
 
-		/*
-		 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
-		 * the lower level functions directly.
-		 */
-		xa_lock_irqsave(&guc->context_lookup, flags);
-		__xa_erase(&guc->context_lookup, id);
-		xa_unlock_irqrestore(&guc->context_lookup, flags);
-	}
+	/*
+	 * xarray API doesn't have xa_erase_irqsave wrapper, so calling
+	 * the lower level functions directly.
+	 */
+	xa_lock_irqsave(&guc->context_lookup, flags);
+	__xa_erase(&guc->context_lookup, id);
+	xa_unlock_irqrestore(&guc->context_lookup, flags);
 }
 
 static inline bool lrc_desc_registered(struct intel_guc *guc, u32 id)
@@ -1373,6 +1362,9 @@ static void retire_worker_func(struct work_struct *w)
 	}
 }
 
+static int guc_lrcd_reg_init(struct intel_guc *guc);
+static void guc_lrcd_reg_fini(struct intel_guc *guc);
+
 /*
  * Set up the memory resources to be shared with the GuC (via the GGTT)
  * at firmware loading time.
@@ -1381,17 +1373,12 @@ int intel_guc_submission_init(struct intel_guc *guc)
 {
 	int ret;
 
-	if (guc->lrc_desc_pool)
+	if (guc_submission_initialized(guc))
 		return 0;
 
-	ret = guc_lrc_desc_pool_create(guc);
+	ret = guc_lrcd_reg_init(guc);
 	if (ret)
 		return ret;
-	/*
-	 * Keep static analysers happy, let them know that we allocated the
-	 * vma after testing that it didn't exist earlier.
-	 */
-	GEM_BUG_ON(!guc->lrc_desc_pool);
 
 	xa_init_flags(&guc->context_lookup, XA_FLAGS_LOCK_IRQ);
 
@@ -1407,10 +1394,10 @@ void intel_guc_submission_fini(struct intel_guc *guc)
 {
 	int i;
 
-	if (!guc->lrc_desc_pool)
+	if (!guc_submission_initialized(guc))
 		return;
 
-	guc_lrc_desc_pool_destroy(guc);
+	guc_lrcd_reg_fini(guc);
 
 	for (i = 0; i < GUC_SUBMIT_ENGINE_MAX; ++i) {
 		struct i915_sched_engine *sched_engine =
@@ -1481,6 +1468,7 @@ static bool need_tasklet(struct guc_submit_engine *gse, struct intel_context *ce
 	return guc_ids_exhausted(gse) || submission_disabled(gse->guc) ||
 		gse->stalled_rq || gse->stalled_context ||
 		!lrc_desc_registered(gse->guc, ce->guc_id) ||
+		context_needs_register(ce) ||
 		!i915_sched_engine_is_empty(sched_engine);
 }
 
@@ -1533,7 +1521,7 @@ static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
 	if (!context_guc_id_invalid(ce)) {
 		ida_simple_remove(&guc->guc_ids, ce->guc_id);
-		reset_lrc_desc(guc, ce->guc_id);
+		clr_lrc_desc_registered(guc, ce->guc_id);
 		set_context_guc_id_invalid(ce);
 	}
 	if (!list_empty(&ce->guc_id_link))
@@ -1723,14 +1711,14 @@ static void unpin_guc_id(struct intel_guc *guc,
 }
 
 static int __guc_action_register_context(struct intel_guc *guc,
+					 struct intel_context *ce,
 					 u32 guc_id,
-					 u32 offset,
 					 bool loop)
 {
 	u32 action[] = {
 		INTEL_GUC_ACTION_REGISTER_CONTEXT,
 		guc_id,
-		offset,
+		__get_lrc_desc_offset(guc, ce->guc_lrcd_reg_idx),
 	};
 
 	return guc_submission_busy_loop(guc, action, ARRAY_SIZE(action), 0, loop);
@@ -1739,13 +1727,11 @@ static int __guc_action_register_context(struct intel_guc *guc,
 static int register_context(struct intel_context *ce, bool loop)
 {
 	struct intel_guc *guc = ce_to_guc(ce);
-	u32 offset = intel_guc_ggtt_offset(guc, guc->lrc_desc_pool) +
-		ce->guc_id * sizeof(struct guc_lrc_desc);
 	int ret;
 
 	trace_intel_context_register(ce);
 
-	ret = __guc_action_register_context(guc, ce->guc_id, offset, loop);
+	ret = __guc_action_register_context(guc, ce, ce->guc_id, loop);
 	set_context_registered(ce);
 	return ret;
 }
@@ -1804,6 +1790,86 @@ static void guc_context_policy_init(struct intel_engine_cs *engine,
 
 static inline u8 map_i915_prio_to_guc_prio(int prio);
 
+static int alloc_lrcd_reg_idx_buffer(struct intel_guc *guc, int num_per_vma)
+{
+	u32 size = num_per_vma * sizeof(struct guc_lrc_desc);
+	struct i915_vma **vma = &guc->lrcd_reg.vma;
+	void **vaddr = &guc->lrcd_reg.vaddr;
+	int ret;
+
+	GEM_BUG_ON(!is_power_of_2(size));
+
+	ret = intel_guc_allocate_and_map_vma(guc, size, vma, vaddr);
+	if (unlikely(ret))
+		return ret;
+
+	guc->lrcd_reg.max_idx += num_per_vma;
+
+	return 0;
+}
+
+static int alloc_lrcd_reg_idx(struct intel_guc *guc, bool tasklet)
+{
+	int ret;
+	gfp_t gfp = tasklet ? GFP_ATOMIC :
+		GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+
+	might_sleep_if(!tasklet);
+
+	/*
+	 * We only allow 1/2 of the space to be allocated outside of tasklet
+	 * (flow control) to ensure requests that are not ready don't consume
+	 * all context registration space.
+	 */
+	ret = ida_simple_get(&guc->lrcd_reg.ida, 0,
+			     tasklet ? guc->lrcd_reg.max_idx :
+			     guc->lrcd_reg.max_idx / 2, gfp);
+	if (unlikely(ret < 0))
+		return -EBUSY;
+
+	return ret;
+}
+
+static void __free_lrcd_reg_idx(struct intel_guc *guc, struct intel_context *ce)
+{
+	if (ce->guc_lrcd_reg_idx && guc->lrcd_reg.max_idx) {
+		ida_simple_remove(&guc->lrcd_reg.ida, ce->guc_lrcd_reg_idx);
+		ce->guc_lrcd_reg_idx = 0;
+	}
+}
+
+static void free_lrcd_reg_idx(struct intel_guc *guc, struct intel_context *ce)
+{
+	__free_lrcd_reg_idx(guc, ce);
+}
+
+static int guc_lrcd_reg_init(struct intel_guc *guc)
+{
+	unsigned int buffer_size = I915_GTT_PAGE_SIZE_4K * 16;
+	int ret;
+
+	ida_init(&guc->lrcd_reg.ida);
+
+	ret = alloc_lrcd_reg_idx_buffer(guc, buffer_size /
+					sizeof(struct guc_lrc_desc));
+	if (unlikely(ret))
+		return ret;
+
+	/* Zero is reserved */
+	ret = alloc_lrcd_reg_idx(guc, false);
+	GEM_BUG_ON(ret);
+
+	return ret;
+}
+
+static void guc_lrcd_reg_fini(struct intel_guc *guc)
+{
+	i915_vma_unpin_and_release(&guc->lrcd_reg.vma,
+				   I915_VMA_RELEASE_MAP);
+	ida_destroy(&guc->lrcd_reg.ida);
+	guc->lrcd_reg.max_idx = 0;
+}
+
 static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 {
 	struct intel_runtime_pm *runtime_pm =
@@ -1828,6 +1894,14 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 	GEM_BUG_ON(i915_gem_object_is_lmem(guc->ct.vma->obj) !=
 		   i915_gem_object_is_lmem(ce->ring->vma->obj));
 
+	/* Allocate space for registration */
+	if (likely(!ce->guc_lrcd_reg_idx)) {
+		ret = alloc_lrcd_reg_idx(guc, !loop);
+		if (unlikely(ret < 0))
+			return ret;
+		ce->guc_lrcd_reg_idx = ret;
+	}
+
 	context_registered = lrc_desc_registered(guc, desc_idx);
 
 	rcu_read_lock();
@@ -1836,12 +1910,11 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 		prio = ctx->sched.priority;
 	rcu_read_unlock();
 
-	reset_lrc_desc(guc, desc_idx);
 	ret = set_lrc_desc_registered(guc, desc_idx, ce);
 	if (unlikely(ret))
 		return ret;
 
-	desc = __get_lrc_desc(guc, desc_idx);
+	desc = __get_lrc_desc(guc, ce->guc_lrcd_reg_idx);
 	desc->engine_class = engine_class_to_guc_class(engine->class);
 	desc->engine_submit_mask = adjust_engine_mask(engine->class,
 						      engine->mask);
@@ -1879,7 +1952,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 			}
 			spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 			if (unlikely(disabled)) {
-				reset_lrc_desc(guc, desc_idx);
+				clr_lrc_desc_registered(guc, desc_idx);
 				return 0;	/* Will get registered later */
 			}
 		}
@@ -1905,7 +1978,7 @@ static int guc_lrc_desc_pin(struct intel_context *ce, bool loop)
 		with_intel_runtime_pm(runtime_pm, wakeref)
 			ret = register_context(ce, loop);
 		if (unlikely(ret == -EBUSY))
-			reset_lrc_desc(guc, desc_idx);
+			clr_lrc_desc_registered(guc, desc_idx);
 		else if (unlikely(ret == -ENODEV))
 			ret = 0;	/* Will get registered later */
 	}
@@ -2146,6 +2219,7 @@ static void guc_context_ban(struct intel_context *ce, struct i915_request *rq)
 		guc_id = prep_context_pending_disable(ce);
 		spin_unlock_irqrestore(&ce->guc_state.lock, flags);
 
+		free_lrcd_reg_idx(guc, ce);
 		with_intel_runtime_pm(runtime_pm, wakeref)
 			__guc_context_sched_disable(guc, ce, guc_id);
 	} else {
@@ -2224,6 +2298,7 @@ static void __guc_context_destroy(struct intel_context *ce)
 
 	lrc_fini(ce);
 	intel_context_fini(ce);
+	__free_lrcd_reg_idx(ce_to_guc(ce), ce);
 
 	if (intel_engine_is_virtual(ce->engine)) {
 		struct guc_virtual_engine *ve =
@@ -2726,11 +2801,14 @@ static int guc_request_alloc(struct i915_request *rq)
 
 	if (context_needs_lrc_desc_pin(ce, !!ret)) {
 		ret = guc_lrc_desc_pin(ce, true);
-		if (unlikely(ret)) {	/* unwind */
+		if (unlikely(ret == -EBUSY)) {
+			set_context_needs_register(ce);
+		} else if (unlikely(ret)) {	/* unwind */
 			if (ret == -EDEADLK)
 				disable_submission(guc);
 			atomic_dec(&ce->guc_id_ref);
 			unpin_guc_id(guc, ce, true);
+
 			return ret;
 		}
 	}
@@ -3370,6 +3448,8 @@ int intel_guc_sched_done_process_msg(struct intel_guc *guc,
 
 	if (context_pending_enable(ce)) {
 		clr_context_pending_enable(ce);
+
+		free_lrcd_reg_idx(guc, ce);
 	} else if (context_pending_disable(ce)) {
 		bool banned;
 
@@ -3618,6 +3698,8 @@ void intel_guc_log_submission_info(struct intel_guc *guc,
 		   atomic_read(&guc->outstanding_submission_g2h));
 	drm_printf(p, "GuC Number GuC IDs: %d\n", guc->num_guc_ids);
 	drm_printf(p, "GuC Max Number GuC IDs: %d\n\n", guc->max_guc_ids);
+	drm_printf(p, "GuC max context registered: %u\n\n",
+		   guc->lrcd_reg.max_idx);
 
 	for (i = 0; i < GUC_SUBMIT_ENGINE_MAX; ++i)
 		gse_log_submission_info(guc->gse[i], p, i);
-- 
2.28.0

