From: Matthew Brost <matthew.brost@intel.com>
To: <intel-gfx@lists.freedesktop.org>, <dri-devel@lists.freedesktop.org>
Cc: <daniel.vetter@ffwll.ch>, <tony.ye@intel.com>, <zhengguo.xu@intel.com>
Subject: [PATCH 14/27] drm/i915/guc: Assign contexts in parent-child relationship consecutive guc_ids
Date: Fri, 20 Aug 2021 15:44:33 -0700	[thread overview]
Message-ID: <20210820224446.30620-15-matthew.brost@intel.com> (raw)
In-Reply-To: <20210820224446.30620-1-matthew.brost@intel.com>

Assign contexts in a parent-child relationship consecutive guc_ids. This
is accomplished by partitioning the guc_id space between IDs that need to
be consecutive (1/16 of the available guc_ids) and those that do not
(the remaining 15/16). The consecutive search is implemented via the
bitmap API.
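
For illustration, here is a condensed, standalone sketch of the two
allocation paths this partitioning creates - the structure and names
below (example_state, multi_lrc_ids, the example_* helpers) are
simplified stand-ins for the driver's submission state, not the actual
code from the patch:

#include <linux/bitmap.h>
#include <linux/gfp.h>
#include <linux/idr.h>
#include <linux/log2.h>
#include <linux/types.h>

/* Simplified stand-in for guc->submission_state */
struct example_state {
	struct ida guc_ids;		/* single-lrc: ids in [multi_lrc_ids, num_guc_ids) */
	unsigned long *guc_ids_bitmap;	/* multi-lrc: ids in [0, multi_lrc_ids) */
	u32 num_guc_ids;
	u32 multi_lrc_ids;		/* e.g. max(num_guc_ids / 16, 32) */
};

/* Reserve 'width' consecutive ids (parent + children) from the bitmap. */
static int example_alloc_multi_lrc(struct example_state *s, unsigned int width)
{
	/* bitmap_find_free_region() returns the first bit of a free 2^order run. */
	return bitmap_find_free_region(s->guc_ids_bitmap, s->multi_lrc_ids,
				       order_base_2(width));
}

/* Allocate a single id from the remaining, non-consecutive range. */
static int example_alloc_single_lrc(struct example_state *s)
{
	return ida_simple_get(&s->guc_ids, s->multi_lrc_ids, s->num_guc_ids,
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
}

Because bitmap_find_free_region() works on power-of-two regions, a parent
with e.g. two children (width 3) consumes a region of 4 consecutive ids
(order_base_2(3) == 2).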

This is a precursor to the full GuC multi-lrc implementation but aligns
with how the GuC multi-lrc interface is defined - guc_ids must be
consecutive when using the GuC multi-lrc interface.
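
As a concrete illustration of that requirement (hypothetical values, not
taken from the patch): a parent that is handed a base guc_id simply hands
out the ids immediately following it to its children, e.g.

/* Illustrative only: children occupy the ids right after the parent's. */
static void example_assign_child_ids(unsigned int parent_id,
				     unsigned int *child_ids,
				     unsigned int num_children)
{
	unsigned int i;

	for (i = 0; i < num_children; i++)
		child_ids[i] = parent_id + 1 + i;
}

so a parent at guc_id 64 with three children yields 65, 66 and 67.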

v2:
 (Daniel Vetter)
  - Explicitly state why we assign consecutive guc_ids

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/i915/gt/uc/intel_guc.h        |   6 +-
 .../gpu/drm/i915/gt/uc/intel_guc_submission.c | 107 +++++++++++++-----
 2 files changed, 86 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 023953e77553..3f95b1b4f15c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -61,9 +61,13 @@ struct intel_guc {
 		 */
 		spinlock_t lock;
 		/**
-		 * @guc_ids: used to allocate new guc_ids
+		 * @guc_ids: used to allocate new guc_ids, single-lrc
 		 */
 		struct ida guc_ids;
+		/**
+		 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
+		 */
+		unsigned long *guc_ids_bitmap;
 		/** @num_guc_ids: number of guc_ids that can be used */
 		u32 num_guc_ids;
 		/** @max_guc_ids: max number of guc_ids that can be used */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 00d54bb00bfb..e9dfd43d29a0 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -125,6 +125,18 @@ guc_create_virtual(struct intel_engine_cs **siblings, unsigned int count);
 
 #define GUC_REQUEST_SIZE 64 /* bytes */
 
+/*
+ * We reserve 1/16 of the guc_ids for multi-lrc as these need to be contiguous
+ * per the GuC submission interface. A different allocation algorithm is used
+ * (bitmap vs. ida) between multi-lrc and single-lrc hence the reason to
+ * partition the guc_id space. We believe the number of multi-lrc contexts in
+ * use should be low and 1/16 should be sufficient. Minimum of 32 guc_ids for
+ * multi-lrc.
+ */
+#define NUMBER_MULTI_LRC_GUC_ID(guc) \
+	((guc)->submission_state.num_guc_ids / 16 > 32 ? \
+	 (guc)->submission_state.num_guc_ids / 16 : 32)
+
 /*
  * Below is a set of functions which control the GuC scheduling state which
  * require a lock.
@@ -1176,6 +1188,10 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	INIT_LIST_HEAD(&guc->submission_state.destroyed_contexts);
 	intel_gt_pm_unpark_work_init(&guc->submission_state.destroyed_worker,
 				     destroyed_worker_func);
+	guc->submission_state.guc_ids_bitmap =
+		bitmap_zalloc(NUMBER_MULTI_LRC_GUC_ID(guc), GFP_KERNEL);
+	if (!guc->submission_state.guc_ids_bitmap)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -1188,6 +1204,7 @@ void intel_guc_submission_fini(struct intel_guc *guc)
 	guc_lrc_desc_pool_destroy(guc);
 	guc_flush_destroyed_contexts(guc);
 	i915_sched_engine_put(guc->sched_engine);
+	bitmap_free(guc->submission_state.guc_ids_bitmap);
 }
 
 static void queue_request(struct i915_sched_engine *sched_engine,
@@ -1239,18 +1256,43 @@ static void guc_submit_request(struct i915_request *rq)
 	spin_unlock_irqrestore(&sched_engine->lock, flags);
 }
 
-static int new_guc_id(struct intel_guc *guc)
+static int new_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
-	return ida_simple_get(&guc->submission_state.guc_ids, 0,
-			      guc->submission_state.num_guc_ids, GFP_KERNEL |
-			      __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
+	int ret;
+
+	GEM_BUG_ON(intel_context_is_child(ce));
+
+	if (intel_context_is_parent(ce))
+		ret = bitmap_find_free_region(guc->submission_state.guc_ids_bitmap,
+					      NUMBER_MULTI_LRC_GUC_ID(guc),
+					      order_base_2(ce->guc_number_children
+							   + 1));
+	else
+		ret = ida_simple_get(&guc->submission_state.guc_ids,
+				     NUMBER_MULTI_LRC_GUC_ID(guc),
+				     guc->submission_state.num_guc_ids,
+				     GFP_KERNEL | __GFP_RETRY_MAYFAIL |
+				     __GFP_NOWARN);
+	if (unlikely(ret < 0))
+		return ret;
+
+	ce->guc_id.id = ret;
+	return 0;
 }
 
 static void __release_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
+	GEM_BUG_ON(intel_context_is_child(ce));
+
 	if (!context_guc_id_invalid(ce)) {
-		ida_simple_remove(&guc->submission_state.guc_ids,
-				  ce->guc_id.id);
+		if (intel_context_is_parent(ce))
+			bitmap_release_region(guc->submission_state.guc_ids_bitmap,
+					      ce->guc_id.id,
+					      order_base_2(ce->guc_number_children
+							   + 1));
+		else
+			ida_simple_remove(&guc->submission_state.guc_ids,
+					  ce->guc_id.id);
 		reset_lrc_desc(guc, ce->guc_id.id);
 		set_context_guc_id_invalid(ce);
 	}
@@ -1267,49 +1309,60 @@ static void release_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	spin_unlock_irqrestore(&guc->submission_state.lock, flags);
 }
 
-static int steal_guc_id(struct intel_guc *guc)
+static int steal_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
-	struct intel_context *ce;
-	int guc_id;
+	struct intel_context *cn;
 
 	lockdep_assert_held(&guc->submission_state.lock);
+	GEM_BUG_ON(intel_context_is_child(ce));
+	GEM_BUG_ON(intel_context_is_parent(ce));
 
 	if (!list_empty(&guc->submission_state.guc_id_list)) {
-		ce = list_first_entry(&guc->submission_state.guc_id_list,
+		cn = list_first_entry(&guc->submission_state.guc_id_list,
 				      struct intel_context,
 				      guc_id.link);
 
-		GEM_BUG_ON(atomic_read(&ce->guc_id.ref));
-		GEM_BUG_ON(context_guc_id_invalid(ce));
-
-		list_del_init(&ce->guc_id.link);
-		guc_id = ce->guc_id.id;
+		GEM_BUG_ON(atomic_read(&cn->guc_id.ref));
+		GEM_BUG_ON(context_guc_id_invalid(cn));
+		GEM_BUG_ON(intel_context_is_child(cn));
+		GEM_BUG_ON(intel_context_is_parent(cn));
 
-		spin_lock(&ce->guc_state.lock);
-		clr_context_registered(ce);
-		spin_unlock(&ce->guc_state.lock);
+		list_del_init(&cn->guc_id.link);
+		ce->guc_id = cn->guc_id;
+		clr_context_registered(cn);
+		set_context_guc_id_invalid(cn);
 
-		set_context_guc_id_invalid(ce);
-		return guc_id;
+		return 0;
 	} else {
 		return -EAGAIN;
 	}
 }
 
-static int assign_guc_id(struct intel_guc *guc, u16 *out)
+static int assign_guc_id(struct intel_guc *guc, struct intel_context *ce)
 {
 	int ret;
 
 	lockdep_assert_held(&guc->submission_state.lock);
+	GEM_BUG_ON(intel_context_is_child(ce));
 
-	ret = new_guc_id(guc);
+	ret = new_guc_id(guc, ce);
 	if (unlikely(ret < 0)) {
-		ret = steal_guc_id(guc);
+		if (intel_context_is_parent(ce))
+			return -ENOSPC;
+
+		ret = steal_guc_id(guc, ce);
 		if (ret < 0)
 			return ret;
 	}
 
-	*out = ret;
+	if (intel_context_is_parent(ce)) {
+		struct intel_context *child;
+		int i = 1;
+
+		for_each_child(ce, child)
+			child->guc_id.id = ce->guc_id.id + i++;
+	}
+
 	return 0;
 }
 
@@ -1327,7 +1380,7 @@ static int pin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	might_lock(&ce->guc_state.lock);
 
 	if (context_guc_id_invalid(ce)) {
-		ret = assign_guc_id(guc, &ce->guc_id.id);
+		ret = assign_guc_id(guc, ce);
 		if (ret)
 			goto out_unlock;
 		ret = 1;	/* Indidcates newly assigned guc_id */
@@ -1369,8 +1422,10 @@ static void unpin_guc_id(struct intel_guc *guc, struct intel_context *ce)
 	unsigned long flags;
 
 	GEM_BUG_ON(atomic_read(&ce->guc_id.ref) < 0);
+	GEM_BUG_ON(intel_context_is_child(ce));
 
-	if (unlikely(context_guc_id_invalid(ce)))
+	if (unlikely(context_guc_id_invalid(ce) ||
+		     intel_context_is_parent(ce)))
 		return;
 
 	spin_lock_irqsave(&guc->submission_state.lock, flags);
-- 
2.32.0

