All of lore.kernel.org
 help / color / mirror / Atom feed
From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [RFC 2/5] drm/i915: Move struct intel_virtual_engine to its own header
Date: Wed, 11 Dec 2019 13:12:41 -0800	[thread overview]
Message-ID: <20191211211244.7831-3-daniele.ceraolospurio@intel.com> (raw)
In-Reply-To: <20191211211244.7831-1-daniele.ceraolospurio@intel.com>

From: Matthew Brost <matthew.brost@intel.com>

The upcoming GuC submission code will need to use the structure, so
split it out into its own file.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: John Harrison <john.c.harrison@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c           | 103 ++++++------------
 .../drm/i915/gt/intel_virtual_engine_types.h  |  57 ++++++++++
 2 files changed, 92 insertions(+), 68 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_virtual_engine_types.h

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 6d6148e11fd0..e6dea2d3a5c0 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -147,6 +147,7 @@
 #include "intel_mocs.h"
 #include "intel_reset.h"
 #include "intel_ring.h"
+#include "intel_virtual_engine_types.h"
 #include "intel_workarounds.h"
 
 #define RING_EXECLIST_QFULL		(1 << 0x2)
@@ -180,52 +181,11 @@
 #define WA_TAIL_DWORDS 2
 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
 
-struct virtual_engine {
-	struct intel_engine_cs base;
-	struct intel_context context;
-
-	/*
-	 * We allow only a single request through the virtual engine at a time
-	 * (each request in the timeline waits for the completion fence of
-	 * the previous before being submitted). By restricting ourselves to
-	 * only submitting a single request, each request is placed on to a
-	 * physical to maximise load spreading (by virtue of the late greedy
-	 * scheduling -- each real engine takes the next available request
-	 * upon idling).
-	 */
-	struct i915_request *request;
-
-	/*
-	 * We keep a rbtree of available virtual engines inside each physical
-	 * engine, sorted by priority. Here we preallocate the nodes we need
-	 * for the virtual engine, indexed by physical_engine->id.
-	 */
-	struct ve_node {
-		struct rb_node rb;
-		int prio;
-	} nodes[I915_NUM_ENGINES];
-
-	/*
-	 * Keep track of bonded pairs -- restrictions upon on our selection
-	 * of physical engines any particular request may be submitted to.
-	 * If we receive a submit-fence from a master engine, we will only
-	 * use one of sibling_mask physical engines.
-	 */
-	struct ve_bond {
-		const struct intel_engine_cs *master;
-		intel_engine_mask_t sibling_mask;
-	} *bonds;
-	unsigned int num_bonds;
-
-	/* And finally, which physical engines this virtual engine maps onto. */
-	unsigned int num_siblings;
-	struct intel_engine_cs *siblings[0];
-};
-
-static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine)
+static struct intel_virtual_engine *
+to_virtual_engine(struct intel_engine_cs *engine)
 {
 	GEM_BUG_ON(!intel_engine_is_virtual(engine));
-	return container_of(engine, struct virtual_engine, base);
+	return container_of(engine, struct intel_virtual_engine, base);
 }
 
 static int lr_context_alloc(struct intel_context *ce,
@@ -384,7 +344,7 @@ static inline bool need_preempt(const struct intel_engine_cs *engine,
 		return true;
 
 	if (rb) {
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		bool preempt = false;
 
@@ -1144,7 +1104,8 @@ execlists_schedule_in(struct i915_request *rq, int idx)
 
 static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct intel_virtual_engine *ve =
+		container_of(ce, typeof(*ve), context);
 	struct i915_request *next = READ_ONCE(ve->request);
 
 	if (next && next->execution_mask & ~rq->execution_mask)
@@ -1448,7 +1409,7 @@ static void virtual_update_register_offsets(u32 *regs,
 	set_offsets(regs, reg_offsets(engine), engine);
 }
 
-static bool virtual_matches(const struct virtual_engine *ve,
+static bool virtual_matches(const struct intel_virtual_engine *ve,
 			    const struct i915_request *rq,
 			    const struct intel_engine_cs *engine)
 {
@@ -1473,7 +1434,7 @@ static bool virtual_matches(const struct virtual_engine *ve,
 	return true;
 }
 
-static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
+static void virtual_xfer_breadcrumbs(struct intel_virtual_engine *ve,
 				     struct intel_engine_cs *engine)
 {
 	struct intel_engine_cs *old = ve->siblings[0];
@@ -1670,7 +1631,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	 */
 
 	for (rb = rb_first_cached(&execlists->virtual); rb; ) {
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq = READ_ONCE(ve->request);
 
@@ -1786,7 +1747,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 	}
 
 	while (rb) { /* XXX virtual is always taking precedence */
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq;
 
@@ -3237,7 +3198,7 @@ static void execlists_cancel_requests(struct intel_engine_cs *engine)
 
 	/* Cancel all attached virtual engines */
 	while ((rb = rb_first_cached(&execlists->virtual))) {
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 
 		rb_erase_cached(rb, &execlists->virtual);
@@ -4198,14 +4159,14 @@ static int lr_context_alloc(struct intel_context *ce,
 	return ret;
 }
 
-static struct list_head *virtual_queue(struct virtual_engine *ve)
+static struct list_head *virtual_queue(struct intel_virtual_engine *ve)
 {
 	return &ve->base.execlists.default_priolist.requests[0];
 }
 
 static void virtual_context_destroy(struct kref *kref)
 {
-	struct virtual_engine *ve =
+	struct intel_virtual_engine *ve =
 		container_of(kref, typeof(*ve), context.ref);
 	unsigned int n;
 
@@ -4239,7 +4200,7 @@ static void virtual_context_destroy(struct kref *kref)
 	kfree(ve);
 }
 
-static void virtual_engine_initial_hint(struct virtual_engine *ve)
+static void virtual_engine_initial_hint(struct intel_virtual_engine *ve)
 {
 	int swp;
 
@@ -4268,7 +4229,8 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve)
 
 static int virtual_context_pin(struct intel_context *ce)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct intel_virtual_engine *ve =
+		container_of(ce, typeof(*ve), context);
 	int err;
 
 	/* Note: we must use a real engine class for setting up reg state */
@@ -4282,7 +4244,8 @@ static int virtual_context_pin(struct intel_context *ce)
 
 static void virtual_context_enter(struct intel_context *ce)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct intel_virtual_engine *ve =
+		container_of(ce, typeof(*ve), context);
 	unsigned int n;
 
 	for (n = 0; n < ve->num_siblings; n++)
@@ -4293,7 +4256,8 @@ static void virtual_context_enter(struct intel_context *ce)
 
 static void virtual_context_exit(struct intel_context *ce)
 {
-	struct virtual_engine *ve = container_of(ce, typeof(*ve), context);
+	struct intel_virtual_engine *ve =
+		container_of(ce, typeof(*ve), context);
 	unsigned int n;
 
 	intel_timeline_exit(ce->timeline);
@@ -4312,7 +4276,8 @@ static const struct intel_context_ops virtual_context_ops = {
 	.destroy = virtual_context_destroy,
 };
 
-static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
+static intel_engine_mask_t
+virtual_submission_mask(struct intel_virtual_engine *ve)
 {
 	struct i915_request *rq;
 	intel_engine_mask_t mask;
@@ -4339,7 +4304,8 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
 
 static void virtual_submission_tasklet(unsigned long data)
 {
-	struct virtual_engine * const ve = (struct virtual_engine *)data;
+	struct intel_virtual_engine * const ve =
+		(struct intel_virtual_engine *)data;
 	const int prio = ve->base.execlists.queue_priority_hint;
 	intel_engine_mask_t mask;
 	unsigned int n;
@@ -4419,7 +4385,7 @@ static void virtual_submission_tasklet(unsigned long data)
 
 static void virtual_submit_request(struct i915_request *rq)
 {
-	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	struct intel_virtual_engine *ve = to_virtual_engine(rq->engine);
 	struct i915_request *old;
 	unsigned long flags;
 
@@ -4458,7 +4424,7 @@ static void virtual_submit_request(struct i915_request *rq)
 }
 
 static struct ve_bond *
-virtual_find_bond(struct virtual_engine *ve,
+virtual_find_bond(struct intel_virtual_engine *ve,
 		  const struct intel_engine_cs *master)
 {
 	int i;
@@ -4474,7 +4440,7 @@ virtual_find_bond(struct virtual_engine *ve,
 static void
 virtual_bond_execute(struct i915_request *rq, struct dma_fence *signal)
 {
-	struct virtual_engine *ve = to_virtual_engine(rq->engine);
+	struct intel_virtual_engine *ve = to_virtual_engine(rq->engine);
 	intel_engine_mask_t allowed, exec;
 	struct ve_bond *bond;
 
@@ -4498,7 +4464,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx,
 			       struct intel_engine_cs **siblings,
 			       unsigned int count)
 {
-	struct virtual_engine *ve;
+	struct intel_virtual_engine *ve;
 	unsigned int n;
 	int err;
 
@@ -4639,7 +4605,7 @@ struct intel_context *
 intel_execlists_clone_virtual(struct i915_gem_context *ctx,
 			      struct intel_engine_cs *src)
 {
-	struct virtual_engine *se = to_virtual_engine(src);
+	struct intel_virtual_engine *se = to_virtual_engine(src);
 	struct intel_context *dst;
 
 	dst = intel_execlists_create_virtual(ctx,
@@ -4649,7 +4615,8 @@ intel_execlists_clone_virtual(struct i915_gem_context *ctx,
 		return dst;
 
 	if (se->num_bonds) {
-		struct virtual_engine *de = to_virtual_engine(dst->engine);
+		struct intel_virtual_engine *de =
+			to_virtual_engine(dst->engine);
 
 		de->bonds = kmemdup(se->bonds,
 				    sizeof(*se->bonds) * se->num_bonds,
@@ -4669,7 +4636,7 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine,
 				     const struct intel_engine_cs *master,
 				     const struct intel_engine_cs *sibling)
 {
-	struct virtual_engine *ve = to_virtual_engine(engine);
+	struct intel_virtual_engine *ve = to_virtual_engine(engine);
 	struct ve_bond *bond;
 	int n;
 
@@ -4705,7 +4672,7 @@ struct intel_engine_cs *
 intel_virtual_engine_get_sibling(struct intel_engine_cs *engine,
 				 unsigned int sibling)
 {
-	struct virtual_engine *ve = to_virtual_engine(engine);
+	struct intel_virtual_engine *ve = to_virtual_engine(engine);
 
 	if (sibling >= ve->num_siblings)
 		return NULL;
@@ -4773,7 +4740,7 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
 	last = NULL;
 	count = 0;
 	for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) {
-		struct virtual_engine *ve =
+		struct intel_virtual_engine *ve =
 			rb_entry(rb, typeof(*ve), nodes[engine->id].rb);
 		struct i915_request *rq = READ_ONCE(ve->request);
 
diff --git a/drivers/gpu/drm/i915/gt/intel_virtual_engine_types.h b/drivers/gpu/drm/i915/gt/intel_virtual_engine_types.h
new file mode 100644
index 000000000000..9ba5f0e6e395
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_virtual_engine_types.h
@@ -0,0 +1,57 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_VIRTUAL_ENGINE_TYPES__
+#define __INTEL_VIRTUAL_ENGINE_TYPES__
+
+#include "intel_context_types.h"
+#include "intel_engine_types.h"
+
+struct i915_request;
+
+struct intel_virtual_engine {
+	struct intel_engine_cs base;
+	struct intel_context context;
+
+	/*
+	 * We allow only a single request through the virtual engine at a time
+	 * (each request in the timeline waits for the completion fence of
+	 * the previous before being submitted). By restricting ourselves to
+	 * only submitting a single request, each request is placed on to a
+	 * physical to maximise load spreading (by virtue of the late greedy
+	 * scheduling -- each real engine takes the next available request
+	 * upon idling).
+	 */
+	struct i915_request *request;
+
+	/*
+	 * We keep a rbtree of available virtual engines inside each physical
+	 * engine, sorted by priority. Here we preallocate the nodes we need
+	 * for the virtual engine, indexed by physical_engine->id.
+	 */
+	struct ve_node {
+		struct rb_node rb;
+		int prio;
+	} nodes[I915_NUM_ENGINES];
+
+	/*
+	 * Keep track of bonded pairs -- restrictions upon on our selection
+	 * of physical engines any particular request may be submitted to.
+	 * If we receive a submit-fence from a master engine, we will only
+	 * use one of sibling_mask physical engines.
+	 */
+	struct ve_bond {
+		const struct intel_engine_cs *master;
+		intel_engine_mask_t sibling_mask;
+	} *bonds;
+	unsigned int num_bonds;
+
+	/* And finally, which physical engines this virtual engine maps onto. */
+	unsigned int num_siblings;
+	struct intel_engine_cs *siblings[0];
+};
+
+#endif /* __INTEL_VIRTUAL_ENGINE_TYPES__ */
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

  parent reply	other threads:[~2019-12-11 21:12 UTC|newest]

Thread overview: 21+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-12-11 21:12 [Intel-gfx] [RFC 0/5] Split up intel_lrc.c Daniele Ceraolo Spurio
2019-12-11 21:12 ` [Intel-gfx] [RFC 1/5] drm/i915: introduce logical_ring and lr_context naming Daniele Ceraolo Spurio
2019-12-11 21:20   ` Chris Wilson
2019-12-11 21:33     ` Chris Wilson
2019-12-11 22:04     ` Daniele Ceraolo Spurio
2019-12-11 23:35       ` Matthew Brost
2019-12-11 21:12 ` Daniele Ceraolo Spurio [this message]
2019-12-11 21:22   ` [Intel-gfx] [RFC 2/5] drm/i915: Move struct intel_virtual_engine to its own header Chris Wilson
2019-12-11 21:12 ` [Intel-gfx] [RFC 3/5] drm/i915: split out virtual engine code Daniele Ceraolo Spurio
2019-12-11 21:22   ` Chris Wilson
2019-12-11 21:34     ` Daniele Ceraolo Spurio
2019-12-11 23:09       ` Matthew Brost
2019-12-11 21:12 ` [Intel-gfx] [RFC 4/5] drm/i915: move execlists selftests to their own file Daniele Ceraolo Spurio
2019-12-11 21:26   ` Chris Wilson
2019-12-11 22:07     ` Daniele Ceraolo Spurio
2019-12-11 21:12 ` [Intel-gfx] [RFC 5/5] drm/i915: introduce intel_execlists_submission.<c/h> Daniele Ceraolo Spurio
2019-12-11 21:31   ` Chris Wilson
2019-12-11 22:35     ` Daniele Ceraolo Spurio
2019-12-12  1:27 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for Split up intel_lrc.c Patchwork
2019-12-12  1:49 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2019-12-12 12:51 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20191211211244.7831-3-daniele.ceraolospurio@intel.com \
    --to=daniele.ceraolospurio@intel.com \
    --cc=intel-gfx@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.