From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: matthew.auld@intel.com
Subject: [PATCH 09/43] drm/i915: Track active engines within a context
Date: Wed,  6 Mar 2019 14:24:43 +0000
Message-ID: <20190306142517.22558-10-chris@chris-wilson.co.uk>
In-Reply-To: <20190306142517.22558-1-chris@chris-wilson.co.uk>

In preparation for the next patch, track which engines have been used by
the HW for each context, so that we can reduce the work required to flush
our state off the HW to just those engines.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
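Not part of the patch: below is a minimal, self-contained userspace sketch of
the pin/unpin bookkeeping this change introduces, for illustration only. The
names (struct context, struct engine_state, context_pin/context_unpin) are
hypothetical, and pthreads plus a hand-rolled list stand in for the kernel's
struct mutex and <linux/list.h>; the real code lives in i915_gem_context and
intel_context, and the real unpin path drops the link under struct_mutex
rather than ctx->mutex.

/*
 * Illustrative sketch only -- not driver code.  Each pinned engine is
 * published on the context's active_engines list, so later walks only
 * touch engines that actually hold state for this context.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's doubly-linked list_head helpers. */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

static void list_del(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
}

/* Hypothetical analogue of i915_gem_context. */
struct context {
	struct list_head active_engines;  /* engines holding HW state for us */
	pthread_mutex_t mutex;            /* guards additions to the list */
};

/* Hypothetical analogue of intel_context (per-engine logical state). */
struct engine_state {
	const char *engine_name;
	struct list_head active_link;     /* link in ctx->active_engines */
};

/* Pin: publish this engine on the context's active list. */
static void context_pin(struct context *ctx, struct engine_state *ce)
{
	pthread_mutex_lock(&ctx->mutex);
	list_add(&ce->active_link, &ctx->active_engines);
	pthread_mutex_unlock(&ctx->mutex);
}

/* Unpin: this engine no longer holds state for the context. */
static void context_unpin(struct engine_state *ce)
{
	list_del(&ce->active_link);
}

int main(void)
{
	struct context ctx;
	struct engine_state rcs = { .engine_name = "rcs0" };
	struct engine_state vcs = { .engine_name = "vcs0" };
	struct list_head *it;

	list_init(&ctx.active_engines);
	pthread_mutex_init(&ctx.mutex, NULL);

	context_pin(&ctx, &rcs);
	context_pin(&ctx, &vcs);

	/* Walk only the engines that actually hold state for this context,
	 * instead of iterating over every engine in the system. */
	for (it = ctx.active_engines.next; it != &ctx.active_engines; it = it->next) {
		struct engine_state *ce = (struct engine_state *)
			((char *)it - offsetof(struct engine_state, active_link));
		printf("active: %s\n", ce->engine_name);
	}

	context_unpin(&vcs);
	context_unpin(&rcs);
	pthread_mutex_destroy(&ctx.mutex);
	return 0;
}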
 drivers/gpu/drm/i915/i915_debugfs.c           | 18 +++++----------
 drivers/gpu/drm/i915/i915_gem_context.c       |  5 +++++
 drivers/gpu/drm/i915/i915_gem_context.h       |  5 +++++
 drivers/gpu/drm/i915/intel_lrc.c              | 22 +++++++++----------
 drivers/gpu/drm/i915/intel_ringbuffer.c       | 14 +++++++-----
 drivers/gpu/drm/i915/selftests/mock_context.c |  2 ++
 drivers/gpu/drm/i915/selftests/mock_engine.c  |  6 +++++
 7 files changed, 43 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0a6348ad7c98..6a90558de213 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -388,12 +388,9 @@ static void print_context_stats(struct seq_file *m,
 	struct i915_gem_context *ctx;
 
 	list_for_each_entry(ctx, &i915->contexts.list, link) {
-		struct intel_engine_cs *engine;
-		enum intel_engine_id id;
-
-		for_each_engine(engine, i915, id) {
-			struct intel_context *ce = to_intel_context(ctx, engine);
+		struct intel_context *ce;
 
+		list_for_each_entry(ce, &ctx->active_engines, active_link) {
 			if (ce->state)
 				per_file_stats(0, ce->state->obj, &kstats);
 			if (ce->ring)
@@ -1880,9 +1877,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 {
 	struct drm_i915_private *dev_priv = node_to_i915(m->private);
 	struct drm_device *dev = &dev_priv->drm;
-	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
-	enum intel_engine_id id;
 	int ret;
 
 	ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1890,6 +1885,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		return ret;
 
 	list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
+		struct intel_context *ce;
+
 		seq_puts(m, "HW context ");
 		if (!list_empty(&ctx->hw_id_link))
 			seq_printf(m, "%x [pin %u]", ctx->hw_id,
@@ -1912,11 +1909,8 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_putc(m, ctx->remap_slice ? 'R' : 'r');
 		seq_putc(m, '\n');
 
-		for_each_engine(engine, dev_priv, id) {
-			struct intel_context *ce =
-				to_intel_context(ctx, engine);
-
-			seq_printf(m, "%s: ", engine->name);
+		list_for_each_entry(ce, &ctx->active_engines, active_link) {
+			seq_printf(m, "%s: ", ce->engine->name);
 			if (ce->state)
 				describe_obj(m, ce->state->obj);
 			if (ce->ring)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 486203e9d205..d997695a4f77 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -226,6 +226,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 
 	lockdep_assert_held(&ctx->i915->drm.struct_mutex);
 	GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+	GEM_BUG_ON(!list_empty(&ctx->active_engines));
 
 	release_hw_id(ctx);
 	i915_ppgtt_put(ctx->ppgtt);
@@ -241,6 +242,7 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
 	put_pid(ctx->pid);
 
 	list_del(&ctx->link);
+	mutex_destroy(&ctx->mutex);
 
 	kfree_rcu(ctx, rcu);
 }
@@ -353,6 +355,7 @@ intel_context_init(struct intel_context *ce,
 		   struct intel_engine_cs *engine)
 {
 	ce->gem_context = ctx;
+	ce->engine = engine;
 
 	INIT_LIST_HEAD(&ce->signal_link);
 	INIT_LIST_HEAD(&ce->signals);
@@ -381,6 +384,8 @@ __create_hw_context(struct drm_i915_private *dev_priv,
 	list_add_tail(&ctx->link, &dev_priv->contexts.list);
 	ctx->i915 = dev_priv;
 	ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
+	INIT_LIST_HEAD(&ctx->active_engines);
+	mutex_init(&ctx->mutex);
 
 	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
 		intel_context_init(&ctx->__engine[n], ctx, dev_priv->engine[n]);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.h b/drivers/gpu/drm/i915/i915_gem_context.h
index e1188d77a23d..124c2a082b99 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/i915_gem_context.h
@@ -163,6 +163,9 @@ struct i915_gem_context {
 	atomic_t hw_id_pin_count;
 	struct list_head hw_id_link;
 
+	struct list_head active_engines;
+	struct mutex mutex;
+
 	/**
 	 * @user_handle: userspace identifier
 	 *
@@ -176,7 +179,9 @@ struct i915_gem_context {
 	/** engine: per-engine logical HW state */
 	struct intel_context {
 		struct i915_gem_context *gem_context;
+		struct intel_engine_cs *engine;
 		struct intel_engine_cs *active;
+		struct list_head active_link;
 		struct list_head signal_link;
 		struct list_head signals;
 		struct i915_vma *state;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index f0ba20f2b41d..a9a47dbeac88 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1282,6 +1282,7 @@ static void execlists_context_unpin(struct intel_context *ce)
 	i915_gem_object_unpin_map(ce->state->obj);
 	i915_vma_unpin(ce->state);
 
+	list_del(&ce->active_link);
 	i915_gem_context_put(ce->gem_context);
 }
 
@@ -1366,6 +1367,11 @@ __execlists_context_pin(struct intel_engine_cs *engine,
 	__execlists_update_reg_state(engine, ce);
 
 	ce->state->obj->pin_global++;
+
+	mutex_lock(&ctx->mutex);
+	list_add(&ce->active_link, &ctx->active_engines);
+	mutex_unlock(&ctx->mutex);
+
 	i915_gem_context_get(ctx);
 	return ce;
 
@@ -2887,9 +2893,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
 
 void intel_lr_context_resume(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
 	struct i915_gem_context *ctx;
-	enum intel_engine_id id;
+	struct intel_context *ce;
 
 	/*
 	 * Because we emit WA_TAIL_DWORDS there may be a disparity
@@ -2903,17 +2908,10 @@ void intel_lr_context_resume(struct drm_i915_private *i915)
 	 * simplicity, we just zero everything out.
 	 */
 	list_for_each_entry(ctx, &i915->contexts.list, link) {
-		for_each_engine(engine, i915, id) {
-			struct intel_context *ce =
-				to_intel_context(ctx, engine);
-
-			if (!ce->state)
-				continue;
-
+		list_for_each_entry(ce, &ctx->active_engines, active_link) {
+			GEM_BUG_ON(!ce->ring);
 			intel_ring_reset(ce->ring, 0);
-
-			if (ce->pin_count) /* otherwise done in context_pin */
-				__execlists_update_reg_state(engine, ce);
+			__execlists_update_reg_state(ce->engine, ce);
 		}
 	}
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 1dd0c99b893f..82f33ff94dc8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1431,6 +1431,7 @@ static void intel_ring_context_unpin(struct intel_context *ce)
 	__context_unpin_ppgtt(ce->gem_context);
 	__context_unpin(ce);
 
+	list_del(&ce->active_link);
 	i915_gem_context_put(ce->gem_context);
 }
 
@@ -1510,6 +1511,10 @@ __ring_context_pin(struct intel_engine_cs *engine,
 {
 	int err;
 
+	/* One ringbuffer to rule them all */
+	GEM_BUG_ON(!engine->buffer);
+	ce->ring = engine->buffer;
+
 	if (!ce->state && engine->context_size) {
 		struct i915_vma *vma;
 
@@ -1530,12 +1535,11 @@ __ring_context_pin(struct intel_engine_cs *engine,
 	if (err)
 		goto err_unpin;
 
-	i915_gem_context_get(ctx);
-
-	/* One ringbuffer to rule them all */
-	GEM_BUG_ON(!engine->buffer);
-	ce->ring = engine->buffer;
+	mutex_lock(&ctx->mutex);
+	list_add(&ce->active_link, &ctx->active_engines);
+	mutex_unlock(&ctx->mutex);
 
+	i915_gem_context_get(ctx);
 	return ce;
 
 err_unpin:
diff --git a/drivers/gpu/drm/i915/selftests/mock_context.c b/drivers/gpu/drm/i915/selftests/mock_context.c
index b646cdcdd602..353b37b9f78e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/selftests/mock_context.c
@@ -44,6 +44,8 @@ mock_context(struct drm_i915_private *i915,
 	INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL);
 	INIT_LIST_HEAD(&ctx->handles_list);
 	INIT_LIST_HEAD(&ctx->hw_id_link);
+	INIT_LIST_HEAD(&ctx->active_engines);
+	mutex_init(&ctx->mutex);
 
 	for (n = 0; n < ARRAY_SIZE(ctx->__engine); n++)
 		intel_context_init(&ctx->__engine[n], ctx, i915->engine[n]);
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index c2c954f64226..8032a8a9542f 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -125,6 +125,7 @@ static void hw_delay_complete(struct timer_list *t)
 static void mock_context_unpin(struct intel_context *ce)
 {
 	mock_timeline_unpin(ce->ring->timeline);
+	list_del(&ce->active_link);
 	i915_gem_context_put(ce->gem_context);
 }
 
@@ -160,6 +161,11 @@ mock_context_pin(struct intel_engine_cs *engine,
 	mock_timeline_pin(ce->ring->timeline);
 
 	ce->ops = &mock_context_ops;
+
+	mutex_lock(&ctx->mutex);
+	list_add(&ce->active_link, &ctx->active_engines);
+	mutex_unlock(&ctx->mutex);
+
 	i915_gem_context_get(ctx);
 	return ce;
 
-- 
2.20.1
