From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH 10/23] drm/i915: Add ww context handling to context_barrier_task
Date: Fri,  3 Jul 2020 14:22:08 +0200
Message-ID: <20200703122221.591656-11-maarten.lankhorst@linux.intel.com>
In-Reply-To: <20200703122221.591656-1-maarten.lankhorst@linux.intel.com>

This is required if we want to pass a ww context to intel_context_pin()
and gen6_ppgtt_pin().

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 55 ++++++++++++++-----
 .../drm/i915/gem/selftests/i915_gem_context.c | 22 +++-----
 2 files changed, 48 insertions(+), 29 deletions(-)
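
For reference, the ww acquire/backoff/retry idiom this patch introduces
looks roughly like the following in isolation. This is a minimal sketch
only: the enclosing barrier_with_ww() function and the do_pinned_work()
helper are hypothetical, while the i915_gem_ww_ctx_* and intel_context_*
calls are the ones used in the hunk below.

	static int barrier_with_ww(struct intel_context *ce)
	{
		struct i915_gem_ww_ctx ww;
		int err;

		i915_gem_ww_ctx_init(&ww, true);	/* intr == true: interruptible waits */
	retry:
		err = intel_context_pin(ce);
		if (err)
			goto out;

		/* Work done here may take more object locks under @ww and hit -EDEADLK. */
		err = do_pinned_work(ce, &ww);		/* hypothetical helper */

		intel_context_unpin(ce);
	out:
		if (err == -EDEADLK) {
			/* Drop all locks held by @ww, wait for the contended one, then retry. */
			err = i915_gem_ww_ctx_backoff(&ww);
			if (!err)
				goto retry;
		}
		i915_gem_ww_ctx_fini(&ww);
		return err;
	}

context_barrier_task() applies the same structure inside its per-engine
loop, with the new optional pin() callback taking the place of
do_pinned_work().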

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 6675447a47b9..c01fc27c34c0 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -1092,6 +1092,7 @@ I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
 				intel_engine_mask_t engines,
 				bool (*skip)(struct intel_context *ce, void *data),
+				int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
 				int (*emit)(struct i915_request *rq, void *data),
 				void (*task)(void *data),
 				void *data)
@@ -1099,6 +1100,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 	struct context_barrier_task *cb;
 	struct i915_gem_engines_iter it;
 	struct i915_gem_engines *e;
+	struct i915_gem_ww_ctx ww;
 	struct intel_context *ce;
 	int err = 0;
 
@@ -1136,10 +1138,21 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 		if (skip && skip(ce, data))
 			continue;
 
-		rq = intel_context_create_request(ce);
+		i915_gem_ww_ctx_init(&ww, true);
+retry:
+		err = intel_context_pin(ce);
+		if (err)
+			goto err;
+
+		if (pin)
+			err = pin(ce, &ww, data);
+		if (err)
+			goto err_unpin;
+
+		rq = i915_request_create(ce);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			break;
+			goto err_unpin;
 		}
 
 		err = 0;
@@ -1149,6 +1162,16 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 			err = i915_active_add_request(&cb->base, rq);
 
 		i915_request_add(rq);
+err_unpin:
+		intel_context_unpin(ce);
+err:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
+
 		if (err)
 			break;
 	}
@@ -1204,6 +1227,17 @@ static void set_ppgtt_barrier(void *data)
 	i915_vm_close(old);
 }
 
+static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
+{
+	struct i915_address_space *vm = ce->vm;
+
+	if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
+		/* ppGTT is not part of the legacy context image */
+		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
+
+	return 0;
+}
+
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
 	struct i915_address_space *vm = rq->context->vm;
@@ -1260,20 +1294,10 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
 
 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
 {
-	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
-		return true;
-
 	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
-		return false;
-
-	if (!atomic_read(&ce->pin_count))
-		return true;
-
-	/* ppGTT is not part of the legacy context image */
-	if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
-		return true;
-
-	return false;
+		return !ce->state;
+	else
+		return !atomic_read(&ce->pin_count);
 }
 
 static int set_ppgtt(struct drm_i915_file_private *file_priv,
@@ -1324,6 +1348,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 	 */
 	err = context_barrier_task(ctx, ALL_ENGINES,
 				   skip_ppgtt_update,
+				   pin_ppgtt_update,
 				   emit_ppgtt_update,
 				   set_ppgtt_barrier,
 				   old);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 76671f587b9d..1217f7a43069 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1917,8 +1917,8 @@ static int mock_context_barrier(void *arg)
 		return -ENOMEM;
 
 	counter = 0;
-	err = context_barrier_task(ctx, 0,
-				   NULL, NULL, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, 0, NULL, NULL, NULL,
+				   mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -1930,11 +1930,8 @@ static int mock_context_barrier(void *arg)
 	}
 
 	counter = 0;
-	err = context_barrier_task(ctx, ALL_ENGINES,
-				   skip_unused_engines,
-				   NULL,
-				   mock_barrier_task,
-				   &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
+				   NULL, NULL, mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -1954,8 +1951,8 @@ static int mock_context_barrier(void *arg)
 
 	counter = 0;
 	context_barrier_inject_fault = BIT(RCS0);
-	err = context_barrier_task(ctx, ALL_ENGINES,
-				   NULL, NULL, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES, NULL, NULL, NULL,
+				   mock_barrier_task, &counter);
 	context_barrier_inject_fault = 0;
 	if (err == -ENXIO)
 		err = 0;
@@ -1969,11 +1966,8 @@ static int mock_context_barrier(void *arg)
 		goto out;
 
 	counter = 0;
-	err = context_barrier_task(ctx, ALL_ENGINES,
-				   skip_unused_engines,
-				   NULL,
-				   mock_barrier_task,
-				   &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
+				   NULL, NULL, mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
-- 
2.27.0
