From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH 09/19] drm/i915: Add ww context handling to context_barrier_task
Date: Fri, 14 Feb 2020 11:30:45 +0100
Message-ID: <20200214103055.2117836-10-maarten.lankhorst@linux.intel.com>
In-Reply-To: <20200214103055.2117836-1-maarten.lankhorst@linux.intel.com>

This is required if we want to pass a ww context to intel_context_pin()
and gen6_ppgtt_pin(). context_barrier_task() now pins each context
inside an i915_gem_ww_ctx acquire/backoff/retry loop and gains an
optional pin() callback, which set_ppgtt() uses to pin the gen6 ppGTT
under the same ww context.
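
For reference, a simplified sketch of the ww handling this adds to
context_barrier_task(), condensed from the diff below (request emission
and the full error paths are abbreviated):

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin(ce);
	if (err)
		goto err;

	if (pin)
		err = pin(ce, &ww, data); /* e.g. gen6_ppgtt_pin() for legacy */
	if (err)
		goto err_unpin;

	rq = i915_request_create(ce);
	/* ... emit barrier, i915_active_add_request(), i915_request_add() ... */

err_unpin:
	intel_context_unpin(ce);
err:
	if (err == -EDEADLK) {
		/* Lost a ww lock to another acquire context: drop our
		 * locks, wait for the contended one, and try again. */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);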

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 55 ++++++++++++++-----
 .../drm/i915/gem/selftests/i915_gem_context.c | 22 +++-----
 2 files changed, 48 insertions(+), 29 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index e946b395ddf6..2152ac209450 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -975,12 +975,14 @@ I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
 static int context_barrier_task(struct i915_gem_context *ctx,
 				intel_engine_mask_t engines,
 				bool (*skip)(struct intel_context *ce, void *data),
+				int (*pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data),
 				int (*emit)(struct i915_request *rq, void *data),
 				void (*task)(void *data),
 				void *data)
 {
 	struct context_barrier_task *cb;
 	struct i915_gem_engines_iter it;
+	struct i915_gem_ww_ctx ww;
 	struct intel_context *ce;
 	int err = 0;
 
@@ -1012,10 +1014,21 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 		if (skip && skip(ce, data))
 			continue;
 
-		rq = intel_context_create_request(ce);
+		i915_gem_ww_ctx_init(&ww, true);
+retry:
+		err = intel_context_pin(ce);
+		if (err)
+			goto err;
+
+		if (pin)
+			err = pin(ce, &ww, data);
+		if (err)
+			goto err_unpin;
+
+		rq = i915_request_create(ce);
 		if (IS_ERR(rq)) {
 			err = PTR_ERR(rq);
-			break;
+			goto err_unpin;
 		}
 
 		err = 0;
@@ -1025,6 +1038,16 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 			err = i915_active_add_request(&cb->base, rq);
 
 		i915_request_add(rq);
+err_unpin:
+		intel_context_unpin(ce);
+err:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
+
 		if (err)
 			break;
 	}
@@ -1080,6 +1103,17 @@ static void set_ppgtt_barrier(void *data)
 	i915_vm_close(old);
 }
 
+static int pin_ppgtt_update(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void *data)
+{
+	struct i915_address_space *vm = ce->vm;
+
+	if (!HAS_LOGICAL_RING_CONTEXTS(vm->i915))
+		/* ppGTT is not part of the legacy context image */
+		return gen6_ppgtt_pin(i915_vm_to_ppgtt(vm));
+
+	return 0;
+}
+
 static int emit_ppgtt_update(struct i915_request *rq, void *data)
 {
 	struct i915_address_space *vm = rq->context->vm;
@@ -1136,20 +1170,10 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data)
 
 static bool skip_ppgtt_update(struct intel_context *ce, void *data)
 {
-	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
-		return true;
-
 	if (HAS_LOGICAL_RING_CONTEXTS(ce->engine->i915))
-		return false;
-
-	if (!atomic_read(&ce->pin_count))
-		return true;
-
-	/* ppGTT is not part of the legacy context image */
-	if (gen6_ppgtt_pin(i915_vm_to_ppgtt(ce->vm)))
-		return true;
-
-	return false;
+		return !ce->state;
+	else
+		return !atomic_read(&ce->pin_count);
 }
 
 static int set_ppgtt(struct drm_i915_file_private *file_priv,
@@ -1200,6 +1224,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
 	 */
 	err = context_barrier_task(ctx, ALL_ENGINES,
 				   skip_ppgtt_update,
+				   pin_ppgtt_update,
 				   emit_ppgtt_update,
 				   set_ppgtt_barrier,
 				   old);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index d3d8981b268f..50b465b62d88 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1904,8 +1904,8 @@ static int mock_context_barrier(void *arg)
 		return -ENOMEM;
 
 	counter = 0;
-	err = context_barrier_task(ctx, 0,
-				   NULL, NULL, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, 0, NULL, NULL, NULL,
+				   mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -1917,11 +1917,8 @@ static int mock_context_barrier(void *arg)
 	}
 
 	counter = 0;
-	err = context_barrier_task(ctx, ALL_ENGINES,
-				   skip_unused_engines,
-				   NULL,
-				   mock_barrier_task,
-				   &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
+				   NULL, NULL, mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
@@ -1941,8 +1938,8 @@ static int mock_context_barrier(void *arg)
 
 	counter = 0;
 	context_barrier_inject_fault = BIT(RCS0);
-	err = context_barrier_task(ctx, ALL_ENGINES,
-				   NULL, NULL, mock_barrier_task, &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES, NULL, NULL, NULL,
+				   mock_barrier_task, &counter);
 	context_barrier_inject_fault = 0;
 	if (err == -ENXIO)
 		err = 0;
@@ -1956,11 +1953,8 @@ static int mock_context_barrier(void *arg)
 		goto out;
 
 	counter = 0;
-	err = context_barrier_task(ctx, ALL_ENGINES,
-				   skip_unused_engines,
-				   NULL,
-				   mock_barrier_task,
-				   &counter);
+	err = context_barrier_task(ctx, ALL_ENGINES, skip_unused_engines,
+				   NULL, NULL, mock_barrier_task, &counter);
 	if (err) {
 		pr_err("Failed at line %d, err=%d\n", __LINE__, err);
 		goto out;
-- 
2.25.0.24.g3f081b084b0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
