From: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH 21/23] drm/i915: Move i915_vma_lock in the selftests to avoid lock inversion, v2.
Date: Fri,  3 Jul 2020 14:22:19 +0200
Message-ID: <20200703122221.591656-22-maarten.lankhorst@linux.intel.com>
In-Reply-To: <20200703122221.591656-1-maarten.lankhorst@linux.intel.com>

Make sure the vma_lock is not used as an inner lock when the kernel
context is used, and add ww handling where appropriate.

Ensure that the execbuf selftests keep passing by using ww handling.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
---
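For reference, the converted selftests pin through the ww context using
the usual backoff/retry pattern. This is only a minimal sketch built from
the names in the diff below (obj and vma stand in for the object and vma
of each test), not a copy of any one hunk:

	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);
retry:
	/* take the object lock through the ww context, then pin */
	err = i915_gem_object_lock(obj, &ww);
	if (!err)
		err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
	if (!err) {
		/* build and submit the request while the lock is held */
		i915_vma_unpin(vma);
	}
	if (err == -EDEADLK) {
		/* lost the ordering against another ww acquirer: back off and retry */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
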
 .../i915/gem/selftests/i915_gem_coherency.c   | 26 ++++++------
 .../drm/i915/gem/selftests/i915_gem_context.c |  9 ++--
 .../drm/i915/gem/selftests/i915_gem_mman.c    | 41 ++++++++++++++-----
 drivers/gpu/drm/i915/gt/selftest_rps.c        | 30 ++++++++------
 drivers/gpu/drm/i915/selftests/i915_request.c | 18 +++++---
 5 files changed, 80 insertions(+), 44 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index dcdfc396f2f8..7049a6bbc03d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -201,25 +201,25 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
 
 	i915_gem_object_lock(ctx->obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(ctx->obj, true);
-	i915_gem_object_unlock(ctx->obj);
 	if (err)
-		return err;
+		goto out_unlock;
 
 	vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
+	if (IS_ERR(vma)) {
+		err = PTR_ERR(vma);
+		goto out_unlock;
+	}
 
 	rq = intel_engine_create_kernel_request(ctx->engine);
 	if (IS_ERR(rq)) {
-		i915_vma_unpin(vma);
-		return PTR_ERR(rq);
+		err = PTR_ERR(rq);
+		goto out_unpin;
 	}
 
 	cs = intel_ring_begin(rq, 4);
 	if (IS_ERR(cs)) {
-		i915_request_add(rq);
-		i915_vma_unpin(vma);
-		return PTR_ERR(cs);
+		err = PTR_ERR(cs);
+		goto out_rq;
 	}
 
 	if (INTEL_GEN(ctx->engine->i915) >= 8) {
@@ -240,14 +240,16 @@ static int gpu_set(struct context *ctx, unsigned long offset, u32 v)
 	}
 	intel_ring_advance(rq, cs);
 
-	i915_vma_lock(vma);
 	err = i915_request_await_object(rq, vma->obj, true);
 	if (err == 0)
 		err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-	i915_vma_unlock(vma);
-	i915_vma_unpin(vma);
 
+out_rq:
 	i915_request_add(rq);
+out_unpin:
+	i915_vma_unpin(vma);
+out_unlock:
+	i915_gem_object_unlock(ctx->obj);
 
 	return err;
 }
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 1217f7a43069..70ea86a22311 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -958,13 +958,12 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 
 	i915_gem_object_lock(obj, NULL);
 	err = i915_gem_object_set_to_gtt_domain(obj, false);
-	i915_gem_object_unlock(obj);
 	if (err)
-		return err;
+		goto out;
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
 	if (err)
-		return err;
+		goto out;
 
 	batch = rpcs_query_batch(vma);
 	if (IS_ERR(batch)) {
@@ -1013,7 +1012,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 
 	i915_request_add(rq);
 
-	return 0;
+	goto out;
 
 skip_request:
 	i915_request_set_error_once(rq, err);
@@ -1023,6 +1022,8 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
 err_vma:
 	i915_vma_unpin(vma);
 
+out:
+	i915_gem_object_unlock(obj);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 9fb95a45bcad..d27d87a678c8 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -528,31 +528,42 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
 		struct i915_vma *vma;
+		struct i915_gem_ww_ctx ww;
 		int err;
 
 		vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL);
 		if (IS_ERR(vma))
 			return PTR_ERR(vma);
 
-		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		i915_gem_ww_ctx_init(&ww, false);
+retry:
+		err = i915_gem_object_lock(obj, &ww);
+		if (!err)
+			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 		if (err)
-			return err;
+			goto err;
 
 		rq = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(rq)) {
-			i915_vma_unpin(vma);
-			return PTR_ERR(rq);
+			err = PTR_ERR(rq);
+			goto err_unpin;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, true);
 		if (err == 0)
 			err = i915_vma_move_to_active(vma, rq,
 						      EXEC_OBJECT_WRITE);
-		i915_vma_unlock(vma);
 
 		i915_request_add(rq);
+err_unpin:
 		i915_vma_unpin(vma);
+err:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
 		if (err)
 			return err;
 	}
@@ -1123,6 +1134,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 	for_each_uabi_engine(engine, i915) {
 		struct i915_request *rq;
 		struct i915_vma *vma;
+		struct i915_gem_ww_ctx ww;
 
 		vma = i915_vma_instance(obj, engine->kernel_context->vm, NULL);
 		if (IS_ERR(vma)) {
@@ -1130,9 +1142,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 			goto out_unmap;
 		}
 
-		err = i915_vma_pin(vma, 0, 0, PIN_USER);
+		i915_gem_ww_ctx_init(&ww, false);
+retry:
+		err = i915_gem_object_lock(obj, &ww);
+		if (!err)
+			err = i915_vma_pin_ww(vma, &ww, 0, 0, PIN_USER);
 		if (err)
-			goto out_unmap;
+			goto out_ww;
 
 		rq = i915_request_create(engine->kernel_context);
 		if (IS_ERR(rq)) {
@@ -1140,11 +1156,9 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 			goto out_unpin;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, false);
 		if (err == 0)
 			err = i915_vma_move_to_active(vma, rq, 0);
-		i915_vma_unlock(vma);
 
 		err = engine->emit_bb_start(rq, vma->node.start, 0, 0);
 		i915_request_get(rq);
@@ -1166,6 +1180,13 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 
 out_unpin:
 		i915_vma_unpin(vma);
+out_ww:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
 		if (err)
 			goto out_unmap;
 	}
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index bb753f0c12eb..ec1cfcfa0706 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -77,20 +77,20 @@ create_spin_counter(struct intel_engine_cs *engine,
 
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma)) {
-		i915_gem_object_put(obj);
-		return vma;
+		err = PTR_ERR(vma);
+		goto err_put;
 	}
 
 	err = i915_vma_pin(vma, 0, 0, PIN_USER);
-	if (err) {
-		i915_vma_put(vma);
-		return ERR_PTR(err);
-	}
+	if (err)
+		goto err_unlock;
+
+	i915_vma_lock(vma);
 
 	base = i915_gem_object_pin_map(obj, I915_MAP_WC);
 	if (IS_ERR(base)) {
-		i915_gem_object_put(obj);
-		return ERR_CAST(base);
+		err = PTR_ERR(base);
+		goto err_unpin;
 	}
 	cs = base;
 
@@ -134,6 +134,14 @@ create_spin_counter(struct intel_engine_cs *engine,
 	*cancel = base + loop;
 	*counter = srm ? memset32(base + end, 0, 1) : NULL;
 	return vma;
+
+err_unpin:
+	i915_vma_unpin(vma);
+err_unlock:
+	i915_vma_unlock(vma);
+err_put:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
 }
 
 static u8 wait_for_freq(struct intel_rps *rps, u8 freq, int timeout_ms)
@@ -639,7 +647,6 @@ int live_rps_frequency_cs(void *arg)
 			goto err_vma;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, false);
 		if (!err)
 			err = i915_vma_move_to_active(vma, rq, 0);
@@ -647,7 +654,6 @@ int live_rps_frequency_cs(void *arg)
 			err = rq->engine->emit_bb_start(rq,
 							vma->node.start,
 							PAGE_SIZE, 0);
-		i915_vma_unlock(vma);
 		i915_request_add(rq);
 		if (err)
 			goto err_vma;
@@ -708,6 +714,7 @@ int live_rps_frequency_cs(void *arg)
 		i915_gem_object_flush_map(vma->obj);
 		i915_gem_object_unpin_map(vma->obj);
 		i915_vma_unpin(vma);
+		i915_vma_unlock(vma);
 		i915_vma_put(vma);
 
 		st_engine_heartbeat_enable(engine);
@@ -781,7 +788,6 @@ int live_rps_frequency_srm(void *arg)
 			goto err_vma;
 		}
 
-		i915_vma_lock(vma);
 		err = i915_request_await_object(rq, vma->obj, false);
 		if (!err)
 			err = i915_vma_move_to_active(vma, rq, 0);
@@ -789,7 +795,6 @@ int live_rps_frequency_srm(void *arg)
 			err = rq->engine->emit_bb_start(rq,
 							vma->node.start,
 							PAGE_SIZE, 0);
-		i915_vma_unlock(vma);
 		i915_request_add(rq);
 		if (err)
 			goto err_vma;
@@ -849,6 +854,7 @@ int live_rps_frequency_srm(void *arg)
 		i915_gem_object_flush_map(vma->obj);
 		i915_gem_object_unpin_map(vma->obj);
 		i915_vma_unpin(vma);
+		i915_vma_unlock(vma);
 		i915_vma_put(vma);
 
 		st_engine_heartbeat_enable(engine);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 9271aad7f779..07311437330f 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -862,6 +862,8 @@ static int live_all_engines(void *arg)
 		goto out_free;
 	}
 
+	i915_vma_lock(batch);
+
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
 		request[idx] = intel_engine_create_kernel_request(engine);
@@ -872,11 +874,9 @@ static int live_all_engines(void *arg)
 			goto out_request;
 		}
 
-		i915_vma_lock(batch);
 		err = i915_request_await_object(request[idx], batch->obj, 0);
 		if (err == 0)
 			err = i915_vma_move_to_active(batch, request[idx], 0);
-		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
 		err = engine->emit_bb_start(request[idx],
@@ -891,6 +891,8 @@ static int live_all_engines(void *arg)
 		idx++;
 	}
 
+	i915_vma_unlock(batch);
+
 	idx = 0;
 	for_each_uabi_engine(engine, i915) {
 		if (i915_request_completed(request[idx])) {
@@ -981,12 +983,13 @@ static int live_sequential_engines(void *arg)
 			goto out_free;
 		}
 
+		i915_vma_lock(batch);
 		request[idx] = intel_engine_create_kernel_request(engine);
 		if (IS_ERR(request[idx])) {
 			err = PTR_ERR(request[idx]);
 			pr_err("%s: Request allocation failed for %s with err=%d\n",
 			       __func__, engine->name, err);
-			goto out_request;
+			goto out_unlock;
 		}
 
 		if (prev) {
@@ -996,16 +999,14 @@ static int live_sequential_engines(void *arg)
 				i915_request_add(request[idx]);
 				pr_err("%s: Request await failed for %s with err=%d\n",
 				       __func__, engine->name, err);
-				goto out_request;
+				goto out_unlock;
 			}
 		}
 
-		i915_vma_lock(batch);
 		err = i915_request_await_object(request[idx],
 						batch->obj, false);
 		if (err == 0)
 			err = i915_vma_move_to_active(batch, request[idx], 0);
-		i915_vma_unlock(batch);
 		GEM_BUG_ON(err);
 
 		err = engine->emit_bb_start(request[idx],
@@ -1020,6 +1021,11 @@ static int live_sequential_engines(void *arg)
 
 		prev = request[idx];
 		idx++;
+
+out_unlock:
+		i915_vma_unlock(batch);
+		if (err)
+			goto out_request;
 	}
 
 	idx = 0;
-- 
2.27.0
