From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 19/22] drm/i915/gem: Separate the ww_mutex walker into its own list
Date: Wed, 20 May 2020 08:55:00 +0100
Message-ID: <20200520075503.10388-19-chris@chris-wilson.co.uk>
In-Reply-To: <20200520075503.10388-1-chris@chris-wilson.co.uk>

In preparation for making eb_vma bigger and heavier to run in parallel,
we need to stop applying an in-place swap() to reorder the array around
ww_mutex deadlocks. Keep the array intact and reorder the locks using a
dedicated list.
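
For illustration only, here is a minimal sketch of the same backoff
pattern that the eb_move_to_gpu() change below implements, reduced to
hypothetical types (struct item, item->link and item->resv are
stand-ins, not i915 code; the iterator macro is the one added to
i915_utils.h in this patch):

	#include <linux/list.h>
	#include <linux/ww_mutex.h>

	struct item {
		struct list_head link;
		struct ww_mutex resv;
	};

	static int lock_all(struct list_head *head, struct ww_acquire_ctx *ctx)
	{
		struct item *it;
		int err;

		list_for_each_entry(it, head, link) {
			err = ww_mutex_lock_interruptible(&it->resv, ctx);
			if (err == -EDEADLK) {
				struct item *unlock = it, *n;

				/* Drop every lock already held, rotating each
				 * entry to the tail so that the contended
				 * entry becomes first in the list. */
				list_for_each_entry_safe_continue_reverse(unlock, n,
									  head, link) {
					ww_mutex_unlock(&unlock->resv);
					list_move_tail(&unlock->link, head);
				}

				/* Sleep on the contended lock, then resume
				 * the forward walk from here. */
				err = ww_mutex_lock_slow_interruptible(&it->resv,
								       ctx);
			}
			if (err) { /* a real error, e.g. -EINTR */
				list_for_each_entry_continue_reverse(it, head, link)
					ww_mutex_unlock(&it->resv);
				return err;
			}
		}
		ww_acquire_done(ctx);
		return 0;
	}

The array that backs the list is never permuted; only the list links
move, which is what lets eb->vma[] stay stable.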

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 48 +++++++++++--------
 drivers/gpu/drm/i915/i915_utils.h             |  6 +++
 2 files changed, 34 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 91e77348282a..653e32f79066 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -36,6 +36,7 @@ struct eb_vma {
 	struct drm_i915_gem_exec_object2 *exec;
 	struct list_head bind_link;
 	struct list_head reloc_link;
+	struct list_head lock_link;
 
 	struct hlist_node node;
 	u32 handle;
@@ -254,6 +255,8 @@ struct i915_execbuffer {
 	/** list of vma that have execobj.relocation_count */
 	struct list_head relocs;
 
+	struct list_head lock;
+
 	/**
 	 * Track the most recently used object for relocations, as we
 	 * frequently have to perform multiple relocations within the same
@@ -399,6 +402,10 @@ static int eb_create(struct i915_execbuffer *eb)
 		eb->lut_size = -eb->buffer_count;
 	}
 
+	INIT_LIST_HEAD(&eb->relocs);
+	INIT_LIST_HEAD(&eb->unbound);
+	INIT_LIST_HEAD(&eb->lock);
+
 	return 0;
 }
 
@@ -604,6 +611,8 @@ eb_add_vma(struct i915_execbuffer *eb,
 		eb_unreserve_vma(ev);
 		list_add_tail(&ev->bind_link, &eb->unbound);
 	}
+
+	list_add_tail(&ev->lock_link, &eb->lock);
 }
 
 static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -880,9 +889,6 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	unsigned int i;
 	int err = 0;
 
-	INIT_LIST_HEAD(&eb->relocs);
-	INIT_LIST_HEAD(&eb->unbound);
-
 	for (i = 0; i < eb->buffer_count; i++) {
 		struct i915_vma *vma;
 
@@ -1789,38 +1795,39 @@ static int eb_relocate(struct i915_execbuffer *eb)
 
 static int eb_move_to_gpu(struct i915_execbuffer *eb)
 {
-	const unsigned int count = eb->buffer_count;
 	struct ww_acquire_ctx acquire;
-	unsigned int i;
+	struct eb_vma *ev;
 	int err = 0;
 
 	ww_acquire_init(&acquire, &reservation_ww_class);
 
-	for (i = 0; i < count; i++) {
-		struct eb_vma *ev = &eb->vma[i];
+	list_for_each_entry(ev, &eb->lock, lock_link) {
 		struct i915_vma *vma = ev->vma;
 
 		err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
 		if (err == -EDEADLK) {
-			GEM_BUG_ON(i == 0);
-			do {
-				int j = i - 1;
+			struct eb_vma *unlock = ev, *en;
 
-				ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
-
-				swap(eb->vma[i],  eb->vma[j]);
-			} while (--i);
+			list_for_each_entry_safe_continue_reverse(unlock, en, &eb->lock, lock_link) {
+				ww_mutex_unlock(&unlock->vma->resv->lock);
+				list_move_tail(&unlock->lock_link, &eb->lock);
+			}
 
+			GEM_BUG_ON(!list_is_first(&ev->lock_link, &eb->lock));
 			err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
 							       &acquire);
 		}
-		if (err)
-			break;
+		if (err) {
+			list_for_each_entry_continue_reverse(ev, &eb->lock, lock_link)
+				ww_mutex_unlock(&ev->vma->resv->lock);
+
+			ww_acquire_fini(&acquire);
+			goto err_skip;
+		}
 	}
 	ww_acquire_done(&acquire);
 
-	while (i--) {
-		struct eb_vma *ev = &eb->vma[i];
+	list_for_each_entry(ev, &eb->lock, lock_link) {
 		struct i915_vma *vma = ev->vma;
 		unsigned int flags = ev->flags;
 		struct drm_i915_gem_object *obj = vma->obj;
@@ -2121,9 +2128,10 @@ static int eb_parse(struct i915_execbuffer *eb)
 	if (err)
 		goto err_trampoline;
 
-	eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
-	eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
 	eb->batch = &eb->vma[eb->buffer_count++];
+	eb->batch->vma = i915_vma_get(shadow);
+	eb->batch->flags = __EXEC_OBJECT_HAS_PIN;
+	list_add_tail(&eb->batch->lock_link, &eb->lock);
 	eb->vma[eb->buffer_count].vma = NULL;
 
 	eb->trampoline = trampoline;
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 03a73d2bd50d..28813806bc19 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -266,6 +266,12 @@ static inline int list_is_last_rcu(const struct list_head *list,
 	return READ_ONCE(list->next) == head;
 }
 
+#define list_for_each_entry_safe_continue_reverse(pos, n, head, member)	\
+	for (pos = list_prev_entry(pos, member),			\
+		n = list_prev_entry(pos, member);			\
+	     &pos->member != (head);					\
+	     pos = n, n = list_prev_entry(n, member))
+
 /*
  * Wait until the work is finally complete, even if it tries to postpone
  * by requeueing itself. Note, that if the worker never cancels itself,
-- 
2.20.1
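
For reference, the iterator added to i915_utils.h resumes from the
current position and walks backwards towards the list head, caching the
previous entry so that the current one may be unlinked or moved
mid-walk. A self-contained sketch of that property (struct node and
drop_preceding() are hypothetical, not part of this patch):

	#include <linux/list.h>
	#include <linux/slab.h>

	struct node {
		int id;
		struct list_head link;
	};

	/* Free every entry before @pos (exclusive), walking backwards.
	 * Deleting @pos inside the body is safe because @n was saved
	 * before the entry was touched. */
	static void drop_preceding(struct node *pos, struct list_head *head)
	{
		struct node *n;

		list_for_each_entry_safe_continue_reverse(pos, n, head, link) {
			list_del(&pos->link);
			kfree(pos);
		}
	}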
