From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 10/23] drm/i915/gem: Break apart the early i915_vma_pin from execbuf object lookup
Date: Thu,  2 Jul 2020 09:32:12 +0100	[thread overview]
Message-ID: <20200702083225.20044-10-chris@chris-wilson.co.uk> (raw)
In-Reply-To: <20200702083225.20044-1-chris@chris-wilson.co.uk>

As a prelude to the next step, where we want to perform all the object
allocations together under the same lock, we must first delay
i915_vma_pin(), as it implicitly performs those allocations for us, one
by one. Because it allocates one object at a time, it is not allowed to
wait/evict; by pulling all the allocations together, the entire set can
be scheduled as one.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 70 +++++++++++--------
 1 file changed, 39 insertions(+), 31 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 6abbd8e80f05..1348cb5ec7e6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -33,6 +33,8 @@ struct eb_vma {
 
 	/** This vma's place in the execbuf reservation list */
 	struct drm_i915_gem_exec_object2 *exec;
+
+	struct list_head bind_link;
 	struct list_head unbound_link;
 	struct list_head reloc_link;
 
@@ -240,8 +242,8 @@ struct i915_execbuffer {
 	/** actual size of execobj[] as we may extend it for the cmdparser */
 	unsigned int buffer_count;
 
-	/** list of vma not yet bound during reservation phase */
-	struct list_head unbound;
+	/** list of all vma required to be bound for this execbuf */
+	struct list_head bind_list;
 
 	/** list of vma that have execobj.relocation_count */
 	struct list_head relocs;
@@ -565,6 +567,8 @@ eb_add_vma(struct i915_execbuffer *eb,
 						    eb->lut_size)]);
 	}
 
+	list_add_tail(&ev->bind_link, &eb->bind_list);
+
 	if (entry->relocation_count)
 		list_add_tail(&ev->reloc_link, &eb->relocs);
 
@@ -586,16 +590,6 @@ eb_add_vma(struct i915_execbuffer *eb,
 
 		eb->batch = ev;
 	}
-
-	if (eb_pin_vma(eb, entry, ev)) {
-		if (entry->offset != vma->node.start) {
-			entry->offset = vma->node.start | UPDATE;
-			eb->args->flags |= __EXEC_HAS_RELOC;
-		}
-	} else {
-		eb_unreserve_vma(ev);
-		list_add_tail(&ev->unbound_link, &eb->unbound);
-	}
 }
 
 static int eb_reserve_vma(const struct i915_execbuffer *eb,
@@ -670,13 +664,31 @@ static int wait_for_timeline(struct intel_timeline *tl)
 	} while (1);
 }
 
-static int eb_reserve(struct i915_execbuffer *eb)
+static int eb_reserve_vm(struct i915_execbuffer *eb)
 {
-	const unsigned int count = eb->buffer_count;
 	unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
-	struct list_head last;
+	struct list_head last, unbound;
 	struct eb_vma *ev;
-	unsigned int i, pass;
+	unsigned int pass;
+
+	INIT_LIST_HEAD(&unbound);
+	list_for_each_entry(ev, &eb->bind_list, bind_link) {
+		struct drm_i915_gem_exec_object2 *entry = ev->exec;
+		struct i915_vma *vma = ev->vma;
+
+		if (eb_pin_vma(eb, entry, ev)) {
+			if (entry->offset != vma->node.start) {
+				entry->offset = vma->node.start | UPDATE;
+				eb->args->flags |= __EXEC_HAS_RELOC;
+			}
+		} else {
+			eb_unreserve_vma(ev);
+			list_add_tail(&ev->unbound_link, &unbound);
+		}
+	}
+
+	if (list_empty(&unbound))
+		return 0;
 
 	/*
 	 * Attempt to pin all of the buffers into the GTT.
@@ -699,7 +711,7 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
 			return -EINTR;
 
-		list_for_each_entry(ev, &eb->unbound, unbound_link) {
+		list_for_each_entry(ev, &unbound, unbound_link) {
 			err = eb_reserve_vma(eb, ev, pin_flags);
 			if (err)
 				break;
@@ -710,13 +722,11 @@ static int eb_reserve(struct i915_execbuffer *eb)
 		}
 
 		/* Resort *all* the objects into priority order */
-		INIT_LIST_HEAD(&eb->unbound);
+		INIT_LIST_HEAD(&unbound);
 		INIT_LIST_HEAD(&last);
-		for (i = 0; i < count; i++) {
-			unsigned int flags;
+		list_for_each_entry(ev, &eb->bind_list, bind_link) {
+			unsigned int flags = ev->flags;
 
-			ev = &eb->vma[i];
-			flags = ev->flags;
 			if (flags & EXEC_OBJECT_PINNED &&
 			    flags & __EXEC_OBJECT_HAS_PIN)
 				continue;
@@ -725,17 +735,17 @@ static int eb_reserve(struct i915_execbuffer *eb)
 
 			if (flags & EXEC_OBJECT_PINNED)
 				/* Pinned must have their slot */
-				list_add(&ev->unbound_link, &eb->unbound);
+				list_add(&ev->unbound_link, &unbound);
 			else if (flags & __EXEC_OBJECT_NEEDS_MAP)
 				/* Map require the lowest 256MiB (aperture) */
-				list_add_tail(&ev->unbound_link, &eb->unbound);
+				list_add_tail(&ev->unbound_link, &unbound);
 			else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
 				/* Prioritise 4GiB region for restricted bo */
 				list_add(&ev->unbound_link, &last);
 			else
 				list_add_tail(&ev->unbound_link, &last);
 		}
-		list_splice_tail(&last, &eb->unbound);
+		list_splice_tail(&last, &unbound);
 		mutex_unlock(&eb->i915->drm.struct_mutex);
 
 		if (err == -EAGAIN) {
@@ -884,8 +894,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 	unsigned int i;
 	int err = 0;
 
+	INIT_LIST_HEAD(&eb->bind_list);
 	INIT_LIST_HEAD(&eb->relocs);
-	INIT_LIST_HEAD(&eb->unbound);
 
 	for (i = 0; i < eb->buffer_count; i++) {
 		struct i915_vma *vma;
@@ -1532,11 +1542,9 @@ static int eb_relocate(struct i915_execbuffer *eb)
 	if (err)
 		return err;
 
-	if (!list_empty(&eb->unbound)) {
-		err = eb_reserve(eb);
-		if (err)
-			return err;
-	}
+	err = eb_reserve_vm(eb);
+	if (err)
+		return err;
 
 	/* The objects are in their final locations, apply the relocations. */
 	if (eb->args->flags & __EXEC_HAS_RELOC) {
-- 
2.20.1
