From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [PATCH 15/15] drm/i915/gem: Bind the fence async for execbuf
Date: Mon, 16 Mar 2020 11:42:37 +0000
Message-ID: <20200316114237.5436-15-chris@chris-wilson.co.uk>
In-Reply-To: <20200316114237.5436-1-chris@chris-wilson.co.uk>

It is illegal to wait on another vma while holding the vm->mutex, as
that easily leads to ABBA deadlocks (we wait on a second vma that, in
turn, waits on us to release the vm->mutex). So, for as long as the
vm->mutex exists, move the waiting outside of the lock and into the
async binding pipeline.
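
For illustration only, the ABBA pattern reduces to two threads taking
the same pair of locks in opposite order. A minimal userspace sketch
(pthread mutexes standing in for vm->mutex and the second vma's wait;
none of this is i915 code):

/* abba.c: build with `cc abba.c -o abba -lpthread` */
#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t A = PTHREAD_MUTEX_INITIALIZER; /* "vm->mutex" */
static pthread_mutex_t B = PTHREAD_MUTEX_INITIALIZER; /* "other vma" */

static void *t1(void *arg)
{
	pthread_mutex_lock(&A);
	pthread_mutex_lock(&B);		/* waits for B while holding A */
	pthread_mutex_unlock(&B);
	pthread_mutex_unlock(&A);
	return NULL;
}

static void *t2(void *arg)
{
	pthread_mutex_lock(&B);
	pthread_mutex_lock(&A);		/* waits for A while holding B */
	pthread_mutex_unlock(&A);
	pthread_mutex_unlock(&B);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, t1, NULL);
	pthread_create(&b, NULL, t2, NULL);
	pthread_join(a, NULL);	/* may never return once both hold their first lock */
	pthread_join(b, NULL);
	return 0;
}

Once each thread holds its first lock, neither can take its second, so
the wait must never be issued from under the vm->mutex in the first
place.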

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
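[Below the fold, so this note stays out of the commit message.] The
shape of the change is to record the fence dependency while holding
vm->mutex and let the bind worker perform the blocking wait before
applying the fence. A hedged userspace analogue of that ordering, with
every name here (binding_work, bind_worker) invented for the sketch:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER;

struct binding_work {
	int needs_fence;	/* dependency recorded under the lock */
};

static void *bind_worker(void *arg)
{
	struct binding_work *w = arg;

	/* The blocking part runs here, outside vm_mutex, so a thread
	 * holding some other lock can never deadlock against us. */
	if (w->needs_fence)
		printf("worker: waited for the old user, fence applied\n");
	return NULL;
}

int main(void)
{
	struct binding_work w = { 0 };
	pthread_t t;

	pthread_mutex_lock(&vm_mutex);
	w.needs_fence = 1;	/* only record the dependency; do not wait */
	pthread_mutex_unlock(&vm_mutex);

	pthread_create(&t, NULL, bind_worker, &w);	/* the async pipeline */
	pthread_join(t, NULL);
	return 0;
}

In the patch itself the "record" step is __i915_vma_pin_fence_async()
queueing its waits on work->chain, and the "apply" step is
__i915_vma_apply_fence_async() run from eb_bind_vma().
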
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 21 +++--
 drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c  | 87 ++++++++++++++++++-
 drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h  |  5 ++
 3 files changed, 103 insertions(+), 10 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 7fb47ff185a3..db5ad6a0df28 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -472,13 +472,19 @@ eb_pin_vma(struct i915_execbuffer *eb,
 		return false;
 
 	if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
-		if (unlikely(i915_vma_pin_fence(vma))) {
-			i915_vma_unpin(vma);
-			return false;
-		}
+		struct i915_fence_reg *reg = vma->fence;
 
-		if (vma->fence)
+		/* Avoid waiting to change the fence; defer to async worker */
+		if (reg) {
+			if (READ_ONCE(reg->dirty))
+				return false;
+
+			atomic_inc(&reg->pin_count);
 			ev->flags |= __EXEC_OBJECT_HAS_FENCE;
+		} else {
+			if (i915_gem_object_is_tiled(vma->obj))
+				return false;
+		}
 	}
 
 	ev->flags |= __EXEC_OBJECT_HAS_PIN;
@@ -955,7 +961,7 @@ static int eb_reserve_vma(struct eb_vm_work *work, struct eb_vma *ev)
 
 pin:
 	if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
-		err = __i915_vma_pin_fence(vma); /* XXX no waiting */
+		err = __i915_vma_pin_fence_async(vma, &work->base);
 		if (unlikely(err))
 			return err;
 
@@ -1030,6 +1036,9 @@ static int eb_bind_vma(struct dma_fence_work *base)
 
 		GEM_BUG_ON(vma->vm != vm);
 
+		if (ev->flags & __EXEC_OBJECT_HAS_FENCE)
+			__i915_vma_apply_fence_async(vma);
+
 		if (!ev->bind_flags)
 			goto put;
 
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 97659a1249fd..d99d57be3505 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -21,10 +21,13 @@
  * IN THE SOFTWARE.
  */
 
+#include "i915_active.h"
 #include "i915_drv.h"
 #include "i915_scatterlist.h"
+#include "i915_sw_fence_work.h"
 #include "i915_pvinfo.h"
 #include "i915_vgpu.h"
+#include "i915_vma.h"
 
 /**
  * DOC: fence register handling
@@ -336,16 +339,14 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
 int __i915_vma_pin_fence(struct i915_vma *vma)
 {
 	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
-	struct i915_fence_reg *fence;
+	struct i915_fence_reg *fence = vma->fence;
 	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
 	int err;
 
 	lockdep_assert_held(&vma->vm->mutex);
 
 	/* Just update our place in the LRU if our fence is getting reused. */
-	if (vma->fence) {
-		fence = vma->fence;
-		GEM_BUG_ON(fence->vma != vma);
+	if (fence && fence->vma == vma) {
 		atomic_inc(&fence->pin_count);
 		if (!fence->dirty) {
 			list_move_tail(&fence->link, &ggtt->fence_list);
@@ -377,6 +378,84 @@ int __i915_vma_pin_fence(struct i915_vma *vma)
 	return err;
 }
 
+int __i915_vma_pin_fence_async(struct i915_vma *vma,
+			       struct dma_fence_work *work)
+{
+	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+	struct i915_fence_reg *fence = vma->fence;
+	int err;
+
+	lockdep_assert_held(&vma->vm->mutex);
+
+	/* Just update our place in the LRU if our fence is getting reused. */
+	if (fence && fence->vma == vma) {
+	} else if (set) {
+		fence = fence_find(ggtt);
+		if (IS_ERR(fence))
+			return PTR_ERR(fence);
+
+		GEM_BUG_ON(atomic_read(&fence->pin_count));
+		fence->dirty = true;
+	} else {
+		return 0;
+	}
+
+	atomic_inc(&fence->pin_count);
+	if (!fence->dirty) {
+		list_move_tail(&fence->link, &ggtt->fence_list);
+		return 0;
+	}
+
+	fence->tiling = 0;
+	if (set) {
+		if (INTEL_GEN(fence_to_i915(fence)) < 4) {
+			/* implicit 'unfenced' GPU blits */
+			err = i915_sw_fence_await_active(&work->chain,
+							 &vma->active,
+							 I915_ACTIVE_AWAIT_ALL);
+			if (err) {
+				atomic_dec(&fence->pin_count);
+				return err;
+			}
+		}
+
+		fence->start = vma->node.start;
+		fence->size = vma->fence_size;
+		fence->stride = i915_gem_object_get_stride(vma->obj);
+		fence->tiling = i915_gem_object_get_tiling(vma->obj);
+	}
+
+	set = xchg(&fence->vma, vma);
+	if (set) {
+		err = i915_sw_fence_await_active(&work->chain,
+						 &fence->active,
+						 I915_ACTIVE_AWAIT_ALL);
+		if (err) {
+			fence->vma = set;
+			atomic_dec(&fence->pin_count);
+			return err;
+		}
+
+		if (set != vma) {
+			GEM_BUG_ON(set->fence != fence);
+			i915_vma_revoke_mmap(set);
+			set->fence = NULL;
+		}
+	}
+
+	vma->fence = fence;
+	return 0;
+}
+
+void __i915_vma_apply_fence_async(struct i915_vma *vma)
+{
+	struct i915_fence_reg *fence = vma->fence;
+
+	if (fence->dirty)
+		fence_write(fence);
+}
+
 /**
  * i915_vma_pin_fence - set up fencing for a vma
  * @vma: vma to map through a fence reg
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
index 9eef679e1311..d306ac14d47e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
@@ -30,6 +30,7 @@
 
 #include "i915_active.h"
 
+struct dma_fence_work;
 struct drm_i915_gem_object;
 struct i915_ggtt;
 struct i915_vma;
@@ -70,6 +71,10 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
 void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
 					 struct sg_table *pages);
 
+int __i915_vma_pin_fence_async(struct i915_vma *vma,
+			       struct dma_fence_work *work);
+void __i915_vma_apply_fence_async(struct i915_vma *vma);
+
 void intel_ggtt_init_fences(struct i915_ggtt *ggtt);
 void intel_ggtt_fini_fences(struct i915_ggtt *ggtt);
 
-- 
2.20.1
