From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 21/22] drm/i915/gem: Bind the fence async for execbuf
Date: Wed, 20 May 2020 08:55:02 +0100 [thread overview]
Message-ID: <20200520075503.10388-21-chris@chris-wilson.co.uk> (raw)
In-Reply-To: <20200520075503.10388-1-chris@chris-wilson.co.uk>
It is illegal to wait on an another vma while holding the vm->mutex, as
that easily leads to ABBA deadlocks (we wait on a second vma that waits
on us to release the vm->mutex). So while the vm->mutex exists, move the
waiting outside of the lock into the async binding pipeline.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
.../gpu/drm/i915/gem/i915_gem_execbuffer.c | 41 ++++--
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c | 137 +++++++++++++++++-
drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h | 5 +
3 files changed, 166 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 8fadf2dfb4bc..5e3b89aa5beb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -519,13 +519,23 @@ eb_pin_vma(struct i915_execbuffer *eb,
}
if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
- if (unlikely(i915_vma_pin_fence(vma))) {
- i915_vma_unpin(vma);
- return false;
- }
+ struct i915_fence_reg *reg = vma->fence;
- if (vma->fence)
+ /* Avoid waiting to change the fence; defer to async worker */
+ if (reg) {
+ if (READ_ONCE(reg->dirty)) {
+ __i915_vma_unpin(vma);
+ return false;
+ }
+
+ atomic_inc(&reg->pin_count);
ev->flags |= __EXEC_OBJECT_HAS_FENCE;
+ } else {
+ if (i915_gem_object_is_tiled(vma->obj)) {
+ __i915_vma_unpin(vma);
+ return false;
+ }
+ }
}
ev->flags |= __EXEC_OBJECT_HAS_PIN;
@@ -1066,15 +1076,6 @@ static int eb_reserve_vma(struct eb_vm_work *work, struct eb_vma *ev)
return err;
pin:
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
- err = __i915_vma_pin_fence(vma); /* XXX no waiting */
- if (unlikely(err))
- return err;
-
- if (vma->fence)
- ev->flags |= __EXEC_OBJECT_HAS_FENCE;
- }
-
bind_flags &= ~atomic_read(&vma->flags);
if (bind_flags) {
err = set_bind_fence(vma, work);
@@ -1105,6 +1106,15 @@ static int eb_reserve_vma(struct eb_vm_work *work, struct eb_vma *ev)
ev->flags |= __EXEC_OBJECT_HAS_PIN;
GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
+ if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ err = __i915_vma_pin_fence_async(vma, &work->base);
+ if (unlikely(err))
+ return err;
+
+ if (vma->fence)
+ ev->flags |= __EXEC_OBJECT_HAS_FENCE;
+ }
+
return 0;
}
@@ -1140,6 +1150,9 @@ static int __eb_bind_vma(struct eb_vm_work *work, int err)
list_for_each_entry(ev, &work->unbound, bind_link) {
struct i915_vma *vma = ev->vma;
+ if (ev->flags & __EXEC_OBJECT_HAS_FENCE)
+ __i915_vma_apply_fence_async(vma);
+
if (!ev->bind_flags)
goto put;
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index 7fb36b12fe7a..734b6aa61809 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -21,10 +21,13 @@
* IN THE SOFTWARE.
*/
+#include "i915_active.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
+#include "i915_sw_fence_work.h"
#include "i915_pvinfo.h"
#include "i915_vgpu.h"
+#include "i915_vma.h"
/**
* DOC: fence register handling
@@ -340,19 +343,37 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt)
return ERR_PTR(-EDEADLK);
}
+static int fence_wait_bind(struct i915_fence_reg *reg)
+{
+ struct dma_fence *fence;
+ int err = 0;
+
+ fence = i915_active_fence_get(&reg->active.excl);
+ if (fence) {
+ err = dma_fence_wait(fence, true);
+ dma_fence_put(fence);
+ }
+
+ return err;
+}
+
int __i915_vma_pin_fence(struct i915_vma *vma)
{
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
- struct i915_fence_reg *fence;
+ struct i915_fence_reg *fence = vma->fence;
struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
int err;
lockdep_assert_held(&vma->vm->mutex);
/* Just update our place in the LRU if our fence is getting reused. */
- if (vma->fence) {
- fence = vma->fence;
+ if (fence) {
GEM_BUG_ON(fence->vma != vma);
+
+ err = fence_wait_bind(fence);
+ if (err)
+ return err;
+
atomic_inc(&fence->pin_count);
if (!fence->dirty) {
list_move_tail(&fence->link, &ggtt->fence_list);
@@ -384,6 +405,116 @@ int __i915_vma_pin_fence(struct i915_vma *vma)
return err;
}
+static int set_bind_fence(struct i915_fence_reg *fence,
+ struct dma_fence_work *work)
+{
+ struct dma_fence *prev;
+ int err;
+
+ if (rcu_access_pointer(fence->active.excl.fence) == &work->dma)
+ return 0;
+
+ err = i915_sw_fence_await_active(&work->chain,
+ &fence->active,
+ I915_ACTIVE_AWAIT_ACTIVE);
+ if (err)
+ return err;
+
+ if (i915_active_acquire(&fence->active))
+ return -ENOENT;
+
+ prev = i915_active_set_exclusive(&fence->active, &work->dma);
+ if (unlikely(prev)) {
+ err = i915_sw_fence_await_dma_fence(&work->chain, prev, 0,
+ GFP_NOWAIT | __GFP_NOWARN);
+ dma_fence_put(prev);
+ }
+
+ i915_active_release(&fence->active);
+ return err < 0 ? err : 0;
+}
+
+int __i915_vma_pin_fence_async(struct i915_vma *vma,
+ struct dma_fence_work *work)
+{
+ struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
+ struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
+ struct i915_fence_reg *fence = vma->fence;
+ int err;
+
+ lockdep_assert_held(&vma->vm->mutex);
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (fence) {
+ GEM_BUG_ON(fence->vma != vma);
+ GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
+ } else if (set) {
+ if (!i915_vma_is_map_and_fenceable(vma))
+ return -EINVAL;
+
+ fence = fence_find(ggtt);
+ if (IS_ERR(fence))
+ return -ENOSPC;
+
+ GEM_BUG_ON(atomic_read(&fence->pin_count));
+ fence->dirty = true;
+ } else {
+ return 0;
+ }
+
+ atomic_inc(&fence->pin_count);
+ list_move_tail(&fence->link, &ggtt->fence_list);
+ if (!fence->dirty)
+ return 0;
+
+ if (INTEL_GEN(fence_to_i915(fence)) < 4 &&
+ rcu_access_pointer(vma->active.excl.fence) != &work->dma) {
+ /* implicit 'unfenced' GPU blits */
+ err = i915_sw_fence_await_active(&work->chain,
+ &vma->active,
+ I915_ACTIVE_AWAIT_ACTIVE);
+ if (err)
+ goto err_unpin;
+ }
+
+ err = set_bind_fence(fence, work);
+ if (err)
+ goto err_unpin;
+
+ if (set) {
+ fence->start = vma->node.start;
+ fence->size = vma->fence_size;
+ fence->stride = i915_gem_object_get_stride(vma->obj);
+ fence->tiling = i915_gem_object_get_tiling(vma->obj);
+
+ vma->fence = fence;
+ } else {
+ fence->tiling = 0;
+ vma->fence = NULL;
+ }
+
+ set = xchg(&fence->vma, set);
+ if (set && set != vma) {
+ GEM_BUG_ON(set->fence != fence);
+ WRITE_ONCE(set->fence, NULL);
+ i915_vma_revoke_mmap(set);
+ }
+
+ return 0;
+
+err_unpin:
+ atomic_dec(&fence->pin_count);
+ return err;
+}
+
+void __i915_vma_apply_fence_async(struct i915_vma *vma)
+{
+ struct i915_fence_reg *fence = vma->fence;
+
+ if (fence->dirty)
+ fence_write(fence);
+}
+
/**
* i915_vma_pin_fence - set up fencing for a vma
* @vma: vma to map through a fence reg
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
index 9eef679e1311..d306ac14d47e 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.h
@@ -30,6 +30,7 @@
#include "i915_active.h"
+struct dma_fence_work;
struct drm_i915_gem_object;
struct i915_ggtt;
struct i915_vma;
@@ -70,6 +71,10 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
struct sg_table *pages);
+int __i915_vma_pin_fence_async(struct i915_vma *vma,
+ struct dma_fence_work *work);
+void __i915_vma_apply_fence_async(struct i915_vma *vma);
+
void intel_ggtt_init_fences(struct i915_ggtt *ggtt);
void intel_ggtt_fini_fences(struct i915_ggtt *ggtt);
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2020-05-20 7:55 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-05-20 7:54 [Intel-gfx] [PATCH 01/22] drm/i915/gem: Suppress some random warnings Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 02/22] drm/i915/execlists: Shortcircuit queue_prio() for no internal levels Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 03/22] drm/i915: Avoid using rq->engine after free during i915_fence_release Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 04/22] drm/i915: Move saturated workload detection back to the context Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 05/22] drm/i915/gt: Use virtual_engine during execlists_dequeue Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 06/22] drm/i915/gt: Decouple inflight virtual engines Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 07/22] drm/i915/gt: Resubmit the virtual engine on schedule-out Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 08/22] drm/i915: Improve execute_cb struct packing Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 09/22] dma-buf: Proxy fence, an unsignaled fence placeholder Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 10/22] drm/syncobj: Allow use of dma-fence-proxy Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 11/22] drm/i915/gem: Teach execbuf how to wait on future syncobj Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 12/22] drm/i915/gem: Allow combining submit-fences with syncobj Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 13/22] drm/i915/gt: Declare when we enabled timeslicing Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 14/22] drm/i915/gt: Use built-in active intel_context reference Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 15/22] drm/i915: Drop I915_IDLE_ENGINES_TIMEOUT Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 16/22] drm/i915: Always defer fenced work to the worker Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 17/22] drm/i915/gem: Assign context id for async work Chris Wilson
2020-05-20 7:54 ` [Intel-gfx] [PATCH 18/22] drm/i915: Export a preallocate variant of i915_active_acquire() Chris Wilson
2020-05-20 7:55 ` [Intel-gfx] [PATCH 19/22] drm/i915/gem: Separate the ww_mutex walker into its own list Chris Wilson
2020-05-20 7:55 ` [Intel-gfx] [PATCH 20/22] drm/i915/gem: Asynchronous GTT unbinding Chris Wilson
2020-05-20 7:55 ` Chris Wilson [this message]
2020-05-20 7:55 ` [Intel-gfx] [PATCH 22/22] drm/i915: Micro-optimise i915_request_completed() Chris Wilson
2020-05-20 8:37 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/22] drm/i915/gem: Suppress some random warnings Patchwork
2020-05-20 8:38 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2020-05-20 9:03 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2020-05-20 20:47 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
-- strict thread matches above, loose matches on Subject: below --
2020-05-04 4:48 [Intel-gfx] [PATCH 01/22] drm/i915: Allow some leniency in PCU reads Chris Wilson
2020-05-04 4:49 ` [Intel-gfx] [PATCH 21/22] drm/i915/gem: Bind the fence async for execbuf Chris Wilson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200520075503.10388-21-chris@chris-wilson.co.uk \
--to=chris@chris-wilson.co.uk \
--cc=intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).