From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 27/66] drm/i915/gem: Pull execbuf dma resv under a single critical section
Date: Wed, 15 Jul 2020 12:51:08 +0100 [thread overview]
Message-ID: <20200715115147.11866-27-chris@chris-wilson.co.uk> (raw)
In-Reply-To: <20200715115147.11866-1-chris@chris-wilson.co.uk>
Acquire all the objects, their backing storage, and the page directories
used by execbuf under a single common ww_mutex. We do, however, have to
restart the critical section a few times in order to handle various
restrictions (such as avoiding copy_(from|to)_user and mmap_sem).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
.../gpu/drm/i915/gem/i915_gem_execbuffer.c | 168 +++++++++---------
.../i915/gem/selftests/i915_gem_execbuffer.c | 8 +-
2 files changed, 87 insertions(+), 89 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index ebabc0746d50..db433f3f18ec 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -20,6 +20,7 @@
#include "gt/intel_gt_pm.h"
#include "gt/intel_gt_requests.h"
#include "gt/intel_ring.h"
+#include "mm/i915_acquire_ctx.h"
#include "i915_drv.h"
#include "i915_gem_clflush.h"
@@ -244,6 +245,8 @@ struct i915_execbuffer {
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
+ struct i915_acquire_ctx acquire; /** lock for _all_ DMA reservations */
+
struct i915_request *request; /** our request to build */
struct eb_vma *batch; /** identity of the batch obj/vma */
@@ -389,42 +392,6 @@ static void eb_vma_array_put(struct eb_vma_array *arr)
kref_put(&arr->kref, eb_vma_array_destroy);
}
-static int
-eb_lock_vma(struct i915_execbuffer *eb, struct ww_acquire_ctx *acquire)
-{
- struct eb_vma *ev;
- int err = 0;
-
- list_for_each_entry(ev, &eb->submit_list, submit_link) {
- struct i915_vma *vma = ev->vma;
-
- err = ww_mutex_lock_interruptible(&vma->resv->lock, acquire);
- if (err == -EDEADLK) {
- struct eb_vma *unlock = ev, *en;
-
- list_for_each_entry_safe_continue_reverse(unlock, en,
- &eb->submit_list,
- submit_link) {
- ww_mutex_unlock(&unlock->vma->resv->lock);
- list_move_tail(&unlock->submit_link, &eb->submit_list);
- }
-
- GEM_BUG_ON(!list_is_first(&ev->submit_link, &eb->submit_list));
- err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
- acquire);
- }
- if (err) {
- list_for_each_entry_continue_reverse(ev,
- &eb->submit_list,
- submit_link)
- ww_mutex_unlock(&ev->vma->resv->lock);
- break;
- }
- }
-
- return err;
-}
-
static int eb_create(struct i915_execbuffer *eb)
{
/* Allocate an extra slot for use by the sentinel */
@@ -668,6 +635,25 @@ eb_add_vma(struct i915_execbuffer *eb,
}
}
+static int eb_lock_mm(struct i915_execbuffer *eb)
+{
+ struct eb_vma *ev;
+ int err;
+
+ list_for_each_entry(ev, &eb->bind_list, bind_link) {
+ err = i915_acquire_ctx_lock(&eb->acquire, ev->vma->obj);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int eb_acquire_mm(struct i915_execbuffer *eb)
+{
+ return i915_acquire_mm(&eb->acquire);
+}
+
struct eb_vm_work {
struct dma_fence_work base;
struct eb_vma_array *array;
@@ -1390,7 +1376,15 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
unsigned long count;
struct eb_vma *ev;
unsigned int pass;
- int err = 0;
+ int err;
+
+ err = eb_lock_mm(eb);
+ if (err)
+ return err;
+
+ err = eb_acquire_mm(eb);
+ if (err)
+ return err;
count = 0;
INIT_LIST_HEAD(&unbound);
@@ -1416,10 +1410,15 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
if (count == 0)
return 0;
+ /* We need to reserve page directories, release all, start over */
+ i915_acquire_ctx_fini(&eb->acquire);
+
pass = 0;
do {
struct eb_vm_work *work;
+ i915_acquire_ctx_init(&eb->acquire);
+
/*
* We need to hold one lock as we bind all the vma so that
* we have a consistent view of the entire vm and can plan
@@ -1436,6 +1435,11 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
* beneath it, so we have to stage and preallocate all the
* resources we may require before taking the mutex.
*/
+
+ err = eb_lock_mm(eb);
+ if (err)
+ return err;
+
work = eb_vm_work(eb, count);
if (!work)
return -ENOMEM;
@@ -1453,6 +1457,10 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
}
}
+ err = eb_acquire_mm(eb);
+ if (err)
+ return eb_vm_work_cancel(work, err);
+
err = i915_vm_pin_pt_stash(work->vm, &work->stash);
if (err)
return eb_vm_work_cancel(work, err);
@@ -1543,6 +1551,8 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
if (signal_pending(current))
return -EINTR;
+ i915_acquire_ctx_fini(&eb->acquire);
+
/* Now safe to wait with no reservations held */
if (err == -EAGAIN) {
@@ -1566,8 +1576,10 @@ static int eb_reserve_vm(struct i915_execbuffer *eb)
* total ownership of the vm.
*/
err = wait_for_unbinds(eb, &unbound, pass++);
- if (err)
+ if (err) {
+ i915_acquire_ctx_init(&eb->acquire);
return err;
+ }
} while (1);
}
@@ -1994,8 +2006,6 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
struct drm_i915_gem_object *obj = vma->obj;
int err;
- i915_vma_lock(vma);
-
if (obj->cache_dirty & ~obj->cache_coherent)
i915_gem_clflush_object(obj, 0);
obj->write_domain = 0;
@@ -2004,8 +2014,6 @@ static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
if (err == 0)
err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
- i915_vma_unlock(vma);
-
return err;
}
@@ -2334,11 +2342,9 @@ get_gpu_relocs(struct i915_execbuffer *eb,
int err;
GEM_BUG_ON(!vma);
- i915_vma_lock(vma);
err = i915_request_await_object(rq, vma->obj, false);
if (err == 0)
err = i915_vma_move_to_active(vma, rq, 0);
- i915_vma_unlock(vma);
if (err)
return ERR_PTR(err);
@@ -2470,6 +2476,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
/* Drop everything before we copy_from_user */
list_for_each_entry(ev, &eb->bind_list, bind_link)
eb_unreserve_vma(ev);
+ i915_acquire_ctx_fini(&eb->acquire);
/* Pick a single buffer for all relocs, within reason */
c->bufsz *= sizeof(struct drm_i915_gem_relocation_entry);
@@ -2482,6 +2489,7 @@ static int eb_relocate(struct i915_execbuffer *eb)
/* Copy the user's relocations into plain system memory */
err = eb_relocs_copy_user(eb);
+ i915_acquire_ctx_init(&eb->acquire);
if (err)
return err;
@@ -2517,17 +2525,8 @@ static int eb_reserve(struct i915_execbuffer *eb)
static int eb_move_to_gpu(struct i915_execbuffer *eb)
{
- struct ww_acquire_ctx acquire;
struct eb_vma *ev;
- int err = 0;
-
- ww_acquire_init(&acquire, &reservation_ww_class);
-
- err = eb_lock_vma(eb, &acquire);
- if (err)
- goto err_fini;
-
- ww_acquire_done(&acquire);
+ int err;
list_for_each_entry(ev, &eb->submit_list, submit_link) {
struct i915_vma *vma = ev->vma;
@@ -2566,27 +2565,22 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
flags &= ~EXEC_OBJECT_ASYNC;
}
- if (err == 0 && !(flags & EXEC_OBJECT_ASYNC)) {
+ if (!(flags & EXEC_OBJECT_ASYNC)) {
err = i915_request_await_object
(eb->request, obj, flags & EXEC_OBJECT_WRITE);
+ if (unlikely(err))
+ goto err_skip;
}
- if (err == 0)
- err = i915_vma_move_to_active(vma, eb->request, flags);
-
- i915_vma_unlock(vma);
+ err = i915_vma_move_to_active(vma, eb->request, flags);
+ if (unlikely(err))
+ goto err_skip;
}
- ww_acquire_fini(&acquire);
-
- if (unlikely(err))
- goto err_skip;
/* Unconditionally flush any chipset caches (for streaming writes). */
intel_gt_chipset_flush(eb->engine->gt);
return 0;
-err_fini:
- ww_acquire_fini(&acquire);
err_skip:
i915_request_set_error_once(eb->request, err);
return err;
@@ -2749,39 +2743,27 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
/* Mark active refs early for this worker, in case we get interrupted */
err = parser_mark_active(pw, eb->context->timeline);
if (err)
- goto err_commit;
-
- err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
- if (err)
- goto err_commit;
+ goto out;
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
- goto err_commit_unlock;
+ goto out;
/* Wait for all writes (and relocs) into the batch to complete */
err = i915_sw_fence_await_reservation(&pw->base.chain,
pw->batch->resv, NULL, false,
0, I915_FENCE_GFP);
if (err < 0)
- goto err_commit_unlock;
+ goto out;
/* Keep the batch alive and unwritten as we parse */
dma_resv_add_shared_fence(pw->batch->resv, &pw->base.dma);
- dma_resv_unlock(pw->batch->resv);
-
/* Force execution to wait for completion of the parser */
- dma_resv_lock(shadow->resv, NULL);
dma_resv_add_excl_fence(shadow->resv, &pw->base.dma);
- dma_resv_unlock(shadow->resv);
- dma_fence_work_commit_imm(&pw->base);
- return 0;
-
-err_commit_unlock:
- dma_resv_unlock(pw->batch->resv);
-err_commit:
+ err = 0;
+out:
i915_sw_fence_set_error_once(&pw->base.chain, err);
dma_fence_work_commit_imm(&pw->base);
return err;
@@ -2833,10 +2815,6 @@ static int eb_submit(struct i915_execbuffer *eb)
{
int err;
- err = eb_move_to_gpu(eb);
- if (err)
- return err;
-
if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
err = i915_reset_gen7_sol_offsets(eb->request);
if (err)
@@ -3420,6 +3398,9 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_engine;
lockdep_assert_held(&eb.context->timeline->mutex);
+ /* *** DMA-RESV LOCK *** */
+ i915_acquire_ctx_init(&eb.acquire);
+
err = eb_reserve(&eb);
if (err) {
/*
@@ -3433,6 +3414,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
+ /* *** DMA-RESV SEALED *** */
+
err = eb_parse(&eb);
if (err)
goto err_vma;
@@ -3483,9 +3466,20 @@ i915_gem_do_execbuffer(struct drm_device *dev,
intel_gt_buffer_pool_mark_active(eb.parser.shadow->vma->private,
eb.request);
+ err = eb_move_to_gpu(&eb);
+ if (err)
+ goto err_request;
+
+ /* *** DMA-RESV PUBLISHED *** */
+
trace_i915_request_queue(eb.request, eb.batch_flags);
err = eb_submit(&eb);
+
err_request:
+ i915_acquire_ctx_fini(&eb.acquire);
+ eb.acquire.locked = ERR_PTR(-1);
+ /* *** DMA-RESV UNLOCK *** */
+
i915_request_get(eb.request);
eb_request_add(&eb);
@@ -3496,6 +3490,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
i915_request_put(eb.request);
err_vma:
+ if (eb.acquire.locked != ERR_PTR(-1))
+ i915_acquire_ctx_fini(&eb.acquire);
eb_unlock_engine(&eb);
/* *** TIMELINE UNLOCK *** */
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index 8776f2750fa7..57181718acb1 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -101,11 +101,13 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb, struct eb_vma *ev)
return err;
ev->exec->relocation_count = err;
+ i915_acquire_ctx_init(&eb->acquire);
+
err = eb_reserve_vm(eb);
- if (err)
- return err;
+ if (err == 0)
+ err = eb_relocs_gpu(eb);
- err = eb_relocs_gpu(eb);
+ i915_acquire_ctx_fini(&eb->acquire);
if (err)
return err;
--
2.20.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
next prev parent reply other threads:[~2020-07-15 11:52 UTC|newest]
Thread overview: 154+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-07-15 11:50 [Intel-gfx] [PATCH 01/66] drm/i915: Reduce i915_request.lock contention for i915_request_wait Chris Wilson
2020-07-15 11:50 ` [Intel-gfx] [PATCH 02/66] drm/i915: Remove i915_request.lock requirement for execution callbacks Chris Wilson
2020-07-15 11:50 ` [Intel-gfx] [PATCH 03/66] drm/i915: Remove requirement for holding i915_request.lock for breadcrumbs Chris Wilson
2020-07-15 11:50 ` [Intel-gfx] [PATCH 04/66] drm/i915: Add a couple of missing i915_active_fini() Chris Wilson
2020-07-17 12:00 ` Tvrtko Ursulin
2020-07-21 12:23 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 05/66] drm/i915: Skip taking acquire mutex for no ref->active callback Chris Wilson
2020-07-17 12:04 ` Tvrtko Ursulin
2020-07-21 12:32 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 06/66] drm/i915: Export a preallocate variant of i915_active_acquire() Chris Wilson
2020-07-17 12:21 ` Tvrtko Ursulin
2020-07-17 12:45 ` Chris Wilson
2020-07-17 13:06 ` Tvrtko Ursulin
2020-07-21 15:33 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 07/66] drm/i915: Keep the most recently used active-fence upon discard Chris Wilson
2020-07-17 12:38 ` Tvrtko Ursulin
2020-07-28 14:22 ` Chris Wilson
2020-07-22 9:46 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 08/66] drm/i915: Make the stale cached active node available for any timeline Chris Wilson
2020-07-17 13:04 ` Tvrtko Ursulin
2020-07-28 14:28 ` Chris Wilson
2020-07-29 12:40 ` Tvrtko Ursulin
2020-07-29 13:42 ` Chris Wilson
2020-07-29 13:53 ` Chris Wilson
2020-07-29 14:22 ` Tvrtko Ursulin
2020-07-29 14:39 ` Chris Wilson
2020-07-29 14:52 ` Chris Wilson
2020-07-29 15:31 ` Tvrtko Ursulin
2020-07-22 11:19 ` Thomas Hellström (Intel)
2020-07-28 14:31 ` Chris Wilson
2020-07-15 11:50 ` [Intel-gfx] [PATCH 09/66] drm/i915: Provide a fastpath for waiting on vma bindings Chris Wilson
2020-07-17 13:23 ` Tvrtko Ursulin
2020-07-28 14:35 ` Chris Wilson
2020-07-29 12:43 ` Tvrtko Ursulin
2020-07-22 15:07 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 10/66] drm/i915: Soften the tasklet flush frequency before waits Chris Wilson
2020-07-16 14:23 ` Mika Kuoppala
2020-07-22 15:10 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 11/66] drm/i915: Preallocate stashes for vma page-directories Chris Wilson
2020-07-20 10:35 ` Matthew Auld
2020-07-23 14:33 ` Thomas Hellström (Intel)
2020-07-28 14:42 ` Chris Wilson
2020-07-31 7:43 ` Thomas Hellström (Intel)
2020-07-27 9:24 ` Thomas Hellström (Intel)
2020-07-28 14:50 ` Chris Wilson
2020-07-30 12:04 ` Thomas Hellström (Intel)
2020-07-30 12:28 ` Thomas Hellström (Intel)
2020-08-04 14:08 ` Chris Wilson
2020-08-04 16:14 ` Daniel Vetter
2020-07-15 11:50 ` [Intel-gfx] [PATCH 12/66] drm/i915: Switch to object allocations for page directories Chris Wilson
2020-07-20 10:34 ` Matthew Auld
2020-07-20 10:40 ` Chris Wilson
2020-07-15 11:50 ` [Intel-gfx] [PATCH 13/66] drm/i915/gem: Don't drop the timeline lock during execbuf Chris Wilson
2020-07-23 16:09 ` Thomas Hellström (Intel)
2020-07-28 14:46 ` Thomas Hellström (Intel)
2020-07-28 14:51 ` Chris Wilson
2020-07-31 8:09 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 14/66] drm/i915/gem: Rename execbuf.bind_link to unbound_link Chris Wilson
2020-07-31 8:11 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 15/66] drm/i915/gem: Break apart the early i915_vma_pin from execbuf object lookup Chris Wilson
2020-07-31 8:51 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 16/66] drm/i915/gem: Remove the call for no-evict i915_vma_pin Chris Wilson
2020-07-17 14:36 ` Tvrtko Ursulin
2020-07-28 15:04 ` Chris Wilson
2020-07-28 9:46 ` Thomas Hellström (Intel)
2020-07-28 15:05 ` Chris Wilson
2020-07-31 8:58 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 17/66] drm/i915: Add list_for_each_entry_safe_continue_reverse Chris Wilson
2020-07-31 8:59 ` Thomas Hellström (Intel)
2020-07-15 11:50 ` [Intel-gfx] [PATCH 18/66] drm/i915: Always defer fenced work to the worker Chris Wilson
2020-07-31 9:03 ` Thomas Hellström (Intel)
2020-07-31 13:28 ` Chris Wilson
2020-07-31 13:31 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 19/66] drm/i915/gem: Assign context id for async work Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 20/66] drm/i915/gem: Separate the ww_mutex walker into its own list Chris Wilson
2020-07-31 9:23 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 21/66] drm/i915/gem: Asynchronous GTT unbinding Chris Wilson
2020-07-31 13:09 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 22/66] drm/i915/gem: Bind the fence async for execbuf Chris Wilson
2020-07-27 18:19 ` Thomas Hellström (Intel)
2020-07-28 15:08 ` Chris Wilson
2020-07-31 13:12 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 23/66] drm/i915/gem: Include cmdparser in common execbuf pinning Chris Wilson
2020-07-31 9:43 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 24/66] drm/i915/gem: Include secure batch " Chris Wilson
2020-07-31 9:47 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 25/66] drm/i915/gem: Reintroduce multiple passes for reloc processing Chris Wilson
2020-07-31 10:05 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 26/66] drm/i915: Add an implementation for i915_gem_ww_ctx locking, v2 Chris Wilson
2020-07-31 10:07 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` Chris Wilson [this message]
2020-07-27 18:08 ` [Intel-gfx] [PATCH 27/66] drm/i915/gem: Pull execbuf dma resv under a single critical section Thomas Hellström (Intel)
2020-07-28 15:16 ` Chris Wilson
2020-07-30 12:57 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 28/66] drm/i915/gem: Replace i915_gem_object.mm.mutex with reservation_ww_class Chris Wilson
2020-07-15 15:43 ` Maarten Lankhorst
2020-07-16 15:53 ` Tvrtko Ursulin
2020-07-28 11:17 ` Thomas Hellström (Intel)
2020-07-29 7:56 ` Thomas Hellström (Intel)
2020-07-29 12:17 ` Tvrtko Ursulin
2020-07-29 13:44 ` Thomas Hellström (Intel)
2020-08-05 12:12 ` Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 29/66] drm/i915: Hold wakeref for the duration of the vma GGTT binding Chris Wilson
2020-07-31 10:09 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 30/66] drm/i915: Specialise " Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 31/66] drm/i915/gt: Acquire backing storage for the context Chris Wilson
2020-07-31 10:27 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 32/66] drm/i915/gt: Push the wait for the context to bound to the request Chris Wilson
2020-07-31 10:48 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 33/66] drm/i915: Remove unused i915_gem_evict_vm() Chris Wilson
2020-07-31 10:51 ` Thomas Hellström (Intel)
2020-07-15 11:51 ` [Intel-gfx] [PATCH 34/66] drm/i915/gt: Decouple completed requests on unwind Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 35/66] drm/i915/gt: Check for a completed last request once Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 36/66] drm/i915/gt: Replace direct submit with direct call to tasklet Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 37/66] drm/i915/gt: Free stale request on destroying the virtual engine Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 38/66] drm/i915/gt: Use virtual_engine during execlists_dequeue Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 39/66] drm/i915/gt: Decouple inflight virtual engines Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 40/66] drm/i915/gt: Defer schedule_out until after the next dequeue Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 41/66] drm/i915/gt: Resubmit the virtual engine on schedule-out Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 42/66] drm/i915/gt: Simplify virtual engine handling for execlists_hold() Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 43/66] drm/i915/gt: ce->inflight updates are now serialised Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 44/66] drm/i915/gt: Drop atomic for engine->fw_active tracking Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 45/66] drm/i915/gt: Extract busy-stats for ring-scheduler Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 46/66] drm/i915/gt: Convert stats.active to plain unsigned int Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 47/66] drm/i915: Lift waiter/signaler iterators Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 48/66] drm/i915: Strip out internal priorities Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 49/66] drm/i915: Remove I915_USER_PRIORITY_SHIFT Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 50/66] drm/i915: Replace engine->schedule() with a known request operation Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 51/66] drm/i915/gt: Do not suspend bonded requests if one hangs Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 52/66] drm/i915: Teach the i915_dependency to use a double-lock Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 53/66] drm/i915: Restructure priority inheritance Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 54/66] drm/i915/gt: Remove timeslice suppression Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 55/66] drm/i915: Fair low-latency scheduling Chris Wilson
2020-07-15 15:33 ` [Intel-gfx] [PATCH] " Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 56/66] drm/i915/gt: Specify a deadline for the heartbeat Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 57/66] drm/i915: Replace the priority boosting for the display with a deadline Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 58/66] drm/i915: Move saturated workload detection to the GT Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 59/66] Restore "drm/i915: drop engine_pin/unpin_breadcrumbs_irq" Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 60/66] drm/i915/gt: Couple tasklet scheduling for all CS interrupts Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 61/66] drm/i915/gt: Support creation of 'internal' rings Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 62/66] drm/i915/gt: Use client timeline address for seqno writes Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 63/66] drm/i915/gt: Infrastructure for ring scheduling Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 64/66] drm/i915/gt: Implement ring scheduler for gen6/7 Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 65/66] drm/i915/gt: Enable ring scheduling " Chris Wilson
2020-07-15 11:51 ` [Intel-gfx] [PATCH 66/66] drm/i915/gem: Remove timeline nesting from snb relocs Chris Wilson
2020-07-15 13:27 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/66] drm/i915: Reduce i915_request.lock contention for i915_request_wait Patchwork
2020-07-15 13:28 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2020-07-15 14:20 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2020-07-15 15:41 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/66] drm/i915: Reduce i915_request.lock contention for i915_request_wait (rev2) Patchwork
2020-07-15 15:42 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2020-07-15 16:03 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2020-07-15 19:55 ` [Intel-gfx] ✗ Fi.CI.IGT: failure " Patchwork
2020-07-23 20:32 ` [Intel-gfx] [PATCH 01/66] drm/i915: Reduce i915_request.lock contention for i915_request_wait Dave Airlie
2020-07-27 9:35 ` Tvrtko Ursulin
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200715115147.11866-27-chris@chris-wilson.co.uk \
--to=chris@chris-wilson.co.uk \
--cc=intel-gfx@lists.freedesktop.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).