From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 18/22] drm/i915/gem: Build the reloc request first
Date: Thu,  4 Jun 2020 11:37:47 +0100
Message-ID: <20200604103751.18816-18-chris@chris-wilson.co.uk>
In-Reply-To: <20200604103751.18816-1-chris@chris-wilson.co.uk>

If we get interrupted in the middle of chaining up the relocation
entries, we fail to submit the relocation batch. However, we will have
already reported some of the relocations as complete, so their
reloc.presumed_offset no longer matches the batch contents, causing
confusion and invalid future batches. If we build the relocation
request packet first, we can always submit as much of the relocation
chain as we managed to build.

Fixes: 0e97fbb08055 ("drm/i915/gem: Use a single chained reloc batches for a single execbuf")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gem/i915_gem_execbuffer.c    | 51 ++++++++++---------
 .../i915/gem/selftests/i915_gem_execbuffer.c  |  8 +--
 2 files changed, 31 insertions(+), 28 deletions(-)
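
The change boils down to programming the request to run the relocation
batch before any entries are written into it, so that a partially built
chain is still submitted consistently. A minimal userspace model of that
ordering follows; reloc_gpu_emit() and reloc_gpu_flush() are the names
used by this patch, while everything else here (the ring/batch arrays,
emit_reloc_entry()) is an illustrative stand-in rather than i915 code:

/*
 * Model of the reordering: the request "ring" is programmed to run the
 * batch *before* the batch is filled, so an interruption while filling
 * still leaves a request that executes exactly the entries written so
 * far.  Compiles and runs standalone.
 */
#include <errno.h>
#include <stdio.h>

static unsigned int ring[4], ring_len;    /* the reloc request */
static unsigned int batch[64], batch_len; /* the chained reloc batch */

static int reloc_gpu_emit(void)
{
	ring[ring_len++] = 0x1;	/* stand-in for emit_init_breadcrumb() */
	ring[ring_len++] = 0x2;	/* stand-in for emit_bb_start(batch) */
	return 0;
}

static int emit_reloc_entry(unsigned int i)
{
	if (i == 5)
		return -EINTR;	/* simulate an interrupted relocation */
	batch[batch_len++] = i;
	return 0;
}

static void reloc_gpu_flush(void)
{
	/* i915_request_add(): submit whatever was built; always valid */
	printf("request: %u dwords, batch: %u entries\n",
	       ring_len, batch_len);
}

int main(void)
{
	unsigned int i;
	int err;

	err = reloc_gpu_emit();	/* build the request first */
	for (i = 0; !err && i < 8; i++)
		err = emit_reloc_entry(i);

	reloc_gpu_flush();	/* partial progress still matches */
	return err ? 1 : 0;
}

Under the old order, the two stand-in writes in reloc_gpu_emit() only
happened at flush time, after the loop, so an -EINTR left batch entries
that had been reported as complete but were never executed.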

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 06e1a1f2aa1d..4c3461ab8a63 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -1021,11 +1021,27 @@ static unsigned int reloc_bb_flags(const struct reloc_cache *cache)
 	return cache->gen > 5 ? 0 : I915_DISPATCH_SECURE;
 }
 
-static int reloc_gpu_flush(struct reloc_cache *cache)
+static int reloc_gpu_emit(struct reloc_cache *cache)
 {
 	struct i915_request *rq = cache->rq;
 	int err;
 
+	err = 0;
+	if (rq->engine->emit_init_breadcrumb)
+		err = rq->engine->emit_init_breadcrumb(rq);
+	if (!err)
+		err = rq->engine->emit_bb_start(rq,
+						rq->batch->node.start,
+						PAGE_SIZE,
+						reloc_bb_flags(cache));
+
+	return err;
+}
+
+static void reloc_gpu_flush(struct reloc_cache *cache)
+{
+	struct i915_request *rq = cache->rq;
+
 	if (cache->rq_vma) {
 		struct drm_i915_gem_object *obj = cache->rq_vma->obj;
 
@@ -1037,21 +1053,8 @@ static int reloc_gpu_flush(struct reloc_cache *cache)
 		i915_gem_object_unpin_map(obj);
 	}
 
-	err = 0;
-	if (rq->engine->emit_init_breadcrumb)
-		err = rq->engine->emit_init_breadcrumb(rq);
-	if (!err)
-		err = rq->engine->emit_bb_start(rq,
-						rq->batch->node.start,
-						PAGE_SIZE,
-						reloc_bb_flags(cache));
-	if (err)
-		i915_request_set_error_once(rq, err);
-
 	intel_gt_chipset_flush(rq->engine->gt);
 	i915_request_add(rq);
-
-	return err;
 }
 
 static int reloc_move_to_gpu(struct i915_request *rq, struct i915_vma *vma)
@@ -1139,7 +1142,7 @@ __reloc_gpu_alloc(struct i915_execbuffer *eb, struct intel_engine_cs *engine)
 		err = i915_vma_move_to_active(batch, rq, 0);
 	i915_vma_unlock(batch);
 	if (err)
-		goto skip_request;
+		goto err_request;
 
 	rq->batch = batch;
 	i915_vma_unpin(batch);
@@ -1152,8 +1155,6 @@ __reloc_gpu_alloc(struct i915_execbuffer *eb, struct intel_engine_cs *engine)
 	/* Return with batch mapping (cmd) still pinned */
 	goto out_pool;
 
-skip_request:
-	i915_request_set_error_once(rq, err);
 err_request:
 	i915_request_add(rq);
 err_unpin:
@@ -1186,10 +1187,8 @@ static u32 *reloc_batch_grow(struct i915_execbuffer *eb,
 	if (unlikely(cache->rq_size + len >
 		     PAGE_SIZE / sizeof(u32) - RELOC_TAIL)) {
 		err = reloc_gpu_chain(cache);
-		if (unlikely(err)) {
-			i915_request_set_error_once(cache->rq, err);
+		if (unlikely(err))
 			return ERR_PTR(err);
-		}
 	}
 
 	GEM_BUG_ON(cache->rq_size + len >= PAGE_SIZE  / sizeof(u32));
@@ -1571,23 +1570,25 @@ static int reloc_gpu_alloc(struct i915_execbuffer *eb)
 static int reloc_gpu(struct i915_execbuffer *eb)
 {
 	struct eb_vma *ev;
-	int flush, err;
+	int err;
 
 	err = reloc_gpu_alloc(eb);
 	if (err)
 		return err;
 	GEM_BUG_ON(!eb->reloc_cache.rq);
 
+	err = reloc_gpu_emit(&eb->reloc_cache);
+	if (err)
+		goto out;
+
 	list_for_each_entry(ev, &eb->relocs, reloc_link) {
 		err = eb_relocate_vma(eb, ev);
 		if (err)
-			goto out;
+			break;
 	}
 
 out:
-	flush = reloc_gpu_flush(&eb->reloc_cache);
-	if (!err)
-		err = flush;
+	reloc_gpu_flush(&eb->reloc_cache);
 	return err;
 }
 
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
index 50fe22d87ae1..faed6480a792 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_execbuffer.c
@@ -40,6 +40,10 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
 	if (err)
 		goto unpin_vma;
 
+	err = reloc_gpu_emit(&eb->reloc_cache);
+	if (err)
+		goto unpin_vma;
+
 	/* 8-Byte aligned */
 	err = __reloc_entry_gpu(eb, vma, offsets[0] * sizeof(u32), 0);
 	if (err)
@@ -64,9 +68,7 @@ static int __igt_gpu_reloc(struct i915_execbuffer *eb,
 
 	GEM_BUG_ON(!eb->reloc_cache.rq);
 	rq = i915_request_get(eb->reloc_cache.rq);
-	err = reloc_gpu_flush(&eb->reloc_cache);
-	if (err)
-		goto put_rq;
+	reloc_gpu_flush(&eb->reloc_cache);
 
 	err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE, HZ / 2);
 	if (err) {
-- 
2.20.1
