[PATCH 1/6] drm/i915: Remove redundant trailing request flush
From: Chris Wilson @ 2018-12-28 17:16 UTC
  To: intel-gfx

Now that we perform the request flush inline with emitting the
breadcrumb, we can remove the now-redundant manual flush. We can also
remove the infrastructure (intel_ring_wait_for_space() and the
MIN_SPACE_FOR_ADD_REQUEST estimate) that remained only to support it.

v2: emit_breadcrumb_sz is in dwords, but rq->reserved_space is in
bytes, so scale the reservation by sizeof(u32)

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_request.c          | 14 +++++++-------
 drivers/gpu/drm/i915/intel_ringbuffer.c      | 16 ----------------
 drivers/gpu/drm/i915/intel_ringbuffer.h      | 10 ----------
 drivers/gpu/drm/i915/selftests/mock_engine.c |  2 --
 4 files changed, 7 insertions(+), 35 deletions(-)
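
The v2 note above concerns the unit mismatch; as a minimal sketch of
the resulting sizing rule (the helper name breadcrumb_reservation is
hypothetical, only the arithmetic comes from the hunk below):

  #include <stdint.h>

  /*
   * Sketch: compute the byte reservation from a breadcrumb size given
   * in dwords. The factor of two leaves room for a complete breadcrumb
   * after a tail wrap as well as before it.
   */
  static inline uint32_t breadcrumb_reservation(uint32_t emit_breadcrumb_sz)
  {
          /* dwords -> bytes, doubled for potential wrap-around */
          return 2 * emit_breadcrumb_sz * sizeof(uint32_t);
  }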

diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 6cedcfea33b5..ea4620f2ac9e 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -521,10 +521,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 
 	reserve_gt(i915);
 
-	ret = intel_ring_wait_for_space(ce->ring, MIN_SPACE_FOR_ADD_REQUEST);
-	if (ret)
-		goto err_unreserve;
-
 	/* Move our oldest request to the slab-cache (if not in use!) */
 	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
 	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
@@ -616,9 +612,13 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 * i915_request_add() call can't fail. Note that the reserve may need
 	 * to be redone if the request is not actually submitted straight
 	 * away, e.g. because a GPU scheduler has deferred it.
+	 *
+	 * Note that due to how we add reserved_space to intel_ring_begin()
+	 * we need to double our request to ensure that if we need to wrap
+	 * around inside i915_request_add() there is sufficient space at
+	 * the beginning of the ring as well.
 	 */
-	rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
-	GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz);
+	rq->reserved_space = 2 * engine->emit_breadcrumb_sz * sizeof(u32);
 
 	/*
 	 * Record the position of the start of the request so that
@@ -860,8 +860,8 @@ void i915_request_add(struct i915_request *request)
 	 * should already have been reserved in the ring buffer. Let the ring
 	 * know that it is time to use that space up.
 	 */
+	GEM_BUG_ON(request->reserved_space > request->ring->space);
 	request->reserved_space = 0;
-	engine->emit_flush(request, EMIT_FLUSH);
 
 	/*
 	 * Record the position of the start of the breadcrumb so that
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index fc1e29305951..d773f7dd32a9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1904,22 +1904,6 @@ static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 	return 0;
 }
 
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
-{
-	GEM_BUG_ON(bytes > ring->effective_size);
-	if (unlikely(bytes > ring->effective_size - ring->emit))
-		bytes += ring->size - ring->emit;
-
-	if (unlikely(bytes > ring->space)) {
-		int ret = wait_for_space(ring, bytes);
-		if (unlikely(ret))
-			return ret;
-	}
-
-	GEM_BUG_ON(ring->space < bytes);
-	return 0;
-}
-
 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
 {
 	struct intel_ring *ring = rq->ring;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 32606d795af3..99e2cb75d29a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -754,7 +754,6 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
 int __must_check intel_ring_cacheline_align(struct i915_request *rq);
 
-int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
 u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n);
 
 static inline void intel_ring_advance(struct i915_request *rq, u32 *cs)
@@ -895,15 +894,6 @@ static inline bool intel_engine_has_started(struct intel_engine_cs *engine,
 void intel_engine_get_instdone(struct intel_engine_cs *engine,
 			       struct intel_instdone *instdone);
 
-/*
- * Arbitrary size for largest possible 'add request' sequence. The code paths
- * are complex and variable. Empirical measurement shows that the worst case
- * is BDW at 192 bytes (6 + 6 + 36 dwords), then ILK at 136 bytes. However,
- * we need to allocate double the largest single packet within that emission
- * to account for tail wraparound (so 6 + 6 + 72 dwords for BDW).
- */
-#define MIN_SPACE_FOR_ADD_REQUEST 336
-
 static inline u32 intel_hws_seqno_address(struct intel_engine_cs *engine)
 {
 	return engine->status_page.ggtt_offset + I915_GEM_HWS_INDEX_ADDR;
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index d0c44c18db42..50e1a0b1af7e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -148,8 +148,6 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 	const unsigned long sz = PAGE_SIZE / 2;
 	struct mock_ring *ring;
 
-	BUILD_BUG_ON(MIN_SPACE_FOR_ADD_REQUEST > sz);
-
 	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
 	if (!ring)
 		return NULL;
-- 
2.20.1
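
For reference, the wrap accounting that motivates the doubling, as a
minimal sketch modeled on the intel_ring_wait_for_space() logic removed
above (the struct and function names here are hypothetical, and
intel_ring_begin() itself is not shown in this patch, so treat this as
illustrative only):

  /* Simplified ring state, mirroring the fields the removed helper used. */
  struct ring_sketch {
          unsigned int size;           /* total ring size in bytes */
          unsigned int effective_size; /* usable size in bytes */
          unsigned int emit;           /* current tail offset in bytes */
  };

  /*
   * If a packet of "bytes" cannot fit between the tail and the end of
   * the ring, the tail wraps: the request then consumes the padding up
   * to the end of the ring *plus* the packet emitted again from the
   * start. Hence the reservation made at request allocation must cover
   * the breadcrumb twice over.
   */
  static unsigned int bytes_with_wrap(const struct ring_sketch *ring,
                                      unsigned int bytes)
  {
          if (bytes > ring->effective_size - ring->emit)
                  bytes += ring->size - ring->emit;
          return bytes;
  }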

Thread overview: 22+ messages
2018-12-28 17:16 [PATCH 1/6] drm/i915: Remove redundant trailing request flush Chris Wilson
2018-12-28 17:16 ` [PATCH 2/6] drm/i915/ringbuffer: Remove irq-seqno w/a for gen6/7 rcs Chris Wilson
2018-12-31 10:28   ` Tvrtko Ursulin
2018-12-28 17:16 ` [PATCH 3/6] drm/i915/ringbuffer: Remove irq-seqno w/a for gen6 xcs Chris Wilson
2018-12-31 10:31   ` Tvrtko Ursulin
2018-12-28 17:16 ` [PATCH 4/6] drm/i915/ringbuffer: Move irq seqno barrier to the GPU for gen7 Chris Wilson
2018-12-31 10:43   ` Tvrtko Ursulin
2018-12-31 10:56     ` Chris Wilson
2018-12-28 17:16 ` [PATCH 5/6] drm/i915/ringbuffer: Move irq seqno barrier to the GPU for gen5 Chris Wilson
2018-12-31 10:49   ` Tvrtko Ursulin
2018-12-31 11:07     ` Chris Wilson
2018-12-31 11:21       ` Tvrtko Ursulin
2018-12-31 15:25     ` Chris Wilson
2018-12-28 17:16 ` [PATCH 6/6] drm/i915: Drop unused engine->irq_seqno_barrier w/a Chris Wilson
2018-12-31 11:35   ` Tvrtko Ursulin
2018-12-28 17:48 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/6] drm/i915: Remove redundant trailing request flush Patchwork
2018-12-28 18:18 ` ✓ Fi.CI.BAT: success " Patchwork
2018-12-31 15:38   ` Chris Wilson
2018-12-28 22:00 ` ✓ Fi.CI.IGT: " Patchwork
2018-12-31 10:25 ` [PATCH 1/6] " Tvrtko Ursulin
2018-12-31 10:32   ` Chris Wilson
2018-12-31 11:24     ` Tvrtko Ursulin
