From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [Intel-gfx] [PATCH 29/33] drm/i915/gt: Support creation of 'internal' rings
Date: Wed,  1 Jul 2020 09:40:49 +0100	[thread overview]
Message-ID: <20200701084053.6086-29-chris@chris-wilson.co.uk> (raw)
In-Reply-To: <20200701084053.6086-1-chris@chris-wilson.co.uk>

To support legacy ring buffer scheduling, we want a virtual ring buffer
for each client. These rings are purely for holding the requests as they
are being constructed on the CPU and are never accessed by the GPU, so
they should not be bound into the GGTT, and we can use plain old
WB-mapped pages.

As they are not bound, we need to nerf a few assumptions that a rq->ring
is in the GGTT.
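
As a rough illustration (a sketch, not a caller added by this patch), a
ring-scheduler backend would request a CPU-only ring by passing the new
flag in the low bits of the size argument, here with an arbitrary 16KiB
ring:

	/*
	 * Hypothetical caller: allocate a WB-mapped ring that is never
	 * bound into the GGTT (intel_ring_address() reports -1 for it).
	 */
	ring = intel_engine_create_ring(engine,
					SZ_16K | INTEL_RING_CREATE_INTERNAL);
	if (IS_ERR(ring))
		return PTR_ERR(ring);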

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_context.c    |  2 +-
 drivers/gpu/drm/i915/gt/intel_engine_cs.c  | 17 ++----
 drivers/gpu/drm/i915/gt/intel_ring.c       | 63 ++++++++++++++--------
 drivers/gpu/drm/i915/gt/intel_ring.h       | 12 ++++-
 drivers/gpu/drm/i915/gt/intel_ring_types.h |  2 +
 5 files changed, 57 insertions(+), 39 deletions(-)
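
A note on the flag encoding (a reviewer sketch, not extra patch content):
the ring size is a power-of-two number of pages, so the low 12 bits of the
size argument are free to carry creation flags, which the constructor then
strips back out, e.g.

	size  = SZ_16K | INTEL_RING_CREATE_INTERNAL;	/* 0x4001 */
	flags = size & GENMASK(11, 0);			/* 0x0001 */
	size ^= flags;					/* 0x4000, a power of two again */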

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index e4aece20bc80..fd71977c010a 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -127,7 +127,7 @@ int __intel_context_do_pin(struct intel_context *ce)
 			goto err_active;
 
 		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
-			 i915_ggtt_offset(ce->ring->vma),
+			 intel_ring_address(ce->ring),
 			 ce->ring->head, ce->ring->tail);
 
 		smp_mb__before_atomic(); /* flush pin before it is visible */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 413d8393b989..a5e83c115b23 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1267,7 +1267,7 @@ static int print_ring(char *buf, int sz, struct i915_request *rq)
 
 		len = scnprintf(buf, sz,
 				"ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
-				i915_ggtt_offset(rq->ring->vma),
+				intel_ring_address(rq->ring),
 				tl ? tl->hwsp_offset : 0,
 				hwsp_seqno(rq),
 				DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
@@ -1559,7 +1559,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		print_request(m, rq, "\t\tactive ");
 
 		drm_printf(m, "\t\tring->start:  0x%08x\n",
-			   i915_ggtt_offset(rq->ring->vma));
+			   intel_ring_address(rq->ring));
 		drm_printf(m, "\t\tring->head:   0x%08x\n",
 			   rq->ring->head);
 		drm_printf(m, "\t\tring->tail:   0x%08x\n",
@@ -1640,13 +1640,6 @@ ktime_t intel_engine_get_busy_time(struct intel_engine_cs *engine, ktime_t *now)
 	return total;
 }
 
-static bool match_ring(struct i915_request *rq)
-{
-	u32 ring = ENGINE_READ(rq->engine, RING_START);
-
-	return ring == i915_ggtt_offset(rq->ring->vma);
-}
-
 struct i915_request *
 intel_engine_find_active_request(struct intel_engine_cs *engine)
 {
@@ -1686,11 +1679,7 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 			continue;
 
 		if (!i915_request_started(request))
-			continue;
-
-		/* More than one preemptible request may match! */
-		if (!match_ring(request))
-			continue;
+			break;
 
 		active = request;
 		break;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index bdb324167ef3..c17cb9f24962 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -24,33 +24,42 @@ unsigned int intel_ring_update_space(struct intel_ring *ring)
 int intel_ring_pin(struct intel_ring *ring)
 {
 	struct i915_vma *vma = ring->vma;
-	unsigned int flags;
 	void *addr;
 	int ret;
 
 	if (atomic_fetch_inc(&ring->pin_count))
 		return 0;
 
-	/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
-	flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+	if (!(ring->flags & INTEL_RING_CREATE_INTERNAL)) {
+		unsigned int pin;
 
-	if (vma->obj->stolen)
-		flags |= PIN_MAPPABLE;
-	else
-		flags |= PIN_HIGH;
+		/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
+		pin = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
 
-	ret = i915_ggtt_pin(vma, 0, flags);
-	if (unlikely(ret))
-		goto err_unpin;
+		if (vma->obj->stolen)
+			pin |= PIN_MAPPABLE;
+		else
+			pin |= PIN_HIGH;
 
-	if (i915_vma_is_map_and_fenceable(vma))
-		addr = (void __force *)i915_vma_pin_iomap(vma);
-	else
-		addr = i915_gem_object_pin_map(vma->obj,
-					       i915_coherent_map_type(vma->vm->i915));
-	if (IS_ERR(addr)) {
-		ret = PTR_ERR(addr);
-		goto err_ring;
+		ret = i915_ggtt_pin(vma, 0, pin);
+		if (unlikely(ret))
+			goto err_unpin;
+
+		if (i915_vma_is_map_and_fenceable(vma))
+			addr = (void __force *)i915_vma_pin_iomap(vma);
+		else
+			addr = i915_gem_object_pin_map(vma->obj,
+						       i915_coherent_map_type(vma->vm->i915));
+		if (IS_ERR(addr)) {
+			ret = PTR_ERR(addr);
+			goto err_ring;
+		}
+	} else {
+		addr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
+		if (IS_ERR(addr)) {
+			ret = PTR_ERR(addr);
+			goto err_ring;
+		}
 	}
 
 	i915_vma_make_unshrinkable(vma);
@@ -91,10 +100,12 @@ void intel_ring_unpin(struct intel_ring *ring)
 		i915_gem_object_unpin_map(vma->obj);
 
 	i915_vma_make_purgeable(vma);
-	i915_vma_unpin(vma);
+	if (!(ring->flags & INTEL_RING_CREATE_INTERNAL))
+		i915_vma_unpin(vma);
 }
 
-static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
+static struct i915_vma *
+create_ring_vma(struct i915_ggtt *ggtt, int size, unsigned int flags)
 {
 	struct i915_address_space *vm = &ggtt->vm;
 	struct drm_i915_private *i915 = vm->i915;
@@ -102,7 +113,8 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
 	struct i915_vma *vma;
 
 	obj = ERR_PTR(-ENODEV);
-	if (i915_ggtt_has_aperture(ggtt))
+	if (!(flags & INTEL_RING_CREATE_INTERNAL) &&
+	    i915_ggtt_has_aperture(ggtt))
 		obj = i915_gem_object_create_stolen(i915, size);
 	if (IS_ERR(obj))
 		obj = i915_gem_object_create_internal(i915, size);
@@ -128,12 +140,14 @@ static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
 }
 
 struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size)
+intel_engine_create_ring(struct intel_engine_cs *engine, unsigned int size)
 {
 	struct drm_i915_private *i915 = engine->i915;
+	unsigned int flags = size & GENMASK(11, 0);
 	struct intel_ring *ring;
 	struct i915_vma *vma;
 
+	size ^= flags;
 	GEM_BUG_ON(!is_power_of_2(size));
 	GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
 
@@ -142,8 +156,10 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 		return ERR_PTR(-ENOMEM);
 
 	kref_init(&ring->ref);
+
 	ring->size = size;
 	ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
+	ring->flags = flags;
 
 	/*
 	 * Workaround an erratum on the i830 which causes a hang if
@@ -156,11 +172,12 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 
 	intel_ring_update_space(ring);
 
-	vma = create_ring_vma(engine->gt->ggtt, size);
+	vma = create_ring_vma(engine->gt->ggtt, size, flags);
 	if (IS_ERR(vma)) {
 		kfree(ring);
 		return ERR_CAST(vma);
 	}
+
 	ring->vma = vma;
 
 	return ring;
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index cc0ebca65167..d022fa209325 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -9,12 +9,14 @@
 
 #include "i915_gem.h" /* GEM_BUG_ON */
 #include "i915_request.h"
+#include "i915_vma.h"
 #include "intel_ring_types.h"
 
 struct intel_engine_cs;
 
 struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine, int size);
+intel_engine_create_ring(struct intel_engine_cs *engine, unsigned int size);
+#define INTEL_RING_CREATE_INTERNAL BIT(0)
 
 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords);
 int intel_ring_cacheline_align(struct i915_request *rq);
@@ -137,4 +139,12 @@ __intel_ring_space(unsigned int head, unsigned int tail, unsigned int size)
 	return (head - tail - CACHELINE_BYTES) & (size - 1);
 }
 
+static inline u32 intel_ring_address(const struct intel_ring *ring)
+{
+	if (ring->flags & INTEL_RING_CREATE_INTERNAL)
+		return -1;
+
+	return i915_ggtt_offset(ring->vma);
+}
+
 #endif /* INTEL_RING_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
index 1a189ea00fd8..d927deafcb33 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -47,6 +47,8 @@ struct intel_ring {
 	u32 size;
 	u32 wrap;
 	u32 effective_size;
+
+	unsigned long flags;
 };
 
 #endif /* INTEL_RING_TYPES_H */
-- 
2.20.1
