From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 13/41] drm/i915: Reuse the active golden render state batch
Date: Thu, 20 Oct 2016 16:03:55 +0100
Message-ID: <20161020150423.4560-14-chris@chris-wilson.co.uk>
In-Reply-To: <20161020150423.4560-1-chris@chris-wilson.co.uk>

The golden render state is constant, but we recreate the batch setting
it up for every new context. If we keep that batch in a volatile cache
we can safely reuse it whenever we need to initialise a new context. We
mark the pages as purgeable and use the shrinker to recover pages from
the batch whenever we face memory pressure, recreating that batch afresh
on the next new context.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
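For context, the caching scheme described in the commit message boils down to
the pattern sketched below. This is a rough, self-contained illustration only:
the struct and function names are made up and are not the i915 interfaces
touched by this patch.

#include <linux/slab.h>		/* kmalloc()/kfree() */
#include <linux/string.h>	/* memset() */

/* Hypothetical stand-in for the cached golden render state batch. */
struct cached_batch {
	void *pages;	/* NULL once purged under memory pressure */
	size_t size;
};

/* Called whenever a new context needs the golden render state. */
static int cached_batch_get(struct cached_batch *cb)
{
	if (cb->pages)			/* still resident: reuse as-is */
		return 0;

	cb->pages = kmalloc(cb->size, GFP_KERNEL);
	if (!cb->pages)
		return -ENOMEM;

	memset(cb->pages, 0, cb->size);	/* stands in for rewriting the batch */
	return 0;
}

/* Shrinker-style callback: drop the purgeable copy, rebuilt lazily above. */
static void cached_batch_shrink(struct cached_batch *cb)
{
	kfree(cb->pages);
	cb->pages = NULL;
}

In the actual patch the cached batch lives in engine->render_state, and a
reclaimed obj->pages (batch_offset reset to -1) is the signal that the batch
must be rewritten before the next emit.
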
 drivers/gpu/drm/i915/i915_gem_render_state.c | 184 +++++++++++++++++----------
 drivers/gpu/drm/i915/i915_gem_render_state.h |   4 +-
 drivers/gpu/drm/i915/intel_engine_cs.c       |   5 +
 drivers/gpu/drm/i915/intel_lrc.c             |   2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.c      |   2 +-
 drivers/gpu/drm/i915/intel_ringbuffer.h      |   3 +
 6 files changed, 129 insertions(+), 71 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.c b/drivers/gpu/drm/i915/i915_gem_render_state.c
index 217e0b58b930..9625e1a662ed 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -28,17 +28,19 @@
 #include "i915_drv.h"
 #include "intel_renderstate.h"
 
-struct render_state {
+struct intel_render_state {
 	const struct intel_renderstate_rodata *rodata;
 	struct i915_vma *vma;
-	u32 aux_batch_size;
-	u32 aux_batch_offset;
+	u32 batch_offset;
+	u32 batch_size;
+	u32 aux_offset;
+	u32 aux_size;
 };
 
 static const struct intel_renderstate_rodata *
-render_state_get_rodata(const struct drm_i915_gem_request *req)
+render_state_get_rodata(const struct intel_engine_cs *engine)
 {
-	switch (INTEL_GEN(req->i915)) {
+	switch (INTEL_GEN(engine->i915)) {
 	case 6:
 		return &gen6_null_state;
 	case 7:
@@ -63,29 +65,27 @@ render_state_get_rodata(const struct drm_i915_gem_request *req)
  */
 #define OUT_BATCH(batch, i, val)				\
 	do {							\
-		if (WARN_ON((i) >= PAGE_SIZE / sizeof(u32))) {	\
-			ret = -ENOSPC;				\
-			goto err_out;				\
-		}						\
+		if ((i) >= PAGE_SIZE / sizeof(u32))		\
+			goto err;				\
 		(batch)[(i)++] = (val);				\
 	} while(0)
 
-static int render_state_setup(struct render_state *so)
+static int render_state_setup(struct intel_render_state *so,
+			      struct drm_i915_private *i915)
 {
-	struct drm_i915_private *dev_priv = to_i915(so->vma->vm->dev);
 	const struct intel_renderstate_rodata *rodata = so->rodata;
-	const bool has_64bit_reloc = INTEL_GEN(dev_priv) >= 8;
+	const bool has_64bit_reloc = INTEL_GEN(i915) >= 8;
+	struct drm_i915_gem_object *obj = so->vma->obj;
 	unsigned int i = 0, reloc_index = 0;
-	struct page *page;
+	unsigned int needs_clflush;
 	u32 *d;
 	int ret;
 
-	ret = i915_gem_object_set_to_cpu_domain(so->vma->obj, true);
+	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
 	if (ret)
 		return ret;
 
-	page = i915_gem_object_get_dirty_page(so->vma->obj, 0);
-	d = kmap(page);
+	d = kmap_atomic(i915_gem_object_get_dirty_page(obj, 0));
 
 	while (i < rodata->batch_items) {
 		u32 s = rodata->batch[i];
@@ -95,10 +95,8 @@ static int render_state_setup(struct render_state *so)
 			s = lower_32_bits(r);
 			if (has_64bit_reloc) {
 				if (i + 1 >= rodata->batch_items ||
-				    rodata->batch[i + 1] != 0) {
-					ret = -EINVAL;
-					goto err_out;
-				}
+				    rodata->batch[i + 1] != 0)
+					goto err;
 
 				d[i++] = s;
 				s = upper_32_bits(r);
@@ -110,12 +108,20 @@ static int render_state_setup(struct render_state *so)
 		d[i++] = s;
 	}
 
+	if (rodata->reloc[reloc_index] != -1) {
+		DRM_ERROR("only %d relocs resolved\n", reloc_index);
+		goto err;
+	}
+
+	so->batch_offset = so->vma->node.start;
+	so->batch_size = rodata->batch_items * sizeof(u32);
+
 	while (i % CACHELINE_DWORDS)
 		OUT_BATCH(d, i, MI_NOOP);
 
-	so->aux_batch_offset = i * sizeof(u32);
+	so->aux_offset = i * sizeof(u32);
 
-	if (HAS_POOLED_EU(dev_priv)) {
+	if (HAS_POOLED_EU(i915)) {
 		/*
 		 * We always program 3x6 pool config but depending upon which
 		 * subslice is disabled HW drops down to appropriate config
@@ -143,89 +149,131 @@ static int render_state_setup(struct render_state *so)
 	}
 
 	OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
-	so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
-
+	so->aux_size = i * sizeof(u32) - so->aux_offset;
+	so->aux_offset += so->batch_offset;
 	/*
 	 * Since we are sending length, we need to strictly conform to
 	 * all requirements. For Gen2 this must be a multiple of 8.
 	 */
-	so->aux_batch_size = ALIGN(so->aux_batch_size, 8);
-
-	kunmap(page);
-
-	ret = i915_gem_object_set_to_gtt_domain(so->vma->obj, false);
-	if (ret)
-		return ret;
-
-	if (rodata->reloc[reloc_index] != -1) {
-		DRM_ERROR("only %d relocs resolved\n", reloc_index);
-		return -EINVAL;
-	}
+	so->aux_size = ALIGN(so->aux_size, 8);
 
-	return 0;
+	if (needs_clflush)
+		drm_clflush_virt_range(d, i * sizeof(u32));
+	kunmap_atomic(d);
 
-err_out:
-	kunmap(page);
+	ret = i915_gem_object_set_to_gtt_domain(obj, false);
+out:
+	i915_gem_obj_finish_shmem_access(obj);
 	return ret;
+
+err:
+	kunmap_atomic(d);
+	ret = -EINVAL;
+	goto out;
 }
 
 #undef OUT_BATCH
 
-int i915_gem_render_state_init(struct drm_i915_gem_request *req)
+int i915_gem_render_state_init(struct intel_engine_cs *engine)
 {
-	struct render_state so;
+	struct intel_render_state *so;
+	const struct intel_renderstate_rodata *rodata;
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (WARN_ON(req->engine->id != RCS))
-		return -ENOENT;
+	if (engine->id != RCS)
+		return 0;
 
-	so.rodata = render_state_get_rodata(req);
-	if (!so.rodata)
+	rodata = render_state_get_rodata(engine);
+	if (!rodata)
 		return 0;
 
-	if (so.rodata->batch_items * 4 > 4096)
+	if (rodata->batch_items * 4 > 4096)
 		return -EINVAL;
 
-	obj = i915_gem_object_create_internal(req->i915, 4096);
-	if (IS_ERR(obj))
-		return PTR_ERR(obj);
+	so = kmalloc(sizeof(*so), GFP_KERNEL);
+	if (!so)
+		return -ENOMEM;
 
-	so.vma = i915_vma_create(obj, &req->i915->ggtt.base, NULL);
-	if (IS_ERR(so.vma)) {
-		ret = PTR_ERR(so.vma);
-		goto err_obj;
+	obj = i915_gem_object_create_internal(engine->i915, 4096);
+	if (IS_ERR(obj)) {
+		ret = PTR_ERR(obj);
+		goto err_free;
 	}
 
-	ret = i915_vma_pin(so.vma, 0, 0, PIN_GLOBAL);
-	if (ret)
+	so->vma = i915_vma_create(obj, &engine->i915->ggtt.base, NULL);
+	if (IS_ERR(so->vma)) {
+		ret = PTR_ERR(so->vma);
 		goto err_obj;
+	}
+
+	so->rodata = rodata;
+	engine->render_state = so;
+	return 0;
 
-	ret = render_state_setup(&so);
+err_obj:
+	i915_gem_object_put(obj);
+err_free:
+	kfree(so);
+	return ret;
+}
+
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
+{
+	struct intel_render_state *so;
+	int ret;
+
+	so = req->engine->render_state;
+	if (!so)
+		return 0;
+
+	/* Recreate the page after shrinking */
+	if (!so->vma->obj->pages)
+		so->batch_offset = -1;
+
+	ret = i915_vma_pin(so->vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
 	if (ret)
-		goto err_unpin;
+		return ret;
 
-	ret = req->engine->emit_bb_start(req, so.vma->node.start,
-					 so.rodata->batch_items * 4,
+	if (so->vma->node.start != so->batch_offset) {
+		ret = render_state_setup(so, req->i915);
+		if (ret)
+			goto err_unpin;
+	}
+
+	ret = req->engine->emit_bb_start(req,
+					 so->batch_offset, so->batch_size,
 					 I915_DISPATCH_SECURE);
 	if (ret)
 		goto err_unpin;
 
-	if (so.aux_batch_size > 8) {
+	if (so->aux_size > 8) {
 		ret = req->engine->emit_bb_start(req,
-						 (so.vma->node.start +
-						  so.aux_batch_offset),
-						 so.aux_batch_size,
+						 so->aux_offset, so->aux_size,
 						 I915_DISPATCH_SECURE);
 		if (ret)
 			goto err_unpin;
 	}
 
-	i915_vma_move_to_active(so.vma, req, 0);
+	i915_vma_move_to_active(so->vma, req, 0);
 err_unpin:
-	i915_vma_unpin(so.vma);
-	i915_vma_close(so.vma);
-err_obj:
-	__i915_gem_object_release_unless_active(obj);
+	i915_vma_unpin(so->vma);
 	return ret;
 }
+
+void i915_gem_render_state_fini(struct intel_engine_cs *engine)
+{
+	struct intel_render_state *so;
+	struct drm_i915_gem_object *obj;
+
+	so = fetch_and_zero(&engine->render_state);
+	if (!so)
+		return;
+
+	obj = so->vma->obj;
+
+	i915_vma_close(so->vma);
+	__i915_gem_object_release_unless_active(obj);
+
+	kfree(so);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_render_state.h b/drivers/gpu/drm/i915/i915_gem_render_state.h
index 18cce3f06e9c..87481845799d 100644
--- a/drivers/gpu/drm/i915/i915_gem_render_state.h
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.h
@@ -26,6 +26,8 @@
 
 struct drm_i915_gem_request;
 
-int i915_gem_render_state_init(struct drm_i915_gem_request *req);
+int i915_gem_render_state_init(struct intel_engine_cs *engine);
+int i915_gem_render_state_emit(struct drm_i915_gem_request *req);
+void i915_gem_render_state_fini(struct intel_engine_cs *engine);
 
 #endif /* _I915_GEM_RENDER_STATE_H_ */
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index c7ac7e26f2dc..a2104fa920c8 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -314,6 +314,10 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
+	ret = i915_gem_render_state_init(engine);
+	if (ret)
+		return ret;
+
 	return 0;
 }
 
@@ -328,6 +332,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 {
 	intel_engine_cleanup_scratch(engine);
 
+	i915_gem_render_state_fini(engine);
 	intel_engine_fini_breadcrumbs(engine);
 	intel_engine_cleanup_cmd_parser(engine);
 	i915_gem_batch_pool_fini(&engine->batch_pool);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index bc86585b9fbb..1c1bd30e8b2d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1637,7 +1637,7 @@ static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
 	if (ret)
 		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
 
-	return i915_gem_render_state_init(req);
+	return i915_gem_render_state_emit(req);
 }
 
 /**
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2ce28d41f405..32ba02d9a6d8 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -648,7 +648,7 @@ static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
 	if (ret != 0)
 		return ret;
 
-	ret = i915_gem_render_state_init(req);
+	ret = i915_gem_render_state_emit(req);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 09bb89cfb7c3..cb6e96c6cd47 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -157,6 +157,7 @@ struct i915_ctx_workarounds {
 };
 
 struct drm_i915_gem_request;
+struct intel_render_state;
 
 struct intel_engine_cs {
 	struct drm_i915_private *i915;
@@ -184,6 +185,8 @@ struct intel_engine_cs {
 	unsigned int irq_shift;
 	struct intel_ring *buffer;
 
+	struct intel_render_state *render_state;
+
 	/* Rather than have every client wait upon all user interrupts,
 	 * with the herd waking after every interrupt and each doing the
 	 * heavyweight seqno dance, we delegate the task (of being the
-- 
2.9.3
