From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Subject: [PATCH 11/21] drm/i915: Rearrange switch_context to load the aliasing ppgtt on first use
Date: Sun, 24 Apr 2016 08:18:42 +0100	[thread overview]
Message-ID: <1461482332-21975-11-git-send-email-chris@chris-wilson.co.uk> (raw)
In-Reply-To: <1461482332-21975-1-git-send-email-chris@chris-wilson.co.uk>

The call to switch_mm() is already handled by i915_switch_context(); the
only difference required to set up the aliasing ppgtt is that we need to
emit the switch_mm() on the first context, i.e. when transitioning from
engine->last_context == NULL. This allows us to defer the
initialisation of the GPU from early device initialisation to first use,
which should marginally speed up both. The caveat is that we then defer
the context initialisation until first use - i.e. we cannot assume that
the GPU engines are initialised. For example, this means that the power
contexts for rc6 (Ironlake) need to be explicitly loaded, as they now are.
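
For reference, the resulting pre-switch check boils down to roughly the
condensed sketch below (the trailing RCS/gen test is paraphrased to
mirror needs_pd_load_post(); the caller substitutes the aliasing ppgtt
when the context has no full ppgtt of its own):

static bool
needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
                  struct intel_engine_cs *engine,
                  struct intel_context *to)
{
        /* No per-process GTT (and no aliasing ppgtt): nothing to load */
        if (!ppgtt)
                return false;

        /* Always load the ppgtt on first use of the engine */
        if (!engine->last_context)
                return true;

        /* Same context and no new page-directory entries: skip the reload */
        if (engine->last_context == to &&
            !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
                return false;

        /* RCS on gen8 defers the load until after MI_SET_CONTEXT
         * (see needs_pd_load_post()); everything else loads it here.
         */
        return engine->id != RCS || !IS_GEN8(to->i915);
}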

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h         |  1 -
 drivers/gpu/drm/i915/i915_gem.c         | 30 -------------
 drivers/gpu/drm/i915/i915_gem_context.c | 77 +++++++++++++--------------------
 drivers/gpu/drm/i915/i915_gem_gtt.c     | 14 ------
 drivers/gpu/drm/i915/i915_gem_gtt.h     |  1 -
 5 files changed, 31 insertions(+), 92 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 7dd79c9a8ce3..7adc7c83a116 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3304,7 +3304,6 @@ void i915_gem_context_lost(struct drm_i915_private *dev_priv);
 void i915_gem_context_fini(struct drm_device *dev);
 void i915_gem_context_reset(struct drm_device *dev);
 int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_gem_request *req);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
 int i915_switch_context(struct drm_i915_gem_request *req);
 struct intel_context *
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aafcb4942acf..3b294dcf0add 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4907,36 +4907,6 @@ i915_gem_init_hw(struct drm_device *dev)
 	 * on re-initialisation
 	 */
 	ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
-	if (ret)
-		goto out;
-
-	/* Now it is safe to go back round and do everything else: */
-	for_each_engine(engine, dev_priv) {
-		struct drm_i915_gem_request *req;
-
-		req = i915_gem_request_alloc(engine, NULL);
-		if (IS_ERR(req)) {
-			ret = PTR_ERR(req);
-			break;
-		}
-
-		ret = i915_ppgtt_init_ring(req);
-		if (ret)
-			goto err_request;
-
-		ret = i915_gem_context_enable(req);
-		if (ret)
-			goto err_request;
-
-err_request:
-		i915_add_request_no_flush(req);
-		if (ret) {
-			DRM_ERROR("Failed to enable %s, error=%d\n",
-				  engine->name, ret);
-			i915_gem_cleanup_engines(dev);
-			break;
-		}
-	}
 
 out:
 	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index e5b3d74f8222..4312e2382453 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -431,27 +431,6 @@ void i915_gem_context_fini(struct drm_device *dev)
 	dev_priv->kernel_context = NULL;
 }
 
-int i915_gem_context_enable(struct drm_i915_gem_request *req)
-{
-	struct intel_engine_cs *engine = req->engine;
-	int ret;
-
-	if (i915.enable_execlists) {
-		if (engine->init_context == NULL)
-			return 0;
-
-		ret = engine->init_context(req);
-	} else
-		ret = i915_switch_context(req);
-
-	if (ret) {
-		DRM_ERROR("ring init context: %d\n", ret);
-		return ret;
-	}
-
-	return 0;
-}
-
 static int context_idr_cleanup(int id, void *p, void *data)
 {
 	struct intel_context *ctx = p;
@@ -630,7 +609,8 @@ static int remap_l3(struct drm_i915_gem_request *req, int slice)
 	return 0;
 }
 
-static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
+static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
+				   struct intel_engine_cs *engine,
 				   struct intel_context *to)
 {
 	if (to->remap_slice)
@@ -639,21 +619,27 @@ static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
 	if (!to->legacy_hw_ctx.initialized)
 		return false;
 
-	if (to->ppgtt &&
-	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+	if (ppgtt && (intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
 		return false;
 
 	return to == engine->last_context;
 }
 
 static bool
-needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
+		  struct intel_engine_cs *engine,
+		  struct intel_context *to)
 {
-	if (!to->ppgtt)
+	if (!ppgtt)
 		return false;
 
+	/* Always load the ppgtt on first use */
+	if (!engine->last_context)
+		return true;
+
+	/* Same context without new entries, skip */
 	if (engine->last_context == to &&
-	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
 		return false;
 
 	if (engine->id != RCS)
@@ -666,9 +652,11 @@ needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 }
 
 static bool
-needs_pd_load_post(struct intel_context *to, u32 hw_flags)
+needs_pd_load_post(struct i915_hw_ppgtt *ppgtt,
+		   struct intel_context *to,
+		   u32 hw_flags)
 {
-	if (!to->ppgtt)
+	if (!ppgtt)
 		return false;
 
 	if (!IS_GEN8(to->i915))
@@ -684,11 +672,12 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
+	struct i915_hw_ppgtt *ppgtt = to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
 	struct intel_context *from;
 	u32 hw_flags;
 	int ret, i;
 
-	if (skip_rcs_switch(engine, to))
+	if (skip_rcs_switch(ppgtt, engine, to))
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
@@ -719,13 +708,13 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	if (ret)
 		goto unpin_out;
 
-	if (needs_pd_load_pre(engine, to)) {
+	if (needs_pd_load_pre(ppgtt, engine, to)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
 		 * Register Immediate commands in Ring Buffer before submitting
 		 * a context."*/
 		trace_switch_mm(engine, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		ret = ppgtt->switch_mm(ppgtt, req);
 		if (ret)
 			goto unpin_out;
 	}
@@ -736,16 +725,11 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		 * space. This means we must enforce that a page table load
 		 * occur when this occurs. */
 		hw_flags = MI_RESTORE_INHIBIT;
-	else if (to->ppgtt &&
-		 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+	else if (ppgtt && intel_engine_flag(engine) & ppgtt->pd_dirty_rings)
 		hw_flags = MI_FORCE_RESTORE;
 	else
 		hw_flags = 0;
 
-	/* We should never emit switch_mm more than once */
-	WARN_ON(needs_pd_load_pre(engine, to) &&
-		needs_pd_load_post(to, hw_flags));
-
 	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
 		ret = mi_set_context(req, hw_flags);
 		if (ret)
@@ -780,9 +764,9 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	/* GEN8 does *not* require an explicit reload if the PDPs have been
 	 * setup, and we do not wish to move them.
 	 */
-	if (needs_pd_load_post(to, hw_flags)) {
+	if (needs_pd_load_post(ppgtt, to, hw_flags)) {
 		trace_switch_mm(engine, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		ret = ppgtt->switch_mm(ppgtt, req);
 		/* The hardware context switch is emitted, but we haven't
 		 * actually changed the state - so it's probably safe to bail
 		 * here. Still, let the user know something dangerous has
@@ -792,8 +776,8 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 			return ret;
 	}
 
-	if (to->ppgtt)
-		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+	if (ppgtt)
+		ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 
 	for (i = 0; i < MAX_L3_SLICES; i++) {
 		if (!(to->remap_slice & (1<<i)))
@@ -846,17 +830,18 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 	if (engine->id != RCS ||
 	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
 		struct intel_context *to = req->ctx;
+		struct i915_hw_ppgtt *ppgtt =
+			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
 
-		if (needs_pd_load_pre(engine, to)) {
+		if (needs_pd_load_pre(ppgtt, engine, to)) {
 			int ret;
 
 			trace_switch_mm(engine, to);
-			ret = to->ppgtt->switch_mm(to->ppgtt, req);
+			ret = ppgtt->switch_mm(ppgtt, req);
 			if (ret)
 				return ret;
 
-			/* Doing a PD load always reloads the page dirs */
-			to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 		}
 
 		if (to != engine->last_context) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 942b98f910f8..750569af4b0e 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2186,20 +2186,6 @@ int i915_ppgtt_init_hw(struct drm_device *dev)
 	return 0;
 }
 
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req)
-{
-	struct drm_i915_private *dev_priv = req->i915;
-	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
-	if (i915.enable_execlists)
-		return 0;
-
-	if (!ppgtt)
-		return 0;
-
-	return ppgtt->switch_mm(ppgtt, req);
-}
-
 struct i915_hw_ppgtt *
 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
 {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index b0ae6632c01a..114850368bca 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -522,7 +522,6 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev);
 
 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt);
 int i915_ppgtt_init_hw(struct drm_device *dev);
-int i915_ppgtt_init_ring(struct drm_i915_gem_request *req);
 void i915_ppgtt_release(struct kref *kref);
 struct i915_hw_ppgtt *i915_ppgtt_create(struct drm_device *dev,
 					struct drm_i915_file_private *fpriv);
-- 
2.8.1
