From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH 14/32] drm/i915: Explicitly pin the logical context for execbuf
Date: Wed, 17 Apr 2019 08:56:39 +0100
Message-ID: <20190417075657.19456-14-chris@chris-wilson.co.uk>
In-Reply-To: <20190417075657.19456-1-chris@chris-wilson.co.uk>

In order to separate the reservation phase of building a request from
its emission phase, we need to pull some of the request allocation
activities from deep inside i915_request up to the surface, into
GEM_EXECBUFFER.
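
A rough before/after sketch of the call pattern this gives us (names
as introduced in this series; locking and most error handling elided
for brevity):

	/* before: context resolved and pinned inside request allocation */
	rq = i915_request_alloc(eb->engine, eb->gem_context);

	/* after: pin the logical context once, up front */
	err = eb_pin_context(eb, engine); /* intel_context_pin() + wedged check */
	if (err)
		return err;

	/* requests are then built directly on the pinned context */
	rq = i915_request_create(eb->context);

	/* and the pin is dropped on the execbuf exit/error path */
	eb_unpin_context(eb); /* intel_context_unpin() */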

v2: Be frivolous, use a local drm_i915_private.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/i915_gem_execbuffer.c | 108 +++++++++++++--------
 drivers/gpu/drm/i915/i915_request.c        |   9 --
 2 files changed, 69 insertions(+), 48 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 3d672c9edb94..794af8edc6a2 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -34,6 +34,8 @@
 #include <drm/drm_syncobj.h>
 #include <drm/i915_drm.h>
 
+#include "gt/intel_gt_pm.h"
+
 #include "i915_drv.h"
 #include "i915_gem_clflush.h"
 #include "i915_trace.h"
@@ -236,7 +238,8 @@ struct i915_execbuffer {
 	unsigned int *flags;
 
 	struct intel_engine_cs *engine; /** engine to queue the request to */
-	struct i915_gem_context *ctx; /** context for building the request */
+	struct intel_context *context; /* logical state for the request */
+	struct i915_gem_context *gem_context; /** caller's context */
 	struct i915_address_space *vm; /** GTT and vma for the request */
 
 	struct i915_request *request; /** our request to build */
@@ -738,7 +741,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
 	if (unlikely(!ctx))
 		return -ENOENT;
 
-	eb->ctx = ctx;
+	eb->gem_context = ctx;
 	if (ctx->ppgtt) {
 		eb->vm = &ctx->ppgtt->vm;
 		eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
@@ -784,7 +787,6 @@ static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
 
 static int eb_wait_for_ring(const struct i915_execbuffer *eb)
 {
-	const struct intel_context *ce;
 	struct i915_request *rq;
 	int ret = 0;
 
@@ -794,11 +796,7 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
 	 * keeping all of their resources pinned.
 	 */
 
-	ce = intel_context_lookup(eb->ctx, eb->engine);
-	if (!ce || !ce->ring) /* first use, assume empty! */
-		return 0;
-
-	rq = __eb_wait_for_ring(ce->ring);
+	rq = __eb_wait_for_ring(eb->context->ring);
 	if (rq) {
 		mutex_unlock(&eb->i915->drm.struct_mutex);
 
@@ -817,15 +815,15 @@ static int eb_wait_for_ring(const struct i915_execbuffer *eb)
 
 static int eb_lookup_vmas(struct i915_execbuffer *eb)
 {
-	struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
+	struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
 	struct drm_i915_gem_object *obj;
 	unsigned int i, batch;
 	int err;
 
-	if (unlikely(i915_gem_context_is_closed(eb->ctx)))
+	if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
 		return -ENOENT;
 
-	if (unlikely(i915_gem_context_is_banned(eb->ctx)))
+	if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
 		return -EIO;
 
 	INIT_LIST_HEAD(&eb->relocs);
@@ -870,8 +868,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
 		if (!vma->open_count++)
 			i915_vma_reopen(vma);
 		list_add(&lut->obj_link, &obj->lut_list);
-		list_add(&lut->ctx_link, &eb->ctx->handles_list);
-		lut->ctx = eb->ctx;
+		list_add(&lut->ctx_link, &eb->gem_context->handles_list);
+		lut->ctx = eb->gem_context;
 		lut->handle = handle;
 
 add_vma:
@@ -1227,7 +1225,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (err)
 		goto err_unmap;
 
-	rq = i915_request_alloc(eb->engine, eb->ctx);
+	rq = i915_request_create(eb->context);
 	if (IS_ERR(rq)) {
 		err = PTR_ERR(rq);
 		goto err_unpin;
@@ -2088,31 +2086,65 @@ static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
 	[I915_EXEC_VEBOX]	= VECS0
 };
 
-static struct intel_engine_cs *
-eb_select_engine(struct drm_i915_private *dev_priv,
+static int eb_pin_context(struct i915_execbuffer *eb,
+			  struct intel_engine_cs *engine)
+{
+	struct intel_context *ce;
+	int err;
+
+	/*
+	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
+	 * EIO if the GPU is already wedged.
+	 */
+	err = i915_terminally_wedged(eb->i915);
+	if (err)
+		return err;
+
+	/*
+	 * Pinning the contexts may generate requests in order to acquire
+	 * GGTT space, so do this first before we reserve a seqno for
+	 * ourselves.
+	 */
+	ce = intel_context_pin(eb->gem_context, engine);
+	if (IS_ERR(ce))
+		return PTR_ERR(ce);
+
+	eb->engine = engine;
+	eb->context = ce;
+	return 0;
+}
+
+static void eb_unpin_context(struct i915_execbuffer *eb)
+{
+	intel_context_unpin(eb->context);
+}
+
+static int
+eb_select_engine(struct i915_execbuffer *eb,
 		 struct drm_file *file,
 		 struct drm_i915_gem_execbuffer2 *args)
 {
+	struct drm_i915_private *i915 = eb->i915;
 	unsigned int user_ring_id = args->flags & I915_EXEC_RING_MASK;
 	struct intel_engine_cs *engine;
 
 	if (user_ring_id > I915_USER_RINGS) {
 		DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
-		return NULL;
+		return -EINVAL;
 	}
 
 	if ((user_ring_id != I915_EXEC_BSD) &&
 	    ((args->flags & I915_EXEC_BSD_MASK) != 0)) {
 		DRM_DEBUG("execbuf with non bsd ring but with invalid "
 			  "bsd dispatch flags: %d\n", (int)(args->flags));
-		return NULL;
+		return -EINVAL;
 	}
 
-	if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
+	if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(i915, VCS1)) {
 		unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
 
 		if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
-			bsd_idx = gen8_dispatch_bsd_engine(dev_priv, file);
+			bsd_idx = gen8_dispatch_bsd_engine(i915, file);
 		} else if (bsd_idx >= I915_EXEC_BSD_RING1 &&
 			   bsd_idx <= I915_EXEC_BSD_RING2) {
 			bsd_idx >>= I915_EXEC_BSD_SHIFT;
@@ -2120,20 +2152,20 @@ eb_select_engine(struct drm_i915_private *dev_priv,
 		} else {
 			DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
 				  bsd_idx);
-			return NULL;
+			return -EINVAL;
 		}
 
-		engine = dev_priv->engine[_VCS(bsd_idx)];
+		engine = i915->engine[_VCS(bsd_idx)];
 	} else {
-		engine = dev_priv->engine[user_ring_map[user_ring_id]];
+		engine = i915->engine[user_ring_map[user_ring_id]];
 	}
 
 	if (!engine) {
 		DRM_DEBUG("execbuf with invalid ring: %u\n", user_ring_id);
-		return NULL;
+		return -EINVAL;
 	}
 
-	return engine;
+	return eb_pin_context(eb, engine);
 }
 
 static void
@@ -2275,7 +2307,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	struct i915_execbuffer eb;
 	struct dma_fence *in_fence = NULL;
 	struct sync_file *out_fence = NULL;
-	intel_wakeref_t wakeref;
 	int out_fence_fd = -1;
 	int err;
 
@@ -2335,12 +2366,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	if (unlikely(err))
 		goto err_destroy;
 
-	eb.engine = eb_select_engine(eb.i915, file, args);
-	if (!eb.engine) {
-		err = -EINVAL;
-		goto err_engine;
-	}
-
 	/*
 	 * Take a local wakeref for preparing to dispatch the execbuf as
 	 * we expect to access the hardware fairly frequently in the
@@ -2348,16 +2373,20 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	 * wakeref that we hold until the GPU has been idle for at least
 	 * 100ms.
 	 */
-	wakeref = intel_runtime_pm_get(eb.i915);
+	intel_gt_pm_get(eb.i915);
 
 	err = i915_mutex_lock_interruptible(dev);
 	if (err)
 		goto err_rpm;
 
-	err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+	err = eb_select_engine(&eb, file, args);
 	if (unlikely(err))
 		goto err_unlock;
 
+	err = eb_wait_for_ring(&eb); /* may temporarily drop struct_mutex */
+	if (unlikely(err))
+		goto err_engine;
+
 	err = eb_relocate(&eb);
 	if (err) {
 		/*
@@ -2441,7 +2470,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	GEM_BUG_ON(eb.reloc_cache.rq);
 
 	/* Allocate a request for this batch buffer nice and early. */
-	eb.request = i915_request_alloc(eb.engine, eb.ctx);
+	eb.request = i915_request_create(eb.context);
 	if (IS_ERR(eb.request)) {
 		err = PTR_ERR(eb.request);
 		goto err_batch_unpin;
@@ -2479,8 +2508,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 	trace_i915_request_queue(eb.request, eb.batch_flags);
 	err = eb_submit(&eb);
 err_request:
-	i915_request_add(eb.request);
 	add_to_client(eb.request, file);
+	i915_request_add(eb.request);
 
 	if (fences)
 		signal_fence_array(&eb, fences);
@@ -2502,12 +2531,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
 err_vma:
 	if (eb.exec)
 		eb_release_vmas(&eb);
+err_engine:
+	eb_unpin_context(&eb);
 err_unlock:
 	mutex_unlock(&dev->struct_mutex);
 err_rpm:
-	intel_runtime_pm_put(eb.i915, wakeref);
-err_engine:
-	i915_gem_context_put(eb.ctx);
+	intel_gt_pm_put(eb.i915);
+	i915_gem_context_put(eb.gem_context);
 err_destroy:
 	eb_destroy(&eb);
 err_out_fence:
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index d116b5e69826..818d6dc6b8c8 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -786,7 +786,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	struct drm_i915_private *i915 = engine->i915;
 	struct intel_context *ce;
 	struct i915_request *rq;
-	int ret;
 
 	/*
 	 * Preempt contexts are reserved for exclusive use to inject a
@@ -795,14 +794,6 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	 */
 	GEM_BUG_ON(ctx == i915->preempt_context);
 
-	/*
-	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
-	 * EIO if the GPU is already wedged.
-	 */
-	ret = i915_terminally_wedged(i915);
-	if (ret)
-		return ERR_PTR(ret);
-
 	/*
 	 * Pinning the contexts may generate requests in order to acquire
 	 * GGTT space, so do this first before we reserve a seqno for
-- 
2.20.1
