* [PATCH] drm/i915: Use engine->context_pin() to report the intel_ring
@ 2017-05-03 12:46 Chris Wilson
2017-05-03 13:04 ` ✓ Fi.CI.BAT: success for " Patchwork
2017-05-03 17:04 ` [PATCH] " Oscar Mateo
0 siblings, 2 replies; 3+ messages in thread
From: Chris Wilson @ 2017-05-03 12:46 UTC (permalink / raw)
To: intel-gfx
Since unifying ringbuffer/execlist submission to use
engine->context_pin, we ensure that the intel_ring is available before
we start constructing the request. We can therefore move the assignment
of the request->ring to the central i915_gem_request_alloc() and not
require it in every engine->request_alloc() callback. Another small step
towards simplification (of the core, but at a cost of handling error
pointers in less important callers of engine->context_pin).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Oscar Mateo <oscar.mateo@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
drivers/gpu/drm/i915/gvt/scheduler.c | 6 ++++--
drivers/gpu/drm/i915/i915_gem_request.c | 9 ++++++---
drivers/gpu/drm/i915/i915_perf.c | 7 +++++--
drivers/gpu/drm/i915/intel_engine_cs.c | 7 ++++---
drivers/gpu/drm/i915/intel_lrc.c | 14 ++++++--------
drivers/gpu/drm/i915/intel_ringbuffer.c | 16 ++++++++--------
drivers/gpu/drm/i915/intel_ringbuffer.h | 4 ++--
drivers/gpu/drm/i915/selftests/mock_engine.c | 8 ++++----
8 files changed, 39 insertions(+), 32 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1256fe21850b..6ae286cb5804 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -180,6 +180,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
+ struct intel_ring *ring;
int ret;
gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
@@ -198,8 +199,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
* shadow_ctx pages invalid. So gvt need to pin itself. After update
* the guest context, gvt can unpin the shadow_ctx safely.
*/
- ret = engine->context_pin(engine, shadow_ctx);
- if (ret) {
+ ring = engine->context_pin(engine, shadow_ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
gvt_vgpu_err("fail to pin shadow context\n");
workload->status = ret;
mutex_unlock(&dev_priv->drm.struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 8d2a5c8e5005..a988de74c398 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -551,6 +551,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
{
struct drm_i915_private *dev_priv = engine->i915;
struct drm_i915_gem_request *req;
+ struct intel_ring *ring;
int ret;
lockdep_assert_held(&dev_priv->drm.struct_mutex);
@@ -565,9 +566,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
* GGTT space, so do this first before we reserve a seqno for
* ourselves.
*/
- ret = engine->context_pin(engine, ctx);
- if (ret)
- return ERR_PTR(ret);
+ ring = engine->context_pin(engine, ctx);
+ if (IS_ERR(ring))
+ return ERR_CAST(ring);
+ GEM_BUG_ON(!ring);
ret = reserve_seqno(engine);
if (ret)
@@ -633,6 +635,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
req->i915 = dev_priv;
req->engine = engine;
req->ctx = ctx;
+ req->ring = ring;
/* No zalloc, must clear what we need by hand */
req->global_seqno = 0;
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 060b171480d5..21e44942f561 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -744,6 +744,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
{
struct drm_i915_private *dev_priv = stream->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[RCS];
+ struct intel_ring *ring;
int ret;
ret = i915_mutex_lock_interruptible(&dev_priv->drm);
@@ -755,9 +756,11 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
*
* NB: implied RCS engine...
*/
- ret = engine->context_pin(engine, stream->ctx);
- if (ret)
+ ring = engine->context_pin(engine, stream->ctx);
+ if (IS_ERR(ring)) {
+ ret = PTR_ERR(ring);
goto unlock;
+ }
/* Explicitly track the ID (instead of calling i915_ggtt_offset()
* on the fly) considering the difference with gen8+ and
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index 6d3d83876da9..483ed7635692 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -469,6 +469,7 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
*/
int intel_engine_init_common(struct intel_engine_cs *engine)
{
+ struct intel_ring *ring;
int ret;
engine->set_default_submission(engine);
@@ -480,9 +481,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
* be available. To avoid this we always pin the default
* context.
*/
- ret = engine->context_pin(engine, engine->i915->kernel_context);
- if (ret)
- return ret;
+ ring = engine->context_pin(engine, engine->i915->kernel_context);
+ if (IS_ERR(ring))
+ return PTR_ERR(ring);
ret = intel_engine_init_breadcrumbs(engine);
if (ret)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 0909549ad320..ab5bb22dc23d 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -740,8 +740,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
/* XXX Do we need to preempt to make room for us and our deps? */
}
-static int execlists_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_ring *
+execlists_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
unsigned int flags;
@@ -751,7 +752,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
if (ce->pin_count++)
- return 0;
+ return ce->ring;
GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
if (!ce->state) {
@@ -788,7 +789,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
ce->state->obj->mm.dirty = true;
i915_gem_context_get(ctx);
- return 0;
+ return ce->ring;
unpin_map:
i915_gem_object_unpin_map(ce->state->obj);
@@ -796,7 +797,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
__i915_vma_unpin(ce->state);
err:
ce->pin_count = 0;
- return ret;
+ return ERR_PTR(ret);
}
static void execlists_context_unpin(struct intel_engine_cs *engine,
@@ -833,9 +834,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += EXECLISTS_REQUEST_SIZE;
- GEM_BUG_ON(!ce->ring);
- request->ring = ce->ring;
-
if (i915.enable_guc_submission) {
/*
* Check that the GuC has space for the request before
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 00d60605cd89..3cafa85d62eb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1484,8 +1484,9 @@ alloc_context_vma(struct intel_engine_cs *engine)
return vma;
}
-static int intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_ring *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
int ret;
@@ -1493,7 +1494,7 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
if (ce->pin_count++)
- return 0;
+ return engine->buffer;
GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
if (!ce->state && engine->context_size) {
@@ -1527,11 +1528,13 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
ce->initialised = true;
i915_gem_context_get(ctx);
- return 0;
+
+ /* One ringbuffer to rule them all */
+ return engine->buffer;
error:
ce->pin_count = 0;
- return ret;
+ return ERR_PTR(ret);
}
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
@@ -1643,9 +1646,6 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- GEM_BUG_ON(!request->engine->buffer);
- request->ring = request->engine->buffer;
-
cs = intel_ring_begin(request, 0);
if (IS_ERR(cs))
return PTR_ERR(cs);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3aed97bf0bb6..ec16fb6fde62 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -260,8 +260,8 @@ struct intel_engine_cs {
void (*set_default_submission)(struct intel_engine_cs *engine);
- int (*context_pin)(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx);
+ struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx);
void (*context_unpin)(struct intel_engine_cs *engine,
struct i915_gem_context *ctx);
int (*request_alloc)(struct drm_i915_gem_request *req);
diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
index b8e53bdc3cc4..5b18a2dc19a8 100644
--- a/drivers/gpu/drm/i915/selftests/mock_engine.c
+++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
@@ -52,11 +52,12 @@ static void hw_delay_complete(unsigned long data)
spin_unlock(&engine->hw_lock);
}
-static int mock_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_ring *
+mock_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
i915_gem_context_get(ctx);
- return 0;
+ return engine->buffer;
}
static void mock_context_unpin(struct intel_engine_cs *engine,
@@ -72,7 +73,6 @@ static int mock_request_alloc(struct drm_i915_gem_request *request)
INIT_LIST_HEAD(&mock->link);
mock->delay = 0;
- request->ring = request->engine->buffer;
return 0;
}
--
2.11.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 3+ messages in thread
* ✓ Fi.CI.BAT: success for drm/i915: Use engine->context_pin() to report the intel_ring
2017-05-03 12:46 [PATCH] drm/i915: Use engine->context_pin() to report the intel_ring Chris Wilson
@ 2017-05-03 13:04 ` Patchwork
2017-05-03 17:04 ` [PATCH] " Oscar Mateo
1 sibling, 0 replies; 3+ messages in thread
From: Patchwork @ 2017-05-03 13:04 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/i915: Use engine->context_pin() to report the intel_ring
URL : https://patchwork.freedesktop.org/series/23884/
State : success
== Summary ==
Series 23884v1 drm/i915: Use engine->context_pin() to report the intel_ring
https://patchwork.freedesktop.org/api/1.0/series/23884/revisions/1/mbox/
Test gem_exec_suspend:
Subgroup basic-s4-devices:
pass -> DMESG-WARN (fi-kbl-7560u) fdo#100125
fdo#100125 https://bugs.freedesktop.org/show_bug.cgi?id=100125
fi-bdw-5557u total:278 pass:267 dwarn:0 dfail:0 fail:0 skip:11 time:430s
fi-bdw-gvtdvm total:278 pass:256 dwarn:8 dfail:0 fail:0 skip:14 time:426s
fi-bsw-n3050 total:278 pass:242 dwarn:0 dfail:0 fail:0 skip:36 time:582s
fi-bxt-j4205 total:278 pass:259 dwarn:0 dfail:0 fail:0 skip:19 time:508s
fi-bxt-t5700 total:278 pass:258 dwarn:0 dfail:0 fail:0 skip:20 time:552s
fi-byt-j1900 total:278 pass:254 dwarn:0 dfail:0 fail:0 skip:24 time:489s
fi-byt-n2820 total:278 pass:250 dwarn:0 dfail:0 fail:0 skip:28 time:486s
fi-hsw-4770 total:278 pass:262 dwarn:0 dfail:0 fail:0 skip:16 time:411s
fi-hsw-4770r total:278 pass:262 dwarn:0 dfail:0 fail:0 skip:16 time:407s
fi-ilk-650 total:278 pass:228 dwarn:0 dfail:0 fail:0 skip:50 time:417s
fi-ivb-3520m total:278 pass:260 dwarn:0 dfail:0 fail:0 skip:18 time:482s
fi-ivb-3770 total:278 pass:260 dwarn:0 dfail:0 fail:0 skip:18 time:468s
fi-kbl-7500u total:278 pass:260 dwarn:0 dfail:0 fail:0 skip:18 time:462s
fi-kbl-7560u total:278 pass:267 dwarn:1 dfail:0 fail:0 skip:10 time:569s
fi-skl-6260u total:278 pass:268 dwarn:0 dfail:0 fail:0 skip:10 time:462s
fi-skl-6700hq total:278 pass:261 dwarn:0 dfail:0 fail:0 skip:17 time:572s
fi-skl-6700k total:278 pass:256 dwarn:4 dfail:0 fail:0 skip:18 time:466s
fi-skl-6770hq total:278 pass:268 dwarn:0 dfail:0 fail:0 skip:10 time:498s
fi-skl-gvtdvm total:278 pass:265 dwarn:0 dfail:0 fail:0 skip:13 time:430s
fi-snb-2520m total:278 pass:250 dwarn:0 dfail:0 fail:0 skip:28 time:529s
fi-snb-2600 total:278 pass:249 dwarn:0 dfail:0 fail:0 skip:29 time:404s
26ef9256ab34901a953bd5338a3c94388c745c99 drm-tip: 2017y-05m-03d-10h-11m-12s UTC integration manifest
acf8b75 drm/i915: Use engine->context_pin() to report the intel_ring
== Logs ==
For more details see: https://intel-gfx-ci.01.org/CI/Patchwork_4611/
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] drm/i915: Use engine->context_pin() to report the intel_ring
2017-05-03 12:46 [PATCH] drm/i915: Use engine->context_pin() to report the intel_ring Chris Wilson
2017-05-03 13:04 ` ✓ Fi.CI.BAT: success for " Patchwork
@ 2017-05-03 17:04 ` Oscar Mateo
1 sibling, 0 replies; 3+ messages in thread
From: Oscar Mateo @ 2017-05-03 17:04 UTC (permalink / raw)
To: Chris Wilson, intel-gfx
On 05/03/2017 12:46 PM, Chris Wilson wrote:
> Since unifying ringbuffer/execlist submission to use
> engine->context_pin, we ensure that the intel_ring is available before
> we start constructing the request. We can therefore move the assignment
> of the request->ring to the central i915_gem_request_alloc() and not
> require it in every engine->request_alloc() callback. Another small step
> towards simplification (of the core, but at a cost of handling error
> pointers in less important callers of engine->context_pin).
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Oscar Mateo <oscar.mateo@intel.com>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> ---
> drivers/gpu/drm/i915/gvt/scheduler.c | 6 ++++--
> drivers/gpu/drm/i915/i915_gem_request.c | 9 ++++++---
> drivers/gpu/drm/i915/i915_perf.c | 7 +++++--
> drivers/gpu/drm/i915/intel_engine_cs.c | 7 ++++---
> drivers/gpu/drm/i915/intel_lrc.c | 14 ++++++--------
> drivers/gpu/drm/i915/intel_ringbuffer.c | 16 ++++++++--------
> drivers/gpu/drm/i915/intel_ringbuffer.h | 4 ++--
> drivers/gpu/drm/i915/selftests/mock_engine.c | 8 ++++----
> 8 files changed, 39 insertions(+), 32 deletions(-)
Much better than my first impulse to reuse ce->ring in legacy ringbuffer
mode
Reviewed-by: Oscar Mateo <oscar.mateo@intel.com>
> diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
> index 1256fe21850b..6ae286cb5804 100644
> --- a/drivers/gpu/drm/i915/gvt/scheduler.c
> +++ b/drivers/gpu/drm/i915/gvt/scheduler.c
> @@ -180,6 +180,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
> struct intel_engine_cs *engine = dev_priv->engine[ring_id];
> struct drm_i915_gem_request *rq;
> struct intel_vgpu *vgpu = workload->vgpu;
> + struct intel_ring *ring;
> int ret;
>
> gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
> @@ -198,8 +199,9 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
> * shadow_ctx pages invalid. So gvt need to pin itself. After update
> * the guest context, gvt can unpin the shadow_ctx safely.
> */
> - ret = engine->context_pin(engine, shadow_ctx);
> - if (ret) {
> + ring = engine->context_pin(engine, shadow_ctx);
> + if (IS_ERR(ring)) {
> + ret = PTR_ERR(ring);
> gvt_vgpu_err("fail to pin shadow context\n");
> workload->status = ret;
> mutex_unlock(&dev_priv->drm.struct_mutex);
> diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
> index 8d2a5c8e5005..a988de74c398 100644
> --- a/drivers/gpu/drm/i915/i915_gem_request.c
> +++ b/drivers/gpu/drm/i915/i915_gem_request.c
> @@ -551,6 +551,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
> {
> struct drm_i915_private *dev_priv = engine->i915;
> struct drm_i915_gem_request *req;
> + struct intel_ring *ring;
> int ret;
>
> lockdep_assert_held(&dev_priv->drm.struct_mutex);
> @@ -565,9 +566,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
> * GGTT space, so do this first before we reserve a seqno for
> * ourselves.
> */
> - ret = engine->context_pin(engine, ctx);
> - if (ret)
> - return ERR_PTR(ret);
> + ring = engine->context_pin(engine, ctx);
> + if (IS_ERR(ring))
> + return ERR_CAST(ring);
> + GEM_BUG_ON(!ring);
>
> ret = reserve_seqno(engine);
> if (ret)
> @@ -633,6 +635,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
> req->i915 = dev_priv;
> req->engine = engine;
> req->ctx = ctx;
> + req->ring = ring;
>
> /* No zalloc, must clear what we need by hand */
> req->global_seqno = 0;
> diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
> index 060b171480d5..21e44942f561 100644
> --- a/drivers/gpu/drm/i915/i915_perf.c
> +++ b/drivers/gpu/drm/i915/i915_perf.c
> @@ -744,6 +744,7 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
> {
> struct drm_i915_private *dev_priv = stream->dev_priv;
> struct intel_engine_cs *engine = dev_priv->engine[RCS];
> + struct intel_ring *ring;
> int ret;
>
> ret = i915_mutex_lock_interruptible(&dev_priv->drm);
> @@ -755,9 +756,11 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
> *
> * NB: implied RCS engine...
> */
> - ret = engine->context_pin(engine, stream->ctx);
> - if (ret)
> + ring = engine->context_pin(engine, stream->ctx);
> + if (IS_ERR(ring)) {
> + ret = PTR_ERR(ring);
> goto unlock;
> + }
>
> /* Explicitly track the ID (instead of calling i915_ggtt_offset()
> * on the fly) considering the difference with gen8+ and
> diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
> index 6d3d83876da9..483ed7635692 100644
> --- a/drivers/gpu/drm/i915/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/intel_engine_cs.c
> @@ -469,6 +469,7 @@ static void intel_engine_cleanup_scratch(struct intel_engine_cs *engine)
> */
> int intel_engine_init_common(struct intel_engine_cs *engine)
> {
> + struct intel_ring *ring;
> int ret;
>
> engine->set_default_submission(engine);
> @@ -480,9 +481,9 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
> * be available. To avoid this we always pin the default
> * context.
> */
> - ret = engine->context_pin(engine, engine->i915->kernel_context);
> - if (ret)
> - return ret;
> + ring = engine->context_pin(engine, engine->i915->kernel_context);
> + if (IS_ERR(ring))
> + return PTR_ERR(ring);
>
> ret = intel_engine_init_breadcrumbs(engine);
> if (ret)
> diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
> index 0909549ad320..ab5bb22dc23d 100644
> --- a/drivers/gpu/drm/i915/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/intel_lrc.c
> @@ -740,8 +740,9 @@ static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
> /* XXX Do we need to preempt to make room for us and our deps? */
> }
>
> -static int execlists_context_pin(struct intel_engine_cs *engine,
> - struct i915_gem_context *ctx)
> +static struct intel_ring *
> +execlists_context_pin(struct intel_engine_cs *engine,
> + struct i915_gem_context *ctx)
> {
> struct intel_context *ce = &ctx->engine[engine->id];
> unsigned int flags;
> @@ -751,7 +752,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
> lockdep_assert_held(&ctx->i915->drm.struct_mutex);
>
> if (ce->pin_count++)
> - return 0;
> + return ce->ring;
> GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
>
> if (!ce->state) {
> @@ -788,7 +789,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
> ce->state->obj->mm.dirty = true;
>
> i915_gem_context_get(ctx);
> - return 0;
> + return ce->ring;
>
> unpin_map:
> i915_gem_object_unpin_map(ce->state->obj);
> @@ -796,7 +797,7 @@ static int execlists_context_pin(struct intel_engine_cs *engine,
> __i915_vma_unpin(ce->state);
> err:
> ce->pin_count = 0;
> - return ret;
> + return ERR_PTR(ret);
> }
>
> static void execlists_context_unpin(struct intel_engine_cs *engine,
> @@ -833,9 +834,6 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
> */
> request->reserved_space += EXECLISTS_REQUEST_SIZE;
>
> - GEM_BUG_ON(!ce->ring);
> - request->ring = ce->ring;
> -
> if (i915.enable_guc_submission) {
> /*
> * Check that the GuC has space for the request before
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
> index 00d60605cd89..3cafa85d62eb 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.c
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
> @@ -1484,8 +1484,9 @@ alloc_context_vma(struct intel_engine_cs *engine)
> return vma;
> }
>
> -static int intel_ring_context_pin(struct intel_engine_cs *engine,
> - struct i915_gem_context *ctx)
> +static struct intel_ring *
> +intel_ring_context_pin(struct intel_engine_cs *engine,
> + struct i915_gem_context *ctx)
> {
> struct intel_context *ce = &ctx->engine[engine->id];
> int ret;
> @@ -1493,7 +1494,7 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
> lockdep_assert_held(&ctx->i915->drm.struct_mutex);
>
> if (ce->pin_count++)
> - return 0;
> + return engine->buffer;
> GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
>
> if (!ce->state && engine->context_size) {
> @@ -1527,11 +1528,13 @@ static int intel_ring_context_pin(struct intel_engine_cs *engine,
> ce->initialised = true;
>
> i915_gem_context_get(ctx);
> - return 0;
> +
> + /* One ringbuffer to rule them all */
> + return engine->buffer;
>
> error:
> ce->pin_count = 0;
> - return ret;
> + return ERR_PTR(ret);
> }
>
> static void intel_ring_context_unpin(struct intel_engine_cs *engine,
> @@ -1643,9 +1646,6 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
> */
> request->reserved_space += LEGACY_REQUEST_SIZE;
>
> - GEM_BUG_ON(!request->engine->buffer);
> - request->ring = request->engine->buffer;
> -
> cs = intel_ring_begin(request, 0);
> if (IS_ERR(cs))
> return PTR_ERR(cs);
> diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
> index 3aed97bf0bb6..ec16fb6fde62 100644
> --- a/drivers/gpu/drm/i915/intel_ringbuffer.h
> +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
> @@ -260,8 +260,8 @@ struct intel_engine_cs {
>
> void (*set_default_submission)(struct intel_engine_cs *engine);
>
> - int (*context_pin)(struct intel_engine_cs *engine,
> - struct i915_gem_context *ctx);
> + struct intel_ring *(*context_pin)(struct intel_engine_cs *engine,
> + struct i915_gem_context *ctx);
> void (*context_unpin)(struct intel_engine_cs *engine,
> struct i915_gem_context *ctx);
> int (*request_alloc)(struct drm_i915_gem_request *req);
> diff --git a/drivers/gpu/drm/i915/selftests/mock_engine.c b/drivers/gpu/drm/i915/selftests/mock_engine.c
> index b8e53bdc3cc4..5b18a2dc19a8 100644
> --- a/drivers/gpu/drm/i915/selftests/mock_engine.c
> +++ b/drivers/gpu/drm/i915/selftests/mock_engine.c
> @@ -52,11 +52,12 @@ static void hw_delay_complete(unsigned long data)
> spin_unlock(&engine->hw_lock);
> }
>
> -static int mock_context_pin(struct intel_engine_cs *engine,
> - struct i915_gem_context *ctx)
> +static struct intel_ring *
> +mock_context_pin(struct intel_engine_cs *engine,
> + struct i915_gem_context *ctx)
> {
> i915_gem_context_get(ctx);
> - return 0;
> + return engine->buffer;
> }
>
> static void mock_context_unpin(struct intel_engine_cs *engine,
> @@ -72,7 +73,6 @@ static int mock_request_alloc(struct drm_i915_gem_request *request)
> INIT_LIST_HEAD(&mock->link);
> mock->delay = 0;
>
> - request->ring = request->engine->buffer;
> return 0;
> }
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2017-05-04 0:04 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-03 12:46 [PATCH] drm/i915: Use engine->context_pin() to report the intel_ring Chris Wilson
2017-05-03 13:04 ` ✓ Fi.CI.BAT: success for " Patchwork
2017-05-03 17:04 ` [PATCH] " Oscar Mateo
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.