* [PATCH 1/3] drm/i915: Make request's wait-for-space explicit
@ 2017-11-10 22:50 Chris Wilson
  2017-11-10 22:50 ` [PATCH 2/3] drm/i915: Automatic i915_switch_context for legacy Chris Wilson
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: Chris Wilson @ 2017-11-10 22:50 UTC (permalink / raw)
  To: intel-gfx

At the start of building a request, we would wait for roughly enough
space to fit the average request (to reduce the likelihood of having to
wait and abort partway through request construction). To achieve this,
we would try to begin a 0-length command packet, which just adds extra
confusion, so make the wait-for-space explicit. In the next patch we
want to move it from the backend to i915_gem_request_alloc() so that it
can ensure the wait-for-space is the first operation in building a new
request.
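
In other words, the backend allocation hook changes shape roughly like
this (a condensed sketch of the hunks below, with unrelated setup and
the surrounding function bodies trimmed):

	/* before: an implicit wait hidden inside a 0-dword packet */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;
	cs = intel_ring_begin(request, 0);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/* after: the wait for ring space is a self-describing call */
	request->reserved_space += EXECLISTS_REQUEST_SIZE;
	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
	if (ret)
		return ret;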

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/intel_lrc.c        |  8 ++---
 drivers/gpu/drm/i915/intel_ringbuffer.c | 56 +++++++++++++++++++++------------
 drivers/gpu/drm/i915/intel_ringbuffer.h |  1 +
 3 files changed, 41 insertions(+), 24 deletions(-)

diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 58d050a9a866..ebd9596fe83b 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1180,7 +1180,7 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
 	struct intel_context *ce = &request->ctx->engine[engine->id];
-	u32 *cs;
+	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
 
@@ -1190,9 +1190,9 @@ static int execlists_request_alloc(struct drm_i915_gem_request *request)
 	 */
 	request->reserved_space += EXECLISTS_REQUEST_SIZE;
 
-	cs = intel_ring_begin(request, 0);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
+	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+	if (ret)
+		return ret;
 
 	/* Note that after this point, we have committed to using
 	 * this request as it is being used to both track the
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3321b801e77d..12e734b29463 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1578,7 +1578,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
 
 static int ring_request_alloc(struct drm_i915_gem_request *request)
 {
-	u32 *cs;
+	int ret;
 
 	GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
 
@@ -1588,37 +1588,24 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	 */
 	request->reserved_space += LEGACY_REQUEST_SIZE;
 
-	cs = intel_ring_begin(request, 0);
-	if (IS_ERR(cs))
-		return PTR_ERR(cs);
+	ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
+	if (ret)
+		return ret;
 
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
 }
 
-static noinline int wait_for_space(struct drm_i915_gem_request *req,
-				   unsigned int bytes)
+static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
 {
-	struct intel_ring *ring = req->ring;
 	struct drm_i915_gem_request *target;
 	long timeout;
 
-	lockdep_assert_held(&req->i915->drm.struct_mutex);
+	lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
 
 	if (intel_ring_update_space(ring) >= bytes)
 		return 0;
 
-	/*
-	 * Space is reserved in the ringbuffer for finalising the request,
-	 * as that cannot be allowed to fail. During request finalisation,
-	 * reserved_space is set to 0 to stop the overallocation and the
-	 * assumption is that then we never need to wait (which has the
-	 * risk of failing with EINTR).
-	 *
-	 * See also i915_gem_request_alloc() and i915_add_request().
-	 */
-	GEM_BUG_ON(!req->reserved_space);
-
 	list_for_each_entry(target, &ring->request_list, ring_link) {
 		/* Would completion of this request free enough space? */
 		if (bytes <= __intel_ring_space(target->postfix,
@@ -1642,6 +1629,22 @@ static noinline int wait_for_space(struct drm_i915_gem_request *req,
 	return 0;
 }
 
+int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes)
+{
+	GEM_BUG_ON(bytes > ring->effective_size);
+	if (unlikely(bytes > ring->effective_size - ring->emit))
+		bytes += ring->size - ring->emit;
+
+	if (unlikely(bytes > ring->space)) {
+		int ret = wait_for_space(ring, bytes);
+		if (unlikely(ret))
+			return ret;
+	}
+
+	GEM_BUG_ON(ring->space < bytes);
+	return 0;
+}
+
 u32 *intel_ring_begin(struct drm_i915_gem_request *req,
 		      unsigned int num_dwords)
 {
@@ -1681,7 +1684,20 @@ u32 *intel_ring_begin(struct drm_i915_gem_request *req,
 	}
 
 	if (unlikely(total_bytes > ring->space)) {
-		int ret = wait_for_space(req, total_bytes);
+		int ret;
+
+		/*
+		 * Space is reserved in the ringbuffer for finalising the
+		 * request, as that cannot be allowed to fail. During request
+		 * finalisation, reserved_space is set to 0 to stop the
+		 * overallocation and the assumption is that then we never need
+		 * to wait (which has the risk of failing with EINTR).
+		 *
+		 * See also i915_gem_request_alloc() and i915_add_request().
+		 */
+		GEM_BUG_ON(!req->reserved_space);
+
+		ret = wait_for_space(ring, total_bytes);
 		if (unlikely(ret))
 			return ERR_PTR(ret);
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 2b0ae5468f48..0531d939fe1a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -661,6 +661,7 @@ void intel_legacy_submission_resume(struct drm_i915_private *dev_priv);
 
 int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 
+int intel_ring_wait_for_space(struct intel_ring *ring, unsigned int bytes);
 u32 __must_check *intel_ring_begin(struct drm_i915_gem_request *req,
 				   unsigned int n);
 
-- 
2.15.0

* [PATCH 2/3] drm/i915: Automatic i915_switch_context for legacy
  2017-11-10 22:50 [PATCH 1/3] drm/i915: Make request's wait-for-space explicit Chris Wilson
@ 2017-11-10 22:50 ` Chris Wilson
  2017-11-10 22:50 ` [PATCH 3/3] drm/i915: Remove redundant intel_autoenable_gt_powersave() Chris Wilson
  2017-11-10 23:35 ` ✗ Fi.CI.BAT: failure for series starting with [1/3] drm/i915: Make request's wait-for-space explicit Patchwork
  2 siblings, 0 replies; 4+ messages in thread
From: Chris Wilson @ 2017-11-10 22:50 UTC (permalink / raw)
  To: intel-gfx

During request construction, after pinning the context, we know whether
or not we have to emit a context switch. So move this common operation
from every caller into i915_gem_request_alloc() itself.

v2: Always submit the request if we emitted some commands during request
construction, as typically it also involves changes in global state.

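The resulting allocation flow, condensed from the diff below (locking,
recycling of old requests and most error handling omitted), looks
roughly like this:

	ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
	if (ret)
		goto err_unreserve;
	...
	/* record where the request starts before emitting any commands */
	req->head = req->ring->emit;

	/* for legacy ringbuffer submission this now emits the context switch */
	ret = engine->request_alloc(req);
	if (ret)
		goto err_ctx;
	...
err_ctx:
	/* commands already emitted touch global state, so submit them anyway */
	if (req->ring->emit != req->head) {
		__i915_add_request(req, false);
		return ERR_PTR(ret);
	}
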
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/i915_gem.c                   |  2 +-
 drivers/gpu/drm/i915/i915_gem_context.c           |  7 +------
 drivers/gpu/drm/i915/i915_gem_execbuffer.c        |  8 --------
 drivers/gpu/drm/i915/i915_gem_request.c           | 18 +++++++++++++-----
 drivers/gpu/drm/i915/intel_ringbuffer.c           |  4 ++++
 drivers/gpu/drm/i915/selftests/i915_gem_context.c |  4 ----
 drivers/gpu/drm/i915/selftests/i915_gem_request.c | 10 ----------
 drivers/gpu/drm/i915/selftests/intel_hangcheck.c  |  4 ----
 8 files changed, 19 insertions(+), 38 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index a2e5a54128c1..bf4995f93357 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5001,7 +5001,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 			goto out_ctx;
 		}
 
-		err = i915_switch_context(rq);
+		err = 0;
 		if (engine->init_context)
 			err = engine->init_context(rq);
 
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 2db040695035..c1efbaf02bf2 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -842,8 +842,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 	struct intel_engine_cs *engine = req->engine;
 
 	lockdep_assert_held(&req->i915->drm.struct_mutex);
-	if (i915_modparams.enable_execlists)
-		return 0;
+	GEM_BUG_ON(i915_modparams.enable_execlists);
 
 	if (!req->ctx->engine[engine->id].state) {
 		struct i915_gem_context *to = req->ctx;
@@ -899,7 +898,6 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 
 	for_each_engine(engine, dev_priv, id) {
 		struct drm_i915_gem_request *req;
-		int ret;
 
 		if (engine_has_idle_kernel_context(engine))
 			continue;
@@ -922,10 +920,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
 								 GFP_KERNEL);
 		}
 
-		ret = i915_switch_context(req);
 		i915_add_request(req);
-		if (ret)
-			return ret;
 	}
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 435ed95df144..85c7e8afe26e 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1115,10 +1115,6 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
 	if (err)
 		goto err_request;
 
-	err = i915_switch_context(rq);
-	if (err)
-		goto err_request;
-
 	err = eb->engine->emit_bb_start(rq,
 					batch->node.start, PAGE_SIZE,
 					cache->gen > 5 ? 0 : I915_DISPATCH_SECURE);
@@ -1965,10 +1961,6 @@ static int eb_submit(struct i915_execbuffer *eb)
 	if (err)
 		return err;
 
-	err = i915_switch_context(eb->request);
-	if (err)
-		return err;
-
 	if (eb->args->flags & I915_EXEC_GEN7_SOL_RESET) {
 		err = i915_reset_gen7_sol_offsets(eb->request);
 		if (err)
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index e0d6221022a8..445495f9893c 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -624,6 +624,10 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	if (ret)
 		goto err_unpin;
 
+	ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST);
+	if (ret)
+		goto err_unreserve;
+
 	/* Move the oldest request to the slab-cache (if not in use!) */
 	req = list_first_entry_or_null(&engine->timeline->requests,
 				       typeof(*req), link);
@@ -703,10 +707,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
 	GEM_BUG_ON(req->reserved_space < engine->emit_breadcrumb_sz);
 
-	ret = engine->request_alloc(req);
-	if (ret)
-		goto err_ctx;
-
 	/* Record the position of the start of the request so that
 	 * should we detect the updated seqno part-way through the
 	 * GPU processing the request, we never over-estimate the
@@ -714,16 +714,24 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	 */
 	req->head = req->ring->emit;
 
+	ret = engine->request_alloc(req);
+	if (ret)
+		goto err_ctx;
+
 	/* Check that we didn't interrupt ourselves with a new request */
 	GEM_BUG_ON(req->timeline->seqno != req->fence.seqno);
 	return req;
 
 err_ctx:
+	if (req->ring->emit != req->head) {
+		__i915_add_request(req, false);
+		return ERR_PTR(ret);
+	}
+
 	/* Make sure we didn't add ourselves to external state before freeing */
 	GEM_BUG_ON(!list_empty(&req->active_list));
 	GEM_BUG_ON(!list_empty(&req->priotree.signalers_list));
 	GEM_BUG_ON(!list_empty(&req->priotree.waiters_list));
-
 	kmem_cache_free(dev_priv->requests, req);
 err_unreserve:
 	unreserve_engine(engine);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 12e734b29463..be98868115bf 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1592,6 +1592,10 @@ static int ring_request_alloc(struct drm_i915_gem_request *request)
 	if (ret)
 		return ret;
 
+	ret = i915_switch_context(request);
+	if (ret)
+		return ret;
+
 	request->reserved_space -= LEGACY_REQUEST_SIZE;
 	return 0;
 }
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
index def5052862ae..61fcfa2c4dfd 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_context.c
@@ -162,10 +162,6 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
 	if (err)
 		goto err_request;
 
-	err = i915_switch_context(rq);
-	if (err)
-		goto err_request;
-
 	flags = 0;
 	if (INTEL_GEN(vm->i915) <= 5)
 		flags |= I915_DISPATCH_SECURE;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_request.c b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
index a999161e8db1..9a35ebd5c876 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem_request.c
@@ -463,10 +463,6 @@ empty_request(struct intel_engine_cs *engine,
 	if (err)
 		goto out_request;
 
-	err = i915_switch_context(request);
-	if (err)
-		goto out_request;
-
 	err = engine->emit_bb_start(request,
 				    batch->node.start,
 				    batch->node.size,
@@ -678,9 +674,6 @@ static int live_all_engines(void *arg)
 		err = engine->emit_flush(request[id], EMIT_INVALIDATE);
 		GEM_BUG_ON(err);
 
-		err = i915_switch_context(request[id]);
-		GEM_BUG_ON(err);
-
 		err = engine->emit_bb_start(request[id],
 					    batch->node.start,
 					    batch->node.size,
@@ -800,9 +793,6 @@ static int live_sequential_engines(void *arg)
 		err = engine->emit_flush(request[id], EMIT_INVALIDATE);
 		GEM_BUG_ON(err);
 
-		err = i915_switch_context(request[id]);
-		GEM_BUG_ON(err);
-
 		err = engine->emit_bb_start(request[id],
 					    batch->node.start,
 					    batch->node.size,
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 71ce06680d66..cafe39e2e0f7 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -118,10 +118,6 @@ static int emit_recurse_batch(struct hang *h,
 	if (err)
 		goto unpin_hws;
 
-	err = i915_switch_context(rq);
-	if (err)
-		goto unpin_hws;
-
 	i915_vma_move_to_active(vma, rq, 0);
 	if (!i915_gem_object_has_active_reference(vma->obj)) {
 		i915_gem_object_get(vma->obj);
-- 
2.15.0

* [PATCH 3/3] drm/i915: Remove redundant intel_autoenable_gt_powersave()
  2017-11-10 22:50 [PATCH 1/3] drm/i915: Make request's wait-for-space explicit Chris Wilson
  2017-11-10 22:50 ` [PATCH 2/3] drm/i915: Automatic i915_switch_context for legacy Chris Wilson
@ 2017-11-10 22:50 ` Chris Wilson
  2017-11-10 23:35 ` ✗ Fi.CI.BAT: failure for series starting with [1/3] drm/i915: Make request's wait-for-space explicit Patchwork
  2 siblings, 0 replies; 4+ messages in thread
From: Chris Wilson @ 2017-11-10 22:50 UTC (permalink / raw)
  To: intel-gfx

Now that we always execute a context switch upon module load, there is
no need to queue a delayed task for doing so. The purpose of the delayed
task was to enable GT powersaving, for which we need the HW state to be
valid (i.e. having loaded a context and initialised basic state). We
used to defer this operation because it was historically slow (due to
slow register polling, since fixed by commit 1758b90e38f5 ("drm/i915:
Use a hybrid scheme for fast register waits")), but now the requirement
to save the default HW state means the context switch happens at load
anyway.

v2: Load the kernel context (to provide the power context) upon resume.
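
The resume path then becomes, roughly (condensed from the i915_gem.c
hunk below; the goto-based error unwind is folded into a single branch):

	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);
	i915->gt.resume(i915);

	/* reloading the kernel context replaces the old autoenable work */
	if (i915_gem_init_hw(i915) ||
	    i915_gem_switch_to_kernel_context(i915)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		i915_gem_set_wedged(i915);
	}

	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);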

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/i915_drv.c  | 11 +------
 drivers/gpu/drm/i915/i915_drv.h  |  1 -
 drivers/gpu/drm/i915/i915_gem.c  | 31 ++++++++++++++-----
 drivers/gpu/drm/i915/intel_drv.h |  1 -
 drivers/gpu/drm/i915/intel_pm.c  | 66 ----------------------------------------
 5 files changed, 24 insertions(+), 86 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index d97fe9c9439a..8b8a3b46a564 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1684,8 +1684,6 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	intel_csr_ucode_resume(dev_priv);
 
-	i915_gem_resume(dev_priv);
-
 	i915_restore_state(dev_priv);
 	intel_pps_unlock_regs_wa(dev_priv);
 	intel_opregion_setup(dev_priv);
@@ -1706,12 +1704,7 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	drm_mode_config_reset(dev);
 
-	mutex_lock(&dev->struct_mutex);
-	if (i915_gem_init_hw(dev_priv)) {
-		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
-		i915_gem_set_wedged(dev_priv);
-	}
-	mutex_unlock(&dev->struct_mutex);
+	i915_gem_resume(dev_priv);
 
 	intel_guc_resume(dev_priv);
 
@@ -1746,8 +1739,6 @@ static int i915_drm_resume(struct drm_device *dev)
 
 	intel_opregion_notify_adapter(dev_priv, PCI_D0);
 
-	intel_autoenable_gt_powersave(dev_priv);
-
 	enable_rpm_wakeref_asserts(dev_priv);
 
 	return 0;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 40012b6daea2..07b9e78bbe96 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1392,7 +1392,6 @@ struct intel_gen6_power_mgmt {
 	struct intel_rps rps;
 	struct intel_rc6 rc6;
 	struct intel_llc_pstate llc_pstate;
-	struct delayed_work autoenable_work;
 };
 
 /* defined intel_pm.c */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index bf4995f93357..80963c81c2ca 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4806,23 +4806,38 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
 	return ret;
 }
 
-void i915_gem_resume(struct drm_i915_private *dev_priv)
+void i915_gem_resume(struct drm_i915_private *i915)
 {
-	struct drm_device *dev = &dev_priv->drm;
+	WARN_ON(i915->gt.awake);
 
-	WARN_ON(dev_priv->gt.awake);
+	mutex_lock(&i915->drm.struct_mutex);
+	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
 
-	mutex_lock(&dev->struct_mutex);
-	i915_gem_restore_gtt_mappings(dev_priv);
-	i915_gem_restore_fences(dev_priv);
+	i915_gem_restore_gtt_mappings(i915);
+	i915_gem_restore_fences(i915);
 
 	/* As we didn't flush the kernel context before suspend, we cannot
 	 * guarantee that the context image is complete. So let's just reset
 	 * it and start again.
 	 */
-	dev_priv->gt.resume(dev_priv);
+	i915->gt.resume(i915);
 
-	mutex_unlock(&dev->struct_mutex);
+	if (i915_gem_init_hw(i915))
+		goto err_wedged;
+
+	/* Always reload a context for powersaving. */
+	if (i915_gem_switch_to_kernel_context(i915))
+		goto err_wedged;
+
+out_unlock:
+	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+	mutex_unlock(&i915->drm.struct_mutex);
+	return;
+
+err_wedged:
+	DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+	i915_gem_set_wedged(i915);
+	goto out_unlock;
 }
 
 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a0a5cead868c..10bec8796239 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1880,7 +1880,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_enable_gt_powersave(struct drm_i915_private *dev_priv);
-void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_disable_gt_powersave(struct drm_i915_private *dev_priv);
 void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv);
 void gen6_rps_busy(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 48a127ac6de5..c1a56809f143 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7959,8 +7959,6 @@ void intel_init_gt_powersave(struct drm_i915_private *dev_priv)
 	rps->boost_freq = rps->max_freq;
 
 	mutex_unlock(&dev_priv->pcu_lock);
-
-	intel_autoenable_gt_powersave(dev_priv);
 }
 
 void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv)
@@ -7985,9 +7983,6 @@ void intel_suspend_gt_powersave(struct drm_i915_private *dev_priv)
 	if (INTEL_GEN(dev_priv) < 6)
 		return;
 
-	if (cancel_delayed_work_sync(&dev_priv->gt_pm.autoenable_work))
-		intel_runtime_pm_put(dev_priv);
-
 	/* gen6_rps_idle() will be called later to disable interrupts */
 }
 
@@ -8146,65 +8141,6 @@ void intel_enable_gt_powersave(struct drm_i915_private *dev_priv)
 	mutex_unlock(&dev_priv->pcu_lock);
 }
 
-static void __intel_autoenable_gt_powersave(struct work_struct *work)
-{
-	struct drm_i915_private *dev_priv =
-		container_of(work,
-			     typeof(*dev_priv),
-			     gt_pm.autoenable_work.work);
-	struct intel_engine_cs *rcs;
-	struct drm_i915_gem_request *req;
-
-	rcs = dev_priv->engine[RCS];
-	if (rcs->last_retired_context)
-		goto out;
-
-	if (!rcs->init_context)
-		goto out;
-
-	mutex_lock(&dev_priv->drm.struct_mutex);
-
-	req = i915_gem_request_alloc(rcs, dev_priv->kernel_context);
-	if (IS_ERR(req))
-		goto unlock;
-
-	if (!i915_modparams.enable_execlists && i915_switch_context(req) == 0)
-		rcs->init_context(req);
-
-	/* Mark the device busy, calling intel_enable_gt_powersave() */
-	i915_add_request(req);
-
-unlock:
-	mutex_unlock(&dev_priv->drm.struct_mutex);
-out:
-	intel_runtime_pm_put(dev_priv);
-}
-
-void intel_autoenable_gt_powersave(struct drm_i915_private *dev_priv)
-{
-	if (IS_IRONLAKE_M(dev_priv)) {
-		ironlake_enable_drps(dev_priv);
-		intel_init_emon(dev_priv);
-	} else if (INTEL_INFO(dev_priv)->gen >= 6) {
-		/*
-		 * PCU communication is slow and this doesn't need to be
-		 * done at any specific time, so do this out of our fast path
-		 * to make resume and init faster.
-		 *
-		 * We depend on the HW RC6 power context save/restore
-		 * mechanism when entering D3 through runtime PM suspend. So
-		 * disable RPM until RPS/RC6 is properly setup. We can only
-		 * get here via the driver load/system resume/runtime resume
-		 * paths, so the _noresume version is enough (and in case of
-		 * runtime resume it's necessary).
-		 */
-		if (queue_delayed_work(dev_priv->wq,
-				       &dev_priv->gt_pm.autoenable_work,
-				       round_jiffies_up_relative(HZ)))
-			intel_runtime_pm_get_noresume(dev_priv);
-	}
-}
-
 static void ibx_init_clock_gating(struct drm_i915_private *dev_priv)
 {
 	/*
@@ -9435,8 +9371,6 @@ void intel_pm_setup(struct drm_i915_private *dev_priv)
 {
 	mutex_init(&dev_priv->pcu_lock);
 
-	INIT_DELAYED_WORK(&dev_priv->gt_pm.autoenable_work,
-			  __intel_autoenable_gt_powersave);
 	atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0);
 
 	dev_priv->runtime_pm.suspended = false;
-- 
2.15.0

* ✗ Fi.CI.BAT: failure for series starting with [1/3] drm/i915: Make request's wait-for-space explicit
  2017-11-10 22:50 [PATCH 1/3] drm/i915: Make request's wait-for-space explicit Chris Wilson
  2017-11-10 22:50 ` [PATCH 2/3] drm/i915: Automatic i915_switch_context for legacy Chris Wilson
  2017-11-10 22:50 ` [PATCH 3/3] drm/i915: Remove redundant intel_autoenable_gt_powersave() Chris Wilson
@ 2017-11-10 23:35 ` Patchwork
  2 siblings, 0 replies; 4+ messages in thread
From: Patchwork @ 2017-11-10 23:35 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/3] drm/i915: Make request's wait-for-space explicit
URL   : https://patchwork.freedesktop.org/series/33644/
State : failure

== Summary ==

Series 33644v1 series starting with [1/3] drm/i915: Make request's wait-for-space explicit
https://patchwork.freedesktop.org/api/1.0/series/33644/revisions/1/mbox/

Test gem_exec_reloc:
        Subgroup basic-gtt-read-active:
                fail       -> PASS       (fi-gdg-551) fdo#102582
Test kms_frontbuffer_tracking:
        Subgroup basic:
                pass       -> FAIL       (fi-skl-6600u)

fdo#102582 https://bugs.freedesktop.org/show_bug.cgi?id=102582

fi-bdw-5557u     total:289  pass:268  dwarn:0   dfail:0   fail:0   skip:21  time:441s
fi-bdw-gvtdvm    total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:457s
fi-blb-e6850     total:289  pass:223  dwarn:1   dfail:0   fail:0   skip:65  time:378s
fi-bsw-n3050     total:289  pass:243  dwarn:0   dfail:0   fail:0   skip:46  time:537s
fi-bwr-2160      total:289  pass:183  dwarn:0   dfail:0   fail:0   skip:106 time:274s
fi-bxt-dsi       total:289  pass:259  dwarn:0   dfail:0   fail:0   skip:30  time:501s
fi-bxt-j4205     total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:504s
fi-byt-j1900     total:289  pass:254  dwarn:0   dfail:0   fail:0   skip:35  time:497s
fi-byt-n2820     total:289  pass:250  dwarn:0   dfail:0   fail:0   skip:39  time:486s
fi-elk-e7500     total:289  pass:229  dwarn:0   dfail:0   fail:0   skip:60  time:428s
fi-gdg-551       total:289  pass:178  dwarn:1   dfail:0   fail:1   skip:109 time:261s
fi-glk-1         total:289  pass:261  dwarn:0   dfail:0   fail:0   skip:28  time:540s
fi-hsw-4770      total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:433s
fi-hsw-4770r     total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:439s
fi-ilk-650       total:289  pass:228  dwarn:0   dfail:0   fail:0   skip:61  time:429s
fi-ivb-3520m     total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:482s
fi-ivb-3770      total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:463s
fi-kbl-7500u     total:289  pass:264  dwarn:1   dfail:0   fail:0   skip:24  time:484s
fi-kbl-7560u     total:289  pass:270  dwarn:0   dfail:0   fail:0   skip:19  time:524s
fi-kbl-7567u     total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:473s
fi-kbl-r         total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:531s
fi-skl-6260u     total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:460s
fi-skl-6600u     total:289  pass:261  dwarn:0   dfail:0   fail:1   skip:27  time:546s
fi-skl-6700hq    total:289  pass:263  dwarn:0   dfail:0   fail:0   skip:26  time:565s
fi-skl-6700k     total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:518s
fi-skl-6770hq    total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:491s
fi-skl-gvtdvm    total:289  pass:266  dwarn:0   dfail:0   fail:0   skip:23  time:459s
fi-snb-2520m     total:289  pass:250  dwarn:0   dfail:0   fail:0   skip:39  time:553s
fi-snb-2600      total:289  pass:249  dwarn:0   dfail:0   fail:0   skip:40  time:420s
Blacklisted hosts:
fi-cfl-s         total:289  pass:254  dwarn:3   dfail:0   fail:0   skip:32  time:525s
fi-cnl-y         total:289  pass:261  dwarn:0   dfail:0   fail:1   skip:27  time:570s
fi-glk-dsi       total:289  pass:259  dwarn:0   dfail:0   fail:0   skip:30  time:492s

1342299348dec3bd4890284390a2209a00beafce drm-tip: 2017y-11m-10d-18h-11m-22s UTC integration manifest
41a400751d79 drm/i915: Remove redundant intel_autoenable_gt_powersave()
acd92505cfb5 drm/i915: Automatic i915_switch_context for legacy
b5b1bafd9bd5 drm/i915: Make request's wait-for-space explicit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_7075/