From: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH v2] drm/i915/guc: Remove preemption support for current fw
Date: Mon,  1 Jul 2019 13:28:38 -0700
Message-ID: <20190701202838.8396-1-daniele.ceraolospurio@intel.com>

From: Chris Wilson <chris@chris-wilson.co.uk>

Preemption via GuC submission is not supported in its current legacy
incarnation. The current FW does support a similar preemption flow via
H2G, but it is class-based instead of instance-based, which doesn't fit
well with the i915 tracking. To fix this, the firmware is being updated
to better support our needs with a new flow, so we can safely remove
the old code.

v2 (Daniele): resurrect & rebase, reword commit message, remove
preempt_context as well

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michal Wajdeczko <michal.wajdeczko@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Cc: John Harrison <John.C.Harrison@Intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c  |  17 --
 drivers/gpu/drm/i915/gt/intel_engine_cs.c    |  13 --
 drivers/gpu/drm/i915/gt/intel_engine_types.h |   1 -
 drivers/gpu/drm/i915/gt/intel_gt_pm.c        |   4 -
 drivers/gpu/drm/i915/i915_debugfs.c          |   5 -
 drivers/gpu/drm/i915/i915_drv.h              |   2 -
 drivers/gpu/drm/i915/intel_guc.c             |  31 ---
 drivers/gpu/drm/i915/intel_guc.h             |   9 -
 drivers/gpu/drm/i915/intel_guc_submission.c  | 220 +------------------
 drivers/gpu/drm/i915/selftests/intel_guc.c   |  31 +--
 10 files changed, 14 insertions(+), 319 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 8a9787cf0cd0..9c695910bc43 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -644,18 +644,12 @@ static void init_contexts(struct drm_i915_private *i915)
 	init_llist_head(&i915->contexts.free_list);
 }
 
-static bool needs_preempt_context(struct drm_i915_private *i915)
-{
-	return USES_GUC_SUBMISSION(i915);
-}
-
 int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 {
 	struct i915_gem_context *ctx;
 
 	/* Reassure ourselves we are only called once */
 	GEM_BUG_ON(dev_priv->kernel_context);
-	GEM_BUG_ON(dev_priv->preempt_context);
 
 	intel_engine_init_ctx_wa(dev_priv->engine[RCS0]);
 	init_contexts(dev_priv);
@@ -677,15 +671,6 @@ int i915_gem_contexts_init(struct drm_i915_private *dev_priv)
 	GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count));
 	dev_priv->kernel_context = ctx;
 
-	/* highest priority; preempting task */
-	if (needs_preempt_context(dev_priv)) {
-		ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX);
-		if (!IS_ERR(ctx))
-			dev_priv->preempt_context = ctx;
-		else
-			DRM_ERROR("Failed to create preempt context; disabling preemption\n");
-	}
-
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			 DRIVER_CAPS(dev_priv)->has_logical_contexts ?
 			 "logical" : "fake");
@@ -696,8 +681,6 @@ void i915_gem_contexts_fini(struct drm_i915_private *i915)
 {
 	lockdep_assert_held(&i915->drm.struct_mutex);
 
-	if (i915->preempt_context)
-		destroy_kernel_context(&i915->preempt_context);
 	destroy_kernel_context(&i915->kernel_context);
 
 	/* Must free all deferred contexts (via flush_workqueue) first */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d1508f0b4c84..55b11409c1f0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -842,15 +842,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	if (ret)
 		return ret;
 
-	/*
-	 * Similarly the preempt context must always be available so that
-	 * we can interrupt the engine at any time. However, as preemption
-	 * is optional, we allow it to fail.
-	 */
-	if (i915->preempt_context)
-		pin_context(i915->preempt_context, engine,
-			    &engine->preempt_context);
-
 	ret = measure_breadcrumb_dw(engine);
 	if (ret < 0)
 		goto err_unpin;
@@ -862,8 +853,6 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
 	return 0;
 
 err_unpin:
-	if (engine->preempt_context)
-		intel_context_unpin(engine->preempt_context);
 	intel_context_unpin(engine->kernel_context);
 	return ret;
 }
@@ -888,8 +877,6 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 	if (engine->default_state)
 		i915_gem_object_put(engine->default_state);
 
-	if (engine->preempt_context)
-		intel_context_unpin(engine->preempt_context);
 	intel_context_unpin(engine->kernel_context);
 	GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 7e056114344e..8be63019d707 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -288,7 +288,6 @@ struct intel_engine_cs {
 	struct llist_head barrier_tasks;
 
 	struct intel_context *kernel_context; /* pinned */
-	struct intel_context *preempt_context; /* pinned; optional */
 
 	intel_engine_mask_t saturated; /* submitting semaphores too late? */
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 36ba80e6a0b7..da81b3a92d16 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -145,10 +145,6 @@ int intel_gt_resume(struct intel_gt *gt)
 		if (ce)
 			ce->ops->reset(ce);
 
-		ce = engine->preempt_context;
-		if (ce)
-			ce->ops->reset(ce);
-
 		engine->serial++; /* kernel context lost */
 		err = engine->resume(engine);
 
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index eeecdad0e3ca..02eaa15d47c0 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -2017,11 +2017,6 @@ static int i915_guc_info(struct seq_file *m, void *data)
 
 	seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client);
 	i915_guc_client_info(m, dev_priv, guc->execbuf_client);
-	if (guc->preempt_client) {
-		seq_printf(m, "\nGuC preempt client @ %p:\n",
-			   guc->preempt_client);
-		i915_guc_client_info(m, dev_priv, guc->preempt_client);
-	}
 
 	/* Add more as required ... */
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 02dd9f9f3a89..1cdd06539dc5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1378,8 +1378,6 @@ struct drm_i915_private {
 	struct intel_engine_cs *engine[I915_NUM_ENGINES];
 	/* Context used internally to idle the GPU and setup initial state */
 	struct i915_gem_context *kernel_context;
-	/* Context only to be used for injecting preemption commands */
-	struct i915_gem_context *preempt_context;
 	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
 					    [MAX_ENGINE_INSTANCE + 1];
 
diff --git a/drivers/gpu/drm/i915/intel_guc.c b/drivers/gpu/drm/i915/intel_guc.c
index c40a6efdd33a..501b74f44374 100644
--- a/drivers/gpu/drm/i915/intel_guc.c
+++ b/drivers/gpu/drm/i915/intel_guc.c
@@ -101,8 +101,6 @@ void intel_guc_init_early(struct intel_guc *guc)
 
 static int guc_init_wq(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-
 	/*
 	 * GuC log buffer flush work item has to do register access to
 	 * send the ack to GuC and this work item, if not synced before
@@ -122,31 +120,6 @@ static int guc_init_wq(struct intel_guc *guc)
 		return -ENOMEM;
 	}
 
-	/*
-	 * Even though both sending GuC action, and adding a new workitem to
-	 * GuC workqueue are serialized (each with its own locking), since
-	 * we're using mutliple engines, it's possible that we're going to
-	 * issue a preempt request with two (or more - each for different
-	 * engine) workitems in GuC queue. In this situation, GuC may submit
-	 * all of them, which will make us very confused.
-	 * Our preemption contexts may even already be complete - before we
-	 * even had the chance to sent the preempt action to GuC!. Rather
-	 * than introducing yet another lock, we can just use ordered workqueue
-	 * to make sure we're always sending a single preemption request with a
-	 * single workitem.
-	 */
-	if (HAS_LOGICAL_RING_PREEMPTION(dev_priv) &&
-	    USES_GUC_SUBMISSION(dev_priv)) {
-		guc->preempt_wq = alloc_ordered_workqueue("i915-guc_preempt",
-							  WQ_HIGHPRI);
-		if (!guc->preempt_wq) {
-			destroy_workqueue(guc->log.relay.flush_wq);
-			DRM_ERROR("Couldn't allocate workqueue for GuC "
-				  "preemption\n");
-			return -ENOMEM;
-		}
-	}
-
 	return 0;
 }
 
@@ -154,10 +127,6 @@ static void guc_fini_wq(struct intel_guc *guc)
 {
 	struct workqueue_struct *wq;
 
-	wq = fetch_and_zero(&guc->preempt_wq);
-	if (wq)
-		destroy_workqueue(wq);
-
 	wq = fetch_and_zero(&guc->log.relay.flush_wq);
 	if (wq)
 		destroy_workqueue(wq);
diff --git a/drivers/gpu/drm/i915/intel_guc.h b/drivers/gpu/drm/i915/intel_guc.h
index d91c96679dbb..ec1038c1f50e 100644
--- a/drivers/gpu/drm/i915/intel_guc.h
+++ b/drivers/gpu/drm/i915/intel_guc.h
@@ -37,11 +37,6 @@
 
 struct __guc_ads_blob;
 
-struct guc_preempt_work {
-	struct work_struct work;
-	struct intel_engine_cs *engine;
-};
-
 /*
  * Top level structure of GuC. It handles firmware loading and manages client
  * pool and doorbells. intel_guc owns a intel_guc_client to replace the legacy
@@ -76,10 +71,6 @@ struct intel_guc {
 	void *shared_data_vaddr;
 
 	struct intel_guc_client *execbuf_client;
-	struct intel_guc_client *preempt_client;
-
-	struct guc_preempt_work preempt_work[I915_NUM_ENGINES];
-	struct workqueue_struct *preempt_wq;
 
 	DECLARE_BITMAP(doorbell_bitmap, GUC_NUM_DOORBELLS);
 	/* Cyclic counter mod pagesize	*/
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 12c22359fdac..8520bb224175 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -46,11 +46,10 @@ enum {
  *
  * GuC client:
  * A intel_guc_client refers to a submission path through GuC. Currently, there
- * are two clients. One of them (the execbuf_client) is charged with all
- * submissions to the GuC, the other one (preempt_client) is responsible for
- * preempting the execbuf_client. This struct is the owner of a doorbell, a
- * process descriptor and a workqueue (all of them inside a single gem object
- * that contains all required pages for these elements).
+ * is only one client, which is charged with all submissions to the GuC. This
+ * struct is the owner of a doorbell, a process descriptor and a workqueue (all
+ * of them inside a single gem object that contains all required pages for these
+ * elements).
  *
  * GuC stage descriptor:
  * During initialization, the driver allocates a static pool of 1024 such
@@ -88,12 +87,6 @@ enum {
  *
  */
 
-static inline u32 intel_hws_preempt_done_address(struct intel_engine_cs *engine)
-{
-	return (i915_ggtt_offset(engine->status_page.vma) +
-		I915_GEM_HWS_PREEMPT_ADDR);
-}
-
 static inline struct i915_priolist *to_priolist(struct rb_node *rb)
 {
 	return rb_entry(rb, struct i915_priolist, node);
@@ -563,126 +556,6 @@ static void flush_ggtt_writes(struct i915_vma *vma)
 		intel_uncore_posting_read_fw(&i915->uncore, GUC_STATUS);
 }
 
-static void inject_preempt_context(struct work_struct *work)
-{
-	struct guc_preempt_work *preempt_work =
-		container_of(work, typeof(*preempt_work), work);
-	struct intel_engine_cs *engine = preempt_work->engine;
-	struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
-					     preempt_work[engine->id]);
-	struct intel_guc_client *client = guc->preempt_client;
-	struct guc_stage_desc *stage_desc = __get_stage_desc(client);
-	struct intel_context *ce = engine->preempt_context;
-	u32 data[7];
-
-	if (!ce->ring->emit) { /* recreate upon load/resume */
-		u32 addr = intel_hws_preempt_done_address(engine);
-		u32 *cs;
-
-		cs = ce->ring->vaddr;
-		if (engine->class == RENDER_CLASS) {
-			cs = gen8_emit_ggtt_write_rcs(cs,
-						      GUC_PREEMPT_FINISHED,
-						      addr,
-						      PIPE_CONTROL_CS_STALL);
-		} else {
-			cs = gen8_emit_ggtt_write(cs,
-						  GUC_PREEMPT_FINISHED,
-						  addr,
-						  0);
-			*cs++ = MI_NOOP;
-			*cs++ = MI_NOOP;
-		}
-		*cs++ = MI_USER_INTERRUPT;
-		*cs++ = MI_NOOP;
-
-		ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
-		GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
-
-		flush_ggtt_writes(ce->ring->vma);
-	}
-
-	spin_lock_irq(&client->wq_lock);
-	guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
-			   GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
-	spin_unlock_irq(&client->wq_lock);
-
-	/*
-	 * If GuC firmware performs an engine reset while that engine had
-	 * a preemption pending, it will set the terminated attribute bit
-	 * on our preemption stage descriptor. GuC firmware retains all
-	 * pending work items for a high-priority GuC client, unlike the
-	 * normal-priority GuC client where work items are dropped. It
-	 * wants to make sure the preempt-to-idle work doesn't run when
-	 * scheduling resumes, and uses this bit to inform its scheduler
-	 * and presumably us as well. Our job is to clear it for the next
-	 * preemption after reset, otherwise that and future preemptions
-	 * will never complete. We'll just clear it every time.
-	 */
-	stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
-
-	data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
-	data[1] = client->stage_id;
-	data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
-		  INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
-	data[3] = engine->guc_id;
-	data[4] = guc->execbuf_client->priority;
-	data[5] = guc->execbuf_client->stage_id;
-	data[6] = intel_guc_ggtt_offset(guc, guc->shared_data);
-
-	if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
-		intel_write_status_page(engine,
-					I915_GEM_HWS_PREEMPT,
-					GUC_PREEMPT_NONE);
-		tasklet_schedule(&engine->execlists.tasklet);
-	}
-
-	(void)I915_SELFTEST_ONLY(engine->execlists.preempt_hang.count++);
-}
-
-/*
- * We're using user interrupt and HWSP value to mark that preemption has
- * finished and GPU is idle. Normally, we could unwind and continue similar to
- * execlists submission path. Unfortunately, with GuC we also need to wait for
- * it to finish its own postprocessing, before attempting to submit. Otherwise
- * GuC may silently ignore our submissions, and thus we risk losing request at
- * best, executing out-of-order and causing kernel panic at worst.
- */
-#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
-static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
-{
-	struct intel_guc *guc = &engine->i915->guc;
-	struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
-	struct guc_ctx_report *report =
-		&data->preempt_ctx_report[engine->guc_id];
-
-	if (wait_for_atomic(report->report_return_status ==
-			    INTEL_GUC_REPORT_STATUS_COMPLETE,
-			    GUC_PREEMPT_POSTPROCESS_DELAY_MS))
-		DRM_ERROR("Timed out waiting for GuC preemption report\n");
-	/*
-	 * GuC is expecting that we're also going to clear the affected context
-	 * counter, let's also reset the return status to not depend on GuC
-	 * resetting it after recieving another preempt action
-	 */
-	report->affected_count = 0;
-	report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
-}
-
-static void complete_preempt_context(struct intel_engine_cs *engine)
-{
-	struct intel_engine_execlists *execlists = &engine->execlists;
-
-	if (inject_preempt_hang(execlists))
-		return;
-
-	execlists_cancel_port_requests(execlists);
-	execlists_unwind_incomplete_requests(execlists);
-
-	wait_for_guc_preempt_report(engine);
-	intel_write_status_page(engine, I915_GEM_HWS_PREEMPT, GUC_PREEMPT_NONE);
-}
-
 static void guc_submit(struct intel_engine_cs *engine,
 		       struct i915_request **out,
 		       struct i915_request **end)
@@ -742,21 +615,6 @@ static void __guc_dequeue(struct intel_engine_cs *engine)
 	lockdep_assert_held(&engine->active.lock);
 
 	if (last) {
-		if (intel_engine_has_preemption(engine)) {
-			struct guc_preempt_work *preempt_work =
-				&engine->i915->guc.preempt_work[engine->id];
-			int prio = execlists->queue_priority_hint;
-
-			if (i915_scheduler_need_preempt(prio, rq_prio(last))) {
-				intel_write_status_page(engine,
-							I915_GEM_HWS_PREEMPT,
-							GUC_PREEMPT_INPROGRESS);
-				queue_work(engine->i915->guc.preempt_wq,
-					   &preempt_work->work);
-				return;
-			}
-		}
-
 		if (*++first)
 			return;
 
@@ -820,12 +678,7 @@ static void guc_submission_tasklet(unsigned long data)
 		memmove(execlists->inflight, port, rem * sizeof(*port));
 	}
 
-	if (intel_read_status_page(engine, I915_GEM_HWS_PREEMPT) ==
-	    GUC_PREEMPT_FINISHED)
-		complete_preempt_context(engine);
-
-	if (!intel_read_status_page(engine, I915_GEM_HWS_PREEMPT))
-		__guc_dequeue(engine);
+	__guc_dequeue(engine);
 
 	spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -846,16 +699,6 @@ static void guc_reset_prepare(struct intel_engine_cs *engine)
 	 * prevents the race.
 	 */
 	__tasklet_disable_sync_once(&execlists->tasklet);
-
-	/*
-	 * We're using worker to queue preemption requests from the tasklet in
-	 * GuC submission mode.
-	 * Even though tasklet was disabled, we may still have a worker queued.
-	 * Let's make sure that all workers scheduled before disabling the
-	 * tasklet are completed before continuing with the reset.
-	 */
-	if (engine->i915->guc.preempt_wq)
-		flush_workqueue(engine->i915->guc.preempt_wq);
 }
 
 static void guc_reset(struct intel_engine_cs *engine, bool stalled)
@@ -1112,7 +955,6 @@ static int guc_clients_create(struct intel_guc *guc)
 	struct intel_guc_client *client;
 
 	GEM_BUG_ON(guc->execbuf_client);
-	GEM_BUG_ON(guc->preempt_client);
 
 	client = guc_client_alloc(dev_priv,
 				  INTEL_INFO(dev_priv)->engine_mask,
@@ -1124,20 +966,6 @@ static int guc_clients_create(struct intel_guc *guc)
 	}
 	guc->execbuf_client = client;
 
-	if (dev_priv->preempt_context) {
-		client = guc_client_alloc(dev_priv,
-					  INTEL_INFO(dev_priv)->engine_mask,
-					  GUC_CLIENT_PRIORITY_KMD_HIGH,
-					  dev_priv->preempt_context);
-		if (IS_ERR(client)) {
-			DRM_ERROR("Failed to create GuC client for preemption!\n");
-			guc_client_free(guc->execbuf_client);
-			guc->execbuf_client = NULL;
-			return PTR_ERR(client);
-		}
-		guc->preempt_client = client;
-	}
-
 	return 0;
 }
 
@@ -1145,10 +973,6 @@ static void guc_clients_destroy(struct intel_guc *guc)
 {
 	struct intel_guc_client *client;
 
-	client = fetch_and_zero(&guc->preempt_client);
-	if (client)
-		guc_client_free(client);
-
 	client = fetch_and_zero(&guc->execbuf_client);
 	if (client)
 		guc_client_free(client);
@@ -1191,28 +1015,11 @@ static void __guc_client_disable(struct intel_guc_client *client)
 
 static int guc_clients_enable(struct intel_guc *guc)
 {
-	int ret;
-
-	ret = __guc_client_enable(guc->execbuf_client);
-	if (ret)
-		return ret;
-
-	if (guc->preempt_client) {
-		ret = __guc_client_enable(guc->preempt_client);
-		if (ret) {
-			__guc_client_disable(guc->execbuf_client);
-			return ret;
-		}
-	}
-
-	return 0;
+	return __guc_client_enable(guc->execbuf_client);
 }
 
 static void guc_clients_disable(struct intel_guc *guc)
 {
-	if (guc->preempt_client)
-		__guc_client_disable(guc->preempt_client);
-
 	if (guc->execbuf_client)
 		__guc_client_disable(guc->execbuf_client);
 }
@@ -1223,9 +1030,6 @@ static void guc_clients_disable(struct intel_guc *guc)
  */
 int intel_guc_submission_init(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	int ret;
 
 	if (guc->stage_desc_pool)
@@ -1245,11 +1049,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
 	if (ret)
 		goto err_pool;
 
-	for_each_engine(engine, dev_priv, id) {
-		guc->preempt_work[id].engine = engine;
-		INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
-	}
-
 	return 0;
 
 err_pool:
@@ -1259,13 +1058,6 @@ int intel_guc_submission_init(struct intel_guc *guc)
 
 void intel_guc_submission_fini(struct intel_guc *guc)
 {
-	struct drm_i915_private *dev_priv = guc_to_i915(guc);
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
-	for_each_engine(engine, dev_priv, id)
-		cancel_work_sync(&guc->preempt_work[id].work);
-
 	guc_clients_destroy(guc);
 	WARN_ON(!guc_verify_doorbells(guc));
 
diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c
index 6ca8584cd64c..1a1915e44f6b 100644
--- a/drivers/gpu/drm/i915/selftests/intel_guc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_guc.c
@@ -103,13 +103,10 @@ static int ring_doorbell_nop(struct intel_guc_client *client)
 /*
  * Basic client sanity check, handy to validate create_clients.
  */
-static int validate_client(struct intel_guc_client *client,
-			   int client_priority,
-			   bool is_preempt_client)
+static int validate_client(struct intel_guc_client *client, int client_priority)
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
-	struct i915_gem_context *ctx_owner = is_preempt_client ?
-			dev_priv->preempt_context : dev_priv->kernel_context;
+	struct i915_gem_context *ctx_owner = dev_priv->kernel_context;
 
 	if (client->owner != ctx_owner ||
 	    client->engines != INTEL_INFO(dev_priv)->engine_mask ||
@@ -163,7 +160,7 @@ static int igt_guc_clients(void *args)
 	 */
 	guc_clients_disable(guc);
 	guc_clients_destroy(guc);
-	if (guc->execbuf_client || guc->preempt_client) {
+	if (guc->execbuf_client) {
 		pr_err("guc_clients_destroy lied!\n");
 		err = -EINVAL;
 		goto unlock;
@@ -177,24 +174,14 @@ static int igt_guc_clients(void *args)
 	GEM_BUG_ON(!guc->execbuf_client);
 
 	err = validate_client(guc->execbuf_client,
-			      GUC_CLIENT_PRIORITY_KMD_NORMAL, false);
+			      GUC_CLIENT_PRIORITY_KMD_NORMAL);
 	if (err) {
 		pr_err("execbug client validation failed\n");
 		goto out;
 	}
 
-	if (guc->preempt_client) {
-		err = validate_client(guc->preempt_client,
-				      GUC_CLIENT_PRIORITY_KMD_HIGH, true);
-		if (err) {
-			pr_err("preempt client validation failed\n");
-			goto out;
-		}
-	}
-
-	/* each client should now have reserved a doorbell */
-	if (!has_doorbell(guc->execbuf_client) ||
-	    (guc->preempt_client && !has_doorbell(guc->preempt_client))) {
+	/* the client should now have reserved a doorbell */
+	if (!has_doorbell(guc->execbuf_client)) {
 		pr_err("guc_clients_create didn't reserve doorbells\n");
 		err = -EINVAL;
 		goto out;
@@ -204,8 +191,7 @@ static int igt_guc_clients(void *args)
 	guc_clients_enable(guc);
 
 	/* each client should now have received a doorbell */
-	if (!client_doorbell_in_sync(guc->execbuf_client) ||
-	    !client_doorbell_in_sync(guc->preempt_client)) {
+	if (!client_doorbell_in_sync(guc->execbuf_client)) {
 		pr_err("failed to initialize the doorbells\n");
 		err = -EINVAL;
 		goto out;
@@ -300,8 +286,7 @@ static int igt_guc_doorbells(void *arg)
 			goto out;
 		}
 
-		err = validate_client(clients[i],
-				      i % GUC_CLIENT_PRIORITY_NUM, false);
+		err = validate_client(clients[i], i % GUC_CLIENT_PRIORITY_NUM);
 		if (err) {
 			pr_err("[%d] client_alloc sanity check failed!\n", i);
 			err = -EINVAL;
-- 
2.20.1

Thread overview: 4+ messages
2019-07-01 20:28 Daniele Ceraolo Spurio [this message]
2019-07-01 21:00 ` ✗ Fi.CI.SPARSE: warning for drm/i915/guc: Remove preemption support for current fw (rev2) Patchwork
2019-07-01 21:29 ` ✓ Fi.CI.BAT: success " Patchwork
2019-07-03  1:10 ` ✓ Fi.CI.IGT: " Patchwork
