All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler
@ 2019-10-10  7:14 Chris Wilson
  2019-10-10  7:14 ` [PATCH 02/10] drm/i915/execlists: Leave tell-tales as to why pending[] is bad Chris Wilson
                   ` (14 more replies)
  0 siblings, 15 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

Since writing the comment that the scheduler is entirely passive, we've
added minimal timeslicing which adds the most primitive of active
elements (a timeout and reschedule).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: Ramalingam C <ramalingam.c@intel.com>
---
 drivers/gpu/drm/i915/i915_scheduler_types.h | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
index aad81acba9dc..d18e70550054 100644
--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
+++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
@@ -49,6 +49,15 @@ struct i915_sched_attr {
  * DAG of each request, we are able to insert it into a sorted queue when it
  * is ready, and are able to reorder its portion of the graph to accommodate
  * dynamic priority changes.
+ *
+ * Ok, there is now one active element to the "scheduler" in the backends.
+ * We let a new context run for a small amount of time before re-evaluating
+ * the run order. As we re-evaluate, we maintain the strict ordering of
+ * dependencies, but attempt to rotate the active contexts (the current context
+ * is put to the back of its priority queue, then reshuffling its dependents).
+ * This provides minimal timeslicing and prevents a userspace hog (e.g.
+ * something waiting on a user semaphore [VkEvent]) from denying service to
+ * others.
  */
 struct i915_sched_node {
 	struct list_head signalers_list; /* those before us, we depend upon */
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH 02/10] drm/i915/execlists: Leave tell-tales as to why pending[] is bad
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
@ 2019-10-10  7:14 ` Chris Wilson
  2019-10-11  8:39   ` Tvrtko Ursulin
  2019-10-10  7:14 ` [PATCH 03/10] drm/i915: Expose engine properties via sysfs Chris Wilson
                   ` (13 subsequent siblings)
  14 siblings, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

Before we BUG out with bad pending state, leave a telltale as to which
test failed.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c | 30 ++++++++++++++++++++++++-----
 drivers/gpu/drm/i915/i915_gem.h     |  8 ++++----
 2 files changed, 29 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index a0777b3ad68a..5040fbdd81af 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1138,25 +1138,45 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
 
 	trace_ports(execlists, msg, execlists->pending);
 
-	if (!execlists->pending[0])
+	if (!execlists->pending[0]) {
+		GEM_TRACE_ERR("Nothing pending for promotion!\n");
 		return false;
+	}
 
-	if (execlists->pending[execlists_num_ports(execlists)])
+	if (execlists->pending[execlists_num_ports(execlists)]) {
+		GEM_TRACE_ERR("Excess pending[%d] for promotion!\n",
+			      execlists_num_ports(execlists));
 		return false;
+	}
 
 	for (port = execlists->pending; (rq = *port); port++) {
-		if (ce == rq->hw_context)
+		if (ce == rq->hw_context) {
+			GEM_TRACE_ERR("Duplicate context in pending[%zd]\n",
+				      port - execlists->pending);
 			return false;
+		}
 
 		ce = rq->hw_context;
 		if (i915_request_completed(rq))
 			continue;
 
-		if (i915_active_is_idle(&ce->active))
+		if (i915_active_is_idle(&ce->active)) {
+			GEM_TRACE_ERR("Inactive context in pending[%zd]\n",
+				      port - execlists->pending);
+			return false;
+		}
+
+		if (!i915_vma_is_pinned(ce->state)) {
+			GEM_TRACE_ERR("Unpinned context in pending[%zd]\n",
+				      port - execlists->pending);
 			return false;
+		}
 
-		if (!i915_vma_is_pinned(ce->state))
+		if (!i915_vma_is_pinned(ce->ring->vma)) {
+			GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n",
+				      port - execlists->pending);
 			return false;
+		}
 	}
 
 	return ce;
diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
index 6795f1daa3d5..63dab3765106 100644
--- a/drivers/gpu/drm/i915/i915_gem.h
+++ b/drivers/gpu/drm/i915/i915_gem.h
@@ -37,10 +37,8 @@ struct drm_i915_private;
 #define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
 
 #define GEM_BUG_ON(condition) do { if (unlikely((condition))) {	\
-		pr_err("%s:%d GEM_BUG_ON(%s)\n", \
-		       __func__, __LINE__, __stringify(condition)); \
-		GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
-			  __func__, __LINE__, __stringify(condition)); \
+		GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
+			      __func__, __LINE__, __stringify(condition)); \
 		BUG(); \
 		} \
 	} while(0)
@@ -66,11 +64,13 @@ struct drm_i915_private;
 
 #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
 #define GEM_TRACE(...) trace_printk(__VA_ARGS__)
+#define GEM_TRACE_ERR(...) do { pr_err(__VA_ARGS__); trace_printk(__VA_ARGS__); } while (0)
 #define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
 #define GEM_TRACE_DUMP_ON(expr) \
 	do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
 #else
 #define GEM_TRACE(...) do { } while (0)
+#define GEM_TRACE_ERR(...) do { } while (0)
 #define GEM_TRACE_DUMP() do { } while (0)
 #define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
 #endif
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH 03/10] drm/i915: Expose engine properties via sysfs
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
  2019-10-10  7:14 ` [PATCH 02/10] drm/i915/execlists: Leave tell-tales as to why pending[] is bad Chris Wilson
@ 2019-10-10  7:14 ` Chris Wilson
  2019-10-11  8:44   ` Tvrtko Ursulin
  2019-10-11  9:40   ` [PATCH v2] " Chris Wilson
  2019-10-10  7:14 ` [PATCH 04/10] drm/i915/execlists: Force preemption Chris Wilson
                   ` (12 subsequent siblings)
  14 siblings, 2 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

Preliminary stub to add engines underneath /sys/class/drm/cardN/, so
that we can expose properties on each engine to the sysadmin.

To start with we have basic analogues of the i915_query ioctl so that we
can pretty print engine discovery from the shell, and flesh out the
directory structure. Later we will add writeable sysadmin properties such
as per-engine timeout controls.

An example tree of the engine properties on Braswell:
    /sys/class/drm/card0
    └── engine
        ├── bcs0
        │   ├── class
        │   ├── heartbeat_interval_ms
        │   ├── instance
        │   ├── mmio_base
        │   └── name
        ├── rcs0
        │   ├── class
        │   ├── heartbeat_interval_ms
        │   ├── instance
        │   ├── mmio_base
        │   └── name
        ├── vcs0
        │   ├── class
        │   ├── heartbeat_interval_ms
        │   ├── instance
        │   ├── mmio_base
        │   └── name
        └── vecs0
            ├── class
            ├── heartbeat_interval_ms
            ├── instance
            ├── mmio_base
            └── name

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
 drivers/gpu/drm/i915/Makefile                |   3 +-
 drivers/gpu/drm/i915/gt/intel_engine_sysfs.c | 119 +++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_engine_sysfs.h |  14 +++
 drivers/gpu/drm/i915/i915_sysfs.c            |   3 +
 4 files changed, 138 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_sysfs.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e791d9323b51..cd9a10ba2516 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -78,8 +78,9 @@ gt-y += \
 	gt/intel_breadcrumbs.o \
 	gt/intel_context.o \
 	gt/intel_engine_cs.o \
-	gt/intel_engine_pool.o \
 	gt/intel_engine_pm.o \
+	gt/intel_engine_pool.o \
+	gt/intel_engine_sysfs.o \
 	gt/intel_engine_user.o \
 	gt/intel_gt.o \
 	gt/intel_gt_irq.o \
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
new file mode 100644
index 000000000000..cbe9ec59beeb
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
@@ -0,0 +1,119 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_sysfs.h"
+
+struct kobj_engine {
+	struct kobject base;
+	struct intel_engine_cs *engine;
+};
+
+static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
+{
+	return container_of(kobj, struct kobj_engine, base)->engine;
+}
+
+static ssize_t
+name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+}
+
+static ssize_t
+class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+}
+
+static ssize_t
+inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+}
+
+static ssize_t
+mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+}
+
+static struct kobj_attribute name_attr = __ATTR(name, 0444, name_show, NULL);
+static struct kobj_attribute class_attr = __ATTR(class, 0444, class_show, NULL);
+static struct kobj_attribute inst_attr = __ATTR(instance, 0444, inst_show, NULL);
+static struct kobj_attribute mmio_attr = __ATTR(mmio_base, 0444, mmio_show, NULL);
+
+static void kobj_engine_release(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+	.release = kobj_engine_release,
+	.sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+	struct kobj_engine *ke;
+
+	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+	if (!ke)
+		return NULL;
+
+	kobject_init(&ke->base, &kobj_engine_type);
+	ke->engine = engine;
+
+	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+		kobject_put(&ke->base);
+		return NULL;
+	}
+
+	/* xfer ownership to sysfs tree */
+	return &ke->base;
+}
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+	static const struct attribute *files[] = {
+		&name_attr.attr,
+		&class_attr.attr,
+		&inst_attr.attr,
+		&mmio_attr.attr,
+		NULL
+	};
+
+	struct device *kdev = i915->drm.primary->kdev;
+	struct intel_engine_cs *engine;
+	struct kobject *dir;
+
+	dir = kobject_create_and_add("engine", &kdev->kobj);
+	if (!dir)
+		return;
+
+	for_each_uabi_engine(engine, i915) {
+		struct kobject *kobj;
+
+		kobj = kobj_engine(dir, engine);
+		if (!kobj)
+			goto err_engine;
+
+		if (sysfs_create_files(kobj, files))
+			goto err_engine;
+
+		if (0) {
+err_engine:
+			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+				engine->name);
+			break;
+		}
+	}
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.h b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.h
new file mode 100644
index 000000000000..ef44a745b70a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index bf039b8ba593..7b665f69f301 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -30,6 +30,7 @@
 #include <linux/stat.h>
 #include <linux/sysfs.h>
 
+#include "gt/intel_engine_sysfs.h"
 #include "gt/intel_rc6.h"
 
 #include "i915_drv.h"
@@ -616,6 +617,8 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 		DRM_ERROR("RPS sysfs setup failed\n");
 
 	i915_setup_error_capture(kdev);
+
+	intel_engines_add_sysfs(dev_priv);
 }
 
 void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH 04/10] drm/i915/execlists: Force preemption
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
  2019-10-10  7:14 ` [PATCH 02/10] drm/i915/execlists: Leave tell-tales as to why pending[] is bad Chris Wilson
  2019-10-10  7:14 ` [PATCH 03/10] drm/i915: Expose engine properties via sysfs Chris Wilson
@ 2019-10-10  7:14 ` Chris Wilson
  2019-10-10  7:14 ` [PATCH 05/10] drm/i915: Mark up "sentinel" requests Chris Wilson
                   ` (11 subsequent siblings)
  14 siblings, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

If the preempted context takes too long to relinquish control, e.g. it
is stuck inside a shader with arbitration disabled, evict that context
with an engine reset. This ensures that preemptions are reasonably
responsive, providing a tighter QoS for the more important context at
the cost of flagging unresponsive contexts more frequently (i.e. instead
of using an ~10s hangcheck, we now evict at ~100ms).  The challenge
lies in picking a timeout that can be reasonably serviced by HW for
typical workloads, balancing the existing clients against the needs for
responsiveness.

Note that coupled with timeslicing, this will lead to rapid GPU "hang"
detection with multiple active contexts vying for GPU time.

The preempt timeout can be adjusted per-engine using,

	/sys/class/drm/card?/engine/*/preempt_timeout_ms

v2: Couple in sysfs control of preemption timeout

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
---
 drivers/gpu/drm/i915/Kconfig.profile         | 15 ++++
 drivers/gpu/drm/i915/gt/intel_engine_cs.c    |  2 +
 drivers/gpu/drm/i915/gt/intel_engine_sysfs.c | 32 +++++++
 drivers/gpu/drm/i915/gt/intel_engine_types.h |  9 ++
 drivers/gpu/drm/i915/gt/intel_lrc.c          | 95 ++++++++++++++++++--
 drivers/gpu/drm/i915/i915_params.h           |  2 +-
 6 files changed, 146 insertions(+), 9 deletions(-)

diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 48df8889a88a..8fceea85937b 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -25,3 +25,18 @@ config DRM_I915_SPIN_REQUEST
 	  May be 0 to disable the initial spin. In practice, we estimate
 	  the cost of enabling the interrupt (if currently disabled) to be
 	  a few microseconds.
+
+config DRM_I915_PREEMPT_TIMEOUT
+	int "Preempt timeout (ms)"
+	default 100 # milliseconds
+	help
+	  How long to wait (in milliseconds) for a preemption event to occur
+	  when submitting a new context via execlists. If the current context
+	  does not hit an arbitration point and yield to HW before the timer
+	  expires, the HW will be reset to allow the more important context
+	  to execute.
+
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/preempt_timeout_ms
+
+	  May be 0 to disable the timeout.
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index c9d639c6becb..1eb51147839a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -304,6 +304,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	engine->instance = info->instance;
 	__sprint_engine_name(engine);
 
+	engine->props.preempt_timeout = CONFIG_DRM_I915_PREEMPT_TIMEOUT;
+
 	/*
 	 * To be overridden by the backend on setup. However to facilitate
 	 * cleanup on error during setup, we always provide the destroy vfunc.
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
index cbe9ec59beeb..aac26097c916 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
@@ -45,10 +45,37 @@ mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
 }
 
+static ssize_t
+preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
+		     char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.preempt_timeout);
+}
+
+static ssize_t
+preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+		      const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long timeout;
+	int err;
+
+	err = kstrtoul(buf, 0, &timeout);
+	if (err)
+		return err;
+
+	engine->props.preempt_timeout = timeout;
+	return count;
+}
+
 static struct kobj_attribute name_attr = __ATTR(name, 0444, name_show, NULL);
 static struct kobj_attribute class_attr = __ATTR(class, 0444, class_show, NULL);
 static struct kobj_attribute inst_attr = __ATTR(instance, 0444, inst_show, NULL);
 static struct kobj_attribute mmio_attr = __ATTR(mmio_base, 0444, mmio_show, NULL);
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_ms, 0600, preempt_timeout_show, preempt_timeout_store);
 
 static void kobj_engine_release(struct kobject *kobj)
 {
@@ -109,6 +136,11 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
 		if (sysfs_create_files(kobj, files))
 			goto err_engine;
 
+		if (CONFIG_DRM_I915_PREEMPT_TIMEOUT &&
+		    intel_engine_has_preemption(engine) &&
+		    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
+			goto err_engine;
+
 		if (0) {
 err_engine:
 			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 6199064f332b..6af9b0096975 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -173,6 +173,11 @@ struct intel_engine_execlists {
 	 */
 	struct timer_list timer;
 
+	/**
+	 * @preempt: reset the current context if it fails to give way
+	 */
+	struct timer_list preempt;
+
 	/**
 	 * @default_priolist: priority list for I915_PRIORITY_NORMAL
 	 */
@@ -541,6 +546,10 @@ struct intel_engine_cs {
 		 */
 		ktime_t total;
 	} stats;
+
+	struct {
+		unsigned long preempt_timeout;
+	} props;
 };
 
 static inline bool
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 5040fbdd81af..aa52e5f34dab 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1414,6 +1414,29 @@ static void record_preemption(struct intel_engine_execlists *execlists)
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
 }
 
+static void set_preempt_timeout(struct intel_engine_cs *engine)
+{
+	unsigned long timeout;
+
+	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+		return;
+
+	timeout = READ_ONCE(engine->props.preempt_timeout);
+	if (!timeout)
+		return;
+
+	timeout = msecs_to_jiffies_timeout(timeout);
+	/*
+	 * Paranoia to make sure the compiler computes the timeout before
+	 * loading 'jiffies' as jiffies is volatile and may be updated in
+	 * the background by a timer tick. All to reduce the complexity
+	 * of the addition and reduce the risk of losing a jiffie.
+	 */
+	barrier();
+
+	mod_timer(&engine->execlists.preempt, jiffies + timeout);
+}
+
 static void execlists_dequeue(struct intel_engine_cs *engine)
 {
 	struct intel_engine_execlists * const execlists = &engine->execlists;
@@ -1777,6 +1800,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 
 		memset(port + 1, 0, (last_port - port) * sizeof(*port));
 		execlists_submit_ports(engine);
+
+		set_preempt_timeout(engine);
 	} else {
 skip_submit:
 		ring_set_paused(engine, 0);
@@ -2008,6 +2033,42 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 	}
 }
 
+static noinline void preempt_reset(struct intel_engine_cs *engine)
+{
+	const unsigned int bit = I915_RESET_ENGINE + engine->id;
+	unsigned long *lock = &engine->gt->reset.flags;
+
+	if (i915_modparams.reset < 3)
+		return;
+
+	if (test_and_set_bit(bit, lock))
+		return;
+
+	/* Mark this tasklet as disabled to avoid waiting for it to complete */
+	tasklet_disable_nosync(&engine->execlists.tasklet);
+
+	GEM_TRACE("%s: preempt timeout %lu+%ums\n",
+		  engine->name,
+		  engine->props.preempt_timeout,
+		  jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
+	intel_engine_reset(engine, "preemption time out");
+
+	tasklet_enable(&engine->execlists.tasklet);
+	clear_and_wake_up_bit(bit, lock);
+}
+
+static bool preempt_timeout(struct intel_engine_cs *const engine)
+{
+	if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT)
+		return false;
+
+	if (!intel_engine_has_preemption(engine))
+		return false;
+
+	return !timer_pending(&engine->execlists.preempt) &&
+		READ_ONCE(engine->execlists.pending[0]);
+}
+
 /*
  * Check the unread Context Status Buffers and manage the submission of new
  * contexts to the ELSP accordingly.
@@ -2015,23 +2076,39 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
 static void execlists_submission_tasklet(unsigned long data)
 {
 	struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
-	unsigned long flags;
+	bool timeout = preempt_timeout(engine);
 
 	process_csb(engine);
-	if (!READ_ONCE(engine->execlists.pending[0])) {
+	if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
+		unsigned long flags;
+
 		spin_lock_irqsave(&engine->active.lock, flags);
 		__execlists_submission_tasklet(engine);
 		spin_unlock_irqrestore(&engine->active.lock, flags);
+
+		/* Recheck after serialising with direct-submission */
+		if (timeout && preempt_timeout(engine))
+			preempt_reset(engine);
 	}
 }
 
-static void execlists_submission_timer(struct timer_list *timer)
+static void __execlists_kick(struct intel_engine_execlists *execlists)
 {
-	struct intel_engine_cs *engine =
-		from_timer(engine, timer, execlists.timer);
-
 	/* Kick the tasklet for some interrupt coalescing and reset handling */
-	tasklet_hi_schedule(&engine->execlists.tasklet);
+	tasklet_hi_schedule(&execlists->tasklet);
+}
+
+#define execlists_kick(t, member) \
+	__execlists_kick(container_of(t, struct intel_engine_execlists, member))
+
+static void execlists_timeslice(struct timer_list *timer)
+{
+	execlists_kick(timer, timer);
+}
+
+static void execlists_preempt(struct timer_list *timer)
+{
+	execlists_kick(timer, preempt);
 }
 
 static void queue_request(struct intel_engine_cs *engine,
@@ -3449,6 +3526,7 @@ gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs)
 static void execlists_park(struct intel_engine_cs *engine)
 {
 	del_timer(&engine->execlists.timer);
+	del_timer(&engine->execlists.preempt);
 }
 
 void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
@@ -3566,7 +3644,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 {
 	tasklet_init(&engine->execlists.tasklet,
 		     execlists_submission_tasklet, (unsigned long)engine);
-	timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
+	timer_setup(&engine->execlists.timer, execlists_timeslice, 0);
+	timer_setup(&engine->execlists.preempt, execlists_preempt, 0);
 
 	logical_ring_default_vfuncs(engine);
 	logical_ring_default_irqs(engine);
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index d29ade3b7de6..56058978bb27 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -61,7 +61,7 @@ struct drm_printer;
 	param(char *, dmc_firmware_path, NULL) \
 	param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
 	param(int, edp_vswing, 0) \
-	param(int, reset, 2) \
+	param(int, reset, 3) \
 	param(unsigned int, inject_load_failure, 0) \
 	param(int, fastboot, -1) \
 	param(int, enable_dpcd_backlight, 0) \
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH 05/10] drm/i915: Mark up "sentinel" requests
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (2 preceding siblings ...)
  2019-10-10  7:14 ` [PATCH 04/10] drm/i915/execlists: Force preemption Chris Wilson
@ 2019-10-10  7:14 ` Chris Wilson
  2019-10-11  8:45   ` Tvrtko Ursulin
  2019-10-10  7:14 ` [PATCH 06/10] drm/i915/gt: Introduce barrier pulses along engines Chris Wilson
                   ` (10 subsequent siblings)
  14 siblings, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

Sometimes we want to emit a terminator request, a request that flushes
the pipeline and allows no request to come after it. This can be used
for a "preempt-to-idle" to ensure that upon processing the
context-switch to that request, all other active contexts have been
flushed.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c |  6 ++++++
 drivers/gpu/drm/i915/i915_request.h | 10 ++++++++--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index aa52e5f34dab..eb99f1e804f7 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1253,6 +1253,9 @@ static bool can_merge_rq(const struct i915_request *prev,
 	if (i915_request_completed(next))
 		return true;
 
+	if (i915_request_has_sentinel(prev))
+		return false;
+
 	if (!can_merge_ctx(prev->hw_context, next->hw_context))
 		return false;
 
@@ -1724,6 +1727,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				if (last->hw_context == rq->hw_context)
 					goto done;
 
+				if (i915_request_has_sentinel(last))
+					goto done;
+
 				/*
 				 * If GVT overrides us we only ever submit
 				 * port[0], leaving port[1] empty. Note that we
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 6a95242b280d..96991d64759c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -216,8 +216,9 @@ struct i915_request {
 	unsigned long emitted_jiffies;
 
 	unsigned long flags;
-#define I915_REQUEST_WAITBOOST BIT(0)
-#define I915_REQUEST_NOPREEMPT BIT(1)
+#define I915_REQUEST_WAITBOOST	BIT(0)
+#define I915_REQUEST_NOPREEMPT	BIT(1)
+#define I915_REQUEST_SENTINEL	BIT(2)
 
 	/** timeline->request entry for this request */
 	struct list_head link;
@@ -440,6 +441,11 @@ static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
 	return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
 }
 
+static inline bool i915_request_has_sentinel(const struct i915_request *rq)
+{
+	return unlikely(rq->flags & I915_REQUEST_SENTINEL);
+}
+
 static inline struct intel_timeline *
 i915_request_timeline(struct i915_request *rq)
 {
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH 06/10] drm/i915/gt: Introduce barrier pulses along engines
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (3 preceding siblings ...)
  2019-10-10  7:14 ` [PATCH 05/10] drm/i915: Mark up "sentinel" requests Chris Wilson
@ 2019-10-10  7:14 ` Chris Wilson
  2019-10-11  9:11   ` Tvrtko Ursulin
  2019-10-10  7:14 ` [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out Chris Wilson
                   ` (9 subsequent siblings)
  14 siblings, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

To flush idle barriers, and even inflight requests, we want to send a
preemptive 'pulse' along an engine. We use a no-op request along the
pinned kernel_context at high priority so that it should run or else
kick off the stuck requests. We can use this to ensure idle barriers are
immediately flushed, as part of a context cancellation mechanism, or as
part of a heartbeat mechanism to detect and reset a stuck GPU.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/Makefile                 |  1 +
 .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 56 +++++++++++++++++++
 .../gpu/drm/i915/gt/intel_engine_heartbeat.h  | 14 +++++
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |  2 +-
 drivers/gpu/drm/i915/i915_priolist_types.h    |  1 +
 5 files changed, 73 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cd9a10ba2516..cfab7c8585b3 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -78,6 +78,7 @@ gt-y += \
 	gt/intel_breadcrumbs.o \
 	gt/intel_context.o \
 	gt/intel_engine_cs.o \
+	gt/intel_engine_heartbeat.o \
 	gt/intel_engine_pm.o \
 	gt/intel_engine_pool.o \
 	gt/intel_engine_sysfs.o \
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
new file mode 100644
index 000000000000..2fc413f9d506
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -0,0 +1,56 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_request.h"
+
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
+#include "intel_engine_pm.h"
+#include "intel_engine.h"
+#include "intel_gt.h"
+
+static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
+{
+	engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
+	i915_request_add_active_barriers(rq);
+}
+
+int intel_engine_pulse(struct intel_engine_cs *engine)
+{
+	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
+	struct intel_context *ce = engine->kernel_context;
+	struct i915_request *rq;
+	int err = 0;
+
+	if (!intel_engine_has_preemption(engine))
+		return -ENODEV;
+
+	if (!intel_engine_pm_get_if_awake(engine))
+		return 0;
+
+	if (mutex_lock_interruptible(&ce->timeline->mutex))
+		goto out_rpm;
+
+	intel_context_enter(ce);
+	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+	intel_context_exit(ce);
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_unlock;
+	}
+
+	rq->flags |= I915_REQUEST_SENTINEL;
+	idle_pulse(engine, rq);
+
+	__i915_request_commit(rq);
+	__i915_request_queue(rq, &attr);
+
+out_unlock:
+	mutex_unlock(&ce->timeline->mutex);
+out_rpm:
+	intel_engine_pm_put(engine);
+	return err;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
new file mode 100644
index 000000000000..b950451b5998
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_HEARTBEAT_H
+#define INTEL_ENGINE_HEARTBEAT_H
+
+struct intel_engine_cs;
+
+int intel_engine_pulse(struct intel_engine_cs *engine);
+
+#endif /* INTEL_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 67eb6183648a..7d76611d9df1 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -111,7 +111,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
 	i915_request_add_active_barriers(rq);
 
 	/* Install ourselves as a preemption barrier */
-	rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
+	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
 	__i915_request_commit(rq);
 
 	/* Release our exclusive hold on the engine */
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index 21037a2e2038..ae8bb3cb627e 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -39,6 +39,7 @@ enum {
  * active request.
  */
 #define I915_PRIORITY_UNPREEMPTABLE INT_MAX
+#define I915_PRIORITY_BARRIER INT_MAX
 
 #define __NO_PREEMPTION (I915_PRIORITY_WAIT)
 
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (4 preceding siblings ...)
  2019-10-10  7:14 ` [PATCH 06/10] drm/i915/gt: Introduce barrier pulses along engines Chris Wilson
@ 2019-10-10  7:14 ` Chris Wilson
  2019-10-11  9:47   ` Tvrtko Ursulin
  2019-10-11 11:16   ` [PATCH v2] " Chris Wilson
  2019-10-10  7:14 ` [PATCH 08/10] drm/i915: Cancel non-persistent contexts on close Chris Wilson
                   ` (8 subsequent siblings)
  14 siblings, 2 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

On completion of a banned context, scrub the context image so that we do
not replay the active payload. The intent is that we skip banned
payloads on request submission so that the timeline advancement
continues on in the background. However, if we are returning to a
preempted request, i915_request_skip() is ineffective and instead we
need to patch up the context image so that it continues from the start
of the next request.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c    |  58 ++++++
 drivers/gpu/drm/i915/gt/selftest_lrc.c | 273 +++++++++++++++++++++++++
 2 files changed, 331 insertions(+)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index eb99f1e804f7..79c7ebea2fcc 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -234,6 +234,9 @@ static void execlists_init_reg_state(u32 *reg_state,
 				     const struct intel_engine_cs *engine,
 				     const struct intel_ring *ring,
 				     bool close);
+static void
+__execlists_update_reg_state(const struct intel_context *ce,
+			     const struct intel_engine_cs *engine);
 
 static void __context_pin_acquire(struct intel_context *ce)
 {
@@ -1022,6 +1025,58 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 		tasklet_schedule(&ve->base.execlists.tasklet);
 }
 
+static void
+mark_complete(struct i915_request *rq, struct intel_engine_cs *engine)
+{
+	const struct intel_timeline * const tl = rcu_dereference(rq->timeline);
+
+	*(u32 *)tl->hwsp_seqno = rq->fence.seqno;
+	GEM_BUG_ON(!i915_request_completed(rq));
+
+	list_for_each_entry_from_reverse(rq, &tl->requests, link) {
+		if (i915_request_signaled(rq))
+			break;
+
+		mark_eio(rq);
+	}
+
+	intel_engine_queue_breadcrumbs(engine);
+}
+
+static void cancel_active(struct i915_request *rq,
+			  struct intel_engine_cs *engine)
+{
+	struct intel_context * const ce = rq->hw_context;
+	u32 *regs = ce->lrc_reg_state;
+
+	if (i915_request_completed(rq))
+		return;
+
+	GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
+		  __func__, engine->name, rq->fence.context, rq->fence.seqno);
+	__context_pin_acquire(ce);
+
+	/* Scrub the context image to prevent replaying the previous batch */
+	memcpy(regs, /* skip restoring the vanilla PPHWSP */
+	       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+	       engine->context_size - PAGE_SIZE);
+	execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+
+	/* Ring will be advanced on retire; here we need to reset the context */
+	ce->ring->head = intel_ring_wrap(ce->ring, rq->wa_tail);
+	__execlists_update_reg_state(ce, engine);
+
+	/* We've switched away, so this should be a no-op, but intent matters */
+	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+
+	/* Let everyone know that the request may now be retired */
+	rcu_read_lock();
+	mark_complete(rq, engine);
+	rcu_read_unlock();
+
+	__context_pin_release(ce);
+}
+
 static inline void
 __execlists_schedule_out(struct i915_request *rq,
 			 struct intel_engine_cs * const engine)
@@ -1032,6 +1087,9 @@ __execlists_schedule_out(struct i915_request *rq,
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 	intel_gt_pm_put(engine->gt);
 
+	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+		cancel_active(rq, engine);
+
 	/*
 	 * If this is part of a virtual engine, its next request may
 	 * have been blocked waiting for access to the active context.
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 198cf2f754f4..1703130ef0ef 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
 #include <linux/prime_numbers.h>
 
 #include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_reset.h"
 
 #include "i915_selftest.h"
@@ -986,6 +987,277 @@ static int live_nopreempt(void *arg)
 	goto err_client_b;
 }
 
+struct live_preempt_cancel {
+	struct intel_engine_cs *engine;
+	struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq;
+	struct igt_live_test t;
+	int err;
+
+	/* Preempt cancel of ELSP0 */
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+
+	if (igt_live_test_begin(&t, arg->engine->i915,
+				__func__, arg->engine->name))
+		return -EIO;
+
+	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+	rq = spinner_create_request(&arg->a.spin,
+				    arg->a.ctx, arg->engine,
+				    MI_ARB_CHECK);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+		err = -EIO;
+		goto out;
+	}
+
+	i915_gem_context_set_banned(arg->a.ctx);
+	err = intel_engine_pulse(arg->engine);
+	if (err)
+		goto out;
+
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	if (rq->fence.error != -EIO) {
+		pr_err("Cancelled inflight0 request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+out:
+	i915_request_put(rq);
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq[2] = {};
+	struct igt_live_test t;
+	int err;
+
+	/* Preempt cancel of ELSP1 */
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+
+	if (igt_live_test_begin(&t, arg->engine->i915,
+				__func__, arg->engine->name))
+		return -EIO;
+
+	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+	rq[0] = spinner_create_request(&arg->a.spin,
+				       arg->a.ctx, arg->engine,
+				       MI_NOOP); /* no preemption */
+	if (IS_ERR(rq[0]))
+		return PTR_ERR(rq[0]);
+
+	i915_request_get(rq[0]);
+	i915_request_add(rq[0]);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+		err = -EIO;
+		goto out;
+	}
+
+	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+	rq[1] = spinner_create_request(&arg->b.spin,
+				       arg->b.ctx, arg->engine,
+				       MI_ARB_CHECK);
+	if (IS_ERR(rq[1])) {
+		err = PTR_ERR(rq[1]);
+		goto out;
+	}
+
+	i915_request_get(rq[1]);
+	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+	i915_request_add(rq[1]);
+	if (err)
+		goto out;
+
+	i915_gem_context_set_banned(arg->b.ctx);
+	err = intel_engine_pulse(arg->engine);
+	if (err)
+		goto out;
+
+	igt_spinner_end(&arg->a.spin);
+	if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	if (rq[0]->fence.error != 0) {
+		pr_err("Normal inflight0 request did not complete\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (rq[1]->fence.error != -EIO) {
+		pr_err("Cancelled inflight1 request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+out:
+	i915_request_put(rq[1]);
+	i915_request_put(rq[0]);
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq[3] = {};
+	struct igt_live_test t;
+	int err;
+
+	/* Full ELSP and one in the wings */
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+
+	if (igt_live_test_begin(&t, arg->engine->i915,
+				__func__, arg->engine->name))
+		return -EIO;
+
+	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+	rq[0] = spinner_create_request(&arg->a.spin,
+				       arg->a.ctx, arg->engine,
+				       MI_ARB_CHECK);
+	if (IS_ERR(rq[0]))
+		return PTR_ERR(rq[0]);
+
+	i915_request_get(rq[0]);
+	i915_request_add(rq[0]);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+		err = -EIO;
+		goto out;
+	}
+
+	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+	if (IS_ERR(rq[1])) {
+		err = PTR_ERR(rq[1]);
+		goto out;
+	}
+
+	i915_request_get(rq[1]);
+	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+	i915_request_add(rq[1]);
+	if (err)
+		goto out;
+
+	rq[2] = spinner_create_request(&arg->b.spin,
+				       arg->a.ctx, arg->engine,
+				       MI_ARB_CHECK);
+	if (IS_ERR(rq[2])) {
+		err = PTR_ERR(rq[2]);
+		goto out;
+	}
+
+	i915_request_get(rq[2]);
+	err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+	i915_request_add(rq[2]);
+	if (err)
+		goto out;
+
+	i915_gem_context_set_banned(arg->a.ctx);
+	err = intel_engine_pulse(arg->engine);
+	if (err)
+		goto out;
+
+	if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	if (rq[0]->fence.error != -EIO) {
+		pr_err("Cancelled inflight0 request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (rq[1]->fence.error != 0) {
+		pr_err("Normal inflight1 request did not complete\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (rq[2]->fence.error != -EIO) {
+		pr_err("Cancelled queued request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+out:
+	i915_request_put(rq[2]);
+	i915_request_put(rq[1]);
+	i915_request_put(rq[0]);
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct live_preempt_cancel data;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	/*
+	 * To cancel an inflight context, we need to first remove it from the
+	 * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+	 */
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+		return 0;
+
+	if (preempt_client_init(i915, &data.a))
+		return -ENOMEM;
+	if (preempt_client_init(i915, &data.b))
+		goto err_client_a;
+
+	for_each_engine(data.engine, i915, id) {
+		if (!intel_engine_has_preemption(data.engine))
+			continue;
+
+		err = __cancel_active0(&data);
+		if (err)
+			goto err_wedged;
+
+		err = __cancel_active1(&data);
+		if (err)
+			goto err_wedged;
+
+		err = __cancel_queued(&data);
+		if (err)
+			goto err_wedged;
+	}
+
+	err = 0;
+err_client_b:
+	preempt_client_fini(&data.b);
+err_client_a:
+	preempt_client_fini(&data.a);
+	return err;
+
+err_wedged:
+	GEM_TRACE_DUMP();
+	igt_spinner_end(&data.b.spin);
+	igt_spinner_end(&data.a.spin);
+	intel_gt_set_wedged(&i915->gt);
+	goto err_client_b;
+}
+
 static int live_suppress_self_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -2270,6 +2542,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_preempt),
 		SUBTEST(live_late_preempt),
 		SUBTEST(live_nopreempt),
+		SUBTEST(live_preempt_cancel),
 		SUBTEST(live_suppress_self_preempt),
 		SUBTEST(live_suppress_wait_preempt),
 		SUBTEST(live_chain_preempt),
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH 08/10] drm/i915: Cancel non-persistent contexts on close
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (5 preceding siblings ...)
  2019-10-10  7:14 ` [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out Chris Wilson
@ 2019-10-10  7:14 ` Chris Wilson
  2019-10-11 13:55   ` Tvrtko Ursulin
  2019-10-10  7:14 ` [PATCH 09/10] drm/i915: Replace hangcheck by heartbeats Chris Wilson
                   ` (7 subsequent siblings)
  14 siblings, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

Normally, we rely on our hangcheck to prevent persistent batches from
hogging the GPU. However, if the user disables hangcheck, this mechanism
breaks down. Despite our insistence that this is unsafe, the users are
equally insistent that they want to use endless batches and will disable
the hangcheck mechanism. We are looking at perhaps replacing hangcheck
with a softer mechanism that sends a pulse down the engine to check if
with a softer mechanism, that sends a pulse down the engine to check if
it is well. We can use the same preemptive pulse to flush an active
persistent context off the GPU upon context close, preventing resources
being lost and unkillable requests remaining on the GPU after process
termination. To avoid changing the ABI and accidentally breaking
existing userspace, we make the persistence of a context explicit and
enable it by default (matching current ABI). Userspace can opt out of
persistent mode (forcing requests to be cancelled when the context is
closed by process termination or explicitly) by a context parameter. To
facilitate existing use-cases of disabling hangcheck, if the modparam is
disabled (i915.enable_hangcheck=0), we disable persistence mode by
default.  (Note, one of the outcomes for supporting endless mode will be
the removal of hangchecking, at which point opting into persistent mode
will be mandatory, or maybe the default perhaps controlled by cgroups.)

v2: Check for hangchecking at context termination, so that we are not
left with undying contexts from a crafty user.

Testcase: igt/gem_ctx_persistence
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Michał Winiarski <michal.winiarski@intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_context.c   | 132 ++++++++++++++++++
 drivers/gpu/drm/i915/gem/i915_gem_context.h   |  15 ++
 .../gpu/drm/i915/gem/i915_gem_context_types.h |   1 +
 .../gpu/drm/i915/gem/selftests/mock_context.c |   2 +
 include/uapi/drm/i915_drm.h                   |  15 ++
 5 files changed, 165 insertions(+)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index 5d8221c7ba83..46e5b3b53288 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -70,6 +70,7 @@
 #include <drm/i915_drm.h>
 
 #include "gt/intel_lrc_reg.h"
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_engine_user.h"
 
 #include "i915_gem_context.h"
@@ -269,6 +270,78 @@ void i915_gem_context_release(struct kref *ref)
 		schedule_work(&gc->free_work);
 }
 
+static inline struct i915_gem_engines *
+__context_engines_static(struct i915_gem_context *ctx)
+{
+	return rcu_dereference_protected(ctx->engines, true);
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+	intel_engine_mask_t tmp, active, reset;
+	struct intel_gt *gt = &ctx->i915->gt;
+	struct i915_gem_engines_iter it;
+	struct intel_engine_cs *engine;
+	struct intel_context *ce;
+
+	/*
+	 * If we are already banned, it was due to a guilty request causing
+	 * a reset and the entire context being evicted from the GPU.
+	 */
+	if (i915_gem_context_is_banned(ctx))
+		return;
+
+	i915_gem_context_set_banned(ctx);
+
+	/*
+	 * Map the user's engine back to the actual engines; one virtual
+	 * engine will be mapped to multiple engines, and using ctx->engine[]
+	 * the same engine may have multiple instances in the user's map.
+	 * However, we only care about pending requests, so only include
+	 * engines on which there are incomplete requests.
+	 */
+	active = 0;
+	for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+		struct dma_fence *fence;
+
+		if (!ce->timeline)
+			continue;
+
+		fence = i915_active_fence_get(&ce->timeline->last_request);
+		if (!fence)
+			continue;
+
+		engine = to_request(fence)->engine;
+		if (HAS_EXECLISTS(gt->i915))
+			engine = intel_context_inflight(ce);
+		if (engine)
+			active |= engine->mask;
+
+		dma_fence_put(fence);
+	}
+
+	/*
+	 * Send a "high priority pulse" down the engine to cause the
+	 * current request to be momentarily preempted. (If it fails to
+	 * be preempted, it will be reset). As we have marked our context
+	 * as banned, any incomplete request, including any running, will
+	 * be skipped following the preemption.
+	 */
+	reset = 0;
+	for_each_engine_masked(engine, gt->i915, active, tmp)
+		if (intel_engine_pulse(engine))
+			reset |= engine->mask;
+
+	/*
+	 * If we are unable to send a preemptive pulse to bump
+	 * the context from the GPU, we have to resort to a full
+	 * reset. We hope the collateral damage is worth it.
+	 */
+	if (reset)
+		intel_gt_handle_error(gt, reset, 0,
+				      "context closure in %s", ctx->name);
+}
+
 static void context_close(struct i915_gem_context *ctx)
 {
 	struct i915_address_space *vm;
@@ -291,9 +364,47 @@ static void context_close(struct i915_gem_context *ctx)
 	lut_close(ctx);
 
 	mutex_unlock(&ctx->mutex);
+
+	/*
+	 * If the user has disabled hangchecking, we can not be sure that
+	 * the batches will ever complete after the context is closed,
+	 * keep the context and all resources pinned forever. So in this
+	 * case we opt to forcibly kill off all remaining requests on
+	 * context close.
+	 */
+	if (!i915_gem_context_is_persistent(ctx) ||
+	    !i915_modparams.enable_hangcheck)
+		kill_context(ctx);
+
 	i915_gem_context_put(ctx);
 }
 
+static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
+{
+	if (i915_gem_context_is_persistent(ctx) == state)
+		return 0;
+
+	if (state) {
+		/*
+		 * Only contexts that are short-lived [that will expire or be
+		 * reset] are allowed to survive past termination. We require
+		 * hangcheck to ensure that the persistent requests are healthy.
+		 */
+		if (!i915_modparams.enable_hangcheck)
+			return -EINVAL;
+
+		i915_gem_context_set_persistence(ctx);
+	} else {
+		/* To cancel a context we use "preempt-to-idle" */
+		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
+			return -ENODEV;
+
+		i915_gem_context_clear_persistence(ctx);
+	}
+
+	return 0;
+}
+
 static struct i915_gem_context *
 __create_context(struct drm_i915_private *i915)
 {
@@ -328,6 +439,7 @@ __create_context(struct drm_i915_private *i915)
 
 	i915_gem_context_set_bannable(ctx);
 	i915_gem_context_set_recoverable(ctx);
+	__context_set_persistence(ctx, true /* cgroup hook? */);
 
 	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
 		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
@@ -484,6 +596,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
 		return ctx;
 
 	i915_gem_context_clear_bannable(ctx);
+	i915_gem_context_set_persistence(ctx);
 	ctx->sched.priority = I915_USER_PRIORITY(prio);
 
 	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
@@ -1594,6 +1707,16 @@ get_engines(struct i915_gem_context *ctx,
 	return err;
 }
 
+static int
+set_persistence(struct i915_gem_context *ctx,
+		const struct drm_i915_gem_context_param *args)
+{
+	if (args->size)
+		return -EINVAL;
+
+	return __context_set_persistence(ctx, args->value);
+}
+
 static int ctx_setparam(struct drm_i915_file_private *fpriv,
 			struct i915_gem_context *ctx,
 			struct drm_i915_gem_context_param *args)
@@ -1671,6 +1794,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
 		ret = set_engines(ctx, args);
 		break;
 
+	case I915_CONTEXT_PARAM_PERSISTENCE:
+		ret = set_persistence(ctx, args);
+		break;
+
 	case I915_CONTEXT_PARAM_BAN_PERIOD:
 	default:
 		ret = -EINVAL;
@@ -2123,6 +2250,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
 		ret = get_engines(ctx, args);
 		break;
 
+	case I915_CONTEXT_PARAM_PERSISTENCE:
+		args->size = 0;
+		args->value = i915_gem_context_is_persistent(ctx);
+		break;
+
 	case I915_CONTEXT_PARAM_BAN_PERIOD:
 	default:
 		ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 9234586830d1..2eec035382a2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -76,6 +76,21 @@ static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *c
 	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
 }
 
+static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
+{
+	return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
+{
+	set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
+static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
+{
+	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
+}
+
 static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
 {
 	return test_bit(CONTEXT_BANNED, &ctx->flags);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index ab8e1367dfc8..a3ecd19f2303 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -137,6 +137,7 @@ struct i915_gem_context {
 #define UCONTEXT_NO_ERROR_CAPTURE	1
 #define UCONTEXT_BANNABLE		2
 #define UCONTEXT_RECOVERABLE		3
+#define UCONTEXT_PERSISTENCE		4
 
 	/**
 	 * @flags: small set of booleans
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 74ddd682c9cd..29b8984f0e47 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -22,6 +22,8 @@ mock_context(struct drm_i915_private *i915,
 	INIT_LIST_HEAD(&ctx->link);
 	ctx->i915 = i915;
 
+	i915_gem_context_set_persistence(ctx);
+
 	mutex_init(&ctx->engines_mutex);
 	e = default_engines(ctx);
 	if (IS_ERR(e))
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 30c542144016..eb9e704d717a 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1565,6 +1565,21 @@ struct drm_i915_gem_context_param {
  *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
  */
 #define I915_CONTEXT_PARAM_ENGINES	0xa
+
+/*
+ * I915_CONTEXT_PARAM_PERSISTENCE:
+ *
+ * Allow the context and active rendering to survive the process until
+ * completion. Persistence allows fire-and-forget clients to queue up a
+ * bunch of work, hand the output over to a display server and then quit.
+ * If the context is not marked as persistent, upon closing (either via
+ * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
+ * or process termination), the context and any outstanding requests will be
+ * cancelled (and exported fences for cancelled requests marked as -EIO).
+ *
+ * By default, new contexts allow persistence.
+ */
+#define I915_CONTEXT_PARAM_PERSISTENCE	0xb
 /* Must be kept compact -- no holes and well documented */
 
 	__u64 value;
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH 09/10] drm/i915: Replace hangcheck by heartbeats
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (6 preceding siblings ...)
  2019-10-10  7:14 ` [PATCH 08/10] drm/i915: Cancel non-persistent contexts on close Chris Wilson
@ 2019-10-10  7:14 ` Chris Wilson
  2019-10-11 14:24   ` Tvrtko Ursulin
  2019-10-10  7:14 ` [PATCH 10/10] drm/i915: Flush idle barriers when waiting Chris Wilson
                   ` (6 subsequent siblings)
  14 siblings, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

Replace sampling the engine state every so often with a periodic
heartbeat request to measure the health of an engine. This is coupled
with the forced-preemption to allow long running requests to survive so
long as they do not block other users.

The heartbeat interval can be adjusted per-engine using,

	/sys/class/drm/card?/engine/*/heartbeat_interval_ms

v2: Couple in sysfs controls

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
---
 drivers/gpu/drm/i915/Kconfig.profile          |  14 +
 drivers/gpu/drm/i915/Makefile                 |   1 -
 drivers/gpu/drm/i915/display/intel_display.c  |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.h    |   1 -
 drivers/gpu/drm/i915/gem/i915_gem_pm.c        |   2 -
 drivers/gpu/drm/i915/gt/intel_engine.h        |  32 --
 drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  11 +-
 .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 115 ++++++
 .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |   5 +
 drivers/gpu/drm/i915/gt/intel_engine_pm.c     |   5 +-
 drivers/gpu/drm/i915/gt/intel_engine_sysfs.c  |  29 ++
 drivers/gpu/drm/i915/gt/intel_engine_types.h  |  17 +-
 drivers/gpu/drm/i915/gt/intel_gt.c            |   1 -
 drivers/gpu/drm/i915/gt/intel_gt.h            |   4 -
 drivers/gpu/drm/i915/gt/intel_gt_pm.c         |   1 -
 drivers/gpu/drm/i915/gt/intel_gt_types.h      |   9 -
 drivers/gpu/drm/i915/gt/intel_hangcheck.c     | 361 ------------------
 drivers/gpu/drm/i915/gt/intel_reset.c         |   3 +-
 drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |   4 -
 drivers/gpu/drm/i915/i915_debugfs.c           |  87 -----
 drivers/gpu/drm/i915/i915_drv.c               |   3 -
 drivers/gpu/drm/i915/i915_drv.h               |   1 -
 drivers/gpu/drm/i915/i915_gpu_error.c         |  33 +-
 drivers/gpu/drm/i915/i915_gpu_error.h         |   2 -
 drivers/gpu/drm/i915/i915_priolist_types.h    |   6 +
 25 files changed, 194 insertions(+), 555 deletions(-)
 delete mode 100644 drivers/gpu/drm/i915/gt/intel_hangcheck.c

diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index 8fceea85937b..d3950aabb497 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -40,3 +40,17 @@ config DRM_I915_PREEMPT_TIMEOUT
 	  /sys/class/drm/card?/engine/*/preempt_timeout_ms
 
 	  May be 0 to disable the timeout.
+
+config DRM_I915_HEARTBEAT_INTERVAL
+	int "Interval between heartbeat pulses (ms)"
+	default 2500 # milliseconds
+	help
+	  While active the driver uses a periodic request, a heartbeat, to
+	  check the wellness of the GPU and to regularly flush state changes
+	  (idle barriers).
+
+	  This is adjustable via
+	  /sys/class/drm/card?/engine/*/heartbeat_interval_ms
+
+	  May be 0 to disable heartbeats and therefore disable automatic GPU
+	  hang detection.
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index cfab7c8585b3..59d356cc406c 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -88,7 +88,6 @@ gt-y += \
 	gt/intel_gt_pm.o \
 	gt/intel_gt_pm_irq.o \
 	gt/intel_gt_requests.o \
-	gt/intel_hangcheck.o \
 	gt/intel_lrc.o \
 	gt/intel_rc6.o \
 	gt/intel_renderstate.o \
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 1a533ccdb54f..5e5de3081f48 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -14338,7 +14338,7 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
 static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
 {
 	struct i915_sched_attr attr = {
-		.priority = I915_PRIORITY_DISPLAY,
+		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
 	};
 
 	i915_gem_object_wait_priority(obj, 0, &attr);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index c5e14c9c805c..5bd51e397371 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -460,6 +460,5 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
 int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
 				  unsigned int flags,
 				  const struct i915_sched_attr *attr);
-#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
 
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index 7987b54fb1f5..0e97520cb1bb 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -100,8 +100,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
 	intel_gt_suspend(&i915->gt);
 	intel_uc_suspend(&i915->gt.uc);
 
-	cancel_delayed_work_sync(&i915->gt.hangcheck.work);
-
 	i915_gem_drain_freed_objects(i915);
 }
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 93ea367fe624..8ad57eace351 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -89,38 +89,6 @@ struct drm_printer;
 /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
  * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
  */
-enum intel_engine_hangcheck_action {
-	ENGINE_IDLE = 0,
-	ENGINE_WAIT,
-	ENGINE_ACTIVE_SEQNO,
-	ENGINE_ACTIVE_HEAD,
-	ENGINE_ACTIVE_SUBUNITS,
-	ENGINE_WAIT_KICK,
-	ENGINE_DEAD,
-};
-
-static inline const char *
-hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
-{
-	switch (a) {
-	case ENGINE_IDLE:
-		return "idle";
-	case ENGINE_WAIT:
-		return "wait";
-	case ENGINE_ACTIVE_SEQNO:
-		return "active seqno";
-	case ENGINE_ACTIVE_HEAD:
-		return "active head";
-	case ENGINE_ACTIVE_SUBUNITS:
-		return "active subunits";
-	case ENGINE_WAIT_KICK:
-		return "wait kick";
-	case ENGINE_DEAD:
-		return "dead";
-	}
-
-	return "unknown";
-}
 
 static inline unsigned int
 execlists_num_ports(const struct intel_engine_execlists * const execlists)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 1eb51147839a..d829ad340ca0 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -305,6 +305,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
 	__sprint_engine_name(engine);
 
 	engine->props.preempt_timeout = CONFIG_DRM_I915_PREEMPT_TIMEOUT;
+	engine->props.heartbeat_interval = CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
 
 	/*
 	 * To be overridden by the backend on setup. However to facilitate
@@ -599,7 +600,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
 	intel_engine_init_active(engine, ENGINE_PHYSICAL);
 	intel_engine_init_breadcrumbs(engine);
 	intel_engine_init_execlists(engine);
-	intel_engine_init_hangcheck(engine);
 	intel_engine_init_cmd_parser(engine);
 	intel_engine_init__pm(engine);
 
@@ -1432,8 +1432,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
 		drm_printf(m, "*** WEDGED ***\n");
 
 	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
-	drm_printf(m, "\tHangcheck: %d ms ago\n",
-		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
+
+	rcu_read_lock();
+	rq = READ_ONCE(engine->heartbeat.systole);
+	if (rq)
+		drm_printf(m, "\tHeartbeat: %d ms ago\n",
+			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
+	rcu_read_unlock();
 	drm_printf(m, "\tReset count: %d (global %d)\n",
 		   i915_reset_engine_count(error, engine),
 		   i915_reset_count(error));
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 2fc413f9d506..f68acf9118f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -11,6 +11,27 @@
 #include "intel_engine_pm.h"
 #include "intel_engine.h"
 #include "intel_gt.h"
+#include "intel_reset.h"
+
+/*
+ * While the engine is active, we send a periodic pulse along the engine
+ * to check on its health and to flush any idle-barriers. If that request
+ * is stuck, and we fail to preempt it, we declare the engine hung and
+ * issue a reset -- in the hope that it restores progress.
+ */
+
+static void next_heartbeat(struct intel_engine_cs *engine)
+{
+	long delay;
+
+	delay = READ_ONCE(engine->props.heartbeat_interval);
+	if (!delay)
+		return;
+
+	delay = msecs_to_jiffies_timeout(delay);
+	schedule_delayed_work(&engine->heartbeat.work,
+			      round_jiffies_up_relative(delay));
+}
 
 static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
 {
@@ -18,6 +39,100 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
 	i915_request_add_active_barriers(rq);
 }
 
+static void heartbeat(struct work_struct *wrk)
+{
+	struct i915_sched_attr attr = {
+		.priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
+	};
+	struct intel_engine_cs *engine =
+		container_of(wrk, typeof(*engine), heartbeat.work.work);
+	struct intel_context *ce = engine->kernel_context;
+	struct i915_request *rq;
+
+	if (!intel_engine_pm_get_if_awake(engine))
+		return;
+
+	rq = engine->heartbeat.systole;
+	if (rq && i915_request_completed(rq)) {
+		i915_request_put(rq);
+		engine->heartbeat.systole = NULL;
+	}
+
+	if (intel_gt_is_wedged(engine->gt))
+		goto out;
+
+	if (engine->heartbeat.systole) {
+		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
+			struct drm_printer p = drm_debug_printer(__func__);
+
+			intel_engine_dump(engine, &p,
+					  "%s heartbeat not ticking\n",
+					  engine->name);
+		}
+
+		if (engine->schedule &&
+		    rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
+			attr.priority =
+				I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
+			if (rq->sched.attr.priority >= attr.priority)
+				attr.priority = I915_PRIORITY_BARRIER;
+
+			local_bh_disable();
+			engine->schedule(rq, &attr);
+			local_bh_enable();
+		} else {
+			intel_gt_handle_error(engine->gt, engine->mask,
+					      I915_ERROR_CAPTURE,
+					      "stopped heartbeat on %s",
+					      engine->name);
+		}
+		goto out;
+	}
+
+	if (engine->wakeref_serial == engine->serial)
+		goto out;
+
+	mutex_lock(&ce->timeline->mutex);
+
+	intel_context_enter(ce);
+	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
+	intel_context_exit(ce);
+	if (IS_ERR(rq))
+		goto unlock;
+
+	idle_pulse(engine, rq);
+	if (i915_modparams.enable_hangcheck)
+		engine->heartbeat.systole = i915_request_get(rq);
+
+	__i915_request_commit(rq);
+	__i915_request_queue(rq, &attr);
+
+unlock:
+	mutex_unlock(&ce->timeline->mutex);
+out:
+	next_heartbeat(engine);
+	intel_engine_pm_put(engine);
+}
+
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
+{
+	if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
+		return;
+
+	next_heartbeat(engine);
+}
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
+{
+	cancel_delayed_work(&engine->heartbeat.work);
+	i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
+}
+
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
+{
+	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
+}
+
 int intel_engine_pulse(struct intel_engine_cs *engine)
 {
 	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
index b950451b5998..39391004554d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -9,6 +9,11 @@
 
 struct intel_engine_cs;
 
+void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
+
+void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
+void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
+
 int intel_engine_pulse(struct intel_engine_cs *engine);
 
 #endif /* INTEL_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index 7d76611d9df1..6fbfa2162e54 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 
 #include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
 #include "intel_engine_pm.h"
 #include "intel_engine_pool.h"
 #include "intel_gt.h"
@@ -34,7 +35,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
 	if (engine->unpark)
 		engine->unpark(engine);
 
-	intel_engine_init_hangcheck(engine);
+	intel_engine_unpark_heartbeat(engine);
 	return 0;
 }
 
@@ -158,6 +159,7 @@ static int __engine_park(struct intel_wakeref *wf)
 
 	call_idle_barriers(engine); /* cleanup after wedging */
 
+	intel_engine_park_heartbeat(engine);
 	intel_engine_disarm_breadcrumbs(engine);
 	intel_engine_pool_park(&engine->pool);
 
@@ -188,6 +190,7 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
 	struct intel_runtime_pm *rpm = engine->uncore->rpm;
 
 	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
+	intel_engine_init_heartbeat(engine);
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
index aac26097c916..8532f9cdc885 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
@@ -70,12 +70,38 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
 	return count;
 }
 
+static ssize_t
+heartbeat_interval_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval);
+}
+
+static ssize_t
+heartbeat_interval_store(struct kobject *kobj, struct kobj_attribute *attr,
+			 const char *buf, size_t count)
+{
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	unsigned long delay;
+	int err;
+
+	err = kstrtoul(buf, 0, &delay);
+	if (err)
+		return err;
+
+	engine->props.heartbeat_interval = delay;
+	return count;
+}
+
 static struct kobj_attribute name_attr = __ATTR(name, 0444, name_show, NULL);
 static struct kobj_attribute class_attr = __ATTR(class, 0444, class_show, NULL);
 static struct kobj_attribute inst_attr = __ATTR(instance, 0444, inst_show, NULL);
 static struct kobj_attribute mmio_attr = __ATTR(mmio_base, 0444, mmio_show, NULL);
 static struct kobj_attribute preempt_timeout_attr =
 __ATTR(preempt_timeout_ms, 0600, preempt_timeout_show, preempt_timeout_store);
+static struct kobj_attribute heartbeat_interval_attr =
+__ATTR(heartbeat_interval_ms, 0600, heartbeat_interval_show, heartbeat_interval_store);
 
 static void kobj_engine_release(struct kobject *kobj)
 {
@@ -115,6 +141,9 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
 		&class_attr.attr,
 		&inst_attr.attr,
 		&mmio_attr.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+		&heartbeat_interval_attr.attr,
+#endif
 		NULL
 	};
 
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 6af9b0096975..ad3be2fbd71a 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -15,6 +15,7 @@
 #include <linux/rbtree.h>
 #include <linux/timer.h>
 #include <linux/types.h>
+#include <linux/workqueue.h>
 
 #include "i915_gem.h"
 #include "i915_pmu.h"
@@ -76,14 +77,6 @@ struct intel_instdone {
 	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
 };
 
-struct intel_engine_hangcheck {
-	u64 acthd;
-	u32 last_ring;
-	u32 last_head;
-	unsigned long action_timestamp;
-	struct intel_instdone instdone;
-};
-
 struct intel_ring {
 	struct kref ref;
 	struct i915_vma *vma;
@@ -330,6 +323,11 @@ struct intel_engine_cs {
 
 	intel_engine_mask_t saturated; /* submitting semaphores too late? */
 
+	struct {
+		struct delayed_work work;
+		struct i915_request *systole;
+	} heartbeat;
+
 	unsigned long serial;
 
 	unsigned long wakeref_serial;
@@ -480,8 +478,6 @@ struct intel_engine_cs {
 	/* status_notifier: list of callbacks for context-switch changes */
 	struct atomic_notifier_head context_status_notifier;
 
-	struct intel_engine_hangcheck hangcheck;
-
 #define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
 #define I915_ENGINE_SUPPORTS_STATS   BIT(1)
 #define I915_ENGINE_HAS_PREEMPTION   BIT(2)
@@ -549,6 +545,7 @@ struct intel_engine_cs {
 
 	struct {
 		unsigned long preempt_timeout;
+		unsigned long heartbeat_interval;
 	} props;
 };
 
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index b3619a2a5d0e..f3e1925987e1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -22,7 +22,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
 	INIT_LIST_HEAD(&gt->closed_vma);
 	spin_lock_init(&gt->closed_lock);
 
-	intel_gt_init_hangcheck(gt);
 	intel_gt_init_reset(gt);
 	intel_gt_init_requests(gt);
 	intel_gt_pm_init_early(gt);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index e6ab0bff0efb..5b6effed3713 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -46,8 +46,6 @@ void intel_gt_clear_error_registers(struct intel_gt *gt,
 void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
 void intel_gt_chipset_flush(struct intel_gt *gt);
 
-void intel_gt_init_hangcheck(struct intel_gt *gt);
-
 static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
 					  enum intel_gt_scratch_field field)
 {
@@ -59,6 +57,4 @@ static inline bool intel_gt_is_wedged(struct intel_gt *gt)
 	return __intel_reset_failed(&gt->reset);
 }
 
-void intel_gt_queue_hangcheck(struct intel_gt *gt);
-
 #endif /* __INTEL_GT_H__ */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index 87e34e0b6427..85af0d16f869 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -52,7 +52,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
 
 	i915_pmu_gt_unparked(i915);
 
-	intel_gt_queue_hangcheck(gt);
 	intel_gt_unpark_requests(gt);
 
 	pm_notify(gt, INTEL_GT_UNPARK);
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
index 802f516a3430..59f8ee0aa151 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
@@ -26,14 +26,6 @@ struct i915_ggtt;
 struct intel_engine_cs;
 struct intel_uncore;
 
-struct intel_hangcheck {
-	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
-#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
-
-	struct delayed_work work;
-};
-
 struct intel_gt {
 	struct drm_i915_private *i915;
 	struct intel_uncore *uncore;
@@ -67,7 +59,6 @@ struct intel_gt {
 	struct list_head closed_vma;
 	spinlock_t closed_lock; /* guards the list of closed_vma */
 
-	struct intel_hangcheck hangcheck;
 	struct intel_reset reset;
 
 	/**
diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
deleted file mode 100644
index c14dbeb3ccc3..000000000000
--- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
+++ /dev/null
@@ -1,361 +0,0 @@
-/*
- * Copyright © 2016 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- */
-
-#include "i915_drv.h"
-#include "intel_engine.h"
-#include "intel_gt.h"
-#include "intel_reset.h"
-
-struct hangcheck {
-	u64 acthd;
-	u32 ring;
-	u32 head;
-	enum intel_engine_hangcheck_action action;
-	unsigned long action_timestamp;
-	int deadlock;
-	struct intel_instdone instdone;
-	bool wedged:1;
-	bool stalled:1;
-};
-
-static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
-{
-	u32 tmp = current_instdone | *old_instdone;
-	bool unchanged;
-
-	unchanged = tmp == *old_instdone;
-	*old_instdone |= tmp;
-
-	return unchanged;
-}
-
-static bool subunits_stuck(struct intel_engine_cs *engine)
-{
-	struct drm_i915_private *dev_priv = engine->i915;
-	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-	struct intel_instdone instdone;
-	struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
-	bool stuck;
-	int slice;
-	int subslice;
-
-	intel_engine_get_instdone(engine, &instdone);
-
-	/* There might be unstable subunit states even when
-	 * actual head is not moving. Filter out the unstable ones by
-	 * accumulating the undone -> done transitions and only
-	 * consider those as progress.
-	 */
-	stuck = instdone_unchanged(instdone.instdone,
-				   &accu_instdone->instdone);
-	stuck &= instdone_unchanged(instdone.slice_common,
-				    &accu_instdone->slice_common);
-
-	for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) {
-		stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
-					    &accu_instdone->sampler[slice][subslice]);
-		stuck &= instdone_unchanged(instdone.row[slice][subslice],
-					    &accu_instdone->row[slice][subslice]);
-	}
-
-	return stuck;
-}
-
-static enum intel_engine_hangcheck_action
-head_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
-	if (acthd != engine->hangcheck.acthd) {
-
-		/* Clear subunit states on head movement */
-		memset(&engine->hangcheck.instdone, 0,
-		       sizeof(engine->hangcheck.instdone));
-
-		return ENGINE_ACTIVE_HEAD;
-	}
-
-	if (!subunits_stuck(engine))
-		return ENGINE_ACTIVE_SUBUNITS;
-
-	return ENGINE_DEAD;
-}
-
-static enum intel_engine_hangcheck_action
-engine_stuck(struct intel_engine_cs *engine, u64 acthd)
-{
-	enum intel_engine_hangcheck_action ha;
-	u32 tmp;
-
-	ha = head_stuck(engine, acthd);
-	if (ha != ENGINE_DEAD)
-		return ha;
-
-	if (IS_GEN(engine->i915, 2))
-		return ENGINE_DEAD;
-
-	/* Is the chip hanging on a WAIT_FOR_EVENT?
-	 * If so we can simply poke the RB_WAIT bit
-	 * and break the hang. This should work on
-	 * all but the second generation chipsets.
-	 */
-	tmp = ENGINE_READ(engine, RING_CTL);
-	if (tmp & RING_WAIT) {
-		intel_gt_handle_error(engine->gt, engine->mask, 0,
-				      "stuck wait on %s", engine->name);
-		ENGINE_WRITE(engine, RING_CTL, tmp);
-		return ENGINE_WAIT_KICK;
-	}
-
-	return ENGINE_DEAD;
-}
-
-static void hangcheck_load_sample(struct intel_engine_cs *engine,
-				  struct hangcheck *hc)
-{
-	hc->acthd = intel_engine_get_active_head(engine);
-	hc->ring = ENGINE_READ(engine, RING_START);
-	hc->head = ENGINE_READ(engine, RING_HEAD);
-}
-
-static void hangcheck_store_sample(struct intel_engine_cs *engine,
-				   const struct hangcheck *hc)
-{
-	engine->hangcheck.acthd = hc->acthd;
-	engine->hangcheck.last_ring = hc->ring;
-	engine->hangcheck.last_head = hc->head;
-}
-
-static enum intel_engine_hangcheck_action
-hangcheck_get_action(struct intel_engine_cs *engine,
-		     const struct hangcheck *hc)
-{
-	if (intel_engine_is_idle(engine))
-		return ENGINE_IDLE;
-
-	if (engine->hangcheck.last_ring != hc->ring)
-		return ENGINE_ACTIVE_SEQNO;
-
-	if (engine->hangcheck.last_head != hc->head)
-		return ENGINE_ACTIVE_SEQNO;
-
-	return engine_stuck(engine, hc->acthd);
-}
-
-static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
-					struct hangcheck *hc)
-{
-	unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
-
-	hc->action = hangcheck_get_action(engine, hc);
-
-	/* We always increment the progress
-	 * if the engine is busy and still processing
-	 * the same request, so that no single request
-	 * can run indefinitely (such as a chain of
-	 * batches). The only time we do not increment
-	 * the hangcheck score on this ring, if this
-	 * engine is in a legitimate wait for another
-	 * engine. In that case the waiting engine is a
-	 * victim and we want to be sure we catch the
-	 * right culprit. Then every time we do kick
-	 * the ring, make it as a progress as the seqno
-	 * advancement might ensure and if not, it
-	 * will catch the hanging engine.
-	 */
-
-	switch (hc->action) {
-	case ENGINE_IDLE:
-	case ENGINE_ACTIVE_SEQNO:
-		/* Clear head and subunit states on seqno movement */
-		hc->acthd = 0;
-
-		memset(&engine->hangcheck.instdone, 0,
-		       sizeof(engine->hangcheck.instdone));
-
-		/* Intentional fall through */
-	case ENGINE_WAIT_KICK:
-	case ENGINE_WAIT:
-		engine->hangcheck.action_timestamp = jiffies;
-		break;
-
-	case ENGINE_ACTIVE_HEAD:
-	case ENGINE_ACTIVE_SUBUNITS:
-		/*
-		 * Seqno stuck with still active engine gets leeway,
-		 * in hopes that it is just a long shader.
-		 */
-		timeout = I915_SEQNO_DEAD_TIMEOUT;
-		break;
-
-	case ENGINE_DEAD:
-		break;
-
-	default:
-		MISSING_CASE(hc->action);
-	}
-
-	hc->stalled = time_after(jiffies,
-				 engine->hangcheck.action_timestamp + timeout);
-	hc->wedged = time_after(jiffies,
-				 engine->hangcheck.action_timestamp +
-				 I915_ENGINE_WEDGED_TIMEOUT);
-}
-
-static void hangcheck_declare_hang(struct intel_gt *gt,
-				   intel_engine_mask_t hung,
-				   intel_engine_mask_t stuck)
-{
-	struct intel_engine_cs *engine;
-	intel_engine_mask_t tmp;
-	char msg[80];
-	int len;
-
-	/* If some rings hung but others were still busy, only
-	 * blame the hanging rings in the synopsis.
-	 */
-	if (stuck != hung)
-		hung &= ~stuck;
-	len = scnprintf(msg, sizeof(msg),
-			"%s on ", stuck == hung ? "no progress" : "hang");
-	for_each_engine_masked(engine, gt->i915, hung, tmp)
-		len += scnprintf(msg + len, sizeof(msg) - len,
-				 "%s, ", engine->name);
-	msg[len-2] = '\0';
-
-	return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg);
-}
-
-/*
- * This is called when the chip hasn't reported back with completed
- * batchbuffers in a long time. We keep track per ring seqno progress and
- * if there are no progress, hangcheck score for that ring is increased.
- * Further, acthd is inspected to see if the ring is stuck. On stuck case
- * we kick the ring. If we see no progress on three subsequent calls
- * we assume chip is wedged and try to fix it by resetting the chip.
- */
-static void hangcheck_elapsed(struct work_struct *work)
-{
-	struct intel_gt *gt =
-		container_of(work, typeof(*gt), hangcheck.work.work);
-	intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-	intel_wakeref_t wakeref;
-
-	if (!i915_modparams.enable_hangcheck)
-		return;
-
-	if (!READ_ONCE(gt->awake))
-		return;
-
-	if (intel_gt_is_wedged(gt))
-		return;
-
-	wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
-	if (!wakeref)
-		return;
-
-	/* As enabling the GPU requires fairly extensive mmio access,
-	 * periodically arm the mmio checker to see if we are triggering
-	 * any invalid access.
-	 */
-	intel_uncore_arm_unclaimed_mmio_detection(gt->uncore);
-
-	for_each_engine(engine, gt->i915, id) {
-		struct hangcheck hc;
-
-		intel_engine_breadcrumbs_irq(engine);
-
-		hangcheck_load_sample(engine, &hc);
-		hangcheck_accumulate_sample(engine, &hc);
-		hangcheck_store_sample(engine, &hc);
-
-		if (hc.stalled) {
-			hung |= engine->mask;
-			if (hc.action != ENGINE_DEAD)
-				stuck |= engine->mask;
-		}
-
-		if (hc.wedged)
-			wedged |= engine->mask;
-	}
-
-	if (GEM_SHOW_DEBUG() && (hung | stuck)) {
-		struct drm_printer p = drm_debug_printer("hangcheck");
-
-		for_each_engine(engine, gt->i915, id) {
-			if (intel_engine_is_idle(engine))
-				continue;
-
-			intel_engine_dump(engine, &p, "%s\n", engine->name);
-		}
-	}
-
-	if (wedged) {
-		dev_err(gt->i915->drm.dev,
-			"GPU recovery timed out,"
-			" cancelling all in-flight rendering.\n");
-		GEM_TRACE_DUMP();
-		intel_gt_set_wedged(gt);
-	}
-
-	if (hung)
-		hangcheck_declare_hang(gt, hung, stuck);
-
-	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
-
-	/* Reset timer in case GPU hangs without another request being added */
-	intel_gt_queue_hangcheck(gt);
-}
-
-void intel_gt_queue_hangcheck(struct intel_gt *gt)
-{
-	unsigned long delay;
-
-	if (unlikely(!i915_modparams.enable_hangcheck))
-		return;
-
-	/*
-	 * Don't continually defer the hangcheck so that it is always run at
-	 * least once after work has been scheduled on any ring. Otherwise,
-	 * we will ignore a hung ring if a second ring is kept busy.
-	 */
-
-	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
-	queue_delayed_work(system_long_wq, &gt->hangcheck.work, delay);
-}
-
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
-{
-	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
-	engine->hangcheck.action_timestamp = jiffies;
-}
-
-void intel_gt_init_hangcheck(struct intel_gt *gt)
-{
-	INIT_DELAYED_WORK(&gt->hangcheck.work, hangcheck_elapsed);
-}
-
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-#include "selftest_hangcheck.c"
-#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 34791fc79dea..9ed2cf91a46d 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -1018,8 +1018,6 @@ void intel_gt_reset(struct intel_gt *gt,
 	if (ret)
 		goto taint;
 
-	intel_gt_queue_hangcheck(gt);
-
 finish:
 	reset_finish(gt, awake);
 unlock:
@@ -1347,4 +1345,5 @@ void __intel_fini_wedge(struct intel_wedge_me *w)
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftest_reset.c"
+#include "selftest_hangcheck.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 569a4105d49e..570546eda5e8 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -1686,7 +1686,6 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 	};
 	struct intel_gt *gt = &i915->gt;
 	intel_wakeref_t wakeref;
-	bool saved_hangcheck;
 	int err;
 
 	if (!intel_has_gpu_reset(gt))
@@ -1696,12 +1695,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
 		return -EIO; /* we're long past hope of a successful reset */
 
 	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
-	saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
-	drain_delayed_work(&gt->hangcheck.work); /* flush param */
 
 	err = intel_gt_live_subtests(tests, gt);
 
-	i915_modparams.enable_hangcheck = saved_hangcheck;
 	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
 
 	return err;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 277f31297f29..55852e045c3a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1011,92 +1011,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
 	return ret;
 }
 
-static void i915_instdone_info(struct drm_i915_private *dev_priv,
-			       struct seq_file *m,
-			       struct intel_instdone *instdone)
-{
-	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
-	int slice;
-	int subslice;
-
-	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
-		   instdone->instdone);
-
-	if (INTEL_GEN(dev_priv) <= 3)
-		return;
-
-	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
-		   instdone->slice_common);
-
-	if (INTEL_GEN(dev_priv) <= 6)
-		return;
-
-	for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice)
-		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
-			   slice, subslice, instdone->sampler[slice][subslice]);
-
-	for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice)
-		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
-			   slice, subslice, instdone->row[slice][subslice]);
-}
-
-static int i915_hangcheck_info(struct seq_file *m, void *unused)
-{
-	struct drm_i915_private *i915 = node_to_i915(m->private);
-	struct intel_gt *gt = &i915->gt;
-	struct intel_engine_cs *engine;
-	intel_wakeref_t wakeref;
-	enum intel_engine_id id;
-
-	seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
-	if (test_bit(I915_WEDGED, &gt->reset.flags))
-		seq_puts(m, "\tWedged\n");
-	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
-		seq_puts(m, "\tDevice (global) reset in progress\n");
-
-	if (!i915_modparams.enable_hangcheck) {
-		seq_puts(m, "Hangcheck disabled\n");
-		return 0;
-	}
-
-	if (timer_pending(&gt->hangcheck.work.timer))
-		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
-			   jiffies_to_msecs(gt->hangcheck.work.timer.expires -
-					    jiffies));
-	else if (delayed_work_pending(&gt->hangcheck.work))
-		seq_puts(m, "Hangcheck active, work pending\n");
-	else
-		seq_puts(m, "Hangcheck inactive\n");
-
-	seq_printf(m, "GT active? %s\n", yesno(gt->awake));
-
-	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
-		for_each_engine(engine, i915, id) {
-			struct intel_instdone instdone;
-
-			seq_printf(m, "%s: %d ms ago\n",
-				   engine->name,
-				   jiffies_to_msecs(jiffies -
-						    engine->hangcheck.action_timestamp));
-
-			seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
-				   (long long)engine->hangcheck.acthd,
-				   intel_engine_get_active_head(engine));
-
-			intel_engine_get_instdone(engine, &instdone);
-
-			seq_puts(m, "\tinstdone read =\n");
-			i915_instdone_info(i915, m, &instdone);
-
-			seq_puts(m, "\tinstdone accu =\n");
-			i915_instdone_info(i915, m,
-					   &engine->hangcheck.instdone);
-		}
-	}
-
-	return 0;
-}
-
 static int ironlake_drpc_info(struct seq_file *m)
 {
 	struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -4303,7 +4217,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
 	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
 	{"i915_huc_load_status", i915_huc_load_status_info, 0},
 	{"i915_frequency_info", i915_frequency_info, 0},
-	{"i915_hangcheck_info", i915_hangcheck_info, 0},
 	{"i915_drpc_info", i915_drpc_info, 0},
 	{"i915_ring_freq_table", i915_ring_freq_table, 0},
 	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f02a34722217..1dae43ed4c48 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -1546,10 +1546,7 @@ void i915_driver_remove(struct drm_i915_private *i915)
 
 	i915_driver_modeset_remove(i915);
 
-	/* Free error state after interrupts are fully disabled. */
-	cancel_delayed_work_sync(&i915->gt.hangcheck.work);
 	i915_reset_error_state(i915);
-
 	i915_gem_driver_remove(i915);
 
 	intel_power_domains_driver_remove(i915);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d284b04c492b..58340c99af02 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1845,7 +1845,6 @@ void i915_driver_remove(struct drm_i915_private *i915);
 int i915_resume_switcheroo(struct drm_i915_private *i915);
 int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
 
-void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 
 static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 5cf4eed5add8..47239df653f2 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -534,10 +534,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
 	}
 	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
 	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
-	err_printf(m, "  hangcheck timestamp: %dms (%lu%s)\n",
-		   jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
-		   ee->hangcheck_timestamp,
-		   ee->hangcheck_timestamp == epoch ? "; epoch" : "");
 	err_printf(m, "  engine reset count: %u\n", ee->reset_count);
 
 	for (n = 0; n < ee->num_ports; n++) {
@@ -679,11 +675,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 	ts = ktime_to_timespec64(error->uptime);
 	err_printf(m, "Uptime: %lld s %ld us\n",
 		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
-	err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
-	err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
-		   error->capture,
-		   jiffies_to_msecs(jiffies - error->capture),
-		   jiffies_to_msecs(error->capture - error->epoch));
+	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
+		   error->capture, jiffies_to_msecs(jiffies - error->capture));
 
 	for (ee = error->engine; ee; ee = ee->next)
 		err_printf(m, "Active process (on ring %s): %s [%d]\n",
@@ -742,7 +735,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache);
 
 	for (ee = error->engine; ee; ee = ee->next)
-		error_print_engine(m, ee, error->epoch);
+		error_print_engine(m, ee, error->capture);
 
 	for (ee = error->engine; ee; ee = ee->next) {
 		const struct drm_i915_error_object *obj;
@@ -770,7 +763,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
 			for (j = 0; j < ee->num_requests; j++)
 				error_print_request(m, " ",
 						    &ee->requests[j],
-						    error->epoch);
+						    error->capture);
 		}
 
 		print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
@@ -1144,8 +1137,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 	}
 
 	ee->idle = intel_engine_is_idle(engine);
-	if (!ee->idle)
-		ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
 	ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
 						  engine);
 
@@ -1657,20 +1648,6 @@ static void capture_params(struct i915_gpu_state *error)
 	i915_params_copy(&error->params, &i915_modparams);
 }
 
-static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
-{
-	const struct drm_i915_error_engine *ee;
-	unsigned long epoch = error->capture;
-
-	for (ee = error->engine; ee; ee = ee->next) {
-		if (ee->hangcheck_timestamp &&
-		    time_before(ee->hangcheck_timestamp, epoch))
-			epoch = ee->hangcheck_timestamp;
-	}
-
-	return epoch;
-}
-
 static void capture_finish(struct i915_gpu_state *error)
 {
 	struct i915_ggtt *ggtt = &error->i915->ggtt;
@@ -1722,8 +1699,6 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
 	error->overlay = intel_overlay_capture_error_state(i915);
 	error->display = intel_display_capture_error_state(i915);
 
-	error->epoch = capture_find_epoch(error);
-
 	capture_finish(error);
 	compress_fini(&compress);
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 7f1cd0b1fef7..4dc36d6ee3a2 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -34,7 +34,6 @@ struct i915_gpu_state {
 	ktime_t boottime;
 	ktime_t uptime;
 	unsigned long capture;
-	unsigned long epoch;
 
 	struct drm_i915_private *i915;
 
@@ -86,7 +85,6 @@ struct i915_gpu_state {
 
 		/* Software tracked state */
 		bool idle;
-		unsigned long hangcheck_timestamp;
 		int num_requests;
 		u32 reset_count;
 
diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
index ae8bb3cb627e..732aad148881 100644
--- a/drivers/gpu/drm/i915/i915_priolist_types.h
+++ b/drivers/gpu/drm/i915/i915_priolist_types.h
@@ -16,6 +16,12 @@ enum {
 	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
 	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
 	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
+
+	/* A preemptive pulse used to monitor the health of each engine */
+	I915_PRIORITY_HEARTBEAT,
+
+	/* Interactive workload, scheduled for immediate pageflipping */
+	I915_PRIORITY_DISPLAY,
 };
 
 #define I915_USER_PRIORITY_SHIFT 2
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* [PATCH 10/10] drm/i915: Flush idle barriers when waiting
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (7 preceding siblings ...)
  2019-10-10  7:14 ` [PATCH 09/10] drm/i915: Replace hangcheck by heartbeats Chris Wilson
@ 2019-10-10  7:14 ` Chris Wilson
  2019-10-11 14:56   ` Tvrtko Ursulin
  2019-10-10  8:18 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Patchwork
                   ` (5 subsequent siblings)
  14 siblings, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-10  7:14 UTC (permalink / raw)
  To: intel-gfx

If we do find ourselves with an idle barrier inside our i915_active
tracker while waiting, attempt to flush it by emitting a pulse using the
kernel context.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 14 +++++++++++++
 .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  1 +
 drivers/gpu/drm/i915/i915_active.c            | 21 +++++++++++++++++--
 3 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index f68acf9118f3..e27bb7f028bd 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -169,3 +169,17 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
 	intel_engine_pm_put(engine);
 	return err;
 }
+
+int intel_engine_flush_barriers(struct intel_engine_cs *engine)
+{
+	struct i915_request *rq;
+
+	rq = i915_request_create(engine->kernel_context);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	idle_pulse(engine, rq);
+	i915_request_add(rq);
+
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
index 39391004554d..0c1ad0fc091d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
@@ -15,5 +15,6 @@ void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
 void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
 
 int intel_engine_pulse(struct intel_engine_cs *engine);
+int intel_engine_flush_barriers(struct intel_engine_cs *engine);
 
 #endif /* INTEL_ENGINE_HEARTBEAT_H */
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index aa37c07004b9..98d5fe1c7e19 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -6,6 +6,7 @@
 
 #include <linux/debugobjects.h>
 
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_engine_pm.h"
 
 #include "i915_drv.h"
@@ -435,6 +436,21 @@ static void enable_signaling(struct i915_active_fence *active)
 	dma_fence_put(fence);
 }
 
+static int flush_barrier(struct active_node *it)
+{
+	struct intel_engine_cs *engine;
+
+	if (!is_barrier(&it->base))
+		return 0;
+
+	engine = __barrier_to_engine(it);
+	smp_rmb(); /* serialise with add_active_barriers */
+	if (!is_barrier(&it->base))
+		return 0;
+
+	return intel_engine_flush_barriers(engine);
+}
+
 int i915_active_wait(struct i915_active *ref)
 {
 	struct active_node *it, *n;
@@ -448,8 +464,9 @@ int i915_active_wait(struct i915_active *ref)
 	/* Flush lazy signals */
 	enable_signaling(&ref->excl);
 	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
-		if (is_barrier(&it->base)) /* unconnected idle barrier */
-			continue;
+		err = flush_barrier(it); /* unconnected idle barrier? */
+		if (err)
+			break;
 
 		enable_signaling(&it->base);
 	}
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (8 preceding siblings ...)
  2019-10-10  7:14 ` [PATCH 10/10] drm/i915: Flush idle barriers when waiting Chris Wilson
@ 2019-10-10  8:18 ` Patchwork
  2019-10-10  8:42 ` ✓ Fi.CI.BAT: success " Patchwork
                   ` (4 subsequent siblings)
  14 siblings, 0 replies; 42+ messages in thread
From: Patchwork @ 2019-10-10  8:18 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler
URL   : https://patchwork.freedesktop.org/series/67827/
State : warning

== Summary ==

$ dim checkpatch origin/drm-tip
cdd8a8ed5e08 drm/i915: Note the addition of timeslicing to the pretend scheduler
421324199ac5 drm/i915/execlists: Leave tell-tales as to why pending[] is bad
634779d033e3 drm/i915: Expose engine properties via sysfs
-:68: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#68: 
new file mode 100644

-:73: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#73: FILE: drivers/gpu/drm/i915/gt/intel_engine_sysfs.c:1:
+/*

-:74: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#74: FILE: drivers/gpu/drm/i915/gt/intel_engine_sysfs.c:2:
+ * SPDX-License-Identifier: MIT

-:198: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#198: FILE: drivers/gpu/drm/i915/gt/intel_engine_sysfs.h:1:
+/*

-:199: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#199: FILE: drivers/gpu/drm/i915/gt/intel_engine_sysfs.h:2:
+ * SPDX-License-Identifier: MIT

total: 0 errors, 5 warnings, 0 checks, 158 lines checked
b91c01a8795f drm/i915/execlists: Force preemption
cd5299b94752 drm/i915: Mark up "sentinel" requests
f4cbdbdb52f4 drm/i915/gt: Introduce barrier pulses along engines
-:28: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#28: 
new file mode 100644

-:33: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#33: FILE: drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:1:
+/*

-:34: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#34: FILE: drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:2:
+ * SPDX-License-Identifier: MIT

-:95: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#95: FILE: drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h:1:
+/*

-:96: WARNING:SPDX_LICENSE_TAG: Misplaced SPDX-License-Identifier tag - use line 1 instead
#96: FILE: drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h:2:
+ * SPDX-License-Identifier: MIT

total: 0 errors, 5 warnings, 0 checks, 92 lines checked
9d4c48416b66 drm/i915/execlists: Cancel banned contexts on schedule-out
7c1088b89959 drm/i915: Cancel non-persistent contexts on close
475f4b866f55 drm/i915: Replace hangcheck by heartbeats
-:236: WARNING:EMBEDDED_FUNCTION_NAME: Prefer using '"%s...", __func__' to using 'heartbeat', this function's name, in a string
#236: FILE: drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:69:
+					  "%s heartbeat not ticking\n",

-:253: WARNING:EMBEDDED_FUNCTION_NAME: Prefer using '"%s...", __func__' to using 'heartbeat', this function's name, in a string
#253: FILE: drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c:86:
+					      "stopped heartbeat on %s",

-:540: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#540: 
deleted file mode 100644

total: 0 errors, 3 warnings, 0 checks, 672 lines checked
082c39527d40 drm/i915: Flush idle barriers when waiting

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* ✓ Fi.CI.BAT: success for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (9 preceding siblings ...)
  2019-10-10  8:18 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Patchwork
@ 2019-10-10  8:42 ` Patchwork
  2019-10-10 16:19 ` ✗ Fi.CI.IGT: failure " Patchwork
                   ` (3 subsequent siblings)
  14 siblings, 0 replies; 42+ messages in thread
From: Patchwork @ 2019-10-10  8:42 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler
URL   : https://patchwork.freedesktop.org/series/67827/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_7047 -> Patchwork_14742
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/index.html

Known issues
------------

  Here are the changes found in Patchwork_14742 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_create@basic:
    - fi-icl-u3:          [PASS][1] -> [DMESG-WARN][2] ([fdo#107724]) +2 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/fi-icl-u3/igt@gem_exec_create@basic.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/fi-icl-u3/igt@gem_exec_create@basic.html

  * igt@gem_exec_suspend@basic-s3:
    - fi-blb-e6850:       [PASS][3] -> [INCOMPLETE][4] ([fdo#107718])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/fi-blb-e6850/igt@gem_exec_suspend@basic-s3.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/fi-blb-e6850/igt@gem_exec_suspend@basic-s3.html

  * igt@i915_selftest@live_hangcheck:
    - fi-skl-lmem:        [PASS][5] -> [INCOMPLETE][6] ([fdo#108744])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/fi-skl-lmem/igt@i915_selftest@live_hangcheck.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/fi-skl-lmem/igt@i915_selftest@live_hangcheck.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-kbl-7500u:       [PASS][7] -> [FAIL][8] ([fdo#111407])
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html

  
#### Possible fixes ####

  * igt@gem_flink_basic@basic:
    - fi-icl-u3:          [DMESG-WARN][9] ([fdo#107724]) -> [PASS][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/fi-icl-u3/igt@gem_flink_basic@basic.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/fi-icl-u3/igt@gem_flink_basic@basic.html

  * igt@gem_sync@basic-many-each:
    - {fi-tgl-u}:         [INCOMPLETE][11] ([fdo#111880]) -> [PASS][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/fi-tgl-u/igt@gem_sync@basic-many-each.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/fi-tgl-u/igt@gem_sync@basic-many-each.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#107718]: https://bugs.freedesktop.org/show_bug.cgi?id=107718
  [fdo#107724]: https://bugs.freedesktop.org/show_bug.cgi?id=107724
  [fdo#108744]: https://bugs.freedesktop.org/show_bug.cgi?id=108744
  [fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
  [fdo#111880]: https://bugs.freedesktop.org/show_bug.cgi?id=111880


Participating hosts (51 -> 47)
------------------------------

  Additional (3): fi-byt-j1900 fi-bsw-n3050 fi-pnv-d510 
  Missing    (7): fi-ilk-m540 fi-hsw-4200u fi-byt-squawks fi-bsw-cyan fi-icl-y fi-byt-clapper fi-bdw-samus 


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7047 -> Patchwork_14742

  CI-20190529: 20190529
  CI_DRM_7047: 23ba5b1f97d3d114d30eead1ca95d5a846a9027c @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5220: 1e38e32d721210a780198c8293a6b8c8e881df68 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14742: 082c39527d400c14faa558e01b66b7f93deea46c @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

082c39527d40 drm/i915: Flush idle barriers when waiting
475f4b866f55 drm/i915: Replace hangcheck by heartbeats
7c1088b89959 drm/i915: Cancel non-persistent contexts on close
9d4c48416b66 drm/i915/execlists: Cancel banned contexts on schedule-out
f4cbdbdb52f4 drm/i915/gt: Introduce barrier pulses along engines
cd5299b94752 drm/i915: Mark up "sentinel" requests
b91c01a8795f drm/i915/execlists: Force preemption
634779d033e3 drm/i915: Expose engine properties via sysfs
421324199ac5 drm/i915/execlists: Leave tell-tales as to why pending[] is bad
cdd8a8ed5e08 drm/i915: Note the addition of timeslicing to the pretend scheduler

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* ✗ Fi.CI.IGT: failure for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (10 preceding siblings ...)
  2019-10-10  8:42 ` ✓ Fi.CI.BAT: success " Patchwork
@ 2019-10-10 16:19 ` Patchwork
  2019-10-11  8:16 ` [PATCH 01/10] " Tvrtko Ursulin
                   ` (2 subsequent siblings)
  14 siblings, 0 replies; 42+ messages in thread
From: Patchwork @ 2019-10-10 16:19 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler
URL   : https://patchwork.freedesktop.org/series/67827/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_7047_full -> Patchwork_14742_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_14742_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_14742_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_14742_full:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_hangman@error-state-capture-bcs0:
    - shard-apl:          [PASS][1] -> [FAIL][2] +3 similar issues
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-apl1/igt@i915_hangman@error-state-capture-bcs0.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-apl1/igt@i915_hangman@error-state-capture-bcs0.html
    - shard-iclb:         [PASS][3] -> [FAIL][4] +2 similar issues
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb3/igt@i915_hangman@error-state-capture-bcs0.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb2/igt@i915_hangman@error-state-capture-bcs0.html

  * igt@i915_hangman@error-state-capture-rcs0:
    - shard-skl:          [PASS][5] -> [FAIL][6] +3 similar issues
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-skl3/igt@i915_hangman@error-state-capture-rcs0.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-skl1/igt@i915_hangman@error-state-capture-rcs0.html
    - shard-glk:          [PASS][7] -> [FAIL][8] +3 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-glk9/igt@i915_hangman@error-state-capture-rcs0.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-glk1/igt@i915_hangman@error-state-capture-rcs0.html

  * igt@i915_hangman@error-state-capture-vcs0:
    - shard-kbl:          [PASS][9] -> [FAIL][10] +4 similar issues
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-kbl4/igt@i915_hangman@error-state-capture-vcs0.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-kbl7/igt@i915_hangman@error-state-capture-vcs0.html

  * igt@i915_hangman@error-state-capture-vcs1:
    - shard-iclb:         NOTRUN -> [FAIL][11]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb1/igt@i915_hangman@error-state-capture-vcs1.html

  * igt@kms_fbcon_fbt@psr-suspend:
    - shard-iclb:         [PASS][12] -> [DMESG-WARN][13]
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb2/igt@kms_fbcon_fbt@psr-suspend.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb1/igt@kms_fbcon_fbt@psr-suspend.html

  
#### Suppressed ####

  The following results come from untrusted machines, tests, or statuses.
  They do not affect the overall result.

  * igt@i915_hangman@error-state-capture-rcs0:
    - {shard-tglb}:       [PASS][14] -> [FAIL][15]
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-tglb6/igt@i915_hangman@error-state-capture-rcs0.html
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-tglb6/igt@i915_hangman@error-state-capture-rcs0.html

  * igt@syncobj_wait@invalid-multi-wait-all-unsubmitted-signaled:
    - {shard-tglb}:       [PASS][16] -> [INCOMPLETE][17]
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-tglb2/igt@syncobj_wait@invalid-multi-wait-all-unsubmitted-signaled.html
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-tglb6/igt@syncobj_wait@invalid-multi-wait-all-unsubmitted-signaled.html

  

### Piglit changes ###

#### Possible regressions ####

  * spec@arb_shader_image_load_store@shader-mem-barrier (NEW):
    - pig-glk-j5005:      NOTRUN -> [FAIL][18]
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/pig-glk-j5005/spec@arb_shader_image_load_store@shader-mem-barrier.html

  
New tests
---------

  New tests have been introduced between CI_DRM_7047_full and Patchwork_14742_full:

### New Piglit tests (1) ###

  * spec@arb_shader_image_load_store@shader-mem-barrier:
    - Statuses : 1 fail(s)
    - Exec time: [2.79] s

  

Known issues
------------

  Here are the changes found in Patchwork_14742_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_schedule@independent-bsd2:
    - shard-iclb:         [PASS][19] -> [SKIP][20] ([fdo#109276]) +17 similar issues
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb4/igt@gem_exec_schedule@independent-bsd2.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb5/igt@gem_exec_schedule@independent-bsd2.html

  * igt@gem_exec_schedule@reorder-wide-bsd:
    - shard-iclb:         [PASS][21] -> [SKIP][22] ([fdo#111325]) +6 similar issues
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb5/igt@gem_exec_schedule@reorder-wide-bsd.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb4/igt@gem_exec_schedule@reorder-wide-bsd.html

  * igt@gem_userptr_blits@sync-unmap-cycles:
    - shard-hsw:          [PASS][23] -> [DMESG-WARN][24] ([fdo#111870]) +1 similar issue
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-hsw8/igt@gem_userptr_blits@sync-unmap-cycles.html
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-hsw4/igt@gem_userptr_blits@sync-unmap-cycles.html

  * igt@i915_suspend@sysfs-reader:
    - shard-apl:          [PASS][25] -> [DMESG-WARN][26] ([fdo#108566]) +7 similar issues
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-apl8/igt@i915_suspend@sysfs-reader.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-apl7/igt@i915_suspend@sysfs-reader.html

  * igt@kms_cursor_legacy@cursora-vs-flipa-atomic-transitions:
    - shard-iclb:         [PASS][27] -> [INCOMPLETE][28] ([fdo#107713]) +2 similar issues
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb6/igt@kms_cursor_legacy@cursora-vs-flipa-atomic-transitions.html
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb7/igt@kms_cursor_legacy@cursora-vs-flipa-atomic-transitions.html

  * igt@kms_flip@2x-flip-vs-expired-vblank-interruptible:
    - shard-glk:          [PASS][29] -> [FAIL][30] ([fdo#105363])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-glk2/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible.html
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-glk5/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render:
    - shard-iclb:         [PASS][31] -> [FAIL][32] ([fdo#103167]) +5 similar issues
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb3/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-cur-indfb-draw-render.html

  * igt@kms_plane_lowres@pipe-a-tiling-y:
    - shard-iclb:         [PASS][33] -> [FAIL][34] ([fdo#103166])
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb5/igt@kms_plane_lowres@pipe-a-tiling-y.html
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb6/igt@kms_plane_lowres@pipe-a-tiling-y.html

  * igt@kms_psr@psr2_cursor_mmap_gtt:
    - shard-iclb:         [PASS][35] -> [SKIP][36] ([fdo#109441])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_gtt.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb1/igt@kms_psr@psr2_cursor_mmap_gtt.html

  * igt@kms_setmode@basic:
    - shard-hsw:          [PASS][37] -> [FAIL][38] ([fdo#99912])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-hsw1/igt@kms_setmode@basic.html
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-hsw7/igt@kms_setmode@basic.html

  
#### Possible fixes ####

  * igt@gem_ctx_shared@exec-single-timeline-bsd:
    - shard-iclb:         [SKIP][39] ([fdo#110841]) -> [PASS][40]
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb2/igt@gem_ctx_shared@exec-single-timeline-bsd.html
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb8/igt@gem_ctx_shared@exec-single-timeline-bsd.html

  * igt@gem_eio@reset-stress:
    - shard-snb:          [FAIL][41] ([fdo#109661]) -> [PASS][42]
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-snb1/igt@gem_eio@reset-stress.html
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-snb5/igt@gem_eio@reset-stress.html

  * igt@gem_exec_schedule@preempt-queue-bsd1:
    - shard-iclb:         [SKIP][43] ([fdo#109276]) -> [PASS][44] +17 similar issues
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb6/igt@gem_exec_schedule@preempt-queue-bsd1.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb2/igt@gem_exec_schedule@preempt-queue-bsd1.html

  * igt@gem_exec_schedule@wide-bsd:
    - shard-iclb:         [SKIP][45] ([fdo#111325]) -> [PASS][46] +2 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb4/igt@gem_exec_schedule@wide-bsd.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb3/igt@gem_exec_schedule@wide-bsd.html

  * igt@gem_tiled_swapping@non-threaded:
    - shard-hsw:          [INCOMPLETE][47] ([fdo#103540] / [fdo#108686]) -> [PASS][48]
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-hsw4/igt@gem_tiled_swapping@non-threaded.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-hsw1/igt@gem_tiled_swapping@non-threaded.html
    - shard-snb:          [INCOMPLETE][49] ([fdo#105411] / [fdo#108686]) -> [PASS][50]
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-snb7/igt@gem_tiled_swapping@non-threaded.html
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-snb6/igt@gem_tiled_swapping@non-threaded.html

  * igt@gem_userptr_blits@map-fixed-invalidate-busy-gup:
    - shard-hsw:          [DMESG-WARN][51] ([fdo#111870]) -> [PASS][52]
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-hsw1/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-hsw7/igt@gem_userptr_blits@map-fixed-invalidate-busy-gup.html

  * igt@i915_suspend@fence-restore-tiled2untiled:
    - shard-apl:          [DMESG-WARN][53] ([fdo#108566]) -> [PASS][54] +1 similar issue
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-apl4/igt@i915_suspend@fence-restore-tiled2untiled.html
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-apl4/igt@i915_suspend@fence-restore-tiled2untiled.html

  * igt@kms_big_fb@x-tiled-64bpp-rotate-0:
    - {shard-tglb}:       [INCOMPLETE][55] -> [PASS][56]
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-tglb1/igt@kms_big_fb@x-tiled-64bpp-rotate-0.html
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-tglb2/igt@kms_big_fb@x-tiled-64bpp-rotate-0.html

  * igt@kms_busy@extended-pageflip-hang-newfb-render-a:
    - shard-apl:          [DMESG-WARN][57] ([fdo#103558] / [fdo#105602] / [fdo#110222]) -> [PASS][58]
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-apl3/igt@kms_busy@extended-pageflip-hang-newfb-render-a.html
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-apl3/igt@kms_busy@extended-pageflip-hang-newfb-render-a.html

  * igt@kms_concurrent@pipe-b:
    - shard-apl:          [DMESG-WARN][59] ([fdo#103558] / [fdo#105602]) -> [PASS][60] +18 similar issues
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-apl3/igt@kms_concurrent@pipe-b.html
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-apl3/igt@kms_concurrent@pipe-b.html

  * igt@kms_cursor_crc@pipe-c-cursor-dpms:
    - shard-kbl:          [FAIL][61] ([fdo#103232]) -> [PASS][62]
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-kbl2/igt@kms_cursor_crc@pipe-c-cursor-dpms.html
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-kbl2/igt@kms_cursor_crc@pipe-c-cursor-dpms.html

  * igt@kms_flip@absolute-wf_vblank:
    - shard-apl:          [INCOMPLETE][63] ([fdo#103927]) -> [PASS][64]
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-apl1/igt@kms_flip@absolute-wf_vblank.html
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-apl8/igt@kms_flip@absolute-wf_vblank.html

  * igt@kms_flip@flip-vs-suspend:
    - shard-skl:          [INCOMPLETE][65] ([fdo#109507]) -> [PASS][66]
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-skl1/igt@kms_flip@flip-vs-suspend.html
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-skl6/igt@kms_flip@flip-vs-suspend.html

  * igt@kms_flip_tiling@flip-changes-tiling-yf:
    - shard-skl:          [FAIL][67] ([fdo#108303]) -> [PASS][68]
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-skl4/igt@kms_flip_tiling@flip-changes-tiling-yf.html
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-skl5/igt@kms_flip_tiling@flip-changes-tiling-yf.html

  * igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw:
    - shard-iclb:         [FAIL][69] ([fdo#103167]) -> [PASS][70] +2 similar issues
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb2/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb1/igt@kms_frontbuffer_tracking@fbc-1p-pri-indfb-multidraw.html

  * igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min:
    - shard-skl:          [FAIL][71] ([fdo#108145]) -> [PASS][72] +1 similar issue
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-skl5/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-skl4/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html

  * igt@kms_vblank@pipe-c-wait-idle:
    - shard-hsw:          [INCOMPLETE][73] ([fdo#103540]) -> [PASS][74]
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-hsw2/igt@kms_vblank@pipe-c-wait-idle.html
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-hsw8/igt@kms_vblank@pipe-c-wait-idle.html

  * igt@perf@polling:
    - shard-skl:          [FAIL][75] ([fdo#110728]) -> [PASS][76]
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-skl6/igt@perf@polling.html
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-skl9/igt@perf@polling.html

  
#### Warnings ####

  * igt@gem_mocs_settings@mocs-reset-bsd2:
    - shard-iclb:         [FAIL][77] ([fdo#111330]) -> [SKIP][78] ([fdo#109276]) +1 similar issue
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-iclb4/igt@gem_mocs_settings@mocs-reset-bsd2.html
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-iclb3/igt@gem_mocs_settings@mocs-reset-bsd2.html

  * igt@kms_content_protection@atomic:
    - shard-apl:          [DMESG-FAIL][79] ([fdo#103558] / [fdo#105602] / [fdo#110321]) -> [FAIL][80] ([fdo#110321] / [fdo#110336])
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-apl3/igt@kms_content_protection@atomic.html
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-apl3/igt@kms_content_protection@atomic.html

  * igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-mmap-wc:
    - shard-apl:          [SKIP][81] ([fdo#105602] / [fdo#109271]) -> [SKIP][82] ([fdo#109271]) +20 similar issues
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-apl3/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-mmap-wc.html
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-apl3/igt@kms_frontbuffer_tracking@fbc-2p-scndscrn-spr-indfb-draw-mmap-wc.html

  * igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb:
    - shard-apl:          [DMESG-FAIL][83] ([fdo#103558] / [fdo#105602] / [fdo#108145]) -> [FAIL][84] ([fdo#108145])
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7047/shard-apl3/igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb.html
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/shard-apl3/igt@kms_plane_alpha_blend@pipe-c-alpha-opaque-fb.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#103166]: https://bugs.freedesktop.org/show_bug.cgi?id=103166
  [fdo#103167]: https://bugs.freedesktop.org/show_bug.cgi?id=103167
  [fdo#103232]: https://bugs.freedesktop.org/show_bug.cgi?id=103232
  [fdo#103540]: https://bugs.freedesktop.org/show_bug.cgi?id=103540
  [fdo#103558]: https://bugs.freedesktop.org/show_bug.cgi?id=103558
  [fdo#103927]: https://bugs.freedesktop.org/show_bug.cgi?id=103927
  [fdo#105363]: https://bugs.freedesktop.org/show_bug.cgi?id=105363
  [fdo#105411]: https://bugs.freedesktop.org/show_bug.cgi?id=105411
  [fdo#105602]: https://bugs.freedesktop.org/show_bug.cgi?id=105602
  [fdo#107713]: https://bugs.freedesktop.org/show_bug.cgi?id=107713
  [fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
  [fdo#108303]: https://bugs.freedesktop.org/show_bug.cgi?id=108303
  [fdo#108566]: https://bugs.freedesktop.org/show_bug.cgi?id=108566
  [fdo#108686]: https://bugs.freedesktop.org/show_bug.cgi?id=108686
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
  [fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
  [fdo#109507]: https://bugs.freedesktop.org/show_bug.cgi?id=109507
  [fdo#109661]: https://bugs.freedesktop.org/show_bug.cgi?id=109661
  [fdo#110222]: https://bugs.freedesktop.org/show_bug.cgi?id=110222
  [fdo#110321]: https://bugs.freedesktop.org/show_bug.cgi?id=110321
  [fdo#110336]: https://bugs.freedesktop.org/show_bug.cgi?id=110336
  [fdo#110728]: https://bugs.freedesktop.org/show_bug.cgi?id=110728
  [fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
  [fdo#111325]: https://bugs.freedesktop.org/show_bug.cgi?id=111325
  [fdo#111330]: https://bugs.freedesktop.org/show_bug.cgi?id=111330
  [fdo#111870]: https://bugs.freedesktop.org/show_bug.cgi?id=111870
  [fdo#99912]: https://bugs.freedesktop.org/show_bug.cgi?id=99912


Participating hosts (11 -> 11)
------------------------------

  No changes in participating hosts


Build changes
-------------

  * CI: CI-20190529 -> None
  * Linux: CI_DRM_7047 -> Patchwork_14742

  CI-20190529: 20190529
  CI_DRM_7047: 23ba5b1f97d3d114d30eead1ca95d5a846a9027c @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_5220: 1e38e32d721210a780198c8293a6b8c8e881df68 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
  Patchwork_14742: 082c39527d400c14faa558e01b66b7f93deea46c @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_14742/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (11 preceding siblings ...)
  2019-10-10 16:19 ` ✗ Fi.CI.IGT: failure " Patchwork
@ 2019-10-11  8:16 ` Tvrtko Ursulin
  2019-10-11  9:49 ` ✗ Fi.CI.BUILD: failure for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev2) Patchwork
  2019-10-11 11:39 ` ✗ Fi.CI.BUILD: failure for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev3) Patchwork
  14 siblings, 0 replies; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11  8:16 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/10/2019 08:14, Chris Wilson wrote:
> Since writing the comment that the scheduler is entirely passive, we've
> added minimal timeslicing which adds the most primitive of active
> elements (a timeout and reschedule).
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
> Cc: Ramalingam C <ramalingam.c@intel.com>
> ---
>   drivers/gpu/drm/i915/i915_scheduler_types.h | 9 +++++++++
>   1 file changed, 9 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
> index aad81acba9dc..d18e70550054 100644
> --- a/drivers/gpu/drm/i915/i915_scheduler_types.h
> +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
> @@ -49,6 +49,15 @@ struct i915_sched_attr {
>    * DAG of each request, we are able to insert it into a sorted queue when it
>    * is ready, and are able to reorder its portion of the graph to accommodate
>    * dynamic priority changes.
> + *
> + * Ok, there is now one active element to the "scheduler" in the backends.
> + * We let a new context run for a small amount of time before re-evaluating
> + * the run order. As we re-evaluate, we maintain the strict ordering of
> + * dependencies, but attempt to rotate the active contexts (the current context
> + * is put to the back of its priority queue, then reshuffling its dependents).
> + * This provides minimal timeslicing and prevents a userspace hog (e.g.
> + * something waiting on a user semaphore [VkEvent]) from denying service to
> + * others.
>    */
>   struct i915_sched_node {
>   	struct list_head signalers_list; /* those before us, we depend upon */
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 02/10] drm/i915/execlists: Leave tell-tales as to why pending[] is bad
  2019-10-10  7:14 ` [PATCH 02/10] drm/i915/execlists: Leave tell-tales as to why pending[] is bad Chris Wilson
@ 2019-10-11  8:39   ` Tvrtko Ursulin
  0 siblings, 0 replies; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11  8:39 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/10/2019 08:14, Chris Wilson wrote:
> Before we BUG out with bad pending state, leave a telltale as to which
> test failed.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_lrc.c | 30 ++++++++++++++++++++++++-----
>   drivers/gpu/drm/i915/i915_gem.h     |  8 ++++----
>   2 files changed, 29 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index a0777b3ad68a..5040fbdd81af 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1138,25 +1138,45 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
>   
>   	trace_ports(execlists, msg, execlists->pending);
>   
> -	if (!execlists->pending[0])
> +	if (!execlists->pending[0]) {
> +		GEM_TRACE_ERR("Nothing pending for promotion!\n");
>   		return false;
> +	}
>   
> -	if (execlists->pending[execlists_num_ports(execlists)])
> +	if (execlists->pending[execlists_num_ports(execlists)]) {
> +		GEM_TRACE_ERR("Excess pending[%d] for promotion!\n",
> +			      execlists_num_ports(execlists));
>   		return false;
> +	}
>   
>   	for (port = execlists->pending; (rq = *port); port++) {
> -		if (ce == rq->hw_context)
> +		if (ce == rq->hw_context) {
> +			GEM_TRACE_ERR("Duplicate context in pending[%zd]\n",
> +				      port - execlists->pending);
>   			return false;
> +		}
>   
>   		ce = rq->hw_context;
>   		if (i915_request_completed(rq))
>   			continue;
>   
> -		if (i915_active_is_idle(&ce->active))
> +		if (i915_active_is_idle(&ce->active)) {
> +			GEM_TRACE_ERR("Inactive context in pending[%zd]\n",
> +				      port - execlists->pending);
> +			return false;
> +		}
> +
> +		if (!i915_vma_is_pinned(ce->state)) {
> +			GEM_TRACE_ERR("Unpinned context in pending[%zd]\n",
> +				      port - execlists->pending);
>   			return false;
> +		}
>   
> -		if (!i915_vma_is_pinned(ce->state))
> +		if (!i915_vma_is_pinned(ce->ring->vma)) {
> +			GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n",
> +				      port - execlists->pending);
>   			return false;
> +		}
>   	}
>   
>   	return ce;
> diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h
> index 6795f1daa3d5..63dab3765106 100644
> --- a/drivers/gpu/drm/i915/i915_gem.h
> +++ b/drivers/gpu/drm/i915/i915_gem.h
> @@ -37,10 +37,8 @@ struct drm_i915_private;
>   #define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER)
>   
>   #define GEM_BUG_ON(condition) do { if (unlikely((condition))) {	\
> -		pr_err("%s:%d GEM_BUG_ON(%s)\n", \
> -		       __func__, __LINE__, __stringify(condition)); \
> -		GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
> -			  __func__, __LINE__, __stringify(condition)); \
> +		GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \
> +			      __func__, __LINE__, __stringify(condition)); \
>   		BUG(); \
>   		} \
>   	} while(0)
> @@ -66,11 +64,13 @@ struct drm_i915_private;
>   
>   #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM)
>   #define GEM_TRACE(...) trace_printk(__VA_ARGS__)
> +#define GEM_TRACE_ERR(...) do { pr_err(__VA_ARGS__); trace_printk(__VA_ARGS__); } while (0)
>   #define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL)
>   #define GEM_TRACE_DUMP_ON(expr) \
>   	do { if (expr) ftrace_dump(DUMP_ALL); } while (0)
>   #else
>   #define GEM_TRACE(...) do { } while (0)
> +#define GEM_TRACE_ERR(...) do { } while (0)
>   #define GEM_TRACE_DUMP() do { } while (0)
>   #define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr)
>   #endif
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 03/10] drm/i915: Expose engine properties via sysfs
  2019-10-10  7:14 ` [PATCH 03/10] drm/i915: Expose engine properties via sysfs Chris Wilson
@ 2019-10-11  8:44   ` Tvrtko Ursulin
  2019-10-11  8:49     ` Chris Wilson
  2019-10-11  9:40   ` [PATCH v2] " Chris Wilson
  1 sibling, 1 reply; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11  8:44 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/10/2019 08:14, Chris Wilson wrote:
> Preliminary stub to add engines underneath /sys/class/drm/cardN/, so
> that we can expose properties on each engine to the sysadmin.
> 
> To start with we have basic analogues of the i915_query ioctl so that we
> can pretty print engine discovery from the shell, and flesh out the
> directory structure. Later we will add writeable sysadmin properties such
> as per-engine timeout controls.
> 
> An example tree of the engine properties on Braswell:
>      /sys/class/drm/card0
>      └── engine
>          ├── bcs0
>          │   ├── class
>          │   ├── heartbeat_interval_ms

Not present in this patch.

>          │   ├── instance
>          │   ├── mmio_base

I vote for putting mmio_base in a followup patch.

And how about we add capabilities in the first patch? So we get another 
way of engine discovery. Ideally with mapping of bits to user friendly 
strings.

Regards,

Tvrtko

>          │   └── name
>          ├── rcs0
>          │   ├── class
>          │   ├── heartbeat_interval_ms
>          │   ├── instance
>          │   ├── mmio_base
>          │   └── name
>          ├── vcs0
>          │   ├── class
>          │   ├── heartbeat_interval_ms
>          │   ├── instance
>          │   ├── mmio_base
>          │   └── name
>          └── vecs0
>              ├── class
>              ├── heartbeat_interval_ms
>              ├── instance
>              ├── mmio_base
>              └── name
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
> Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
> Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
> ---
>   drivers/gpu/drm/i915/Makefile                |   3 +-
>   drivers/gpu/drm/i915/gt/intel_engine_sysfs.c | 119 +++++++++++++++++++
>   drivers/gpu/drm/i915/gt/intel_engine_sysfs.h |  14 +++
>   drivers/gpu/drm/i915/i915_sysfs.c            |   3 +
>   4 files changed, 138 insertions(+), 1 deletion(-)
>   create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
>   create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_sysfs.h
> 
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index e791d9323b51..cd9a10ba2516 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -78,8 +78,9 @@ gt-y += \
>   	gt/intel_breadcrumbs.o \
>   	gt/intel_context.o \
>   	gt/intel_engine_cs.o \
> -	gt/intel_engine_pool.o \
>   	gt/intel_engine_pm.o \
> +	gt/intel_engine_pool.o \
> +	gt/intel_engine_sysfs.o \
>   	gt/intel_engine_user.o \
>   	gt/intel_gt.o \
>   	gt/intel_gt_irq.o \
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
> new file mode 100644
> index 000000000000..cbe9ec59beeb
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
> @@ -0,0 +1,119 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#include <linux/kobject.h>
> +#include <linux/sysfs.h>
> +
> +#include "i915_drv.h"
> +#include "intel_engine.h"
> +#include "intel_engine_sysfs.h"
> +
> +struct kobj_engine {
> +	struct kobject base;
> +	struct intel_engine_cs *engine;
> +};
> +
> +static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
> +{
> +	return container_of(kobj, struct kobj_engine, base)->engine;
> +}
> +
> +static ssize_t
> +name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
> +{
> +	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
> +}
> +
> +static ssize_t
> +class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
> +{
> +	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
> +}
> +
> +static ssize_t
> +inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
> +{
> +	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
> +}
> +
> +static ssize_t
> +mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
> +{
> +	return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
> +}
> +
> +static struct kobj_attribute name_attr = __ATTR(name, 0444, name_show, NULL);
> +static struct kobj_attribute class_attr = __ATTR(class, 0444, class_show, NULL);
> +static struct kobj_attribute inst_attr = __ATTR(instance, 0444, inst_show, NULL);
> +static struct kobj_attribute mmio_attr = __ATTR(mmio_base, 0444, mmio_show, NULL);
> +
> +static void kobj_engine_release(struct kobject *kobj)
> +{
> +	kfree(kobj);
> +}
> +
> +static struct kobj_type kobj_engine_type = {
> +	.release = kobj_engine_release,
> +	.sysfs_ops = &kobj_sysfs_ops
> +};
> +
> +static struct kobject *
> +kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
> +{
> +	struct kobj_engine *ke;
> +
> +	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
> +	if (!ke)
> +		return NULL;
> +
> +	kobject_init(&ke->base, &kobj_engine_type);
> +	ke->engine = engine;
> +
> +	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
> +		kobject_put(&ke->base);
> +		return NULL;
> +	}
> +
> +	/* xfer ownership to sysfs tree */
> +	return &ke->base;
> +}
> +
> +void intel_engines_add_sysfs(struct drm_i915_private *i915)
> +{
> +	static const struct attribute *files[] = {
> +		&name_attr.attr,
> +		&class_attr.attr,
> +		&inst_attr.attr,
> +		&mmio_attr.attr,
> +		NULL
> +	};
> +
> +	struct device *kdev = i915->drm.primary->kdev;
> +	struct intel_engine_cs *engine;
> +	struct kobject *dir;
> +
> +	dir = kobject_create_and_add("engine", &kdev->kobj);
> +	if (!dir)
> +		return;
> +
> +	for_each_uabi_engine(engine, i915) {
> +		struct kobject *kobj;
> +
> +		kobj = kobj_engine(dir, engine);
> +		if (!kobj)
> +			goto err_engine;
> +
> +		if (sysfs_create_files(kobj, files))
> +			goto err_engine;
> +
> +		if (0) {
> +err_engine:
> +			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
> +				engine->name);
> +			break;
> +		}
> +	}
> +}
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.h b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.h
> new file mode 100644
> index 000000000000..ef44a745b70a
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.h
> @@ -0,0 +1,14 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#ifndef INTEL_ENGINE_SYSFS_H
> +#define INTEL_ENGINE_SYSFS_H
> +
> +struct drm_i915_private;
> +
> +void intel_engines_add_sysfs(struct drm_i915_private *i915);
> +
> +#endif /* INTEL_ENGINE_SYSFS_H */
> diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
> index bf039b8ba593..7b665f69f301 100644
> --- a/drivers/gpu/drm/i915/i915_sysfs.c
> +++ b/drivers/gpu/drm/i915/i915_sysfs.c
> @@ -30,6 +30,7 @@
>   #include <linux/stat.h>
>   #include <linux/sysfs.h>
>   
> +#include "gt/intel_engine_sysfs.h"
>   #include "gt/intel_rc6.h"
>   
>   #include "i915_drv.h"
> @@ -616,6 +617,8 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
>   		DRM_ERROR("RPS sysfs setup failed\n");
>   
>   	i915_setup_error_capture(kdev);
> +
> +	intel_engines_add_sysfs(dev_priv);
>   }
>   
>   void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 05/10] drm/i915: Mark up "sentinel" requests
  2019-10-10  7:14 ` [PATCH 05/10] drm/i915: Mark up "sentinel" requests Chris Wilson
@ 2019-10-11  8:45   ` Tvrtko Ursulin
  0 siblings, 0 replies; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11  8:45 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/10/2019 08:14, Chris Wilson wrote:
> Sometimes we want to emit a terminator request, a request that flushes
> the pipeline and allows no request to come after it. This can be used
> for a "preempt-to-idle" to ensure that upon processing the
> context-switch to that request, all other active contexts have been
> flushed.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/gt/intel_lrc.c |  6 ++++++
>   drivers/gpu/drm/i915/i915_request.h | 10 ++++++++--
>   2 files changed, 14 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index aa52e5f34dab..eb99f1e804f7 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -1253,6 +1253,9 @@ static bool can_merge_rq(const struct i915_request *prev,
>   	if (i915_request_completed(next))
>   		return true;
>   
> +	if (i915_request_has_sentinel(prev))
> +		return false;
> +
>   	if (!can_merge_ctx(prev->hw_context, next->hw_context))
>   		return false;
>   
> @@ -1724,6 +1727,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
>   				if (last->hw_context == rq->hw_context)
>   					goto done;
>   
> +				if (i915_request_has_sentinel(last))
> +					goto done;
> +
>   				/*
>   				 * If GVT overrides us we only ever submit
>   				 * port[0], leaving port[1] empty. Note that we
> diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
> index 6a95242b280d..96991d64759c 100644
> --- a/drivers/gpu/drm/i915/i915_request.h
> +++ b/drivers/gpu/drm/i915/i915_request.h
> @@ -216,8 +216,9 @@ struct i915_request {
>   	unsigned long emitted_jiffies;
>   
>   	unsigned long flags;
> -#define I915_REQUEST_WAITBOOST BIT(0)
> -#define I915_REQUEST_NOPREEMPT BIT(1)
> +#define I915_REQUEST_WAITBOOST	BIT(0)
> +#define I915_REQUEST_NOPREEMPT	BIT(1)
> +#define I915_REQUEST_SENTINEL	BIT(2)
>   
>   	/** timeline->request entry for this request */
>   	struct list_head link;
> @@ -440,6 +441,11 @@ static inline bool i915_request_has_nopreempt(const struct i915_request *rq)
>   	return unlikely(rq->flags & I915_REQUEST_NOPREEMPT);
>   }
>   
> +static inline bool i915_request_has_sentinel(const struct i915_request *rq)
> +{
> +	return unlikely(rq->flags & I915_REQUEST_SENTINEL);
> +}
> +
>   static inline struct intel_timeline *
>   i915_request_timeline(struct i915_request *rq)
>   {
> 

Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 03/10] drm/i915: Expose engine properties via sysfs
  2019-10-11  8:44   ` Tvrtko Ursulin
@ 2019-10-11  8:49     ` Chris Wilson
  2019-10-11  9:04       ` Tvrtko Ursulin
  0 siblings, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-11  8:49 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-11 09:44:16)
> 
> On 10/10/2019 08:14, Chris Wilson wrote:
> > Preliminary stub to add engines underneath /sys/class/drm/cardN/, so
> > that we can expose properties on each engine to the sysadmin.
> > 
> > To start with we have basic analogues of the i915_query ioctl so that we
> > can pretty print engine discovery from the shell, and flesh out the
> > directory structure. Later we will add writeable sysadmin properties such
> > as per-engine timeout controls.
> > 
> > An example tree of the engine properties on Braswell:
> >      /sys/class/drm/card0
> >      └── engine
> >          ├── bcs0
> >          │   ├── class
> >          │   ├── heartbeat_interval_ms
> 
> Not present in this patch.

I did say an example tree, not this tree :)

> >          │   ├── instance
> >          │   ├── mmio_base
> 
> I vote for putting mmio_base in a followup patch.

Darn your eagle eyes ;)

> 
> And how about we add capabilities in the first patch? So we get another 
> way of engine discovery. Ideally with mapping of bits to user friendly 
> strings.

Right, I was about to ask if we should do a /proc/cpuinfo style
capabilities. Do we need both? Or just stick to the more human readable
output for sysfs?
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 03/10] drm/i915: Expose engine properties via sysfs
  2019-10-11  8:49     ` Chris Wilson
@ 2019-10-11  9:04       ` Tvrtko Ursulin
  0 siblings, 0 replies; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11  9:04 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 11/10/2019 09:49, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-10-11 09:44:16)
>>
>> On 10/10/2019 08:14, Chris Wilson wrote:
>>> Preliminary stub to add engines underneath /sys/class/drm/cardN/, so
>>> that we can expose properties on each engine to the sysadmin.
>>>
>>> To start with we have basic analogues of the i915_query ioctl so that we
>>> can pretty print engine discovery from the shell, and flesh out the
>>> directory structure. Later we will add writeable sysadmin properties such
>>> as per-engine timeout controls.
>>>
>>> An example tree of the engine properties on Braswell:
>>>       /sys/class/drm/card0
>>>       └── engine
>>>           ├── bcs0
>>>           │   ├── class
>>>           │   ├── heartbeat_interval_ms
>>
>> Not present in this patch.
> 
> I did say an example tree, not this tree :)
> 
>>>           │   ├── instance
>>>           │   ├── mmio_base
>>
>> I vote for putting mmio_base in a followup patch.
> 
> Darn your eagle eyes ;)
> 
>>
>> And how about we add capabilities in the first patch? So we get another
>> way of engine discovery. Ideally with mapping of bits to user friendly
>> strings.
> 
> Right, I was about to ask if we should do a /proc/cpuinfo style
> capabilities. Do we need both? Or just stick to the more human readable
> output for sysfs?

Interesting question and I am not sure. I'd definitely have human 
readable and that even being an aggregation of engine->flags and 
engine->uabi_capabilities. Whether or not to also put hex in there.. For 
uabi_capabilities it's possible, but for the rest not so much. So that 
probably means only human readable?

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 06/10] drm/i915/gt: Introduce barrier pulses along engines
  2019-10-10  7:14 ` [PATCH 06/10] drm/i915/gt: Introduce barrier pulses along engines Chris Wilson
@ 2019-10-11  9:11   ` Tvrtko Ursulin
  2019-10-11  9:52     ` Chris Wilson
  0 siblings, 1 reply; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11  9:11 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/10/2019 08:14, Chris Wilson wrote:
> To flush idle barriers, and even inflight requests, we want to send a
> preemptive 'pulse' along an engine. We use a no-op request along the
> pinned kernel_context at high priority so that it should run or else
> kick off the stuck requests. We can use this to ensure idle barriers are
> immediately flushed, as part of a context cancellation mechanism, or as
> part of a heartbeat mechanism to detect and reset a stuck GPU.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/Makefile                 |  1 +
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 56 +++++++++++++++++++
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.h  | 14 +++++
>   drivers/gpu/drm/i915/gt/intel_engine_pm.c     |  2 +-
>   drivers/gpu/drm/i915/i915_priolist_types.h    |  1 +
>   5 files changed, 73 insertions(+), 1 deletion(-)
>   create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
>   create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> 
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index cd9a10ba2516..cfab7c8585b3 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -78,6 +78,7 @@ gt-y += \
>   	gt/intel_breadcrumbs.o \
>   	gt/intel_context.o \
>   	gt/intel_engine_cs.o \
> +	gt/intel_engine_heartbeat.o \
>   	gt/intel_engine_pm.o \
>   	gt/intel_engine_pool.o \
>   	gt/intel_engine_sysfs.o \
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> new file mode 100644
> index 000000000000..2fc413f9d506
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> @@ -0,0 +1,56 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#include "i915_request.h"
> +
> +#include "intel_context.h"
> +#include "intel_engine_heartbeat.h"
> +#include "intel_engine_pm.h"
> +#include "intel_engine.h"
> +#include "intel_gt.h"
> +
> +static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
> +{
> +	engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
> +	i915_request_add_active_barriers(rq);

Why do you need active barriers with the idle pulse? Just because it is 
a handy point to release the previously pinned contexts? But they may 
get reused as soon as idle pulse finishes, no?

Regards,

Tvrtko

> +}
> +
> +int intel_engine_pulse(struct intel_engine_cs *engine)
> +{
> +	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
> +	struct intel_context *ce = engine->kernel_context;
> +	struct i915_request *rq;
> +	int err = 0;
> +
> +	if (!intel_engine_has_preemption(engine))
> +		return -ENODEV;
> +
> +	if (!intel_engine_pm_get_if_awake(engine))
> +		return 0;
> +
> +	if (mutex_lock_interruptible(&ce->timeline->mutex))
> +		goto out_rpm;
> +
> +	intel_context_enter(ce);
> +	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
> +	intel_context_exit(ce);
> +	if (IS_ERR(rq)) {
> +		err = PTR_ERR(rq);
> +		goto out_unlock;
> +	}
> +
> +	rq->flags |= I915_REQUEST_SENTINEL;
> +	idle_pulse(engine, rq);
> +
> +	__i915_request_commit(rq);
> +	__i915_request_queue(rq, &attr);
> +
> +out_unlock:
> +	mutex_unlock(&ce->timeline->mutex);
> +out_rpm:
> +	intel_engine_pm_put(engine);
> +	return err;
> +}
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> new file mode 100644
> index 000000000000..b950451b5998
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> @@ -0,0 +1,14 @@
> +/*
> + * SPDX-License-Identifier: MIT
> + *
> + * Copyright © 2019 Intel Corporation
> + */
> +
> +#ifndef INTEL_ENGINE_HEARTBEAT_H
> +#define INTEL_ENGINE_HEARTBEAT_H
> +
> +struct intel_engine_cs;
> +
> +int intel_engine_pulse(struct intel_engine_cs *engine);
> +
> +#endif /* INTEL_ENGINE_HEARTBEAT_H */
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> index 67eb6183648a..7d76611d9df1 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> @@ -111,7 +111,7 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine)
>   	i915_request_add_active_barriers(rq);
>   
>   	/* Install ourselves as a preemption barrier */
> -	rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE;
> +	rq->sched.attr.priority = I915_PRIORITY_BARRIER;
>   	__i915_request_commit(rq);
>   
>   	/* Release our exclusive hold on the engine */
> diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
> index 21037a2e2038..ae8bb3cb627e 100644
> --- a/drivers/gpu/drm/i915/i915_priolist_types.h
> +++ b/drivers/gpu/drm/i915/i915_priolist_types.h
> @@ -39,6 +39,7 @@ enum {
>    * active request.
>    */
>   #define I915_PRIORITY_UNPREEMPTABLE INT_MAX
> +#define I915_PRIORITY_BARRIER INT_MAX
>   
>   #define __NO_PREEMPTION (I915_PRIORITY_WAIT)
>   
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2] drm/i915: Expose engine properties via sysfs
  2019-10-10  7:14 ` [PATCH 03/10] drm/i915: Expose engine properties via sysfs Chris Wilson
  2019-10-11  8:44   ` Tvrtko Ursulin
@ 2019-10-11  9:40   ` Chris Wilson
  1 sibling, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-11  9:40 UTC (permalink / raw)
  To: intel-gfx

Preliminary stub to add engines underneath /sys/class/drm/cardN/, so
that we can expose properties on each engine to the sysadmin.

To start with we have basic analogues of the i915_query ioctl so that we
can pretty print engine discovery from the shell, and flesh out the
directory structure. Later we will add writeable sysadmin properties such
as per-engine timeout controls.

An example tree of the engine properties on Braswell:
    /sys/class/drm/card0
    └── engine
        ├── bcs0
        │   ├── capabilities
        │   ├── class
        │   ├── instance
        │   └── name
        ├── rcs0
        │   ├── capabilities
        │   ├── class
        │   ├── instance
        │   └── name
        ├── vcs0
        │   ├── capabilities
        │   ├── class
        │   ├── instance
        │   └── name
        └── vecs0
            ├── capabilities
            ├── class
            ├── instance
            └── name

v2: Include stringified capabilities

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Acked-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
---
Tvrtko, you mentioned exposing flags as well, I haven't spotted what
should be included for that field.
---
 drivers/gpu/drm/i915/Makefile                |   3 +-
 drivers/gpu/drm/i915/gt/intel_engine_sysfs.c | 175 +++++++++++++++++++
 drivers/gpu/drm/i915/gt/intel_engine_sysfs.h |  14 ++
 drivers/gpu/drm/i915/i915_sysfs.c            |   3 +
 4 files changed, 194 insertions(+), 1 deletion(-)
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
 create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_sysfs.h

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index e791d9323b51..cd9a10ba2516 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -78,8 +78,9 @@ gt-y += \
 	gt/intel_breadcrumbs.o \
 	gt/intel_context.o \
 	gt/intel_engine_cs.o \
-	gt/intel_engine_pool.o \
 	gt/intel_engine_pm.o \
+	gt/intel_engine_pool.o \
+	gt/intel_engine_sysfs.o \
 	gt/intel_engine_user.o \
 	gt/intel_gt.o \
 	gt/intel_gt_irq.o \
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
new file mode 100644
index 000000000000..bfc3a4f631a5
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
@@ -0,0 +1,175 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_sysfs.h"
+
+struct kobj_engine {
+	struct kobject base;
+	struct intel_engine_cs *engine;
+};
+
+static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
+{
+	return container_of(kobj, struct kobj_engine, base)->engine;
+}
+
+static ssize_t
+name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+}
+
+static struct kobj_attribute name_attr =
+__ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+}
+
+static struct kobj_attribute class_attr =
+__ATTR(class, 0444, class_show, NULL);
+
+static ssize_t
+inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+}
+
+static struct kobj_attribute inst_attr =
+__ATTR(instance, 0444, inst_show, NULL);
+
+static ssize_t repr_trim(char *buf, ssize_t len)
+{
+	/* Trim off the trailing space */
+	if (len > PAGE_SIZE)
+		len = PAGE_SIZE;
+	if (len > 0)
+		buf[--len] = '\0';
+
+	return len;
+}
+
+static ssize_t
+caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+	static const char *vcs_repr[] = {
+	       [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
+	       [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+	};
+	static const char *vecs_repr[] = {
+	       [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+	};
+	struct intel_engine_cs *engine = kobj_to_engine(kobj);
+	const char **repr;
+	int num_repr, n;
+	ssize_t len;
+
+	switch (engine->class) {
+	case VIDEO_DECODE_CLASS:
+		repr = vcs_repr;
+		num_repr = ARRAY_SIZE(vcs_repr);
+		break;
+
+	case VIDEO_ENHANCEMENT_CLASS:
+		repr = vecs_repr;
+		num_repr = ARRAY_SIZE(vecs_repr);
+		break;
+
+	default:
+		repr = NULL;
+		num_repr = 0;
+		break;
+	}
+
+	len = 0;
+	for_each_set_bit(n, (unsigned long *)&engine->uabi_capabilities, 64) {
+		if (n < num_repr && repr[n])
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"%s ", repr[n]);
+		else
+			len += snprintf(buf + len, PAGE_SIZE - len,
+					"[%d] ", n);
+	}
+	return repr_trim(buf, len);
+}
+
+static struct kobj_attribute caps_attr =
+__ATTR(capabilities, 0444, caps_show, NULL);
+
+static void kobj_engine_release(struct kobject *kobj)
+{
+	kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+	.release = kobj_engine_release,
+	.sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+	struct kobj_engine *ke;
+
+	ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+	if (!ke)
+		return NULL;
+
+	kobject_init(&ke->base, &kobj_engine_type);
+	ke->engine = engine;
+
+	if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+		kobject_put(&ke->base);
+		return NULL;
+	}
+
+	/* xfer ownership to sysfs tree */
+	return &ke->base;
+}
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+	static const struct attribute *files[] = {
+		&name_attr.attr,
+		&class_attr.attr,
+		&inst_attr.attr,
+		&caps_attr.attr,
+		NULL
+	};
+
+	struct device *kdev = i915->drm.primary->kdev;
+	struct intel_engine_cs *engine;
+	struct kobject *dir;
+
+	dir = kobject_create_and_add("engine", &kdev->kobj);
+	if (!dir)
+		return;
+
+	for_each_uabi_engine(engine, i915) {
+		struct kobject *kobj;
+
+		kobj = kobj_engine(dir, engine);
+		if (!kobj)
+			goto err_engine;
+
+		if (sysfs_create_files(kobj, files))
+			goto err_engine;
+
+		if (0) {
+err_engine:
+			dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+				engine->name);
+			break;
+		}
+	}
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.h b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.h
new file mode 100644
index 000000000000..ef44a745b70a
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index bf039b8ba593..7b665f69f301 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -30,6 +30,7 @@
 #include <linux/stat.h>
 #include <linux/sysfs.h>
 
+#include "gt/intel_engine_sysfs.h"
 #include "gt/intel_rc6.h"
 
 #include "i915_drv.h"
@@ -616,6 +617,8 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
 		DRM_ERROR("RPS sysfs setup failed\n");
 
 	i915_setup_error_capture(kdev);
+
+	intel_engines_add_sysfs(dev_priv);
 }
 
 void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* Re: [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out
  2019-10-10  7:14 ` [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out Chris Wilson
@ 2019-10-11  9:47   ` Tvrtko Ursulin
  2019-10-11 10:03     ` Chris Wilson
  2019-10-11 10:15     ` Chris Wilson
  2019-10-11 11:16   ` [PATCH v2] " Chris Wilson
  1 sibling, 2 replies; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11  9:47 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/10/2019 08:14, Chris Wilson wrote:
> On completion of a banned context, scrub the context image so that we do

s/completion/schedule out/ like in the subject line? Otherwise I 
struggle to understand how banned context is completing. Presumably it 
was banned because it keeps hanging.

> not replay the active payload. The intent is that we skip banned
> payloads on request submission so that the timeline advancement
> continues on in the background. However, if we are returning to a
> preempted request, i915_request_skip() is ineffective and instead we
> need to patch up the context image so that it continues from the start
> of the next request.

But if the context is banned why do we want to continue from the start 
of the next request? Don't we want to zap all submitted so far?

> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   drivers/gpu/drm/i915/gt/intel_lrc.c    |  58 ++++++
>   drivers/gpu/drm/i915/gt/selftest_lrc.c | 273 +++++++++++++++++++++++++
>   2 files changed, 331 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index eb99f1e804f7..79c7ebea2fcc 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -234,6 +234,9 @@ static void execlists_init_reg_state(u32 *reg_state,
>   				     const struct intel_engine_cs *engine,
>   				     const struct intel_ring *ring,
>   				     bool close);
> +static void
> +__execlists_update_reg_state(const struct intel_context *ce,
> +			     const struct intel_engine_cs *engine);
>   
>   static void __context_pin_acquire(struct intel_context *ce)
>   {
> @@ -1022,6 +1025,58 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
>   		tasklet_schedule(&ve->base.execlists.tasklet);
>   }
>   
> +static void
> +mark_complete(struct i915_request *rq, struct intel_engine_cs *engine)
> +{
> +	const struct intel_timeline * const tl = rcu_dereference(rq->timeline);
> +
> +	*(u32 *)tl->hwsp_seqno = rq->fence.seqno;
> +	GEM_BUG_ON(!i915_request_completed(rq));
> +
> +	list_for_each_entry_from_reverse(rq, &tl->requests, link) {
> +		if (i915_request_signaled(rq))
> +			break;
> +
> +		mark_eio(rq);

This would mark with -EIO requests which have potentially been completed 
but not retired yet? If so, why?

> +	}
> +
> +	intel_engine_queue_breadcrumbs(engine);
> +}
> +
> +static void cancel_active(struct i915_request *rq,
> +			  struct intel_engine_cs *engine)
> +{
> +	struct intel_context * const ce = rq->hw_context;
> +	u32 *regs = ce->lrc_reg_state;
> +
> +	if (i915_request_completed(rq))
> +		return;
> +
> +	GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
> +		  __func__, engine->name, rq->fence.context, rq->fence.seqno);
> +	__context_pin_acquire(ce);
> +
> +	/* Scrub the context image to prevent replaying the previous batch */
> +	memcpy(regs, /* skip restoring the vanilla PPHWSP */
> +	       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
> +	       engine->context_size - PAGE_SIZE);

context_size - LRC_STATE_PN * PAGE_SIZE ?

> +	execlists_init_reg_state(regs, ce, engine, ce->ring, false);
> +
> +	/* Ring will be advanced on retire; here we need to reset the context */
> +	ce->ring->head = intel_ring_wrap(ce->ring, rq->wa_tail);
> +	__execlists_update_reg_state(ce, engine);
> +
> +	/* We've switched away, so this should be a no-op, but intent matters */
> +	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
> +
> +	/* Let everyone know that the request may now be retired */
> +	rcu_read_lock();
> +	mark_complete(rq, engine);
> +	rcu_read_unlock();
> +
> +	__context_pin_release(ce);
> +}
> +
>   static inline void
>   __execlists_schedule_out(struct i915_request *rq,
>   			 struct intel_engine_cs * const engine)
> @@ -1032,6 +1087,9 @@ __execlists_schedule_out(struct i915_request *rq,
>   	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
>   	intel_gt_pm_put(engine->gt);
>   
> +	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
> +		cancel_active(rq, engine);

Or are you counting on this already being the last runnable request from 
this context due to coalescing? It wouldn't work if for any reason coalescing 
would be prevented. Either with GVT, or I had some ideas to prevent 
coalescing for contexts where watchdog is enabled in the future. In 
which case this would be a hidden gotcha. Maybe all that's needed in 
mark_complete is also to look towards the end of the list?

Regards,

Tvrtko

> +
>   	/*
>   	 * If this is part of a virtual engine, its next request may
>   	 * have been blocked waiting for access to the active context.
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index 198cf2f754f4..1703130ef0ef 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -7,6 +7,7 @@
>   #include <linux/prime_numbers.h>
>   
>   #include "gem/i915_gem_pm.h"
> +#include "gt/intel_engine_heartbeat.h"
>   #include "gt/intel_reset.h"
>   
>   #include "i915_selftest.h"
> @@ -986,6 +987,277 @@ static int live_nopreempt(void *arg)
>   	goto err_client_b;
>   }
>   
> +struct live_preempt_cancel {
> +	struct intel_engine_cs *engine;
> +	struct preempt_client a, b;
> +};
> +
> +static int __cancel_active0(struct live_preempt_cancel *arg)
> +{
> +	struct i915_request *rq;
> +	struct igt_live_test t;
> +	int err;
> +
> +	/* Preempt cancel of ELSP0 */
> +	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
> +
> +	if (igt_live_test_begin(&t, arg->engine->i915,
> +				__func__, arg->engine->name))
> +		return -EIO;
> +
> +	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
> +	rq = spinner_create_request(&arg->a.spin,
> +				    arg->a.ctx, arg->engine,
> +				    MI_ARB_CHECK);
> +	if (IS_ERR(rq))
> +		return PTR_ERR(rq);
> +
> +	i915_request_get(rq);
> +	i915_request_add(rq);
> +	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	i915_gem_context_set_banned(arg->a.ctx);
> +	err = intel_engine_pulse(arg->engine);
> +	if (err)
> +		goto out;
> +
> +	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	if (rq->fence.error != -EIO) {
> +		pr_err("Cancelled inflight0 request did not report -EIO\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +out:
> +	i915_request_put(rq);
> +	if (igt_live_test_end(&t))
> +		err = -EIO;
> +	return err;
> +}
> +
> +static int __cancel_active1(struct live_preempt_cancel *arg)
> +{
> +	struct i915_request *rq[2] = {};
> +	struct igt_live_test t;
> +	int err;
> +
> +	/* Preempt cancel of ELSP1 */
> +	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
> +
> +	if (igt_live_test_begin(&t, arg->engine->i915,
> +				__func__, arg->engine->name))
> +		return -EIO;
> +
> +	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
> +	rq[0] = spinner_create_request(&arg->a.spin,
> +				       arg->a.ctx, arg->engine,
> +				       MI_NOOP); /* no preemption */
> +	if (IS_ERR(rq[0]))
> +		return PTR_ERR(rq[0]);
> +
> +	i915_request_get(rq[0]);
> +	i915_request_add(rq[0]);
> +	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
> +	rq[1] = spinner_create_request(&arg->b.spin,
> +				       arg->b.ctx, arg->engine,
> +				       MI_ARB_CHECK);
> +	if (IS_ERR(rq[1])) {
> +		err = PTR_ERR(rq[1]);
> +		goto out;
> +	}
> +
> +	i915_request_get(rq[1]);
> +	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
> +	i915_request_add(rq[1]);
> +	if (err)
> +		goto out;
> +
> +	i915_gem_context_set_banned(arg->b.ctx);
> +	err = intel_engine_pulse(arg->engine);
> +	if (err)
> +		goto out;
> +
> +	igt_spinner_end(&arg->a.spin);
> +	if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	if (rq[0]->fence.error != 0) {
> +		pr_err("Normal inflight0 request did not complete\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +	if (rq[1]->fence.error != -EIO) {
> +		pr_err("Cancelled inflight1 request did not report -EIO\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +out:
> +	i915_request_put(rq[1]);
> +	i915_request_put(rq[0]);
> +	if (igt_live_test_end(&t))
> +		err = -EIO;
> +	return err;
> +}
> +
> +static int __cancel_queued(struct live_preempt_cancel *arg)
> +{
> +	struct i915_request *rq[3] = {};
> +	struct igt_live_test t;
> +	int err;
> +
> +	/* Full ELSP and one in the wings */
> +	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
> +
> +	if (igt_live_test_begin(&t, arg->engine->i915,
> +				__func__, arg->engine->name))
> +		return -EIO;
> +
> +	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
> +	rq[0] = spinner_create_request(&arg->a.spin,
> +				       arg->a.ctx, arg->engine,
> +				       MI_ARB_CHECK);
> +	if (IS_ERR(rq[0]))
> +		return PTR_ERR(rq[0]);
> +
> +	i915_request_get(rq[0]);
> +	i915_request_add(rq[0]);
> +	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
> +	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
> +	if (IS_ERR(rq[1])) {
> +		err = PTR_ERR(rq[1]);
> +		goto out;
> +	}
> +
> +	i915_request_get(rq[1]);
> +	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
> +	i915_request_add(rq[1]);
> +	if (err)
> +		goto out;
> +
> +	rq[2] = spinner_create_request(&arg->b.spin,
> +				       arg->a.ctx, arg->engine,
> +				       MI_ARB_CHECK);
> +	if (IS_ERR(rq[2])) {
> +		err = PTR_ERR(rq[2]);
> +		goto out;
> +	}
> +
> +	i915_request_get(rq[2]);
> +	err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
> +	i915_request_add(rq[2]);
> +	if (err)
> +		goto out;
> +
> +	i915_gem_context_set_banned(arg->a.ctx);
> +	err = intel_engine_pulse(arg->engine);
> +	if (err)
> +		goto out;
> +
> +	if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	if (rq[0]->fence.error != -EIO) {
> +		pr_err("Cancelled inflight0 request did not report -EIO\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +	if (rq[1]->fence.error != 0) {
> +		pr_err("Normal inflight1 request did not complete\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +	if (rq[2]->fence.error != -EIO) {
> +		pr_err("Cancelled queued request did not report -EIO\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +out:
> +	i915_request_put(rq[2]);
> +	i915_request_put(rq[1]);
> +	i915_request_put(rq[0]);
> +	if (igt_live_test_end(&t))
> +		err = -EIO;
> +	return err;
> +}
> +
> +static int live_preempt_cancel(void *arg)
> +{
> +	struct drm_i915_private *i915 = arg;
> +	struct live_preempt_cancel data;
> +	enum intel_engine_id id;
> +	int err = -ENOMEM;
> +
> +	/*
> +	 * To cancel an inflight context, we need to first remove it from the
> +	 * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
> +	 */
> +
> +	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
> +		return 0;
> +
> +	if (preempt_client_init(i915, &data.a))
> +		return -ENOMEM;
> +	if (preempt_client_init(i915, &data.b))
> +		goto err_client_a;
> +
> +	for_each_engine(data.engine, i915, id) {
> +		if (!intel_engine_has_preemption(data.engine))
> +			continue;
> +
> +		err = __cancel_active0(&data);
> +		if (err)
> +			goto err_wedged;
> +
> +		err = __cancel_active1(&data);
> +		if (err)
> +			goto err_wedged;
> +
> +		err = __cancel_queued(&data);
> +		if (err)
> +			goto err_wedged;
> +	}
> +
> +	err = 0;
> +err_client_b:
> +	preempt_client_fini(&data.b);
> +err_client_a:
> +	preempt_client_fini(&data.a);
> +	return err;
> +
> +err_wedged:
> +	GEM_TRACE_DUMP();
> +	igt_spinner_end(&data.b.spin);
> +	igt_spinner_end(&data.a.spin);
> +	intel_gt_set_wedged(&i915->gt);
> +	goto err_client_b;
> +}
> +
>   static int live_suppress_self_preempt(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
> @@ -2270,6 +2542,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
>   		SUBTEST(live_preempt),
>   		SUBTEST(live_late_preempt),
>   		SUBTEST(live_nopreempt),
> +		SUBTEST(live_preempt_cancel),
>   		SUBTEST(live_suppress_self_preempt),
>   		SUBTEST(live_suppress_wait_preempt),
>   		SUBTEST(live_chain_preempt),
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* ✗ Fi.CI.BUILD: failure for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev2)
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (12 preceding siblings ...)
  2019-10-11  8:16 ` [PATCH 01/10] " Tvrtko Ursulin
@ 2019-10-11  9:49 ` Patchwork
  2019-10-11 11:39 ` ✗ Fi.CI.BUILD: failure for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev3) Patchwork
  14 siblings, 0 replies; 42+ messages in thread
From: Patchwork @ 2019-10-11  9:49 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev2)
URL   : https://patchwork.freedesktop.org/series/67827/
State : failure

== Summary ==

Applying: drm/i915: Note the addition of timeslicing to the pretend scheduler
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/i915_scheduler_types.h
Falling back to patching base and 3-way merge...
No changes -- Patch already applied.
Applying: drm/i915/execlists: Leave tell-tales as to why pending[] is bad
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/gt/intel_lrc.c
M	drivers/gpu/drm/i915/i915_gem.h
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/i915_gem.h
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/i915_gem.h
Auto-merging drivers/gpu/drm/i915/gt/intel_lrc.c
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/gt/intel_lrc.c
error: Failed to merge in the changes.
hint: Use 'git am --show-current-patch' to see the failed patch
Patch failed at 0002 drm/i915/execlists: Leave tell-tales as to why pending[] is bad
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 06/10] drm/i915/gt: Introduce barrier pulses along engines
  2019-10-11  9:11   ` Tvrtko Ursulin
@ 2019-10-11  9:52     ` Chris Wilson
  0 siblings, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-11  9:52 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-11 10:11:58)
> 
> On 10/10/2019 08:14, Chris Wilson wrote:
> > +#include "intel_context.h"
> > +#include "intel_engine_heartbeat.h"
> > +#include "intel_engine_pm.h"
> > +#include "intel_engine.h"
> > +#include "intel_gt.h"
> > +
> > +static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
> > +{
> > +     engine->wakeref_serial = READ_ONCE(engine->serial) + 1;
> > +     i915_request_add_active_barriers(rq);
> 
> Why do you need active barriers with the idle pulse? Just because it is 
> a handy point to release the previously pinned contexts? But they may 
> get reused as soon as idle pulse finishes, no?

Yes. It is a known point in time where the other context has finished,
and when this request runs has completed a context switch.

Remember all that time we were arguing about idle barriers and how we
needed to run them periodically to allow them to be reaped and avoid
having the entire aperture pinned with stale contexts forcing a stall.
And avoiding making the idle barriers themselves a global serialising
barrier. :|

The idea we had was that we would take advantage of any guaranteed
context switches and send regular pulses from the kernel context to pick
up stragglers. So we could use any context switch after the we retire
the old context to unpin it, but to keep the locking and preallocations
of the rbtree simple (you've seen i915_active, simple is anything but),
I left it to the engine->kernel_context to manage.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out
  2019-10-11  9:47   ` Tvrtko Ursulin
@ 2019-10-11 10:03     ` Chris Wilson
  2019-10-11 10:15     ` Chris Wilson
  1 sibling, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-11 10:03 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-11 10:47:26)
> 
> On 10/10/2019 08:14, Chris Wilson wrote:
> > On completion of a banned context, scrub the context image so that we do
> 
> s/completion/schedule out/ like in the subject line? Otherwise I 
> struggle to understand how banned context is completing. Presumably it 
> was banned because it keeps hanging.

Ok, I had the CS completion event in mind, but i915_request_completed()
does muddle the waters.
 
> > not replay the active payload. The intent is that we skip banned
> > payloads on request submission so that the timeline advancement
> > continues on in the background. However, if we are returning to a
> > preempted request, i915_request_skip() is ineffective and instead we
> > need to patch up the context image so that it continues from the start
> > of the next request.
> 
> But if the context is banned why do we want to continue from the start 
> of the next request? Don't we want to zap all submitted so far?

We scrub the payload, but the request itself is still a vital part of
the web of dependencies. That is we still execute the semaphores and
breadcrumbs of the cancelled requests to maintain global ordering.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out
  2019-10-11  9:47   ` Tvrtko Ursulin
  2019-10-11 10:03     ` Chris Wilson
@ 2019-10-11 10:15     ` Chris Wilson
  2019-10-11 10:40       ` Chris Wilson
  1 sibling, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-11 10:15 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-11 10:47:26)
> > +static void
> > +mark_complete(struct i915_request *rq, struct intel_engine_cs *engine)
> > +{
> > +     const struct intel_timeline * const tl = rcu_dereference(rq->timeline);
> > +
> > +     *(u32 *)tl->hwsp_seqno = rq->fence.seqno;
> > +     GEM_BUG_ON(!i915_request_completed(rq));
> > +
> > +     list_for_each_entry_from_reverse(rq, &tl->requests, link) {
> > +             if (i915_request_signaled(rq))
> > +                     break;
> > +
> > +             mark_eio(rq);
> 
> This would -EIO requests which have potentially be completed but not 
> retired yet? If so why?

Hmm. That's a bit of an oversight, yes.

> > +     }
> > +
> > +     intel_engine_queue_breadcrumbs(engine);
> > +}
> > +
> > +static void cancel_active(struct i915_request *rq,
> > +                       struct intel_engine_cs *engine)
> > +{
> > +     struct intel_context * const ce = rq->hw_context;
> > +     u32 *regs = ce->lrc_reg_state;
> > +
> > +     if (i915_request_completed(rq))
> > +             return;
> > +
> > +     GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
> > +               __func__, engine->name, rq->fence.context, rq->fence.seqno);
> > +     __context_pin_acquire(ce);
> > +
> > +     /* Scrub the context image to prevent replaying the previous batch */
> > +     memcpy(regs, /* skip restoring the vanilla PPHWSP */
> > +            engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
> > +            engine->context_size - PAGE_SIZE);
> 
> context_size - LRC_STATE_PN * PAGE_SIZE ?

context_size excludes the guc header pages, so it's a bit of a kerfuffle.
 
> > +     execlists_init_reg_state(regs, ce, engine, ce->ring, false);
> > +
> > +     /* Ring will be advanced on retire; here we need to reset the context */
> > +     ce->ring->head = intel_ring_wrap(ce->ring, rq->wa_tail);
> > +     __execlists_update_reg_state(ce, engine);
> > +
> > +     /* We've switched away, so this should be a no-op, but intent matters */
> > +     ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
> > +
> > +     /* Let everyone know that the request may now be retired */
> > +     rcu_read_lock();
> > +     mark_complete(rq, engine);
> > +     rcu_read_unlock();
> > +
> > +     __context_pin_release(ce);
> > +}
> > +
> >   static inline void
> >   __execlists_schedule_out(struct i915_request *rq,
> >                        struct intel_engine_cs * const engine)
> > @@ -1032,6 +1087,9 @@ __execlists_schedule_out(struct i915_request *rq,
> >       execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
> >       intel_gt_pm_put(engine->gt);
> >   
> > +     if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
> > +             cancel_active(rq, engine);
> 
> Or you are counting this is already the last runnable request from this 
> context due coalescing? It wouldn't work if for any reason coalescing 
> would be prevented. Either with GVT, or I had some ideas to prevent 
> coalescing for contexts where watchdog is enabled in the future. In 
> which case this would be a hidden gotcha. Maybe all that's needed in 
> mark_complete is also to look towards the end of the list?

I'm not following. We are looking at the context here, which is tracked by
the last request submitted for that context.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out
  2019-10-11 10:15     ` Chris Wilson
@ 2019-10-11 10:40       ` Chris Wilson
  0 siblings, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-11 10:40 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Chris Wilson (2019-10-11 11:15:58)
> Quoting Tvrtko Ursulin (2019-10-11 10:47:26)
> > > +     if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
> > > +             cancel_active(rq, engine);
> > 
> > Or you are counting this is already the last runnable request from this 
> > context due coalescing? It wouldn't work if for any reason coalescing 
> > would be prevented. Either with GVT, or I had some ideas to prevent 
> > coalescing for contexts where watchdog is enabled in the future. In 
> > which case this would be a hidden gotcha. Maybe all that's needed in 
> > mark_complete is also to look towards the end of the list?
> 
> I'm not following. We are looking at the context here, which is tracked by
> the last request submitted for that context.

Oh I see, you were pointing out that I had not walked back along the context
to find the incomplete request for correct patching.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* [PATCH v2] drm/i915/execlists: Cancel banned contexts on schedule-out
  2019-10-10  7:14 ` [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out Chris Wilson
  2019-10-11  9:47   ` Tvrtko Ursulin
@ 2019-10-11 11:16   ` Chris Wilson
  2019-10-11 13:10     ` Tvrtko Ursulin
  1 sibling, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-11 11:16 UTC (permalink / raw)
  To: intel-gfx

On schedule-out (CS completion) of a banned context, scrub the context
image so that we do not replay the active payload. The intent is that we
skip banned payloads on request submission so that the timeline
advancement continues on in the background. However, if we are returning
to a preempted request, i915_request_skip() is ineffective and instead we
need to patch up the context image so that it continues from the start
of the next request.

v2: Fixup cancellation so that we only scrub the payload of the active
request and do not short-circuit the breadcrumbs (which might cause
other contexts to execute out of order).

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 drivers/gpu/drm/i915/gt/intel_lrc.c    |  91 ++++++---
 drivers/gpu/drm/i915/gt/selftest_lrc.c | 273 +++++++++++++++++++++++++
 2 files changed, 341 insertions(+), 23 deletions(-)

diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 09fc5ecfdd09..809a5dd97c14 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -234,6 +234,9 @@ static void execlists_init_reg_state(u32 *reg_state,
 				     const struct intel_engine_cs *engine,
 				     const struct intel_ring *ring,
 				     bool close);
+static void
+__execlists_update_reg_state(const struct intel_context *ce,
+			     const struct intel_engine_cs *engine);
 
 static void __context_pin_acquire(struct intel_context *ce)
 {
@@ -256,6 +259,29 @@ static void mark_eio(struct i915_request *rq)
 	i915_request_mark_complete(rq);
 }
 
+static struct i915_request *active_request(struct i915_request *rq)
+{
+	const struct intel_context * const ce = rq->hw_context;
+	struct i915_request *active = NULL;
+	struct list_head *list;
+
+	if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
+		return rq;
+
+	list = &i915_request_active_timeline(rq)->requests;
+	list_for_each_entry_from_reverse(rq, list, link) {
+		if (i915_request_completed(rq))
+			break;
+
+		if (rq->hw_context != ce)
+			break;
+
+		active = rq;
+	}
+
+	return active;
+}
+
 static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
 {
 	return (i915_ggtt_offset(engine->status_page.vma) +
@@ -977,6 +1003,45 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
 		tasklet_schedule(&ve->base.execlists.tasklet);
 }
 
+static void cancel_active(struct i915_request *rq,
+			  struct intel_engine_cs *engine)
+{
+	struct intel_context * const ce = rq->hw_context;
+	u32 *regs = ce->lrc_reg_state;
+
+	/*
+	 * The executing context has been cancelled. Fixup the context so that
+	 * it continues on from the breadcrumb after the batch and will be
+	 * marked as incomplete [-EIO] upon signaling. We preserve the
+	 * breadcrumbs and semaphores of the subsequent requests so that
+	 * inter-timeline dependencies remain correctly ordered.
+	 */
+	GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
+		  __func__, engine->name, rq->fence.context, rq->fence.seqno);
+
+	__context_pin_acquire(ce);
+
+	/* On resubmission of the active request, its payload will be scrubbed */
+	rq = active_request(rq);
+	if (rq)
+		ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
+	else
+		ce->ring->head = ce->ring->tail;
+
+	/* Scrub the context image to prevent replaying the previous batch */
+	memcpy(regs, /* skip restoring the vanilla PPHWSP */
+	       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
+	       engine->context_size - PAGE_SIZE);
+
+	execlists_init_reg_state(regs, ce, engine, ce->ring, false);
+	__execlists_update_reg_state(ce, engine);
+
+	/* We've switched away, so this should be a no-op, but intent matters */
+	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
+
+	__context_pin_release(ce);
+}
+
 static inline void
 __execlists_schedule_out(struct i915_request *rq,
 			 struct intel_engine_cs * const engine)
@@ -987,6 +1052,9 @@ __execlists_schedule_out(struct i915_request *rq,
 	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
 	intel_gt_pm_put(engine->gt);
 
+	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
+		cancel_active(rq, engine);
+
 	/*
 	 * If this is part of a virtual engine, its next request may
 	 * have been blocked waiting for access to the active context.
@@ -2776,29 +2844,6 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
 			       &execlists->csb_status[reset_value]);
 }
 
-static struct i915_request *active_request(struct i915_request *rq)
-{
-	const struct intel_context * const ce = rq->hw_context;
-	struct i915_request *active = NULL;
-	struct list_head *list;
-
-	if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
-		return rq;
-
-	list = &i915_request_active_timeline(rq)->requests;
-	list_for_each_entry_from_reverse(rq, list, link) {
-		if (i915_request_completed(rq))
-			break;
-
-		if (rq->hw_context != ce)
-			break;
-
-		active = rq;
-	}
-
-	return active;
-}
-
 static void __execlists_reset_reg_state(const struct intel_context *ce,
 					const struct intel_engine_cs *engine)
 {
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 1276da059dc6..9d842e327aa1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -7,6 +7,7 @@
 #include <linux/prime_numbers.h>
 
 #include "gem/i915_gem_pm.h"
+#include "gt/intel_engine_heartbeat.h"
 #include "gt/intel_reset.h"
 
 #include "i915_selftest.h"
@@ -1016,6 +1017,277 @@ static int live_nopreempt(void *arg)
 	goto err_client_b;
 }
 
+struct live_preempt_cancel {
+	struct intel_engine_cs *engine;
+	struct preempt_client a, b;
+};
+
+static int __cancel_active0(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq;
+	struct igt_live_test t;
+	int err;
+
+	/* Preempt cancel of ELSP0 */
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+
+	if (igt_live_test_begin(&t, arg->engine->i915,
+				__func__, arg->engine->name))
+		return -EIO;
+
+	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+	rq = spinner_create_request(&arg->a.spin,
+				    arg->a.ctx, arg->engine,
+				    MI_ARB_CHECK);
+	if (IS_ERR(rq))
+		return PTR_ERR(rq);
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
+		err = -EIO;
+		goto out;
+	}
+
+	i915_gem_context_set_banned(arg->a.ctx);
+	err = intel_engine_pulse(arg->engine);
+	if (err)
+		goto out;
+
+	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	if (rq->fence.error != -EIO) {
+		pr_err("Cancelled inflight0 request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+out:
+	i915_request_put(rq);
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int __cancel_active1(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq[2] = {};
+	struct igt_live_test t;
+	int err;
+
+	/* Preempt cancel of ELSP1 */
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+
+	if (igt_live_test_begin(&t, arg->engine->i915,
+				__func__, arg->engine->name))
+		return -EIO;
+
+	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+	rq[0] = spinner_create_request(&arg->a.spin,
+				       arg->a.ctx, arg->engine,
+				       MI_NOOP); /* no preemption */
+	if (IS_ERR(rq[0]))
+		return PTR_ERR(rq[0]);
+
+	i915_request_get(rq[0]);
+	i915_request_add(rq[0]);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+		err = -EIO;
+		goto out;
+	}
+
+	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+	rq[1] = spinner_create_request(&arg->b.spin,
+				       arg->b.ctx, arg->engine,
+				       MI_ARB_CHECK);
+	if (IS_ERR(rq[1])) {
+		err = PTR_ERR(rq[1]);
+		goto out;
+	}
+
+	i915_request_get(rq[1]);
+	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+	i915_request_add(rq[1]);
+	if (err)
+		goto out;
+
+	i915_gem_context_set_banned(arg->b.ctx);
+	err = intel_engine_pulse(arg->engine);
+	if (err)
+		goto out;
+
+	igt_spinner_end(&arg->a.spin);
+	if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	if (rq[0]->fence.error != 0) {
+		pr_err("Normal inflight0 request did not complete\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (rq[1]->fence.error != -EIO) {
+		pr_err("Cancelled inflight1 request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+out:
+	i915_request_put(rq[1]);
+	i915_request_put(rq[0]);
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int __cancel_queued(struct live_preempt_cancel *arg)
+{
+	struct i915_request *rq[3] = {};
+	struct igt_live_test t;
+	int err;
+
+	/* Full ELSP and one in the wings */
+	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
+
+	if (igt_live_test_begin(&t, arg->engine->i915,
+				__func__, arg->engine->name))
+		return -EIO;
+
+	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
+	rq[0] = spinner_create_request(&arg->a.spin,
+				       arg->a.ctx, arg->engine,
+				       MI_ARB_CHECK);
+	if (IS_ERR(rq[0]))
+		return PTR_ERR(rq[0]);
+
+	i915_request_get(rq[0]);
+	i915_request_add(rq[0]);
+	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
+		err = -EIO;
+		goto out;
+	}
+
+	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
+	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
+	if (IS_ERR(rq[1])) {
+		err = PTR_ERR(rq[1]);
+		goto out;
+	}
+
+	i915_request_get(rq[1]);
+	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
+	i915_request_add(rq[1]);
+	if (err)
+		goto out;
+
+	rq[2] = spinner_create_request(&arg->b.spin,
+				       arg->a.ctx, arg->engine,
+				       MI_ARB_CHECK);
+	if (IS_ERR(rq[2])) {
+		err = PTR_ERR(rq[2]);
+		goto out;
+	}
+
+	i915_request_get(rq[2]);
+	err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
+	i915_request_add(rq[2]);
+	if (err)
+		goto out;
+
+	i915_gem_context_set_banned(arg->a.ctx);
+	err = intel_engine_pulse(arg->engine);
+	if (err)
+		goto out;
+
+	if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+		err = -EIO;
+		goto out;
+	}
+
+	if (rq[0]->fence.error != -EIO) {
+		pr_err("Cancelled inflight0 request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (rq[1]->fence.error != 0) {
+		pr_err("Normal inflight1 request did not complete\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (rq[2]->fence.error != -EIO) {
+		pr_err("Cancelled queued request did not report -EIO\n");
+		err = -EINVAL;
+		goto out;
+	}
+
+out:
+	i915_request_put(rq[2]);
+	i915_request_put(rq[1]);
+	i915_request_put(rq[0]);
+	if (igt_live_test_end(&t))
+		err = -EIO;
+	return err;
+}
+
+static int live_preempt_cancel(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct live_preempt_cancel data;
+	enum intel_engine_id id;
+	int err = -ENOMEM;
+
+	/*
+	 * To cancel an inflight context, we need to first remove it from the
+	 * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
+	 */
+
+	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
+		return 0;
+
+	if (preempt_client_init(i915, &data.a))
+		return -ENOMEM;
+	if (preempt_client_init(i915, &data.b))
+		goto err_client_a;
+
+	for_each_engine(data.engine, i915, id) {
+		if (!intel_engine_has_preemption(data.engine))
+			continue;
+
+		err = __cancel_active0(&data);
+		if (err)
+			goto err_wedged;
+
+		err = __cancel_active1(&data);
+		if (err)
+			goto err_wedged;
+
+		err = __cancel_queued(&data);
+		if (err)
+			goto err_wedged;
+	}
+
+	err = 0;
+err_client_b:
+	preempt_client_fini(&data.b);
+err_client_a:
+	preempt_client_fini(&data.a);
+	return err;
+
+err_wedged:
+	GEM_TRACE_DUMP();
+	igt_spinner_end(&data.b.spin);
+	igt_spinner_end(&data.a.spin);
+	intel_gt_set_wedged(&i915->gt);
+	goto err_client_b;
+}
+
 static int live_suppress_self_preempt(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
@@ -2452,6 +2724,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
 		SUBTEST(live_preempt),
 		SUBTEST(live_late_preempt),
 		SUBTEST(live_nopreempt),
+		SUBTEST(live_preempt_cancel),
 		SUBTEST(live_suppress_self_preempt),
 		SUBTEST(live_suppress_wait_preempt),
 		SUBTEST(live_chain_preempt),
-- 
2.23.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 42+ messages in thread

* ✗ Fi.CI.BUILD: failure for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev3)
  2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
                   ` (13 preceding siblings ...)
  2019-10-11  9:49 ` ✗ Fi.CI.BUILD: failure for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev2) Patchwork
@ 2019-10-11 11:39 ` Patchwork
  14 siblings, 0 replies; 42+ messages in thread
From: Patchwork @ 2019-10-11 11:39 UTC (permalink / raw)
  To: Chris Wilson; +Cc: intel-gfx

== Series Details ==

Series: series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev3)
URL   : https://patchwork.freedesktop.org/series/67827/
State : failure

== Summary ==

Applying: drm/i915: Note the addition of timeslicing to the pretend scheduler
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/i915_scheduler_types.h
Falling back to patching base and 3-way merge...
No changes -- Patch already applied.
Applying: drm/i915/execlists: Leave tell-tales as to why pending[] is bad
Using index info to reconstruct a base tree...
M	drivers/gpu/drm/i915/gt/intel_lrc.c
M	drivers/gpu/drm/i915/i915_gem.h
Falling back to patching base and 3-way merge...
Auto-merging drivers/gpu/drm/i915/i915_gem.h
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/i915_gem.h
Auto-merging drivers/gpu/drm/i915/gt/intel_lrc.c
CONFLICT (content): Merge conflict in drivers/gpu/drm/i915/gt/intel_lrc.c
error: Failed to merge in the changes.
hint: Use 'git am --show-current-patch' to see the failed patch
Patch failed at 0002 drm/i915/execlists: Leave tell-tales as to why pending[] is bad
When you have resolved this problem, run "git am --continue".
If you prefer to skip this patch, run "git am --skip" instead.
To restore the original branch and stop patching, run "git am --abort".

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v2] drm/i915/execlists: Cancel banned contexts on schedule-out
  2019-10-11 11:16   ` [PATCH v2] " Chris Wilson
@ 2019-10-11 13:10     ` Tvrtko Ursulin
  2019-10-11 14:10       ` Chris Wilson
  0 siblings, 1 reply; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11 13:10 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 11/10/2019 12:16, Chris Wilson wrote:
> On schedule-out (CS completion) of a banned context, scrub the context
> image so that we do not replay the active payload. The intent is that we
> skip banned payloads on request submission so that the timeline
> advancement continues on in the background. However, if we are returning
> to a preempted request, i915_request_skip() is ineffective and instead we
> need to patch up the context image so that it continues from the start
> of the next request.
> 
> v2: Fixup cancellation so that we only scrub the payload of the active
> request and do not short-circuit the breadcrumbs (which might cause
> other contexts to execute out of order).
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> ---
>   drivers/gpu/drm/i915/gt/intel_lrc.c    |  91 ++++++---
>   drivers/gpu/drm/i915/gt/selftest_lrc.c | 273 +++++++++++++++++++++++++
>   2 files changed, 341 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> index 09fc5ecfdd09..809a5dd97c14 100644
> --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> @@ -234,6 +234,9 @@ static void execlists_init_reg_state(u32 *reg_state,
>   				     const struct intel_engine_cs *engine,
>   				     const struct intel_ring *ring,
>   				     bool close);
> +static void
> +__execlists_update_reg_state(const struct intel_context *ce,
> +			     const struct intel_engine_cs *engine);
>   
>   static void __context_pin_acquire(struct intel_context *ce)
>   {
> @@ -256,6 +259,29 @@ static void mark_eio(struct i915_request *rq)
>   	i915_request_mark_complete(rq);
>   }
>   
> +static struct i915_request *active_request(struct i915_request *rq)
> +{
> +	const struct intel_context * const ce = rq->hw_context;
> +	struct i915_request *active = NULL;
> +	struct list_head *list;
> +
> +	if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
> +		return rq;
> +
> +	list = &i915_request_active_timeline(rq)->requests;
> +	list_for_each_entry_from_reverse(rq, list, link) {
> +		if (i915_request_completed(rq))
> +			break;
> +
> +		if (rq->hw_context != ce)
> +			break;

Would it be of any value here to also check the initial breadcrumb matches?

> +
> +		active = rq;
> +	}
> +
> +	return active;
> +}
> +
>   static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine)
>   {
>   	return (i915_ggtt_offset(engine->status_page.vma) +
> @@ -977,6 +1003,45 @@ static void kick_siblings(struct i915_request *rq, struct intel_context *ce)
>   		tasklet_schedule(&ve->base.execlists.tasklet);
>   }
>   
> +static void cancel_active(struct i915_request *rq,
> +			  struct intel_engine_cs *engine)
> +{
> +	struct intel_context * const ce = rq->hw_context;
> +	u32 *regs = ce->lrc_reg_state;
> +
> +	/*
> +	 * The executing context has been cancelled. Fixup the context so that
> +	 * it continues on from the breadcrumb after the batch and will be
> +	 * marked as incomplete [-EIO] upon signaling. We preserve the

Where does the -EIO marking happen now?

> +	 * breadcrumbs and semaphores of the subsequent requests so that
> +	 * inter-timeline dependencies remain correctly ordered.
> +	 */
> +	GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
> +		  __func__, engine->name, rq->fence.context, rq->fence.seqno);
> +
> +	__context_pin_acquire(ce);
> +
> +	/* On resubmission of the active request, its payload will be scrubbed */
> +	rq = active_request(rq);
> +	if (rq)
> +		ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
> +	else
> +		ce->ring->head = ce->ring->tail;

I don't quite understand yet.

If a context was banned I'd expect all requests on the tl->requests to 
be zapped and we only move to execute the last breadcrumb, no?

So if you find the active_request and you set ring head to 
active_rq->head how does that skip the payload?

Furthermore, if I try to sketch the rq->requests timeline like this:

   R0 r1 r2 r[elsp] r4 r5

'R' = completed; 'r' = incomplete

On schedule_out(r[elsp]) I'd expect you want to find r5 and set ring 
head to the final breadcrumb of it. And mark r1-r5 as -EIO. Am I completely 
on the wrong track?

(Bear with me with r4 and r5, assuming someone has set the context as 
single submission for future proofing the code.)

Regards,

Tvrtko

> +
> +	/* Scrub the context image to prevent replaying the previous batch */
> +	memcpy(regs, /* skip restoring the vanilla PPHWSP */
> +	       engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE,
> +	       engine->context_size - PAGE_SIZE);
> +
> +	execlists_init_reg_state(regs, ce, engine, ce->ring, false);
> +	__execlists_update_reg_state(ce, engine);
> +
> +	/* We've switched away, so this should be a no-op, but intent matters */
> +	ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
> +
> +	__context_pin_release(ce);
> +}
> +
>   static inline void
>   __execlists_schedule_out(struct i915_request *rq,
>   			 struct intel_engine_cs * const engine)
> @@ -987,6 +1052,9 @@ __execlists_schedule_out(struct i915_request *rq,
>   	execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
>   	intel_gt_pm_put(engine->gt);
>   
> +	if (unlikely(i915_gem_context_is_banned(ce->gem_context)))
> +		cancel_active(rq, engine);
> +
>   	/*
>   	 * If this is part of a virtual engine, its next request may
>   	 * have been blocked waiting for access to the active context.
> @@ -2776,29 +2844,6 @@ static void reset_csb_pointers(struct intel_engine_cs *engine)
>   			       &execlists->csb_status[reset_value]);
>   }
>   
> -static struct i915_request *active_request(struct i915_request *rq)
> -{
> -	const struct intel_context * const ce = rq->hw_context;
> -	struct i915_request *active = NULL;
> -	struct list_head *list;
> -
> -	if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
> -		return rq;
> -
> -	list = &i915_request_active_timeline(rq)->requests;
> -	list_for_each_entry_from_reverse(rq, list, link) {
> -		if (i915_request_completed(rq))
> -			break;
> -
> -		if (rq->hw_context != ce)
> -			break;
> -
> -		active = rq;
> -	}
> -
> -	return active;
> -}
> -
>   static void __execlists_reset_reg_state(const struct intel_context *ce,
>   					const struct intel_engine_cs *engine)
>   {
> diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> index 1276da059dc6..9d842e327aa1 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
> @@ -7,6 +7,7 @@
>   #include <linux/prime_numbers.h>
>   
>   #include "gem/i915_gem_pm.h"
> +#include "gt/intel_engine_heartbeat.h"
>   #include "gt/intel_reset.h"
>   
>   #include "i915_selftest.h"
> @@ -1016,6 +1017,277 @@ static int live_nopreempt(void *arg)
>   	goto err_client_b;
>   }
>   
> +struct live_preempt_cancel {
> +	struct intel_engine_cs *engine;
> +	struct preempt_client a, b;
> +};
> +
> +static int __cancel_active0(struct live_preempt_cancel *arg)
> +{
> +	struct i915_request *rq;
> +	struct igt_live_test t;
> +	int err;
> +
> +	/* Preempt cancel of ELSP0 */
> +	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
> +
> +	if (igt_live_test_begin(&t, arg->engine->i915,
> +				__func__, arg->engine->name))
> +		return -EIO;
> +
> +	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
> +	rq = spinner_create_request(&arg->a.spin,
> +				    arg->a.ctx, arg->engine,
> +				    MI_ARB_CHECK);
> +	if (IS_ERR(rq))
> +		return PTR_ERR(rq);
> +
> +	i915_request_get(rq);
> +	i915_request_add(rq);
> +	if (!igt_wait_for_spinner(&arg->a.spin, rq)) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	i915_gem_context_set_banned(arg->a.ctx);
> +	err = intel_engine_pulse(arg->engine);
> +	if (err)
> +		goto out;
> +
> +	if (i915_request_wait(rq, 0, HZ / 5) < 0) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	if (rq->fence.error != -EIO) {
> +		pr_err("Cancelled inflight0 request did not report -EIO\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +out:
> +	i915_request_put(rq);
> +	if (igt_live_test_end(&t))
> +		err = -EIO;
> +	return err;
> +}
> +
> +static int __cancel_active1(struct live_preempt_cancel *arg)
> +{
> +	struct i915_request *rq[2] = {};
> +	struct igt_live_test t;
> +	int err;
> +
> +	/* Preempt cancel of ELSP1 */
> +	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
> +
> +	if (igt_live_test_begin(&t, arg->engine->i915,
> +				__func__, arg->engine->name))
> +		return -EIO;
> +
> +	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
> +	rq[0] = spinner_create_request(&arg->a.spin,
> +				       arg->a.ctx, arg->engine,
> +				       MI_NOOP); /* no preemption */
> +	if (IS_ERR(rq[0]))
> +		return PTR_ERR(rq[0]);
> +
> +	i915_request_get(rq[0]);
> +	i915_request_add(rq[0]);
> +	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
> +	rq[1] = spinner_create_request(&arg->b.spin,
> +				       arg->b.ctx, arg->engine,
> +				       MI_ARB_CHECK);
> +	if (IS_ERR(rq[1])) {
> +		err = PTR_ERR(rq[1]);
> +		goto out;
> +	}
> +
> +	i915_request_get(rq[1]);
> +	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
> +	i915_request_add(rq[1]);
> +	if (err)
> +		goto out;
> +
> +	i915_gem_context_set_banned(arg->b.ctx);
> +	err = intel_engine_pulse(arg->engine);
> +	if (err)
> +		goto out;
> +
> +	igt_spinner_end(&arg->a.spin);
> +	if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	if (rq[0]->fence.error != 0) {
> +		pr_err("Normal inflight0 request did not complete\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +	if (rq[1]->fence.error != -EIO) {
> +		pr_err("Cancelled inflight1 request did not report -EIO\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +out:
> +	i915_request_put(rq[1]);
> +	i915_request_put(rq[0]);
> +	if (igt_live_test_end(&t))
> +		err = -EIO;
> +	return err;
> +}
> +
> +static int __cancel_queued(struct live_preempt_cancel *arg)
> +{
> +	struct i915_request *rq[3] = {};
> +	struct igt_live_test t;
> +	int err;
> +
> +	/* Full ELSP and one in the wings */
> +	GEM_TRACE("%s(%s)\n", __func__, arg->engine->name);
> +
> +	if (igt_live_test_begin(&t, arg->engine->i915,
> +				__func__, arg->engine->name))
> +		return -EIO;
> +
> +	clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags);
> +	rq[0] = spinner_create_request(&arg->a.spin,
> +				       arg->a.ctx, arg->engine,
> +				       MI_ARB_CHECK);
> +	if (IS_ERR(rq[0]))
> +		return PTR_ERR(rq[0]);
> +
> +	i915_request_get(rq[0]);
> +	i915_request_add(rq[0]);
> +	if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags);
> +	rq[1] = igt_request_alloc(arg->b.ctx, arg->engine);
> +	if (IS_ERR(rq[1])) {
> +		err = PTR_ERR(rq[1]);
> +		goto out;
> +	}
> +
> +	i915_request_get(rq[1]);
> +	err = i915_request_await_dma_fence(rq[1], &rq[0]->fence);
> +	i915_request_add(rq[1]);
> +	if (err)
> +		goto out;
> +
> +	rq[2] = spinner_create_request(&arg->b.spin,
> +				       arg->a.ctx, arg->engine,
> +				       MI_ARB_CHECK);
> +	if (IS_ERR(rq[2])) {
> +		err = PTR_ERR(rq[2]);
> +		goto out;
> +	}
> +
> +	i915_request_get(rq[2]);
> +	err = i915_request_await_dma_fence(rq[2], &rq[1]->fence);
> +	i915_request_add(rq[2]);
> +	if (err)
> +		goto out;
> +
> +	i915_gem_context_set_banned(arg->a.ctx);
> +	err = intel_engine_pulse(arg->engine);
> +	if (err)
> +		goto out;
> +
> +	if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
> +		err = -EIO;
> +		goto out;
> +	}
> +
> +	if (rq[0]->fence.error != -EIO) {
> +		pr_err("Cancelled inflight0 request did not report -EIO\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +	if (rq[1]->fence.error != 0) {
> +		pr_err("Normal inflight1 request did not complete\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +	if (rq[2]->fence.error != -EIO) {
> +		pr_err("Cancelled queued request did not report -EIO\n");
> +		err = -EINVAL;
> +		goto out;
> +	}
> +
> +out:
> +	i915_request_put(rq[2]);
> +	i915_request_put(rq[1]);
> +	i915_request_put(rq[0]);
> +	if (igt_live_test_end(&t))
> +		err = -EIO;
> +	return err;
> +}
> +
> +static int live_preempt_cancel(void *arg)
> +{
> +	struct drm_i915_private *i915 = arg;
> +	struct live_preempt_cancel data;
> +	enum intel_engine_id id;
> +	int err = -ENOMEM;
> +
> +	/*
> +	 * To cancel an inflight context, we need to first remove it from the
> +	 * GPU. That sounds like preemption! Plus a little bit of bookkeeping.
> +	 */
> +
> +	if (!HAS_LOGICAL_RING_PREEMPTION(i915))
> +		return 0;
> +
> +	if (preempt_client_init(i915, &data.a))
> +		return -ENOMEM;
> +	if (preempt_client_init(i915, &data.b))
> +		goto err_client_a;
> +
> +	for_each_engine(data.engine, i915, id) {
> +		if (!intel_engine_has_preemption(data.engine))
> +			continue;
> +
> +		err = __cancel_active0(&data);
> +		if (err)
> +			goto err_wedged;
> +
> +		err = __cancel_active1(&data);
> +		if (err)
> +			goto err_wedged;
> +
> +		err = __cancel_queued(&data);
> +		if (err)
> +			goto err_wedged;
> +	}
> +
> +	err = 0;
> +err_client_b:
> +	preempt_client_fini(&data.b);
> +err_client_a:
> +	preempt_client_fini(&data.a);
> +	return err;
> +
> +err_wedged:
> +	GEM_TRACE_DUMP();
> +	igt_spinner_end(&data.b.spin);
> +	igt_spinner_end(&data.a.spin);
> +	intel_gt_set_wedged(&i915->gt);
> +	goto err_client_b;
> +}
> +
>   static int live_suppress_self_preempt(void *arg)
>   {
>   	struct drm_i915_private *i915 = arg;
> @@ -2452,6 +2724,7 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
>   		SUBTEST(live_preempt),
>   		SUBTEST(live_late_preempt),
>   		SUBTEST(live_nopreempt),
> +		SUBTEST(live_preempt_cancel),
>   		SUBTEST(live_suppress_self_preempt),
>   		SUBTEST(live_suppress_wait_preempt),
>   		SUBTEST(live_chain_preempt),
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 08/10] drm/i915: Cancel non-persistent contexts on close
  2019-10-10  7:14 ` [PATCH 08/10] drm/i915: Cancel non-persistent contexts on close Chris Wilson
@ 2019-10-11 13:55   ` Tvrtko Ursulin
  2019-10-11 14:22     ` Chris Wilson
  0 siblings, 1 reply; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11 13:55 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/10/2019 08:14, Chris Wilson wrote:
> Normally, we rely on our hangcheck to prevent persistent batches from
> hogging the GPU. However, if the user disables hangcheck, this mechanism
> breaks down. Despite our insistence that this is unsafe, the users are
> equally insistent that they want to use endless batches and will disable
> the hangcheck mechanism. We are looking at perhaps replacing hangcheck
> with a softer mechanism, that sends a pulse down the engine to check if
> it is well. We can use the same preemptive pulse to flush an active
> persistent context off the GPU upon context close, preventing resources
> being lost and unkillable requests remaining on the GPU after process
> termination. To avoid changing the ABI and accidentally breaking
> existing userspace, we make the persistence of a context explicit and
> enable it by default (matching current ABI). Userspace can opt out of
> persistent mode (forcing requests to be cancelled when the context is
> closed by process termination or explicitly) by a context parameter. To
> facilitate existing use-cases of disabling hangcheck, if the modparam is
> disabled (i915.enable_hangcheck=0), we disable persistence mode by
> default.  (Note, one of the outcomes for supporting endless mode will be
> the removal of hangchecking, at which point opting into persistent mode
> will be mandatory, or maybe the default perhaps controlled by cgroups.)
> 
> v2: Check for hangchecking at context termination, so that we are not
> left with undying contexts from a crafty user.
> 
> Testcase: igt/gem_ctx_persistence
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Michał Winiarski <michal.winiarski@intel.com>
> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
> ---
>   drivers/gpu/drm/i915/gem/i915_gem_context.c   | 132 ++++++++++++++++++
>   drivers/gpu/drm/i915/gem/i915_gem_context.h   |  15 ++
>   .../gpu/drm/i915/gem/i915_gem_context_types.h |   1 +
>   .../gpu/drm/i915/gem/selftests/mock_context.c |   2 +
>   include/uapi/drm/i915_drm.h                   |  15 ++
>   5 files changed, 165 insertions(+)
> 
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> index 5d8221c7ba83..46e5b3b53288 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> @@ -70,6 +70,7 @@
>   #include <drm/i915_drm.h>
>   
>   #include "gt/intel_lrc_reg.h"
> +#include "gt/intel_engine_heartbeat.h"
>   #include "gt/intel_engine_user.h"
>   
>   #include "i915_gem_context.h"
> @@ -269,6 +270,78 @@ void i915_gem_context_release(struct kref *ref)
>   		schedule_work(&gc->free_work);
>   }
>   
> +static inline struct i915_gem_engines *
> +__context_engines_static(struct i915_gem_context *ctx)
> +{
> +	return rcu_dereference_protected(ctx->engines, true);
> +}
> +
> +static void kill_context(struct i915_gem_context *ctx)
> +{
> +	intel_engine_mask_t tmp, active, reset;
> +	struct intel_gt *gt = &ctx->i915->gt;
> +	struct i915_gem_engines_iter it;
> +	struct intel_engine_cs *engine;
> +	struct intel_context *ce;
> +
> +	/*
> +	 * If we are already banned, it was due to a guilty request causing
> +	 * a reset and the entire context being evicted from the GPU.
> +	 */
> +	if (i915_gem_context_is_banned(ctx))
> +		return;
> +
> +	i915_gem_context_set_banned(ctx);
> +
> +	/*
> +	 * Map the user's engine back to the actual engines; one virtual
> +	 * engine will be mapped to multiple engines, and using ctx->engine[]
> +	 * the same engine may have multiple instances in the user's map.
> +	 * However, we only care about pending requests, so only include
> +	 * engines on which there are incomplete requests.
> +	 */
> +	active = 0;
> +	for_each_gem_engine(ce, __context_engines_static(ctx), it) {
> +		struct dma_fence *fence;
> +
> +		if (!ce->timeline)
> +			continue;
> +
> +		fence = i915_active_fence_get(&ce->timeline->last_request);
> +		if (!fence)
> +			continue;
> +
> +		engine = to_request(fence)->engine;
> +		if (HAS_EXECLISTS(gt->i915))
> +			engine = intel_context_inflight(ce);

Okay preemption implies execlists, was confused for a moment.

When can engine be NULL here?

> +		if (engine)
> +			active |= engine->mask;
> +
> +		dma_fence_put(fence);
> +	}
> +
> +	/*
> +	 * Send a "high priority pulse" down the engine to cause the
> +	 * current request to be momentarily preempted. (If it fails to
> +	 * be preempted, it will be reset). As we have marked our context
> +	 * as banned, any incomplete request, including any running, will
> +	 * be skipped following the preemption.
> +	 */
> +	reset = 0;
> +	for_each_engine_masked(engine, gt->i915, active, tmp)
> +		if (intel_engine_pulse(engine))
> +			reset |= engine->mask;

What if we were able to send a pulse, but the hog cannot be preempted 
and hangcheck is obviously disabled - who will do the reset?

> +
> +	/*
> +	 * If we are unable to send a preemptive pulse to bump
> +	 * the context from the GPU, we have to resort to a full
> +	 * reset. We hope the collateral damage is worth it.
> +	 */
> +	if (reset)
> +		intel_gt_handle_error(gt, reset, 0,
> +				      "context closure in %s", ctx->name);
> +}
> +
>   static void context_close(struct i915_gem_context *ctx)
>   {
>   	struct i915_address_space *vm;
> @@ -291,9 +364,47 @@ static void context_close(struct i915_gem_context *ctx)
>   	lut_close(ctx);
>   
>   	mutex_unlock(&ctx->mutex);
> +
> +	/*
> +	 * If the user has disabled hangchecking, we can not be sure that
> +	 * the batches will ever complete after the context is closed,
> +	 * keep the context and all resources pinned forever. So in this

s/keep/keeping/, I think.

> +	 * case we opt to forcibly kill off all remaining requests on
> +	 * context close.
> +	 */
> +	if (!i915_gem_context_is_persistent(ctx) ||
> +	    !i915_modparams.enable_hangcheck)
> +		kill_context(ctx);
> +
>   	i915_gem_context_put(ctx);
>   }
>   
> +static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
> +{
> +	if (i915_gem_context_is_persistent(ctx) == state)
> +		return 0;
> +
> +	if (state) {
> +		/*
> +		 * Only contexts that are short-lived [that will expire or be
> +		 * reset] are allowed to survive past termination. We require
> +		 * hangcheck to ensure that the persistent requests are healthy.
> +		 */
> +		if (!i915_modparams.enable_hangcheck)
> +			return -EINVAL;
> +
> +		i915_gem_context_set_persistence(ctx);
> +	} else {
> +		/* To cancel a context we use "preempt-to-idle" */
> +		if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
> +			return -ENODEV;
> +
> +		i915_gem_context_clear_persistence(ctx);
> +	}
> +
> +	return 0;
> +}
> +
>   static struct i915_gem_context *
>   __create_context(struct drm_i915_private *i915)
>   {
> @@ -328,6 +439,7 @@ __create_context(struct drm_i915_private *i915)
>   
>   	i915_gem_context_set_bannable(ctx);
>   	i915_gem_context_set_recoverable(ctx);
> +	__context_set_persistence(ctx, true /* cgroup hook? */);
>   
>   	for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++)
>   		ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES;
> @@ -484,6 +596,7 @@ i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio)
>   		return ctx;
>   
>   	i915_gem_context_clear_bannable(ctx);
> +	i915_gem_context_set_persistence(ctx);
>   	ctx->sched.priority = I915_USER_PRIORITY(prio);
>   
>   	GEM_BUG_ON(!i915_gem_context_is_kernel(ctx));
> @@ -1594,6 +1707,16 @@ get_engines(struct i915_gem_context *ctx,
>   	return err;
>   }
>   
> +static int
> +set_persistence(struct i915_gem_context *ctx,
> +		const struct drm_i915_gem_context_param *args)
> +{
> +	if (args->size)
> +		return -EINVAL;
> +
> +	return __context_set_persistence(ctx, args->value);
> +}
> +
>   static int ctx_setparam(struct drm_i915_file_private *fpriv,
>   			struct i915_gem_context *ctx,
>   			struct drm_i915_gem_context_param *args)
> @@ -1671,6 +1794,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
>   		ret = set_engines(ctx, args);
>   		break;
>   
> +	case I915_CONTEXT_PARAM_PERSISTENCE:
> +		ret = set_persistence(ctx, args);
> +		break;
> +
>   	case I915_CONTEXT_PARAM_BAN_PERIOD:
>   	default:
>   		ret = -EINVAL;
> @@ -2123,6 +2250,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
>   		ret = get_engines(ctx, args);
>   		break;
>   
> +	case I915_CONTEXT_PARAM_PERSISTENCE:
> +		args->size = 0;
> +		args->value = i915_gem_context_is_persistent(ctx);
> +		break;
> +
>   	case I915_CONTEXT_PARAM_BAN_PERIOD:
>   	default:
>   		ret = -EINVAL;
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
> index 9234586830d1..2eec035382a2 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
> @@ -76,6 +76,21 @@ static inline void i915_gem_context_clear_recoverable(struct i915_gem_context *c
>   	clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags);
>   }
>   
> +static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx)
> +{
> +	return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
> +}
> +
> +static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx)
> +{
> +	set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
> +}
> +
> +static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx)
> +{
> +	clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags);
> +}
> +
>   static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx)
>   {
>   	return test_bit(CONTEXT_BANNED, &ctx->flags);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> index ab8e1367dfc8..a3ecd19f2303 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
> @@ -137,6 +137,7 @@ struct i915_gem_context {
>   #define UCONTEXT_NO_ERROR_CAPTURE	1
>   #define UCONTEXT_BANNABLE		2
>   #define UCONTEXT_RECOVERABLE		3
> +#define UCONTEXT_PERSISTENCE		4
>   
>   	/**
>   	 * @flags: small set of booleans
> diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> index 74ddd682c9cd..29b8984f0e47 100644
> --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
> @@ -22,6 +22,8 @@ mock_context(struct drm_i915_private *i915,
>   	INIT_LIST_HEAD(&ctx->link);
>   	ctx->i915 = i915;
>   
> +	i915_gem_context_set_persistence(ctx);
> +
>   	mutex_init(&ctx->engines_mutex);
>   	e = default_engines(ctx);
>   	if (IS_ERR(e))
> diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
> index 30c542144016..eb9e704d717a 100644
> --- a/include/uapi/drm/i915_drm.h
> +++ b/include/uapi/drm/i915_drm.h
> @@ -1565,6 +1565,21 @@ struct drm_i915_gem_context_param {
>    *   i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
>    */
>   #define I915_CONTEXT_PARAM_ENGINES	0xa
> +
> +/*
> + * I915_CONTEXT_PARAM_PERSISTENCE:
> + *
> + * Allow the context and active rendering to survive the process until
> + * completion. Persistence allows fire-and-forget clients to queue up a
> + * bunch of work, hand the output over to a display server and then quit.
> + * If the context is not marked as persistent, upon closing (either via
> + * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
> + * or process termination), the context and any outstanding requests will be
> + * cancelled (and exported fences for cancelled requests marked as -EIO).
> + *
> + * By default, new contexts allow persistence.
> + */
> +#define I915_CONTEXT_PARAM_PERSISTENCE	0xb
>   /* Must be kept compact -- no holes and well documented */
>   
>   	__u64 value;
> 

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH v2] drm/i915/execlists: Cancel banned contexts on schedule-out
  2019-10-11 13:10     ` Tvrtko Ursulin
@ 2019-10-11 14:10       ` Chris Wilson
  0 siblings, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-11 14:10 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-11 14:10:21)
> 
> On 11/10/2019 12:16, Chris Wilson wrote:
> > On schedule-out (CS completion) of a banned context, scrub the context
> > image so that we do not replay the active payload. The intent is that we
> > skip banned payloads on request submission so that the timeline
> > advancement continues on in the background. However, if we are returning
> > to a preempted request, i915_request_skip() is ineffective and instead we
> > need to patch up the context image so that it continues from the start
> > of the next request.
> > 
> > v2: Fixup cancellation so that we only scrub the payload of the active
> > request and do not short-circuit the breadcrumbs (which might cause
> > other contexts to execute out of order).
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gt/intel_lrc.c    |  91 ++++++---
> >   drivers/gpu/drm/i915/gt/selftest_lrc.c | 273 +++++++++++++++++++++++++
> >   2 files changed, 341 insertions(+), 23 deletions(-)
> > 
> > diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > index 09fc5ecfdd09..809a5dd97c14 100644
> > --- a/drivers/gpu/drm/i915/gt/intel_lrc.c
> > +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
> > @@ -234,6 +234,9 @@ static void execlists_init_reg_state(u32 *reg_state,
> >                                    const struct intel_engine_cs *engine,
> >                                    const struct intel_ring *ring,
> >                                    bool close);
> > +static void
> > +__execlists_update_reg_state(const struct intel_context *ce,
> > +                          const struct intel_engine_cs *engine);
> >   
> >   static void __context_pin_acquire(struct intel_context *ce)
> >   {
> > @@ -256,6 +259,29 @@ static void mark_eio(struct i915_request *rq)
> >       i915_request_mark_complete(rq);
> >   }
> >   
> > +static struct i915_request *active_request(struct i915_request *rq)
> > +{
> > +     const struct intel_context * const ce = rq->hw_context;
> > +     struct i915_request *active = NULL;
> > +     struct list_head *list;
> > +
> > +     if (!i915_request_is_active(rq)) /* unwound, but incomplete! */
> > +             return rq;
> > +
> > +     list = &i915_request_active_timeline(rq)->requests;
> > +     list_for_each_entry_from_reverse(rq, list, link) {
> > +             if (i915_request_completed(rq))
> > +                     break;
> > +
> > +             if (rq->hw_context != ce)
> > +                     break;
> 
> Would it be of any value here to also check the initial breadcrumb matches?

Not currently. I don't think it makes any difference whether or not we are
inside the payload on the cancel_active() path as we know we have an active
context. More fun and games for the reset path as we need to minimise
collateral damage.

> > +static void cancel_active(struct i915_request *rq,
> > +                       struct intel_engine_cs *engine)
> > +{
> > +     struct intel_context * const ce = rq->hw_context;
> > +     u32 *regs = ce->lrc_reg_state;
> > +
> > +     /*
> > +      * The executing context has been cancelled. Fixup the context so that
> > +      * it continues on from the breadcrumb after the batch and will be
> > +      * marked as incomplete [-EIO] upon signaling. We preserve the
> 
> Where does the -EIO marking happen now?

On the next __i915_request_submit()

> > +      * breadcrumbs and semaphores of the subsequent requests so that
> > +      * inter-timeline dependencies remain correctly ordered.
> > +      */
> > +     GEM_TRACE("%s(%s): { rq=%llx:%lld }\n",
> > +               __func__, engine->name, rq->fence.context, rq->fence.seqno);
> > +
> > +     __context_pin_acquire(ce);
> > +
> > +     /* On resubmission of the active request, its payload will be scrubbed */
> > +     rq = active_request(rq);
> > +     if (rq)
> > +             ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
> > +     else
> > +             ce->ring->head = ce->ring->tail;
> 
> I don't quite understand yet.
> 
> If a context was banned I'd expect all requests on the tl->requests to 
> be zapped and we only move to execute the last breadcrumb, no?

We do zap them all, on __i915_request_submit(). What we are preserving
is the dependency chains as we don't want to emit the final breadcrumb
before its dependencies have been signaled. (Otherwise our optimisation
of only waiting for the end of the chain will be broken, as that context
will begin before its prerequisites have run.)
 
> So if you find the active_request and you set ring head to 
> active_rq->head how does that skip the payload?

We do memset(rq->infix, 0, rq->postfix-rq->infix) in
__i915_request_submit() if (context_is_banned)

> Furthermore, if I try to sketch the rq->requests timeline like this:
> 
>    R0 r1 r2 r[elsp] r4 r5
> 
> 'R' = completed; 'r' = incomplete
> 
> On schedule_out(r[elsp]) I'd expect you want to find r5 and set ring 
> head to the final breadcrumb of it. And mark r1-r5 as -EIO. Am I completely 
> on the wrong track?
> 
> (Bear with me with r4 and r5, assuming someone has set the context as 
> single submission for future proofing the code.)

If we only had to be concerned about this timeline, sure, we could just
skip to the end. It's timeline C that was waiting on timeline A via
timeline B, we have to be concerned about when cancelling timeline B.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 08/10] drm/i915: Cancel non-persistent contexts on close
  2019-10-11 13:55   ` Tvrtko Ursulin
@ 2019-10-11 14:22     ` Chris Wilson
  2019-10-11 15:41       ` Chris Wilson
  0 siblings, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-11 14:22 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-11 14:55:00)
> 
> On 10/10/2019 08:14, Chris Wilson wrote:
> > Normally, we rely on our hangcheck to prevent persistent batches from
> > hogging the GPU. However, if the user disables hangcheck, this mechanism
> > breaks down. Despite our insistence that this is unsafe, the users are
> > equally insistent that they want to use endless batches and will disable
> > the hangcheck mechanism. We are looking at perhaps replacing hangcheck
> > with a softer mechanism, that sends a pulse down the engine to check if
> > it is well. We can use the same preemptive pulse to flush an active
> > persistent context off the GPU upon context close, preventing resources
> > being lost and unkillable requests remaining on the GPU after process
> > termination. To avoid changing the ABI and accidentally breaking
> > existing userspace, we make the persistence of a context explicit and
> > enable it by default (matching current ABI). Userspace can opt out of
> > persistent mode (forcing requests to be cancelled when the context is
> > closed by process termination or explicitly) by a context parameter. To
> > facilitate existing use-cases of disabling hangcheck, if the modparam is
> > disabled (i915.enable_hangcheck=0), we disable persistence mode by
> > default.  (Note, one of the outcomes for supporting endless mode will be
> > the removal of hangchecking, at which point opting into persistent mode
> > will be mandatory, or maybe the default perhaps controlled by cgroups.)
> > 
> > v2: Check for hangchecking at context termination, so that we are not
> > left with undying contexts from a crafty user.
> > 
> > Testcase: igt/gem_ctx_persistence
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> > Cc: Michał Winiarski <michal.winiarski@intel.com>
> > Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> > Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
> > ---
> >   drivers/gpu/drm/i915/gem/i915_gem_context.c   | 132 ++++++++++++++++++
> >   drivers/gpu/drm/i915/gem/i915_gem_context.h   |  15 ++
> >   .../gpu/drm/i915/gem/i915_gem_context_types.h |   1 +
> >   .../gpu/drm/i915/gem/selftests/mock_context.c |   2 +
> >   include/uapi/drm/i915_drm.h                   |  15 ++
> >   5 files changed, 165 insertions(+)
> > 
> > diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > index 5d8221c7ba83..46e5b3b53288 100644
> > --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
> > @@ -70,6 +70,7 @@
> >   #include <drm/i915_drm.h>
> >   
> >   #include "gt/intel_lrc_reg.h"
> > +#include "gt/intel_engine_heartbeat.h"
> >   #include "gt/intel_engine_user.h"
> >   
> >   #include "i915_gem_context.h"
> > @@ -269,6 +270,78 @@ void i915_gem_context_release(struct kref *ref)
> >               schedule_work(&gc->free_work);
> >   }
> >   
> > +static inline struct i915_gem_engines *
> > +__context_engines_static(struct i915_gem_context *ctx)
> > +{
> > +     return rcu_dereference_protected(ctx->engines, true);
> > +}
> > +
> > +static void kill_context(struct i915_gem_context *ctx)
> > +{
> > +     intel_engine_mask_t tmp, active, reset;
> > +     struct intel_gt *gt = &ctx->i915->gt;
> > +     struct i915_gem_engines_iter it;
> > +     struct intel_engine_cs *engine;
> > +     struct intel_context *ce;
> > +
> > +     /*
> > +      * If we are already banned, it was due to a guilty request causing
> > +      * a reset and the entire context being evicted from the GPU.
> > +      */
> > +     if (i915_gem_context_is_banned(ctx))
> > +             return;
> > +
> > +     i915_gem_context_set_banned(ctx);
> > +
> > +     /*
> > +      * Map the user's engine back to the actual engines; one virtual
> > +      * engine will be mapped to multiple engines, and using ctx->engine[]
> > +      * the same engine may have multiple instances in the user's map.
> > +      * However, we only care about pending requests, so only include
> > +      * engines on which there are incomplete requests.
> > +      */
> > +     active = 0;
> > +     for_each_gem_engine(ce, __context_engines_static(ctx), it) {
> > +             struct dma_fence *fence;
> > +
> > +             if (!ce->timeline)
> > +                     continue;
> > +
> > +             fence = i915_active_fence_get(&ce->timeline->last_request);
> > +             if (!fence)
> > +                     continue;
> > +
> > +             engine = to_request(fence)->engine;
> > +             if (HAS_EXECLISTS(gt->i915))
> > +                     engine = intel_context_inflight(ce);
> 
> Okay preemption implies execlists, was confused for a moment.
> 
> When can engine be NULL here?

The engine is not paused, and an interrupt can cause a schedule-out as
we gather up the state.

> 
> > +             if (engine)
> > +                     active |= engine->mask;
> > +
> > +             dma_fence_put(fence);
> > +     }
> > +
> > +     /*
> > +      * Send a "high priority pulse" down the engine to cause the
> > +      * current request to be momentarily preempted. (If it fails to
> > +      * be preempted, it will be reset). As we have marked our context
> > +      * as banned, any incomplete request, including any running, will
> > +      * be skipped following the preemption.
> > +      */
> > +     reset = 0;
> > +     for_each_engine_masked(engine, gt->i915, active, tmp)
> > +             if (intel_engine_pulse(engine))
> > +                     reset |= engine->mask;
> 
> What if we were able to send a pulse, but the hog cannot be preempted 
> and hangcheck is obviously disabled - who will do the reset?

Hmm, the idea is that forced-preemption causes the reset.
(See igt/gem_ctx_persistence/hostile)

However, if we give the sysadmin the means to disable force-preemption,
we just gave them another shovel to dig a hole with.

A last resort would be another timer here to ensure the context was
terminated.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 09/10] drm/i915: Replace hangcheck by heartbeats
  2019-10-10  7:14 ` [PATCH 09/10] drm/i915: Replace hangcheck by heartbeats Chris Wilson
@ 2019-10-11 14:24   ` Tvrtko Ursulin
  2019-10-11 15:06     ` Chris Wilson
  0 siblings, 1 reply; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11 14:24 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/10/2019 08:14, Chris Wilson wrote:
> Replace sampling the engine state every so often with a periodic
> heartbeat request to measure the health of an engine. This is coupled
> with the forced-preemption to allow long running requests to survive so
> long as they do not block other users.
> 
> The heartbeat interval can be adjusted per-engine using,
> 
> 	/sys/class/drm/card?/engine/*/heartbeat_interval_ms
> 
> v2: Couple in sysfs controls
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
> Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
> ---
>   drivers/gpu/drm/i915/Kconfig.profile          |  14 +
>   drivers/gpu/drm/i915/Makefile                 |   1 -
>   drivers/gpu/drm/i915/display/intel_display.c  |   2 +-
>   drivers/gpu/drm/i915/gem/i915_gem_object.h    |   1 -
>   drivers/gpu/drm/i915/gem/i915_gem_pm.c        |   2 -
>   drivers/gpu/drm/i915/gt/intel_engine.h        |  32 --
>   drivers/gpu/drm/i915/gt/intel_engine_cs.c     |  11 +-
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 115 ++++++
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |   5 +
>   drivers/gpu/drm/i915/gt/intel_engine_pm.c     |   5 +-
>   drivers/gpu/drm/i915/gt/intel_engine_sysfs.c  |  29 ++
>   drivers/gpu/drm/i915/gt/intel_engine_types.h  |  17 +-
>   drivers/gpu/drm/i915/gt/intel_gt.c            |   1 -
>   drivers/gpu/drm/i915/gt/intel_gt.h            |   4 -
>   drivers/gpu/drm/i915/gt/intel_gt_pm.c         |   1 -
>   drivers/gpu/drm/i915/gt/intel_gt_types.h      |   9 -
>   drivers/gpu/drm/i915/gt/intel_hangcheck.c     | 361 ------------------
>   drivers/gpu/drm/i915/gt/intel_reset.c         |   3 +-
>   drivers/gpu/drm/i915/gt/selftest_hangcheck.c  |   4 -
>   drivers/gpu/drm/i915/i915_debugfs.c           |  87 -----
>   drivers/gpu/drm/i915/i915_drv.c               |   3 -
>   drivers/gpu/drm/i915/i915_drv.h               |   1 -
>   drivers/gpu/drm/i915/i915_gpu_error.c         |  33 +-
>   drivers/gpu/drm/i915/i915_gpu_error.h         |   2 -
>   drivers/gpu/drm/i915/i915_priolist_types.h    |   6 +
>   25 files changed, 194 insertions(+), 555 deletions(-)
>   delete mode 100644 drivers/gpu/drm/i915/gt/intel_hangcheck.c
> 
> diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
> index 8fceea85937b..d3950aabb497 100644
> --- a/drivers/gpu/drm/i915/Kconfig.profile
> +++ b/drivers/gpu/drm/i915/Kconfig.profile
> @@ -40,3 +40,17 @@ config DRM_I915_PREEMPT_TIMEOUT
>   	  /sys/class/drm/card?/engine/*/preempt_timeout_ms
>   
>   	  May be 0 to disable the timeout.
> +
> +config DRM_I915_HEARTBEAT_INTERVAL
> +	int "Interval between heartbeat pulses (ms)"
> +	default 2500 # milliseconds
> +	help
> +	  While active the driver uses a periodic request, a heartbeat, to
> +	  check the wellness of the GPU and to regularly flush state changes
> +	  (idle barriers).

Should this be reworded somehow to be more end-user friendly? My
suggestion, which may need correcting for bad English:

The driver sends a periodic heartbeat down all active GT engines to 
check the health of the GPU and undertake regular house-keeping of 
internal driver state.

Main points from the user perspective: "request" - whaat? "idle 
barriers" - ditto. "Wellness" - a bit unusual in this context, no?

> +
> +	  This is adjustable via
> +	  /sys/class/drm/card?/engine/*/heartbeat_interval_ms
> +
> +	  May be 0 to disable heartbeats and therefore disable automatic GPU
> +	  hang detection.
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index cfab7c8585b3..59d356cc406c 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -88,7 +88,6 @@ gt-y += \
>   	gt/intel_gt_pm.o \
>   	gt/intel_gt_pm_irq.o \
>   	gt/intel_gt_requests.o \
> -	gt/intel_hangcheck.o \
>   	gt/intel_lrc.o \
>   	gt/intel_rc6.o \
>   	gt/intel_renderstate.o \
> diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
> index 1a533ccdb54f..5e5de3081f48 100644
> --- a/drivers/gpu/drm/i915/display/intel_display.c
> +++ b/drivers/gpu/drm/i915/display/intel_display.c
> @@ -14338,7 +14338,7 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
>   static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
>   {
>   	struct i915_sched_attr attr = {
> -		.priority = I915_PRIORITY_DISPLAY,
> +		.priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY),
>   	};
>   
>   	i915_gem_object_wait_priority(obj, 0, &attr);
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> index c5e14c9c805c..5bd51e397371 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
> @@ -460,6 +460,5 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj,
>   int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
>   				  unsigned int flags,
>   				  const struct i915_sched_attr *attr);
> -#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX)
>   
>   #endif
> diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> index 7987b54fb1f5..0e97520cb1bb 100644
> --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
> @@ -100,8 +100,6 @@ void i915_gem_suspend(struct drm_i915_private *i915)
>   	intel_gt_suspend(&i915->gt);
>   	intel_uc_suspend(&i915->gt.uc);
>   
> -	cancel_delayed_work_sync(&i915->gt.hangcheck.work);
> -
>   	i915_gem_drain_freed_objects(i915);
>   }
>   
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
> index 93ea367fe624..8ad57eace351 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine.h
> @@ -89,38 +89,6 @@ struct drm_printer;
>   /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to
>    * do the writes, and that must have qw aligned offsets, simply pretend it's 8b.
>    */
> -enum intel_engine_hangcheck_action {
> -	ENGINE_IDLE = 0,
> -	ENGINE_WAIT,
> -	ENGINE_ACTIVE_SEQNO,
> -	ENGINE_ACTIVE_HEAD,
> -	ENGINE_ACTIVE_SUBUNITS,
> -	ENGINE_WAIT_KICK,
> -	ENGINE_DEAD,
> -};
> -
> -static inline const char *
> -hangcheck_action_to_str(const enum intel_engine_hangcheck_action a)
> -{
> -	switch (a) {
> -	case ENGINE_IDLE:
> -		return "idle";
> -	case ENGINE_WAIT:
> -		return "wait";
> -	case ENGINE_ACTIVE_SEQNO:
> -		return "active seqno";
> -	case ENGINE_ACTIVE_HEAD:
> -		return "active head";
> -	case ENGINE_ACTIVE_SUBUNITS:
> -		return "active subunits";
> -	case ENGINE_WAIT_KICK:
> -		return "wait kick";
> -	case ENGINE_DEAD:
> -		return "dead";
> -	}
> -
> -	return "unknown";
> -}
>   
>   static inline unsigned int
>   execlists_num_ports(const struct intel_engine_execlists * const execlists)
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> index 1eb51147839a..d829ad340ca0 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
> @@ -305,6 +305,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
>   	__sprint_engine_name(engine);
>   
>   	engine->props.preempt_timeout = CONFIG_DRM_I915_PREEMPT_TIMEOUT;
> +	engine->props.heartbeat_interval = CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
>   
>   	/*
>   	 * To be overridden by the backend on setup. However to facilitate
> @@ -599,7 +600,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
>   	intel_engine_init_active(engine, ENGINE_PHYSICAL);
>   	intel_engine_init_breadcrumbs(engine);
>   	intel_engine_init_execlists(engine);
> -	intel_engine_init_hangcheck(engine);
>   	intel_engine_init_cmd_parser(engine);
>   	intel_engine_init__pm(engine);
>   
> @@ -1432,8 +1432,13 @@ void intel_engine_dump(struct intel_engine_cs *engine,
>   		drm_printf(m, "*** WEDGED ***\n");
>   
>   	drm_printf(m, "\tAwake? %d\n", atomic_read(&engine->wakeref.count));
> -	drm_printf(m, "\tHangcheck: %d ms ago\n",
> -		   jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp));
> +
> +	rcu_read_lock();
> +	rq = READ_ONCE(engine->heartbeat.systole);
> +	if (rq)
> +		drm_printf(m, "\tHeartbeat: %d ms ago\n",
> +			   jiffies_to_msecs(jiffies - rq->emitted_jiffies));
> +	rcu_read_unlock();
>   	drm_printf(m, "\tReset count: %d (global %d)\n",
>   		   i915_reset_engine_count(error, engine),
>   		   i915_reset_count(error));
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> index 2fc413f9d506..f68acf9118f3 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> @@ -11,6 +11,27 @@
>   #include "intel_engine_pm.h"
>   #include "intel_engine.h"
>   #include "intel_gt.h"
> +#include "intel_reset.h"
> +
> +/*
> + * While the engine is active, we send a periodic pulse along the engine
> + * to check on its health and to flush any idle-barriers. If that request
> + * is stuck, and we fail to preempt it, we declare the engine hung and
> + * issue a reset -- in the hope that restores progress.
> + */
> +
> +static void next_heartbeat(struct intel_engine_cs *engine)
> +{
> +	long delay;
> +
> +	delay = READ_ONCE(engine->props.heartbeat_interval);
> +	if (!delay)
> +		return;
> +
> +	delay = msecs_to_jiffies_timeout(delay);
> +	schedule_delayed_work(&engine->heartbeat.work,
> +			      round_jiffies_up_relative(delay));
> +}
>   
>   static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
>   {
> @@ -18,6 +39,100 @@ static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq)
>   	i915_request_add_active_barriers(rq);
>   }
>   
> +static void heartbeat(struct work_struct *wrk)
> +{
> +	struct i915_sched_attr attr = {
> +		.priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),

You were saying it's better to start from zero, right?

> +	};
> +	struct intel_engine_cs *engine =
> +		container_of(wrk, typeof(*engine), heartbeat.work.work);
> +	struct intel_context *ce = engine->kernel_context;
> +	struct i915_request *rq;
> +
> +	if (!intel_engine_pm_get_if_awake(engine))
> +		return;
> +
> +	rq = engine->heartbeat.systole;
> +	if (rq && i915_request_completed(rq)) {
> +		i915_request_put(rq);
> +		engine->heartbeat.systole = NULL;
> +	}
> +
> +	if (intel_gt_is_wedged(engine->gt))
> +		goto out;
> +
> +	if (engine->heartbeat.systole) {
> +		if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
> +			struct drm_printer p = drm_debug_printer(__func__);
> +
> +			intel_engine_dump(engine, &p,
> +					  "%s heartbeat not ticking\n",
> +					  engine->name);

Perhaps this would be better emitted only once we have reached the
higher-priority attempt. Okay, it's under DEBUG_GEM, but still, I'm not
sure there is value in being so panicky if for any reason preemption
does not work. Heartbeat does not depend on preemption as far as I
could tell, right?

> +		}
> +
> +		if (engine->schedule &&
> +		    rq->sched.attr.priority < I915_PRIORITY_BARRIER) {
> +			attr.priority =
> +				I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT);
> +			if (rq->sched.attr.priority >= attr.priority)
> +				attr.priority = I915_PRIORITY_BARRIER;
> +
> +			local_bh_disable();
> +			engine->schedule(rq, &attr);
> +			local_bh_enable();
> +		} else {
> +			intel_gt_handle_error(engine->gt, engine->mask,
> +					      I915_ERROR_CAPTURE,
> +					      "stopped heartbeat on %s",
> +					      engine->name);
> +		}
> +		goto out;
> +	}
> +
> +	if (engine->wakeref_serial == engine->serial)
> +		goto out;
> +
> +	mutex_lock(&ce->timeline->mutex);
> +
> +	intel_context_enter(ce);
> +	rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
> +	intel_context_exit(ce);
> +	if (IS_ERR(rq))
> +		goto unlock;
> +
> +	idle_pulse(engine, rq);
> +	if (i915_modparams.enable_hangcheck)
> +		engine->heartbeat.systole = i915_request_get(rq);
> +
> +	__i915_request_commit(rq);
> +	__i915_request_queue(rq, &attr);
> +
> +unlock:
> +	mutex_unlock(&ce->timeline->mutex);
> +out:
> +	next_heartbeat(engine);
> +	intel_engine_pm_put(engine);
> +}
> +
> +void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine)
> +{
> +	if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL)
> +		return;
> +
> +	next_heartbeat(engine);
> +}
> +
> +void intel_engine_park_heartbeat(struct intel_engine_cs *engine)
> +{
> +	cancel_delayed_work(&engine->heartbeat.work);
> +	i915_request_put(fetch_and_zero(&engine->heartbeat.systole));
> +}
> +
> +void intel_engine_init_heartbeat(struct intel_engine_cs *engine)
> +{
> +	INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat);
> +}
> +
>   int intel_engine_pulse(struct intel_engine_cs *engine)
>   {
>   	struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> index b950451b5998..39391004554d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> @@ -9,6 +9,11 @@
>   
>   struct intel_engine_cs;
>   
> +void intel_engine_init_heartbeat(struct intel_engine_cs *engine);
> +
> +void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
> +void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
> +
>   int intel_engine_pulse(struct intel_engine_cs *engine);
>   
>   #endif /* INTEL_ENGINE_HEARTBEAT_H */
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> index 7d76611d9df1..6fbfa2162e54 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
> @@ -7,6 +7,7 @@
>   #include "i915_drv.h"
>   
>   #include "intel_engine.h"
> +#include "intel_engine_heartbeat.h"
>   #include "intel_engine_pm.h"
>   #include "intel_engine_pool.h"
>   #include "intel_gt.h"
> @@ -34,7 +35,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
>   	if (engine->unpark)
>   		engine->unpark(engine);
>   
> -	intel_engine_init_hangcheck(engine);
> +	intel_engine_unpark_heartbeat(engine);
>   	return 0;
>   }
>   
> @@ -158,6 +159,7 @@ static int __engine_park(struct intel_wakeref *wf)
>   
>   	call_idle_barriers(engine); /* cleanup after wedging */
>   
> +	intel_engine_park_heartbeat(engine);
>   	intel_engine_disarm_breadcrumbs(engine);
>   	intel_engine_pool_park(&engine->pool);
>   
> @@ -188,6 +190,7 @@ void intel_engine_init__pm(struct intel_engine_cs *engine)
>   	struct intel_runtime_pm *rpm = engine->uncore->rpm;
>   
>   	intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
> +	intel_engine_init_heartbeat(engine);
>   }
>   
>   #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
> index aac26097c916..8532f9cdc885 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_sysfs.c
> @@ -70,12 +70,38 @@ preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
>   	return count;
>   }
>   
> +static ssize_t
> +heartbeat_interval_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
> +{
> +	struct intel_engine_cs *engine = kobj_to_engine(kobj);
> +
> +	return sprintf(buf, "%lu\n", engine->props.heartbeat_interval);
> +}
> +
> +static ssize_t
> +heartbeat_interval_store(struct kobject *kobj, struct kobj_attribute *attr,
> +			 const char *buf, size_t count)
> +{
> +	struct intel_engine_cs *engine = kobj_to_engine(kobj);
> +	unsigned long delay;
> +	int err;
> +
> +	err = kstrtoul(buf, 0, &delay);
> +	if (err)
> +		return err;
> +
> +	engine->props.heartbeat_interval = delay;
> +	return count;
> +}
> +
>   static struct kobj_attribute name_attr = __ATTR(name, 0444, name_show, NULL);
>   static struct kobj_attribute class_attr = __ATTR(class, 0444, class_show, NULL);
>   static struct kobj_attribute inst_attr = __ATTR(instance, 0444, inst_show, NULL);
>   static struct kobj_attribute mmio_attr = __ATTR(mmio_base, 0444, mmio_show, NULL);
>   static struct kobj_attribute preempt_timeout_attr =
>   __ATTR(preempt_timeout_ms, 0600, preempt_timeout_show, preempt_timeout_store);
> +static struct kobj_attribute heartbeat_interval_attr =
> +__ATTR(heartbeat_interval_ms, 0600, heartbeat_interval_show, heartbeat_interval_store);
>   
>   static void kobj_engine_release(struct kobject *kobj)
>   {
> @@ -115,6 +141,9 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
>   		&class_attr.attr,
>   		&inst_attr.attr,
>   		&mmio_attr.attr,
> +#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
> +		&heartbeat_interval_attr.attr,
> +#endif

Presumably the compiler (or the linker) is happy with only this part
getting the #ifdef treatment? (The show/store functions above don't have it.)

>   		NULL
>   	};
>   
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> index 6af9b0096975..ad3be2fbd71a 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
> @@ -15,6 +15,7 @@
>   #include <linux/rbtree.h>
>   #include <linux/timer.h>
>   #include <linux/types.h>
> +#include <linux/workqueue.h>
>   
>   #include "i915_gem.h"
>   #include "i915_pmu.h"
> @@ -76,14 +77,6 @@ struct intel_instdone {
>   	u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
>   };
>   
> -struct intel_engine_hangcheck {
> -	u64 acthd;
> -	u32 last_ring;
> -	u32 last_head;
> -	unsigned long action_timestamp;
> -	struct intel_instdone instdone;
> -};
> -
>   struct intel_ring {
>   	struct kref ref;
>   	struct i915_vma *vma;
> @@ -330,6 +323,11 @@ struct intel_engine_cs {
>   
>   	intel_engine_mask_t saturated; /* submitting semaphores too late? */
>   
> +	struct {
> +		struct delayed_work work;
> +		struct i915_request *systole;
> +	} heartbeat;
> +
>   	unsigned long serial;
>   
>   	unsigned long wakeref_serial;
> @@ -480,8 +478,6 @@ struct intel_engine_cs {
>   	/* status_notifier: list of callbacks for context-switch changes */
>   	struct atomic_notifier_head context_status_notifier;
>   
> -	struct intel_engine_hangcheck hangcheck;
> -
>   #define I915_ENGINE_NEEDS_CMD_PARSER BIT(0)
>   #define I915_ENGINE_SUPPORTS_STATS   BIT(1)
>   #define I915_ENGINE_HAS_PREEMPTION   BIT(2)
> @@ -549,6 +545,7 @@ struct intel_engine_cs {
>   
>   	struct {
>   		unsigned long preempt_timeout;
> +		unsigned long heartbeat_interval;
>   	} props;
>   };
>   
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
> index b3619a2a5d0e..f3e1925987e1 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt.c
> @@ -22,7 +22,6 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915)
>   	INIT_LIST_HEAD(&gt->closed_vma);
>   	spin_lock_init(&gt->closed_lock);
>   
> -	intel_gt_init_hangcheck(gt);
>   	intel_gt_init_reset(gt);
>   	intel_gt_init_requests(gt);
>   	intel_gt_pm_init_early(gt);
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
> index e6ab0bff0efb..5b6effed3713 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gt.h
> @@ -46,8 +46,6 @@ void intel_gt_clear_error_registers(struct intel_gt *gt,
>   void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
>   void intel_gt_chipset_flush(struct intel_gt *gt);
>   
> -void intel_gt_init_hangcheck(struct intel_gt *gt);
> -
>   static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt,
>   					  enum intel_gt_scratch_field field)
>   {
> @@ -59,6 +57,4 @@ static inline bool intel_gt_is_wedged(struct intel_gt *gt)
>   	return __intel_reset_failed(&gt->reset);
>   }
>   
> -void intel_gt_queue_hangcheck(struct intel_gt *gt);
> -
>   #endif /* __INTEL_GT_H__ */
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> index 87e34e0b6427..85af0d16f869 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
> @@ -52,7 +52,6 @@ static int __gt_unpark(struct intel_wakeref *wf)
>   
>   	i915_pmu_gt_unparked(i915);
>   
> -	intel_gt_queue_hangcheck(gt);
>   	intel_gt_unpark_requests(gt);
>   
>   	pm_notify(gt, INTEL_GT_UNPARK);
> diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> index 802f516a3430..59f8ee0aa151 100644
> --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h
> +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h
> @@ -26,14 +26,6 @@ struct i915_ggtt;
>   struct intel_engine_cs;
>   struct intel_uncore;
>   
> -struct intel_hangcheck {
> -	/* For hangcheck timer */
> -#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
> -#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
> -
> -	struct delayed_work work;
> -};
> -
>   struct intel_gt {
>   	struct drm_i915_private *i915;
>   	struct intel_uncore *uncore;
> @@ -67,7 +59,6 @@ struct intel_gt {
>   	struct list_head closed_vma;
>   	spinlock_t closed_lock; /* guards the list of closed_vma */
>   
> -	struct intel_hangcheck hangcheck;
>   	struct intel_reset reset;
>   
>   	/**
> diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c
> deleted file mode 100644
> index c14dbeb3ccc3..000000000000
> --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c
> +++ /dev/null
> @@ -1,361 +0,0 @@
> -/*
> - * Copyright © 2016 Intel Corporation
> - *
> - * Permission is hereby granted, free of charge, to any person obtaining a
> - * copy of this software and associated documentation files (the "Software"),
> - * to deal in the Software without restriction, including without limitation
> - * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> - * and/or sell copies of the Software, and to permit persons to whom the
> - * Software is furnished to do so, subject to the following conditions:
> - *
> - * The above copyright notice and this permission notice (including the next
> - * paragraph) shall be included in all copies or substantial portions of the
> - * Software.
> - *
> - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> - * IN THE SOFTWARE.
> - *
> - */
> -
> -#include "i915_drv.h"
> -#include "intel_engine.h"
> -#include "intel_gt.h"
> -#include "intel_reset.h"
> -
> -struct hangcheck {
> -	u64 acthd;
> -	u32 ring;
> -	u32 head;
> -	enum intel_engine_hangcheck_action action;
> -	unsigned long action_timestamp;
> -	int deadlock;
> -	struct intel_instdone instdone;
> -	bool wedged:1;
> -	bool stalled:1;
> -};
> -
> -static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone)
> -{
> -	u32 tmp = current_instdone | *old_instdone;
> -	bool unchanged;
> -
> -	unchanged = tmp == *old_instdone;
> -	*old_instdone |= tmp;
> -
> -	return unchanged;
> -}
> -
> -static bool subunits_stuck(struct intel_engine_cs *engine)
> -{
> -	struct drm_i915_private *dev_priv = engine->i915;
> -	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
> -	struct intel_instdone instdone;
> -	struct intel_instdone *accu_instdone = &engine->hangcheck.instdone;
> -	bool stuck;
> -	int slice;
> -	int subslice;
> -
> -	intel_engine_get_instdone(engine, &instdone);
> -
> -	/* There might be unstable subunit states even when
> -	 * actual head is not moving. Filter out the unstable ones by
> -	 * accumulating the undone -> done transitions and only
> -	 * consider those as progress.
> -	 */
> -	stuck = instdone_unchanged(instdone.instdone,
> -				   &accu_instdone->instdone);
> -	stuck &= instdone_unchanged(instdone.slice_common,
> -				    &accu_instdone->slice_common);
> -
> -	for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice) {
> -		stuck &= instdone_unchanged(instdone.sampler[slice][subslice],
> -					    &accu_instdone->sampler[slice][subslice]);
> -		stuck &= instdone_unchanged(instdone.row[slice][subslice],
> -					    &accu_instdone->row[slice][subslice]);
> -	}
> -
> -	return stuck;
> -}
> -
> -static enum intel_engine_hangcheck_action
> -head_stuck(struct intel_engine_cs *engine, u64 acthd)
> -{
> -	if (acthd != engine->hangcheck.acthd) {
> -
> -		/* Clear subunit states on head movement */
> -		memset(&engine->hangcheck.instdone, 0,
> -		       sizeof(engine->hangcheck.instdone));
> -
> -		return ENGINE_ACTIVE_HEAD;
> -	}
> -
> -	if (!subunits_stuck(engine))
> -		return ENGINE_ACTIVE_SUBUNITS;
> -
> -	return ENGINE_DEAD;
> -}
> -
> -static enum intel_engine_hangcheck_action
> -engine_stuck(struct intel_engine_cs *engine, u64 acthd)
> -{
> -	enum intel_engine_hangcheck_action ha;
> -	u32 tmp;
> -
> -	ha = head_stuck(engine, acthd);
> -	if (ha != ENGINE_DEAD)
> -		return ha;
> -
> -	if (IS_GEN(engine->i915, 2))
> -		return ENGINE_DEAD;
> -
> -	/* Is the chip hanging on a WAIT_FOR_EVENT?
> -	 * If so we can simply poke the RB_WAIT bit
> -	 * and break the hang. This should work on
> -	 * all but the second generation chipsets.
> -	 */
> -	tmp = ENGINE_READ(engine, RING_CTL);
> -	if (tmp & RING_WAIT) {
> -		intel_gt_handle_error(engine->gt, engine->mask, 0,
> -				      "stuck wait on %s", engine->name);
> -		ENGINE_WRITE(engine, RING_CTL, tmp);
> -		return ENGINE_WAIT_KICK;
> -	}
> -
> -	return ENGINE_DEAD;
> -}
> -
> -static void hangcheck_load_sample(struct intel_engine_cs *engine,
> -				  struct hangcheck *hc)
> -{
> -	hc->acthd = intel_engine_get_active_head(engine);
> -	hc->ring = ENGINE_READ(engine, RING_START);
> -	hc->head = ENGINE_READ(engine, RING_HEAD);
> -}
> -
> -static void hangcheck_store_sample(struct intel_engine_cs *engine,
> -				   const struct hangcheck *hc)
> -{
> -	engine->hangcheck.acthd = hc->acthd;
> -	engine->hangcheck.last_ring = hc->ring;
> -	engine->hangcheck.last_head = hc->head;
> -}
> -
> -static enum intel_engine_hangcheck_action
> -hangcheck_get_action(struct intel_engine_cs *engine,
> -		     const struct hangcheck *hc)
> -{
> -	if (intel_engine_is_idle(engine))
> -		return ENGINE_IDLE;
> -
> -	if (engine->hangcheck.last_ring != hc->ring)
> -		return ENGINE_ACTIVE_SEQNO;
> -
> -	if (engine->hangcheck.last_head != hc->head)
> -		return ENGINE_ACTIVE_SEQNO;
> -
> -	return engine_stuck(engine, hc->acthd);
> -}
> -
> -static void hangcheck_accumulate_sample(struct intel_engine_cs *engine,
> -					struct hangcheck *hc)
> -{
> -	unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT;
> -
> -	hc->action = hangcheck_get_action(engine, hc);
> -
> -	/* We always increment the progress
> -	 * if the engine is busy and still processing
> -	 * the same request, so that no single request
> -	 * can run indefinitely (such as a chain of
> -	 * batches). The only time we do not increment
> -	 * the hangcheck score on this ring, if this
> -	 * engine is in a legitimate wait for another
> -	 * engine. In that case the waiting engine is a
> -	 * victim and we want to be sure we catch the
> -	 * right culprit. Then every time we do kick
> -	 * the ring, make it as a progress as the seqno
> -	 * advancement might ensure and if not, it
> -	 * will catch the hanging engine.
> -	 */
> -
> -	switch (hc->action) {
> -	case ENGINE_IDLE:
> -	case ENGINE_ACTIVE_SEQNO:
> -		/* Clear head and subunit states on seqno movement */
> -		hc->acthd = 0;
> -
> -		memset(&engine->hangcheck.instdone, 0,
> -		       sizeof(engine->hangcheck.instdone));
> -
> -		/* Intentional fall through */
> -	case ENGINE_WAIT_KICK:
> -	case ENGINE_WAIT:
> -		engine->hangcheck.action_timestamp = jiffies;
> -		break;
> -
> -	case ENGINE_ACTIVE_HEAD:
> -	case ENGINE_ACTIVE_SUBUNITS:
> -		/*
> -		 * Seqno stuck with still active engine gets leeway,
> -		 * in hopes that it is just a long shader.
> -		 */
> -		timeout = I915_SEQNO_DEAD_TIMEOUT;
> -		break;
> -
> -	case ENGINE_DEAD:
> -		break;
> -
> -	default:
> -		MISSING_CASE(hc->action);
> -	}
> -
> -	hc->stalled = time_after(jiffies,
> -				 engine->hangcheck.action_timestamp + timeout);
> -	hc->wedged = time_after(jiffies,
> -				 engine->hangcheck.action_timestamp +
> -				 I915_ENGINE_WEDGED_TIMEOUT);
> -}
> -
> -static void hangcheck_declare_hang(struct intel_gt *gt,
> -				   intel_engine_mask_t hung,
> -				   intel_engine_mask_t stuck)
> -{
> -	struct intel_engine_cs *engine;
> -	intel_engine_mask_t tmp;
> -	char msg[80];
> -	int len;
> -
> -	/* If some rings hung but others were still busy, only
> -	 * blame the hanging rings in the synopsis.
> -	 */
> -	if (stuck != hung)
> -		hung &= ~stuck;
> -	len = scnprintf(msg, sizeof(msg),
> -			"%s on ", stuck == hung ? "no progress" : "hang");
> -	for_each_engine_masked(engine, gt->i915, hung, tmp)
> -		len += scnprintf(msg + len, sizeof(msg) - len,
> -				 "%s, ", engine->name);
> -	msg[len-2] = '\0';
> -
> -	return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg);
> -}
> -
> -/*
> - * This is called when the chip hasn't reported back with completed
> - * batchbuffers in a long time. We keep track per ring seqno progress and
> - * if there are no progress, hangcheck score for that ring is increased.
> - * Further, acthd is inspected to see if the ring is stuck. On stuck case
> - * we kick the ring. If we see no progress on three subsequent calls
> - * we assume chip is wedged and try to fix it by resetting the chip.
> - */
> -static void hangcheck_elapsed(struct work_struct *work)
> -{
> -	struct intel_gt *gt =
> -		container_of(work, typeof(*gt), hangcheck.work.work);
> -	intel_engine_mask_t hung = 0, stuck = 0, wedged = 0;
> -	struct intel_engine_cs *engine;
> -	enum intel_engine_id id;
> -	intel_wakeref_t wakeref;
> -
> -	if (!i915_modparams.enable_hangcheck)
> -		return;
> -
> -	if (!READ_ONCE(gt->awake))
> -		return;
> -
> -	if (intel_gt_is_wedged(gt))
> -		return;
> -
> -	wakeref = intel_runtime_pm_get_if_in_use(gt->uncore->rpm);
> -	if (!wakeref)
> -		return;
> -
> -	/* As enabling the GPU requires fairly extensive mmio access,
> -	 * periodically arm the mmio checker to see if we are triggering
> -	 * any invalid access.
> -	 */
> -	intel_uncore_arm_unclaimed_mmio_detection(gt->uncore);
> -
> -	for_each_engine(engine, gt->i915, id) {
> -		struct hangcheck hc;
> -
> -		intel_engine_breadcrumbs_irq(engine);
> -
> -		hangcheck_load_sample(engine, &hc);
> -		hangcheck_accumulate_sample(engine, &hc);
> -		hangcheck_store_sample(engine, &hc);
> -
> -		if (hc.stalled) {
> -			hung |= engine->mask;
> -			if (hc.action != ENGINE_DEAD)
> -				stuck |= engine->mask;
> -		}
> -
> -		if (hc.wedged)
> -			wedged |= engine->mask;
> -	}
> -
> -	if (GEM_SHOW_DEBUG() && (hung | stuck)) {
> -		struct drm_printer p = drm_debug_printer("hangcheck");
> -
> -		for_each_engine(engine, gt->i915, id) {
> -			if (intel_engine_is_idle(engine))
> -				continue;
> -
> -			intel_engine_dump(engine, &p, "%s\n", engine->name);
> -		}
> -	}
> -
> -	if (wedged) {
> -		dev_err(gt->i915->drm.dev,
> -			"GPU recovery timed out,"
> -			" cancelling all in-flight rendering.\n");
> -		GEM_TRACE_DUMP();
> -		intel_gt_set_wedged(gt);
> -	}
> -
> -	if (hung)
> -		hangcheck_declare_hang(gt, hung, stuck);
> -
> -	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
> -
> -	/* Reset timer in case GPU hangs without another request being added */
> -	intel_gt_queue_hangcheck(gt);
> -}
> -
> -void intel_gt_queue_hangcheck(struct intel_gt *gt)
> -{
> -	unsigned long delay;
> -
> -	if (unlikely(!i915_modparams.enable_hangcheck))
> -		return;
> -
> -	/*
> -	 * Don't continually defer the hangcheck so that it is always run at
> -	 * least once after work has been scheduled on any ring. Otherwise,
> -	 * we will ignore a hung ring if a second ring is kept busy.
> -	 */
> -
> -	delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES);
> -	queue_delayed_work(system_long_wq, &gt->hangcheck.work, delay);
> -}
> -
> -void intel_engine_init_hangcheck(struct intel_engine_cs *engine)
> -{
> -	memset(&engine->hangcheck, 0, sizeof(engine->hangcheck));
> -	engine->hangcheck.action_timestamp = jiffies;
> -}
> -
> -void intel_gt_init_hangcheck(struct intel_gt *gt)
> -{
> -	INIT_DELAYED_WORK(&gt->hangcheck.work, hangcheck_elapsed);
> -}
> -
> -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
> -#include "selftest_hangcheck.c"
> -#endif
> diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
> index 34791fc79dea..9ed2cf91a46d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_reset.c
> +++ b/drivers/gpu/drm/i915/gt/intel_reset.c
> @@ -1018,8 +1018,6 @@ void intel_gt_reset(struct intel_gt *gt,
>   	if (ret)
>   		goto taint;
>   
> -	intel_gt_queue_hangcheck(gt);
> -
>   finish:
>   	reset_finish(gt, awake);
>   unlock:
> @@ -1347,4 +1345,5 @@ void __intel_fini_wedge(struct intel_wedge_me *w)
>   
>   #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
>   #include "selftest_reset.c"
> +#include "selftest_hangcheck.c"
>   #endif
> diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> index 569a4105d49e..570546eda5e8 100644
> --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
> @@ -1686,7 +1686,6 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
>   	};
>   	struct intel_gt *gt = &i915->gt;
>   	intel_wakeref_t wakeref;
> -	bool saved_hangcheck;
>   	int err;
>   
>   	if (!intel_has_gpu_reset(gt))
> @@ -1696,12 +1695,9 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915)
>   		return -EIO; /* we're long past hope of a successful reset */
>   
>   	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
> -	saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck);
> -	drain_delayed_work(&gt->hangcheck.work); /* flush param */
>   
>   	err = intel_gt_live_subtests(tests, gt);
>   
> -	i915_modparams.enable_hangcheck = saved_hangcheck;
>   	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
>   
>   	return err;
> diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
> index 277f31297f29..55852e045c3a 100644
> --- a/drivers/gpu/drm/i915/i915_debugfs.c
> +++ b/drivers/gpu/drm/i915/i915_debugfs.c
> @@ -1011,92 +1011,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
>   	return ret;
>   }
>   
> -static void i915_instdone_info(struct drm_i915_private *dev_priv,
> -			       struct seq_file *m,
> -			       struct intel_instdone *instdone)
> -{
> -	const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu;
> -	int slice;
> -	int subslice;
> -
> -	seq_printf(m, "\t\tINSTDONE: 0x%08x\n",
> -		   instdone->instdone);
> -
> -	if (INTEL_GEN(dev_priv) <= 3)
> -		return;
> -
> -	seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n",
> -		   instdone->slice_common);
> -
> -	if (INTEL_GEN(dev_priv) <= 6)
> -		return;
> -
> -	for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice)
> -		seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n",
> -			   slice, subslice, instdone->sampler[slice][subslice]);
> -
> -	for_each_instdone_slice_subslice(dev_priv, sseu, slice, subslice)
> -		seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n",
> -			   slice, subslice, instdone->row[slice][subslice]);
> -}
> -
> -static int i915_hangcheck_info(struct seq_file *m, void *unused)
> -{
> -	struct drm_i915_private *i915 = node_to_i915(m->private);
> -	struct intel_gt *gt = &i915->gt;
> -	struct intel_engine_cs *engine;
> -	intel_wakeref_t wakeref;
> -	enum intel_engine_id id;
> -
> -	seq_printf(m, "Reset flags: %lx\n", gt->reset.flags);
> -	if (test_bit(I915_WEDGED, &gt->reset.flags))
> -		seq_puts(m, "\tWedged\n");
> -	if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags))
> -		seq_puts(m, "\tDevice (global) reset in progress\n");
> -
> -	if (!i915_modparams.enable_hangcheck) {
> -		seq_puts(m, "Hangcheck disabled\n");
> -		return 0;
> -	}
> -
> -	if (timer_pending(&gt->hangcheck.work.timer))
> -		seq_printf(m, "Hangcheck active, timer fires in %dms\n",
> -			   jiffies_to_msecs(gt->hangcheck.work.timer.expires -
> -					    jiffies));
> -	else if (delayed_work_pending(&gt->hangcheck.work))
> -		seq_puts(m, "Hangcheck active, work pending\n");
> -	else
> -		seq_puts(m, "Hangcheck inactive\n");
> -
> -	seq_printf(m, "GT active? %s\n", yesno(gt->awake));
> -
> -	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
> -		for_each_engine(engine, i915, id) {
> -			struct intel_instdone instdone;
> -
> -			seq_printf(m, "%s: %d ms ago\n",
> -				   engine->name,
> -				   jiffies_to_msecs(jiffies -
> -						    engine->hangcheck.action_timestamp));
> -
> -			seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n",
> -				   (long long)engine->hangcheck.acthd,
> -				   intel_engine_get_active_head(engine));
> -
> -			intel_engine_get_instdone(engine, &instdone);
> -
> -			seq_puts(m, "\tinstdone read =\n");
> -			i915_instdone_info(i915, m, &instdone);
> -
> -			seq_puts(m, "\tinstdone accu =\n");
> -			i915_instdone_info(i915, m,
> -					   &engine->hangcheck.instdone);
> -		}
> -	}
> -
> -	return 0;
> -}
> -
>   static int ironlake_drpc_info(struct seq_file *m)
>   {
>   	struct drm_i915_private *i915 = node_to_i915(m->private);
> @@ -4303,7 +4217,6 @@ static const struct drm_info_list i915_debugfs_list[] = {
>   	{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
>   	{"i915_huc_load_status", i915_huc_load_status_info, 0},
>   	{"i915_frequency_info", i915_frequency_info, 0},
> -	{"i915_hangcheck_info", i915_hangcheck_info, 0},
>   	{"i915_drpc_info", i915_drpc_info, 0},
>   	{"i915_ring_freq_table", i915_ring_freq_table, 0},
>   	{"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
> diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
> index f02a34722217..1dae43ed4c48 100644
> --- a/drivers/gpu/drm/i915/i915_drv.c
> +++ b/drivers/gpu/drm/i915/i915_drv.c
> @@ -1546,10 +1546,7 @@ void i915_driver_remove(struct drm_i915_private *i915)
>   
>   	i915_driver_modeset_remove(i915);
>   
> -	/* Free error state after interrupts are fully disabled. */
> -	cancel_delayed_work_sync(&i915->gt.hangcheck.work);
>   	i915_reset_error_state(i915);
> -
>   	i915_gem_driver_remove(i915);
>   
>   	intel_power_domains_driver_remove(i915);
> diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
> index d284b04c492b..58340c99af02 100644
> --- a/drivers/gpu/drm/i915/i915_drv.h
> +++ b/drivers/gpu/drm/i915/i915_drv.h
> @@ -1845,7 +1845,6 @@ void i915_driver_remove(struct drm_i915_private *i915);
>   int i915_resume_switcheroo(struct drm_i915_private *i915);
>   int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
>   
> -void intel_engine_init_hangcheck(struct intel_engine_cs *engine);
>   int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
>   
>   static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
> index 5cf4eed5add8..47239df653f2 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.c
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.c
> @@ -534,10 +534,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
>   	}
>   	err_printf(m, "  ring->head: 0x%08x\n", ee->cpu_ring_head);
>   	err_printf(m, "  ring->tail: 0x%08x\n", ee->cpu_ring_tail);
> -	err_printf(m, "  hangcheck timestamp: %dms (%lu%s)\n",
> -		   jiffies_to_msecs(ee->hangcheck_timestamp - epoch),
> -		   ee->hangcheck_timestamp,
> -		   ee->hangcheck_timestamp == epoch ? "; epoch" : "");
>   	err_printf(m, "  engine reset count: %u\n", ee->reset_count);
>   
>   	for (n = 0; n < ee->num_ports; n++) {
> @@ -679,11 +675,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
>   	ts = ktime_to_timespec64(error->uptime);
>   	err_printf(m, "Uptime: %lld s %ld us\n",
>   		   (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC);
> -	err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ);
> -	err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n",
> -		   error->capture,
> -		   jiffies_to_msecs(jiffies - error->capture),
> -		   jiffies_to_msecs(error->capture - error->epoch));
> +	err_printf(m, "Capture: %lu jiffies; %d ms ago\n",
> +		   error->capture, jiffies_to_msecs(jiffies - error->capture));
>   
>   	for (ee = error->engine; ee; ee = ee->next)
>   		err_printf(m, "Active process (on ring %s): %s [%d]\n",
> @@ -742,7 +735,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
>   		err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache);
>   
>   	for (ee = error->engine; ee; ee = ee->next)
> -		error_print_engine(m, ee, error->epoch);
> +		error_print_engine(m, ee, error->capture);
>   
>   	for (ee = error->engine; ee; ee = ee->next) {
>   		const struct drm_i915_error_object *obj;
> @@ -770,7 +763,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m,
>   			for (j = 0; j < ee->num_requests; j++)
>   				error_print_request(m, " ",
>   						    &ee->requests[j],
> -						    error->epoch);
> +						    error->capture);
>   		}
>   
>   		print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer);
> @@ -1144,8 +1137,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
>   	}
>   
>   	ee->idle = intel_engine_is_idle(engine);
> -	if (!ee->idle)
> -		ee->hangcheck_timestamp = engine->hangcheck.action_timestamp;
>   	ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error,
>   						  engine);
>   
> @@ -1657,20 +1648,6 @@ static void capture_params(struct i915_gpu_state *error)
>   	i915_params_copy(&error->params, &i915_modparams);
>   }
>   
> -static unsigned long capture_find_epoch(const struct i915_gpu_state *error)
> -{
> -	const struct drm_i915_error_engine *ee;
> -	unsigned long epoch = error->capture;
> -
> -	for (ee = error->engine; ee; ee = ee->next) {
> -		if (ee->hangcheck_timestamp &&
> -		    time_before(ee->hangcheck_timestamp, epoch))
> -			epoch = ee->hangcheck_timestamp;
> -	}
> -
> -	return epoch;
> -}
> -
>   static void capture_finish(struct i915_gpu_state *error)
>   {
>   	struct i915_ggtt *ggtt = &error->i915->ggtt;
> @@ -1722,8 +1699,6 @@ i915_capture_gpu_state(struct drm_i915_private *i915)
>   	error->overlay = intel_overlay_capture_error_state(i915);
>   	error->display = intel_display_capture_error_state(i915);
>   
> -	error->epoch = capture_find_epoch(error);
> -
>   	capture_finish(error);
>   	compress_fini(&compress);
>   
> diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
> index 7f1cd0b1fef7..4dc36d6ee3a2 100644
> --- a/drivers/gpu/drm/i915/i915_gpu_error.h
> +++ b/drivers/gpu/drm/i915/i915_gpu_error.h
> @@ -34,7 +34,6 @@ struct i915_gpu_state {
>   	ktime_t boottime;
>   	ktime_t uptime;
>   	unsigned long capture;
> -	unsigned long epoch;
>   
>   	struct drm_i915_private *i915;
>   
> @@ -86,7 +85,6 @@ struct i915_gpu_state {
>   
>   		/* Software tracked state */
>   		bool idle;
> -		unsigned long hangcheck_timestamp;
>   		int num_requests;
>   		u32 reset_count;
>   
> diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h
> index ae8bb3cb627e..732aad148881 100644
> --- a/drivers/gpu/drm/i915/i915_priolist_types.h
> +++ b/drivers/gpu/drm/i915/i915_priolist_types.h
> @@ -16,6 +16,12 @@ enum {
>   	I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1,
>   	I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY,
>   	I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1,
> +
> +	/* A preemptive pulse used to monitor the health of each engine */
> +	I915_PRIORITY_HEARTBEAT,
> +
> +	/* Interactive workload, scheduled for immediate pageflipping */
> +	I915_PRIORITY_DISPLAY,
>   };
>   
>   #define I915_USER_PRIORITY_SHIFT 2
> 

Regards,

Tvrtko
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 10/10] drm/i915: Flush idle barriers when waiting
  2019-10-10  7:14 ` [PATCH 10/10] drm/i915: Flush idle barriers when waiting Chris Wilson
@ 2019-10-11 14:56   ` Tvrtko Ursulin
  2019-10-11 15:11     ` Chris Wilson
  0 siblings, 1 reply; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-11 14:56 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 10/10/2019 08:14, Chris Wilson wrote:
> If we do find ourselves with an idle barrier inside our active while
> waiting, attempt to flush it by emitting a pulse using the kernel
> context.

The point of this one completely escapes me at the moment. Idle barriers 
are kept in there to be consumed by the engine_pm parking, so if any 
random waiter finds some (there will always be some, as long as the 
engine executed some user context, right?), why would it want to handle 
them? Again just to use the opportunity for some house keeping? But what 
if the system is otherwise quite busy and a low-priority client just 
happens to want to wait on something silly?

Regards,

Tvrtko

> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.c  | 14 +++++++++++++
>   .../gpu/drm/i915/gt/intel_engine_heartbeat.h  |  1 +
>   drivers/gpu/drm/i915/i915_active.c            | 21 +++++++++++++++++--
>   3 files changed, 34 insertions(+), 2 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> index f68acf9118f3..e27bb7f028bd 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
> @@ -169,3 +169,17 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
>   	intel_engine_pm_put(engine);
>   	return err;
>   }
> +
> +int intel_engine_flush_barriers(struct intel_engine_cs *engine)
> +{
> +	struct i915_request *rq;
> +
> +	rq = i915_request_create(engine->kernel_context);
> +	if (IS_ERR(rq))
> +		return PTR_ERR(rq);
> +
> +	idle_pulse(engine, rq);
> +	i915_request_add(rq);
> +
> +	return 0;
> +}
> diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> index 39391004554d..0c1ad0fc091d 100644
> --- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h
> @@ -15,5 +15,6 @@ void intel_engine_park_heartbeat(struct intel_engine_cs *engine);
>   void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine);
>   
>   int intel_engine_pulse(struct intel_engine_cs *engine);
> +int intel_engine_flush_barriers(struct intel_engine_cs *engine);
>   
>   #endif /* INTEL_ENGINE_HEARTBEAT_H */
> diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
> index aa37c07004b9..98d5fe1c7e19 100644
> --- a/drivers/gpu/drm/i915/i915_active.c
> +++ b/drivers/gpu/drm/i915/i915_active.c
> @@ -6,6 +6,7 @@
>   
>   #include <linux/debugobjects.h>
>   
> +#include "gt/intel_engine_heartbeat.h"
>   #include "gt/intel_engine_pm.h"
>   
>   #include "i915_drv.h"
> @@ -435,6 +436,21 @@ static void enable_signaling(struct i915_active_fence *active)
>   	dma_fence_put(fence);
>   }
>   
> +static int flush_barrier(struct active_node *it)
> +{
> +	struct intel_engine_cs *engine;
> +
> +	if (!is_barrier(&it->base))
> +		return 0;
> +
> +	engine = __barrier_to_engine(it);
> +	smp_rmb(); /* serialise with add_active_barriers */
> +	if (!is_barrier(&it->base))
> +		return 0;
> +
> +	return intel_engine_flush_barriers(engine);
> +}
> +
>   int i915_active_wait(struct i915_active *ref)
>   {
>   	struct active_node *it, *n;
> @@ -448,8 +464,9 @@ int i915_active_wait(struct i915_active *ref)
>   	/* Flush lazy signals */
>   	enable_signaling(&ref->excl);
>   	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
> -		if (is_barrier(&it->base)) /* unconnected idle barrier */
> -			continue;
> +		err = flush_barrier(it); /* unconnected idle barrier? */
> +		if (err)
> +			break;
>   
>   		enable_signaling(&it->base);
>   	}
> 
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 09/10] drm/i915: Replace hangcheck by heartbeats
  2019-10-11 14:24   ` Tvrtko Ursulin
@ 2019-10-11 15:06     ` Chris Wilson
  0 siblings, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-11 15:06 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-11 15:24:21)
> 
> On 10/10/2019 08:14, Chris Wilson wrote:
> > +config DRM_I915_HEARTBEAT_INTERVAL
> > +     int "Interval between heartbeat pulses (ms)"
> > +     default 2500 # milliseconds
> > +     help
> > +       While active the driver uses a periodic request, a heartbeat, to
> > +       check the wellness of the GPU and to regularly flush state changes
> > +       (idle barriers).
> 
> Should this be somehow reworded to be more end user friendly? My idea, 
> may need to be corrected for bad English:

End user friendly. Sure, but that means I didn't hide this well enough
;)
 
> The driver sends a periodic heartbeat down all active GT engines to 
> check the health of the GPU and undertake regular house-keeping of 
> internal driver state.
> 
> Main points from the user perspective: "request" - whaat? "idle 
> barriers" - ditto. "Wellness" - a bit unusual in this context, no?

> > +static void heartbeat(struct work_struct *wrk)
> > +{
> > +     struct i915_sched_attr attr = {
> > +             .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN),
> 
> You were saying it's better to start from zero, right?

The first bump. Starting at lowest, means run when first idle. Then we
jump to 0 and be scheduled like any other normal user.

> > +     };
> > +     struct intel_engine_cs *engine =
> > +             container_of(wrk, typeof(*engine), heartbeat.work.work);
> > +     struct intel_context *ce = engine->kernel_context;
> > +     struct i915_request *rq;
> > +
> > +     if (!intel_engine_pm_get_if_awake(engine))
> > +             return;
> > +
> > +     rq = engine->heartbeat.systole;
> > +     if (rq && i915_request_completed(rq)) {
> > +             i915_request_put(rq);
> > +             engine->heartbeat.systole = NULL;
> > +     }
> > +
> > +     if (intel_gt_is_wedged(engine->gt))
> > +             goto out;
> > +
> > +     if (engine->heartbeat.systole) {
> > +             if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
> > +                     struct drm_printer p = drm_debug_printer(__func__);
> > +
> > +                     intel_engine_dump(engine, &p,
> > +                                       "%s heartbeat not ticking\n",
> > +                                       engine->name);
> 
> This could perhaps be better only when we have reached a higher priority 
> attempt. Okay it's under DEBUG_GEM but still, not sure there is value in 
> being so panicky if for any reason preemption does not work. Heartbeat 
> does not depend on preemption as far as I could spot, right?

The challenge is evident by the else path where we immediately reset.
If we cause a preemption event from the heartbeat (even strictly at min
prio we could cause a timeslice to expire) it is useful to have the
debug in dmesg (as in CI we don't get error-state very often).

Yes, I've tried trimming it to only on the vital paths, but so far
haven't found a satisfactory means.

To make me happy I think I need to push it down into the reset routines
themselves. Hmm. Except those we definitely don't want dmesg spam as
they get runs 10s of thousands times during CI.

It'll do for now. I'm sure we'll get tired of it and find it a new home.

> > +static struct kobj_attribute heartbeat_interval_attr =
> > +__ATTR(heartbeat_interval_ms, 0600, heartbeat_interval_show, heartbeat_interval_store);
> >   
> >   static void kobj_engine_release(struct kobject *kobj)
> >   {
> > @@ -115,6 +141,9 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
> >               &class_attr.attr,
> >               &inst_attr.attr,
> >               &mmio_attr.attr,
> > +#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
> > +             &heartbeat_interval_attr.attr,
> > +#endif
> 
> Presumably compiler is happy (or the linker) with only this part getting 
> the #ifdef treatment? (The show/store functions above don't have it.)

Yup, it's not annoying enough to complain about the dead globals. Although
it should be more than smart enough to remove them.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 10/10] drm/i915: Flush idle barriers when waiting
  2019-10-11 14:56   ` Tvrtko Ursulin
@ 2019-10-11 15:11     ` Chris Wilson
  2019-10-14 13:08       ` Tvrtko Ursulin
  0 siblings, 1 reply; 42+ messages in thread
From: Chris Wilson @ 2019-10-11 15:11 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-11 15:56:35)
> 
> On 10/10/2019 08:14, Chris Wilson wrote:
> > If we do find ourselves with an idle barrier inside our active while
> > waiting, attempt to flush it by emitting a pulse using the kernel
> > context.
> 
> The point of this one completely escapes me at the moment. Idle barriers 
> are kept in there to be consumed by the engine_pm parking, so if any 
> random waiter finds some (there will always be some, as long as the 
> engine executed some user context, right?),

Not any random waiter; the waiter has to be waiting on a context that
was active and so setup a barrier.

> why would it want to handle 
> them? Again just to use the opportunity for some house keeping? But what 
> if the system is otherwise quite busy and a low-priority client just 
> happens to want to wait on something silly?

There's no guarantee that it will ever be flushed. So why wouldn't we
use a low priority request to give a semblance of forward progress and
give a guarantee that the wait will complete.

It's a hypothetical point, there are no waiters that need to wait upon
their own barriers at present. We are just completing the picture for
idle barrier tracking.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 08/10] drm/i915: Cancel non-persistent contexts on close
  2019-10-11 14:22     ` Chris Wilson
@ 2019-10-11 15:41       ` Chris Wilson
  0 siblings, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-11 15:41 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Chris Wilson (2019-10-11 15:22:17)
> Quoting Tvrtko Ursulin (2019-10-11 14:55:00)
> > 
> > On 10/10/2019 08:14, Chris Wilson wrote:
> > > +             if (engine)
> > > +                     active |= engine->mask;
> > > +
> > > +             dma_fence_put(fence);
> > > +     }
> > > +
> > > +     /*
> > > +      * Send a "high priority pulse" down the engine to cause the
> > > +      * current request to be momentarily preempted. (If it fails to
> > > +      * be preempted, it will be reset). As we have marked our context
> > > +      * as banned, any incomplete request, including any running, will
> > > +      * be skipped following the preemption.
> > > +      */
> > > +     reset = 0;
> > > +     for_each_engine_masked(engine, gt->i915, active, tmp)
> > > +             if (intel_engine_pulse(engine))
> > > +                     reset |= engine->mask;
> > 
> > What if we were able to send a pulse, but the hog cannot be preempted 
> > and hangcheck is obviously disabled - who will do the reset?
> 
> Hmm, the idea is that forced-preemption causes the reset.
> (See igt/gem_ctx_persistence/hostile)
> 
> However, if we give the sysadmin the means to disable force-preemption,
> we just gave them another shovel to dig a hole with.
> 
> A last resort would be another timer here to ensure the context was
> terminated.

That does not cut it, as we only looking at it from the pov of the
context being guilty and not the victim. So the answer remains forced
preemption, and a backdoor if that is disabled.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 10/10] drm/i915: Flush idle barriers when waiting
  2019-10-11 15:11     ` Chris Wilson
@ 2019-10-14 13:08       ` Tvrtko Ursulin
  2019-10-14 13:38         ` Chris Wilson
  2019-10-23 15:33           ` [Intel-gfx] " Chris Wilson
  0 siblings, 2 replies; 42+ messages in thread
From: Tvrtko Ursulin @ 2019-10-14 13:08 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx


On 11/10/2019 16:11, Chris Wilson wrote:
> Quoting Tvrtko Ursulin (2019-10-11 15:56:35)
>>
>> On 10/10/2019 08:14, Chris Wilson wrote:
>>> If we do find ourselves with an idle barrier inside our active while
>>> waiting, attempt to flush it by emitting a pulse using the kernel
>>> context.
>>
>> The point of this one completely escapes me at the moment. Idle barriers
>> are kept in there to be consumed by the engine_pm parking, so if any
>> random waiter finds some (there will always be some, as long as the
>> engine executed some user context, right?),
> 
> Not any random waiter; the waiter has to be waiting on a context that
> was active and so setup a barrier.
> 
>> why would it want to handle
>> them? Again just to use the opportunity for some house keeping? But what
>> if the system is otherwise quite busy and a low-priority client just
>> happens to want to wait on something silly?
> 
> There's no guarantee that it will ever be flushed. So why wouldn't we
> use a low priority request to give a semblance of forward progress and
> give a guarantee that the wait will complete.
> 
> It's a hypothetical point, there are no waiters that need to wait upon
> their own barriers at present. We are just completing the picture for
> idle barrier tracking.

Hm I was mistakenly remembering things like rpcs reconfiguration would 
wait on ce->active, but I forgot about your trick with putting kernel 
context request on an user timeline.

I guess it is fine there, but since, and as you have said, it is 
hypothetical, then this patch is dead code and can wait.

Regards,

Tvrtko

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 10/10] drm/i915: Flush idle barriers when waiting
  2019-10-14 13:08       ` Tvrtko Ursulin
@ 2019-10-14 13:38         ` Chris Wilson
  2019-10-23 15:33           ` [Intel-gfx] " Chris Wilson
  1 sibling, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-14 13:38 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-14 14:08:12)
> 
> On 11/10/2019 16:11, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2019-10-11 15:56:35)
> >>
> >> On 10/10/2019 08:14, Chris Wilson wrote:
> >>> If we do find ourselves with an idle barrier inside our active while
> >>> waiting, attempt to flush it by emitting a pulse using the kernel
> >>> context.
> >>
> >> The point of this one completely escapes me at the moment. Idle barriers
> >> are kept in there to be consumed by the engine_pm parking, so if any
> >> random waiter finds some (there will always be some, as long as the
> >> engine executed some user context, right?),
> > 
> > Not any random waiter; the waiter has to be waiting on a context that
> > was active and so setup a barrier.
> > 
> >> why would it want to handle
> >> them? Again just to use the opportunity for some house keeping? But what
> >> if the system is otherwise quite busy and a low-priority client just
> >> happens to want to wait on something silly?
> > 
> > There's no guarantee that it will ever be flushed. So why wouldn't we
> > use a low priority request to give a semblance of forward progress and
> > give a guarantee that the wait will complete.
> > 
> > It's a hypothetical point, there are no waiters that need to wait upon
> > their own barriers at present. We are just completing the picture for
> > idle barrier tracking.
> 
> Hm I was mistakenly remembering things like rpcs reconfiguration would 
> wait on ce->active, but I forgot about your trick with putting kernel 
> context request on an user timeline.
> 
> I guess it is fine there, but since, and as you have said, it is 
> hypothetical, then this patch is dead code and can wait.

Why would we even bother checking against the potential invalid pointer
dereference then?... :-p
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [PATCH 10/10] drm/i915: Flush idle barriers when waiting
@ 2019-10-23 15:33           ` Chris Wilson
  0 siblings, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-23 15:33 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-14 14:08:12)
> 
> On 11/10/2019 16:11, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2019-10-11 15:56:35)
> >>
> >> On 10/10/2019 08:14, Chris Wilson wrote:
> >>> If we do find ourselves with an idle barrier inside our active while
> >>> waiting, attempt to flush it by emitting a pulse using the kernel
> >>> context.
> >>
> >> The point of this one completely escapes me at the moment. Idle barriers
> >> are kept in there to be consumed by the engine_pm parking, so if any
> >> random waiter finds some (there will always be some, as long as the
> >> engine executed some user context, right?),
> > 
> > Not any random waiter; the waiter has to be waiting on a context that
> > was active and so setup a barrier.
> > 
> >> why would it want to handle
> >> them? Again just to use the opportunity for some house keeping? But what
> >> if the system is otherwise quite busy and a low-priority client just
> >> happens to want to wait on something silly?
> > 
> > There's no guarantee that it will ever be flushed. So why wouldn't we
> > use a low priority request to give a semblance of forward progress and
> > give a guarantee that the wait will complete.
> > 
> > It's a hypothetical point, there are no waiters that need to wait upon
> > their own barriers at present. We are just completing the picture for
> > idle barrier tracking.
> 
> Hm I was mistakenly remembering things like rpcs reconfiguration would 
> wait on ce->active, but I forgot about your trick with putting kernel 
> context request on an user timeline.
> 
> I guess it is fine there, but since, and as you have said, it is 
> hypothetical, then this patch is dead code and can wait.

Ok, I have a use for this now! In "drm/i915: Allow userspace to specify
ringsize on construction" we need to wait on the context itself to idle,
i.e. i915_active_wait(&ce->active) and so now it is possible for us to
be waiting on an idle_barrier() and so the flush be beneficial.

/*
 * __apply_ringsize - replace (or record) the ringbuffer size for a context
 * @ce: the context whose ring is being resized
 * @sz: the requested ring size smuggled through the void* argument and
 *      recovered via a cast to unsigned long (it is not a real pointer)
 *
 * Returns 0 on success, or a negative errno:
 *   -EINTR if interrupted while taking the pin lock,
 *   -EBUSY if the context is currently pinned (caller should retry later),
 *   or the error from waiting on / creating the ring.
 */
static int __apply_ringsize(struct intel_context *ce, void *sz)
{
       int err;

       /* Flush outstanding use of the context (including any idle
        * barriers) before we touch its ring.
        */
       err = i915_active_wait(&ce->active);
       if (err < 0)
               return err;
       /* NOTE(review): if i915_active_wait() can ever return a positive
        * value, that value leaks through to the final return on the
        * !CONTEXT_ALLOC_BIT path below (err is never reset to 0 there) —
        * confirm it only returns 0 or a negative errno.
        */

       if (intel_context_lock_pinned(ce))
               return -EINTR;

       if (intel_context_is_pinned(ce)) {
               err = -EBUSY; /* In active use, come back later! */
               goto unlock;
       }

       if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
               struct intel_ring *ring;

               /* Replace the existing ringbuffer */
               ring = intel_engine_create_ring(ce->engine,
                                               (unsigned long)sz);
               if (IS_ERR(ring)) {
                       err = PTR_ERR(ring);
                       goto unlock;
               }

               /* Drop our reference on the old ring and install the new */
               intel_ring_put(ce->ring);
               ce->ring = ring;

               /* Context image will be updated on next pin */
       } else {
               /* No ring allocated yet: stash the requested size in the
                * ring pointer until first pin — presumably decoded by the
                * context-alloc path; TODO confirm against the allocation
                * code, which is outside this view.
                */
               ce->ring = sz;
       }

unlock:
       intel_context_unlock_pinned(ce);
       return err;
}

-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

* Re: [Intel-gfx] [PATCH 10/10] drm/i915: Flush idle barriers when waiting
@ 2019-10-23 15:33           ` Chris Wilson
  0 siblings, 0 replies; 42+ messages in thread
From: Chris Wilson @ 2019-10-23 15:33 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx

Quoting Tvrtko Ursulin (2019-10-14 14:08:12)
> 
> On 11/10/2019 16:11, Chris Wilson wrote:
> > Quoting Tvrtko Ursulin (2019-10-11 15:56:35)
> >>
> >> On 10/10/2019 08:14, Chris Wilson wrote:
> >>> If we do find ourselves with an idle barrier inside our active while
> >>> waiting, attempt to flush it by emitting a pulse using the kernel
> >>> context.
> >>
> >> The point of this one completely escapes me at the moment. Idle barriers
> >> are kept in there to be consumed by the engine_pm parking, so if any
> >> random waiter finds some (there will always be some, as long as the
> >> engine executed some user context, right?),
> > 
> > Not any random waiter; the waiter has to be waiting on a context that
> > was active and so setup a barrier.
> > 
> >> why would it want to handle
> >> them? Again just to use the opportunity for some house keeping? But what
> >> if the system is otherwise quite busy and a low-priority client just
> >> happens to want to wait on something silly?
> > 
> > There's no guarantee that it will ever be flushed. So why wouldn't we
> > use a low priority request to give a semblance of forward progress and
> > give a guarantee that the wait will complete.
> > 
> > It's a hypothetical point, there are no waiters that need to wait upon
> > their own barriers at present. We are just completing the picture for
> > idle barrier tracking.
> 
> Hm I was mistakenly remembering things like rpcs reconfiguration would 
> wait on ce->active, but I forgot about your trick with putting kernel 
> context request on a user timeline.
> 
> I guess it is fine there, but since, and as you have said, it is 
> hypothetical, then this patch is dead code and can wait.

Ok, I have a use for this now! In "drm/i915: Allow userspace to specify
ringsize on construction" we need to wait on the context itself to idle,
i.e. i915_active_wait(&ce->active) and so now it is possible for us to
be waiting on an idle_barrier() and so the flush would be beneficial.

/*
 * __apply_ringsize - replace (or record) the ringbuffer size for a context
 * @ce: the context whose ring is being resized
 * @sz: the requested ring size smuggled through the void* argument and
 *      recovered via a cast to unsigned long (it is not a real pointer)
 *
 * Returns 0 on success, or a negative errno:
 *   -EINTR if interrupted while taking the pin lock,
 *   -EBUSY if the context is currently pinned (caller should retry later),
 *   or the error from waiting on / creating the ring.
 */
static int __apply_ringsize(struct intel_context *ce, void *sz)
{
       int err;

       /* Flush outstanding use of the context (including any idle
        * barriers) before we touch its ring.
        */
       err = i915_active_wait(&ce->active);
       if (err < 0)
               return err;
       /* NOTE(review): if i915_active_wait() can ever return a positive
        * value, that value leaks through to the final return on the
        * !CONTEXT_ALLOC_BIT path below (err is never reset to 0 there) —
        * confirm it only returns 0 or a negative errno.
        */

       if (intel_context_lock_pinned(ce))
               return -EINTR;

       if (intel_context_is_pinned(ce)) {
               err = -EBUSY; /* In active use, come back later! */
               goto unlock;
       }

       if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
               struct intel_ring *ring;

               /* Replace the existing ringbuffer */
               ring = intel_engine_create_ring(ce->engine,
                                               (unsigned long)sz);
               if (IS_ERR(ring)) {
                       err = PTR_ERR(ring);
                       goto unlock;
               }

               /* Drop our reference on the old ring and install the new */
               intel_ring_put(ce->ring);
               ce->ring = ring;

               /* Context image will be updated on next pin */
       } else {
               /* No ring allocated yet: stash the requested size in the
                * ring pointer until first pin — presumably decoded by the
                * context-alloc path; TODO confirm against the allocation
                * code, which is outside this view.
                */
               ce->ring = sz;
       }

unlock:
       intel_context_unlock_pinned(ce);
       return err;
}

-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 42+ messages in thread

end of thread, other threads:[~2019-10-23 15:33 UTC | newest]

Thread overview: 42+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-10-10  7:14 [PATCH 01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Chris Wilson
2019-10-10  7:14 ` [PATCH 02/10] drm/i915/execlists: Leave tell-tales as to why pending[] is bad Chris Wilson
2019-10-11  8:39   ` Tvrtko Ursulin
2019-10-10  7:14 ` [PATCH 03/10] drm/i915: Expose engine properties via sysfs Chris Wilson
2019-10-11  8:44   ` Tvrtko Ursulin
2019-10-11  8:49     ` Chris Wilson
2019-10-11  9:04       ` Tvrtko Ursulin
2019-10-11  9:40   ` [PATCH v2] " Chris Wilson
2019-10-10  7:14 ` [PATCH 04/10] drm/i915/execlists: Force preemption Chris Wilson
2019-10-10  7:14 ` [PATCH 05/10] drm/i915: Mark up "sentinel" requests Chris Wilson
2019-10-11  8:45   ` Tvrtko Ursulin
2019-10-10  7:14 ` [PATCH 06/10] drm/i915/gt: Introduce barrier pulses along engines Chris Wilson
2019-10-11  9:11   ` Tvrtko Ursulin
2019-10-11  9:52     ` Chris Wilson
2019-10-10  7:14 ` [PATCH 07/10] drm/i915/execlists: Cancel banned contexts on schedule-out Chris Wilson
2019-10-11  9:47   ` Tvrtko Ursulin
2019-10-11 10:03     ` Chris Wilson
2019-10-11 10:15     ` Chris Wilson
2019-10-11 10:40       ` Chris Wilson
2019-10-11 11:16   ` [PATCH v2] " Chris Wilson
2019-10-11 13:10     ` Tvrtko Ursulin
2019-10-11 14:10       ` Chris Wilson
2019-10-10  7:14 ` [PATCH 08/10] drm/i915: Cancel non-persistent contexts on close Chris Wilson
2019-10-11 13:55   ` Tvrtko Ursulin
2019-10-11 14:22     ` Chris Wilson
2019-10-11 15:41       ` Chris Wilson
2019-10-10  7:14 ` [PATCH 09/10] drm/i915: Replace hangcheck by heartbeats Chris Wilson
2019-10-11 14:24   ` Tvrtko Ursulin
2019-10-11 15:06     ` Chris Wilson
2019-10-10  7:14 ` [PATCH 10/10] drm/i915: Flush idle barriers when waiting Chris Wilson
2019-10-11 14:56   ` Tvrtko Ursulin
2019-10-11 15:11     ` Chris Wilson
2019-10-14 13:08       ` Tvrtko Ursulin
2019-10-14 13:38         ` Chris Wilson
2019-10-23 15:33         ` Chris Wilson
2019-10-23 15:33           ` [Intel-gfx] " Chris Wilson
2019-10-10  8:18 ` ✗ Fi.CI.CHECKPATCH: warning for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler Patchwork
2019-10-10  8:42 ` ✓ Fi.CI.BAT: success " Patchwork
2019-10-10 16:19 ` ✗ Fi.CI.IGT: failure " Patchwork
2019-10-11  8:16 ` [PATCH 01/10] " Tvrtko Ursulin
2019-10-11  9:49 ` ✗ Fi.CI.BUILD: failure for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev2) Patchwork
2019-10-11 11:39 ` ✗ Fi.CI.BUILD: failure for series starting with [01/10] drm/i915: Note the addition of timeslicing to the pretend scheduler (rev3) Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.