intel-gfx.lists.freedesktop.org archive mirror
* [Intel-gfx] [PATCH i-g-t 1/5] i915: Start putting the mmio_base to wider use
@ 2020-01-27 12:18 Chris Wilson
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 2/5] i915/gem_ctx_isolation: Check engine relative registers Chris Wilson
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Chris Wilson @ 2020-01-27 12:18 UTC (permalink / raw)
  To: igt-dev; +Cc: intel-gfx

Several tests depend upon the implicit engine->mmio_base but have no
means of determining the physical layout. Since the kernel has started
providing this information, start putting it to use.
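
For illustration only (not part of the patch): a minimal sketch of how a
caller might use the new helper. The wrapper name is invented; the 0x358
engine-relative timestamp offset is the one used by the tests below.

  #include "i915/gem_engine_topology.h"
  #include "igt_core.h"

  static uint32_t timestamp_reg(int i915, const char *engine)
  {
          /* engine is a sysfs name such as "rcs0"; a zero return means
           * neither sysfs nor the legacy fallback knows the base. */
          uint32_t base = gem_engine_mmio_base(i915, engine);

          igt_require(base);
          return base + 0x358; /* per-engine timestamp register */
  }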

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 lib/i915/gem_engine_topology.c | 84 ++++++++++++++++++++++++++++++++++
 lib/i915/gem_engine_topology.h |  5 ++
 tests/i915/gem_ctx_shared.c    | 38 +++++----------
 tests/i915/gem_exec_latency.c  | 17 ++++---
 4 files changed, 111 insertions(+), 33 deletions(-)

diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
index 43a99e0ff..058983123 100644
--- a/lib/i915/gem_engine_topology.c
+++ b/lib/i915/gem_engine_topology.c
@@ -21,7 +21,12 @@
  * IN THE SOFTWARE.
  */
 
+#include <fcntl.h>
+#include <unistd.h>
+
 #include "drmtest.h"
+#include "igt_sysfs.h"
+#include "intel_chipset.h"
 #include "ioctl_wrappers.h"
 
 #include "i915/gem_engine_topology.h"
@@ -331,3 +336,82 @@ bool gem_engine_is_equal(const struct intel_execution_engine2 *e1,
 {
 	return e1->class == e2->class && e1->instance == e2->instance;
 }
+
+static int descend(int dir, const char *path)
+{
+	int fd;
+
+	fd = openat(dir, path, O_RDONLY);
+	close(dir);
+
+	return fd;
+}
+
+int gem_engine_property_scanf(int i915, const char *engine, const char *attr,
+			      const char *fmt, ...)
+{
+	FILE *file;
+	va_list ap;
+	int ret;
+	int fd;
+
+	fd = igt_sysfs_open(i915);
+	if (fd < 0)
+		return fd;
+
+	fd = descend(fd, "engine");
+	if (fd < 0)
+		return fd;
+
+	fd = descend(fd, engine);
+	if (fd < 0)
+		return fd;
+
+	fd = descend(fd, attr);
+	if (fd < 0)
+		return fd;
+
+	file = fdopen(fd, "r");
+	if (!file) {
+		close(fd);
+		return -1;
+	}
+
+	va_start(ap, fmt);
+	ret = vfscanf(file, fmt, ap);
+	va_end(ap);
+
+	fclose(file);
+	return ret;
+}
+
+uint32_t gem_engine_mmio_base(int i915, const char *engine)
+{
+	unsigned int mmio = 0;
+
+	if (gem_engine_property_scanf(i915, engine, "mmio_base",
+				      "%x", &mmio) < 0) {
+		int gen = intel_gen(intel_get_drm_devid(i915));
+
+		/* The layout of xcs1+ is unreliable -- hence the property! */
+		if (!strcmp(engine, "rcs0")) {
+			mmio = 0x2000;
+		} else if (!strcmp(engine, "bcs0")) {
+			mmio = 0x22000;
+		} else if (!strcmp(engine, "vcs0")) {
+			if (gen < 6)
+				mmio = 0x4000;
+			else if (gen < 11)
+				mmio = 0x12000;
+			else
+				mmio = 0x1c0000;
+		} else if (!strcmp(engine, "vecs0")) {
+			if (gen < 11)
+				mmio = 0x1a000;
+			else
+				mmio = 0x1c8000;
+		}
+	}
+
+	return mmio;
+}
diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
index e40d7ec83..7a2e21f66 100644
--- a/lib/i915/gem_engine_topology.h
+++ b/lib/i915/gem_engine_topology.h
@@ -72,4 +72,9 @@ struct intel_execution_engine2 gem_eb_flags_to_engine(unsigned int flags);
 	     ((e__) = intel_get_current_physical_engine(&i__)); \
 	     intel_next_engine(&i__))
 
+__attribute__((format(scanf, 4, 5)))
+int gem_engine_property_scanf(int i915, const char *engine, const char *attr,
+			      const char *fmt, ...);
+uint32_t gem_engine_mmio_base(int i915, const char *engine);
+
 #endif /* GEM_ENGINE_TOPOLOGY_H */
diff --git a/tests/i915/gem_ctx_shared.c b/tests/i915/gem_ctx_shared.c
index 30e37c3cc..6fc3f8cd8 100644
--- a/tests/i915/gem_ctx_shared.c
+++ b/tests/i915/gem_ctx_shared.c
@@ -38,6 +38,7 @@
 
 #include <drm.h>
 
+#include "i915/gem_engine_topology.h"
 #include "igt_rand.h"
 #include "igt_vgem.h"
 #include "sync_file.h"
@@ -548,6 +549,14 @@ static uint32_t store_timestamp(int i915,
 	return obj.handle;
 }
 
+static uint32_t ring_base(int i915, unsigned ring)
+{
+	if (ring == I915_EXEC_DEFAULT)
+		ring = I915_EXEC_RENDER; /* XXX */
+
+	return gem_engine_mmio_base(i915, gem_eb_flags_to_engine(ring).name);
+}
+
 static void independent(int i915, unsigned ring, unsigned flags)
 {
 	const int TIMESTAMP = 1023;
@@ -555,33 +564,8 @@ static void independent(int i915, unsigned ring, unsigned flags)
 	igt_spin_t *spin[MAX_ELSP_QLEN];
 	unsigned int mmio_base;
 
-	/* XXX i915_query()! */
-	switch (ring) {
-	case I915_EXEC_DEFAULT:
-	case I915_EXEC_RENDER:
-		mmio_base = 0x2000;
-		break;
-#if 0
-	case I915_EXEC_BSD:
-		mmio_base = 0x12000;
-		break;
-#endif
-	case I915_EXEC_BLT:
-		mmio_base = 0x22000;
-		break;
-
-#define GEN11_VECS0_BASE 0x1c8000
-#define GEN11_VECS1_BASE 0x1d8000
-	case I915_EXEC_VEBOX:
-		if (intel_gen(intel_get_drm_devid(i915)) >= 11)
-			mmio_base = GEN11_VECS0_BASE;
-		else
-			mmio_base = 0x1a000;
-		break;
-
-	default:
-		igt_skip("mmio base not known\n");
-	}
+	mmio_base = ring_base(i915, ring);
+	igt_require_f(mmio_base, "mmio base not known\n");
 
 	for (int n = 0; n < ARRAY_SIZE(spin); n++) {
 		const struct igt_spin_factory opts = {
diff --git a/tests/i915/gem_exec_latency.c b/tests/i915/gem_exec_latency.c
index 3d99182a0..d2159f317 100644
--- a/tests/i915/gem_exec_latency.c
+++ b/tests/i915/gem_exec_latency.c
@@ -109,7 +109,7 @@ poll_ring(int fd, unsigned ring, const char *name)
 	igt_spin_free(fd, spin[0]);
 }
 
-#define RCS_TIMESTAMP (0x2000 + 0x358)
+#define TIMESTAMP (0x358)
 static void latency_on_ring(int fd,
 			    unsigned ring, const char *name,
 			    unsigned flags)
@@ -119,6 +119,7 @@ static void latency_on_ring(int fd,
 	struct drm_i915_gem_exec_object2 obj[3];
 	struct drm_i915_gem_relocation_entry reloc;
 	struct drm_i915_gem_execbuffer2 execbuf;
+	const uint32_t mmio_base = gem_engine_mmio_base(fd, name);
 	igt_spin_t *spin = NULL;
 	IGT_CORK_HANDLE(c);
 	volatile uint32_t *reg;
@@ -128,7 +129,8 @@ static void latency_on_ring(int fd,
 	double gpu_latency;
 	int i, j;
 
-	reg = (volatile uint32_t *)((volatile char *)igt_global_mmio + RCS_TIMESTAMP);
+	igt_require(mmio_base);
+	reg = (volatile uint32_t *)((volatile char *)igt_global_mmio + mmio_base + TIMESTAMP);
 
 	memset(&execbuf, 0, sizeof(execbuf));
 	execbuf.buffers_ptr = to_user_pointer(&obj[1]);
@@ -176,7 +178,7 @@ static void latency_on_ring(int fd,
 		map[i++] = 0x24 << 23 | 1;
 		if (has_64bit_reloc)
 			map[i-1]++;
-		map[i++] = RCS_TIMESTAMP; /* ring local! */
+		map[i++] = mmio_base + TIMESTAMP;
 		map[i++] = offset;
 		if (has_64bit_reloc)
 			map[i++] = offset >> 32;
@@ -266,11 +268,14 @@ static void latency_from_ring(int fd,
 	struct drm_i915_gem_exec_object2 obj[3];
 	struct drm_i915_gem_relocation_entry reloc;
 	struct drm_i915_gem_execbuffer2 execbuf;
+	const uint32_t mmio_base = gem_engine_mmio_base(fd, name);
 	const unsigned int repeats = ring_size / 2;
 	uint32_t *map, *results;
 	uint32_t ctx[2] = {};
 	int i, j;
 
+	igt_require(mmio_base);
+
 	if (flags & PREEMPT) {
 		ctx[0] = gem_context_create(fd);
 		gem_context_set_priority(fd, ctx[0], -1023);
@@ -351,7 +356,7 @@ static void latency_from_ring(int fd,
 			map[i++] = 0x24 << 23 | 1;
 			if (has_64bit_reloc)
 				map[i-1]++;
-			map[i++] = RCS_TIMESTAMP; /* ring local! */
+			map[i++] = mmio_base + TIMESTAMP;
 			map[i++] = offset;
 			if (has_64bit_reloc)
 				map[i++] = offset >> 32;
@@ -376,7 +381,7 @@ static void latency_from_ring(int fd,
 			map[i++] = 0x24 << 23 | 1;
 			if (has_64bit_reloc)
 				map[i-1]++;
-			map[i++] = RCS_TIMESTAMP; /* ring local! */
+			map[i++] = mmio_base + TIMESTAMP;
 			map[i++] = offset;
 			if (has_64bit_reloc)
 				map[i++] = offset >> 32;
@@ -669,7 +674,7 @@ igt_main
 			ring_size = 1024;
 
 		intel_register_access_init(&mmio_data, intel_get_pci_device(), false, device);
-		rcs_clock = clockrate(device, RCS_TIMESTAMP);
+		rcs_clock = clockrate(device, 0x2000 + TIMESTAMP);
 		igt_info("RCS timestamp clock: %.0fKHz, %.1fns\n",
 			 rcs_clock / 1e3, 1e9 / rcs_clock);
 		rcs_clock = 1e9 / rcs_clock;
-- 
2.25.0


* [Intel-gfx] [PATCH i-g-t 2/5] i915/gem_ctx_isolation: Check engine relative registers
  2020-01-27 12:18 [Intel-gfx] [PATCH i-g-t 1/5] i915: Start putting the mmio_base to wider use Chris Wilson
@ 2020-01-27 12:18 ` Chris Wilson
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 3/5] i915: Exercise preemption timeout controls in sysfs Chris Wilson
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 11+ messages in thread
From: Chris Wilson @ 2020-01-27 12:18 UTC (permalink / raw)
  To: igt-dev; +Cc: intel-gfx

Some of the non-privileged registers are at the same offset on each
engine. We can improve our coverage for unknown HW layout by using the
reported engine->mmio_base for relative offsets.
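
The idea in miniature (illustrative only; the struct and helper here are
invented, but the offsets match the tables below):

  #include <stdbool.h>
  #include <stdint.h>

  struct reg_entry {
          uint32_t offset;   /* absolute, or engine-relative if .relative */
          bool relative;
  };

  static uint32_t resolve_reg(const struct reg_entry *r, uint32_t mmio_base)
  {
          return r->relative ? mmio_base + r->offset : r->offset;
  }

For example, the relative xCS_GPR entry at 0x600 applied to vcs0 on Gen11
(mmio_base 0x1c0000) resolves to 0x1c0600, matching the absolute VCS0_GPR
entry it replaces.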

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/i915/gem_ctx_isolation.c | 164 ++++++++++++++++++++-------------
 1 file changed, 100 insertions(+), 64 deletions(-)

diff --git a/tests/i915/gem_ctx_isolation.c b/tests/i915/gem_ctx_isolation.c
index 8b72a16ad..5b472d693 100644
--- a/tests/i915/gem_ctx_isolation.c
+++ b/tests/i915/gem_ctx_isolation.c
@@ -70,6 +70,7 @@ static const struct named_register {
 	uint32_t ignore_bits;
 	uint32_t write_mask; /* some registers bits do not exist */
 	bool masked;
+	bool relative;
 } nonpriv_registers[] = {
 	{ "NOPID", NOCTX, RCS0, 0x2094 },
 	{ "MI_PREDICATE_RESULT_2", NOCTX, RCS0, 0x23bc },
@@ -109,7 +110,6 @@ static const struct named_register {
 	{ "PS_DEPTH_COUNT_1", GEN8, RCS0, 0x22f8, 2 },
 	{ "BB_OFFSET", GEN8, RCS0, 0x2158, .ignore_bits = 0x7 },
 	{ "MI_PREDICATE_RESULT_1", GEN8, RCS0, 0x241c },
-	{ "CS_GPR", GEN8, RCS0, 0x2600, 32 },
 	{ "OA_CTX_CONTROL", GEN8, RCS0, 0x2360 },
 	{ "OACTXID", GEN8, RCS0, 0x2364 },
 	{ "PS_INVOCATION_COUNT_2", GEN8, RCS0, 0x2448, 2, .write_mask = ~0x3 },
@@ -138,79 +138,56 @@ static const struct named_register {
 
 	{ "CTX_PREEMPT", NOCTX /* GEN10 */, RCS0, 0x2248 },
 	{ "CS_CHICKEN1", GEN11, RCS0, 0x2580, .masked = true },
-	{ "HDC_CHICKEN1", GEN_RANGE(10, 10), RCS0, 0x7304, .masked = true },
 
 	/* Privileged (enabled by w/a + FORCE_TO_NONPRIV) */
 	{ "CTX_PREEMPT", NOCTX /* GEN9 */, RCS0, 0x2248 },
 	{ "CS_CHICKEN1", GEN_RANGE(9, 10), RCS0, 0x2580, .masked = true },
 	{ "COMMON_SLICE_CHICKEN2", GEN_RANGE(9, 9), RCS0, 0x7014, .masked = true },
-	{ "HDC_CHICKEN1", GEN_RANGE(9, 9), RCS0, 0x7304, .masked = true },
+	{ "HDC_CHICKEN1", GEN_RANGE(9, 10), RCS0, 0x7304, .masked = true },
 	{ "SLICE_COMMON_ECO_CHICKEN1", GEN_RANGE(11, 11) /* + glk */, RCS0,  0x731c, .masked = true },
 	{ "L3SQREG4", NOCTX /* GEN9:skl,kbl */, RCS0, 0xb118, .write_mask = ~0x1ffff0 },
 	{ "HALF_SLICE_CHICKEN7", GEN_RANGE(11, 11), RCS0, 0xe194, .masked = true },
 	{ "SAMPLER_MODE", GEN_RANGE(11, 11), RCS0, 0xe18c, .masked = true },
 
-	{ "BCS_GPR", GEN9, BCS0, 0x22600, 32 },
 	{ "BCS_SWCTRL", GEN8, BCS0, 0x22200, .write_mask = 0x3, .masked = true },
 
 	{ "MFC_VDBOX1", NOCTX, VCS0, 0x12800, 64 },
 	{ "MFC_VDBOX2", NOCTX, VCS1, 0x1c800, 64 },
 
-	{ "VCS0_GPR", GEN_RANGE(9, 10), VCS0, 0x12600, 32 },
-	{ "VCS1_GPR", GEN_RANGE(9, 10), VCS1, 0x1c600, 32 },
-	{ "VECS_GPR", GEN_RANGE(9, 10), VECS0, 0x1a600, 32 },
-
-	{ "VCS0_GPR", GEN11, VCS0, 0x1c0600, 32 },
-	{ "VCS1_GPR", GEN11, VCS1, 0x1c4600, 32 },
-	{ "VCS2_GPR", GEN11, VCS2, 0x1d0600, 32 },
-	{ "VCS3_GPR", GEN11, VCS3, 0x1d4600, 32 },
-	{ "VECS_GPR", GEN11, VECS0, 0x1c8600, 32 },
+	{ "xCS_GPR", GEN9, ALL, 0x600, 32, .relative = true },
 
 	{}
 }, ignore_registers[] = {
 	{ "RCS timestamp", GEN6, ~0u, 0x2358 },
 	{ "BCS timestamp", GEN7, ~0u, 0x22358 },
 
-	{ "VCS0 timestamp", GEN_RANGE(7, 10), ~0u, 0x12358 },
-	{ "VCS1 timestamp", GEN_RANGE(7, 10), ~0u, 0x1c358 },
-	{ "VECS timestamp", GEN_RANGE(8, 10), ~0u, 0x1a358 },
-
-	{ "VCS0 timestamp", GEN11, ~0u, 0x1c0358 },
-	{ "VCS1 timestamp", GEN11, ~0u, 0x1c4358 },
-	{ "VCS2 timestamp", GEN11, ~0u, 0x1d0358 },
-	{ "VCS3 timestamp", GEN11, ~0u, 0x1d4358 },
-	{ "VECS timestamp", GEN11, ~0u, 0x1c8358 },
+	{ "xCS timestamp", GEN8, ALL, 0x358, .relative = true },
 
 	/* huc read only */
-	{ "BSD0 0x2000", GEN11, ~0u, 0x1c0000 + 0x2000 },
-	{ "BSD0 0x2000", GEN11, ~0u, 0x1c0000 + 0x2014 },
-	{ "BSD0 0x2000", GEN11, ~0u, 0x1c0000 + 0x23b0 },
-
-	{ "BSD1 0x2000", GEN11, ~0u, 0x1c4000 + 0x2000 },
-	{ "BSD1 0x2000", GEN11, ~0u, 0x1c4000 + 0x2014 },
-	{ "BSD1 0x2000", GEN11, ~0u, 0x1c4000 + 0x23b0 },
-
-	{ "BSD2 0x2000", GEN11, ~0u, 0x1d0000 + 0x2000 },
-	{ "BSD2 0x2000", GEN11, ~0u, 0x1d0000 + 0x2014 },
-	{ "BSD2 0x2000", GEN11, ~0u, 0x1d0000 + 0x23b0 },
-
-	{ "BSD3 0x2000", GEN11, ~0u, 0x1d4000 + 0x2000 },
-	{ "BSD3 0x2000", GEN11, ~0u, 0x1d4000 + 0x2014 },
-	{ "BSD3 0x2000", GEN11, ~0u, 0x1d4000 + 0x23b0 },
+	{ "BSD 0x2000", GEN11, ALL, 0x2000, .relative = true },
+	{ "BSD 0x2014", GEN11, ALL, 0x2014, .relative = true },
+	{ "BSD 0x23b0", GEN11, ALL, 0x23b0, .relative = true },
 
 	{}
 };
 
-static const char *register_name(uint32_t offset, char *buf, size_t len)
+static const char *
+register_name(uint32_t offset, uint32_t mmio_base, char *buf, size_t len)
 {
 	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
 		unsigned int width = r->count ? 4*r->count : 4;
-		if (offset >= r->offset && offset < r->offset + width) {
+		uint32_t base;
+
+		base = r->offset;
+		if (r->relative)
+			base += mmio_base;
+
+		if (offset >= base && offset < base + width) {
 			if (r->count <= 1)
 				return r->name;
 
 			snprintf(buf, len, "%s[%d]",
-				 r->name, (offset - r->offset)/4);
+				 r->name, (offset - base) / 4);
 			return buf;
 		}
 	}
@@ -218,22 +195,35 @@ static const char *register_name(uint32_t offset, char *buf, size_t len)
 	return "unknown";
 }
 
-static const struct named_register *lookup_register(uint32_t offset)
+static const struct named_register *
+lookup_register(uint32_t offset, uint32_t mmio_base)
 {
 	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
 		unsigned int width = r->count ? 4*r->count : 4;
-		if (offset >= r->offset && offset < r->offset + width)
+		uint32_t base;
+
+		base = r->offset;
+		if (r->relative)
+			base += mmio_base;
+
+		if (offset >= base && offset < base + width)
 			return r;
 	}
 
 	return NULL;
 }
 
-static bool ignore_register(uint32_t offset)
+static bool ignore_register(uint32_t offset, uint32_t mmio_base)
 {
 	for (const struct named_register *r = ignore_registers; r->name; r++) {
 		unsigned int width = r->count ? 4*r->count : 4;
-		if (offset >= r->offset && offset < r->offset + width)
+		uint32_t base;
+
+		base = r->offset;
+		if (r->relative)
+			base += mmio_base;
+
+		if (offset >= base && offset < base + width)
 			return true;
 	}
 
@@ -248,6 +238,7 @@ static void tmpl_regs(int fd,
 {
 	const unsigned int gen_bit = 1 << intel_gen(intel_get_drm_devid(fd));
 	const unsigned int engine_bit = ENGINE(e->class, e->instance);
+	const uint32_t mmio_base = gem_engine_mmio_base(fd, e->name);
 	unsigned int regs_size;
 	uint32_t *regs;
 
@@ -259,12 +250,20 @@ static void tmpl_regs(int fd,
 		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
 
 	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
+		uint32_t offset;
+
 		if (!(r->engine_mask & engine_bit))
 			continue;
 		if (!(r->gen_mask & gen_bit))
 			continue;
-		for (unsigned count = r->count ?: 1, offset = r->offset;
-		     count--; offset += 4) {
+		if (r->relative && !mmio_base)
+			continue;
+
+		offset = r->offset;
+		if (r->relative)
+			offset += mmio_base;
+
+		for (unsigned count = r->count ?: 1; count--; offset += 4) {
 			uint32_t x = value;
 			if (r->write_mask)
 				x &= r->write_mask;
@@ -284,6 +283,7 @@ static uint32_t read_regs(int fd,
 	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
 	const unsigned int gen_bit = 1 << gen;
 	const unsigned int engine_bit = ENGINE(e->class, e->instance);
+	const uint32_t mmio_base = gem_engine_mmio_base(fd, e->name);
 	const bool r64b = gen >= 8;
 	struct drm_i915_gem_exec_object2 obj[2];
 	struct drm_i915_gem_relocation_entry *reloc;
@@ -311,13 +311,20 @@ static uint32_t read_regs(int fd,
 
 	n = 0;
 	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
+		uint32_t offset;
+
 		if (!(r->engine_mask & engine_bit))
 			continue;
 		if (!(r->gen_mask & gen_bit))
 			continue;
+		if (r->relative && !mmio_base)
+			continue;
+
+		offset = r->offset;
+		if (r->relative)
+			offset += mmio_base;
 
-		for (unsigned count = r->count ?: 1, offset = r->offset;
-		     count--; offset += 4) {
+		for (unsigned count = r->count ?: 1; count--; offset += 4) {
 			*b++ = 0x24 << 23 | (1 + r64b); /* SRM */
 			*b++ = offset;
 			reloc[n].target_handle = obj[0].handle;
@@ -357,6 +364,7 @@ static void write_regs(int fd,
 {
 	const unsigned int gen_bit = 1 << intel_gen(intel_get_drm_devid(fd));
 	const unsigned int engine_bit = ENGINE(e->class, e->instance);
+	const uint32_t mmio_base = gem_engine_mmio_base(fd, e->name);
 	struct drm_i915_gem_exec_object2 obj;
 	struct drm_i915_gem_execbuffer2 execbuf;
 	unsigned int batch_size;
@@ -372,12 +380,20 @@ static void write_regs(int fd,
 	gem_set_domain(fd, obj.handle,
 		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
 	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
+		uint32_t offset;
+
 		if (!(r->engine_mask & engine_bit))
 			continue;
 		if (!(r->gen_mask & gen_bit))
 			continue;
-		for (unsigned count = r->count ?: 1, offset = r->offset;
-		     count--; offset += 4) {
+		if (r->relative && !mmio_base)
+			continue;
+
+		offset = r->offset;
+		if (r->relative)
+			offset += mmio_base;
+
+		for (unsigned count = r->count ?: 1; count--; offset += 4) {
 			uint32_t x = value;
 			if (r->write_mask)
 				x &= r->write_mask;
@@ -410,6 +426,7 @@ static void restore_regs(int fd,
 	const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
 	const unsigned int gen_bit = 1 << gen;
 	const unsigned int engine_bit = ENGINE(e->class, e->instance);
+	const uint32_t mmio_base = gem_engine_mmio_base(fd, e->name);
 	const bool r64b = gen >= 8;
 	struct drm_i915_gem_exec_object2 obj[2];
 	struct drm_i915_gem_execbuffer2 execbuf;
@@ -437,13 +454,20 @@ static void restore_regs(int fd,
 
 	n = 0;
 	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
+		uint32_t offset;
+
 		if (!(r->engine_mask & engine_bit))
 			continue;
 		if (!(r->gen_mask & gen_bit))
 			continue;
+		if (r->relative && !mmio_base)
+			continue;
+
+		offset = r->offset;
+		if (r->relative)
+			offset += mmio_base;
 
-		for (unsigned count = r->count ?: 1, offset = r->offset;
-		     count--; offset += 4) {
+		for (unsigned count = r->count ?: 1; count--; offset += 4) {
 			*b++ = 0x29 << 23 | (1 + r64b); /* LRM */
 			*b++ = offset;
 			reloc[n].target_handle = obj[0].handle;
@@ -479,6 +503,7 @@ static void dump_regs(int fd,
 	const int gen = intel_gen(intel_get_drm_devid(fd));
 	const unsigned int gen_bit = 1 << gen;
 	const unsigned int engine_bit = ENGINE(e->class, e->instance);
+	const uint32_t mmio_base = gem_engine_mmio_base(fd, e->name);
 	unsigned int regs_size;
 	uint32_t *out;
 
@@ -489,26 +514,36 @@ static void dump_regs(int fd,
 	gem_set_domain(fd, regs, I915_GEM_DOMAIN_CPU, 0);
 
 	for (const struct named_register *r = nonpriv_registers; r->name; r++) {
+		uint32_t offset;
+
 		if (!(r->engine_mask & engine_bit))
 			continue;
 		if (!(r->gen_mask & gen_bit))
 			continue;
+		if (r->relative && !mmio_base)
+			continue;
+
+		offset = r->offset;
+		if (r->relative)
+			offset += mmio_base;
 
 		if (r->count <= 1) {
 			igt_debug("0x%04x (%s): 0x%08x\n",
-				  r->offset, r->name, out[r->offset/4]);
+				  offset, r->name, out[offset / 4]);
 		} else {
 			for (unsigned x = 0; x < r->count; x++)
 				igt_debug("0x%04x (%s[%d]): 0x%08x\n",
-					  r->offset+4*x, r->name, x,
-					  out[r->offset/4 + x]);
+					  offset + 4 * x, r->name, x,
+					  out[offset / 4 + x]);
 		}
 	}
 	munmap(out, regs_size);
 }
 
-static void compare_regs(int fd, uint32_t A, uint32_t B, const char *who)
+static void compare_regs(int fd, const struct intel_execution_engine2 *e,
+			 uint32_t A, uint32_t B, const char *who)
 {
+	const uint32_t mmio_base = gem_engine_mmio_base(fd, e->name);
 	unsigned int num_errors;
 	unsigned int regs_size;
 	uint32_t *a, *b;
@@ -532,11 +567,11 @@ static void compare_regs(int fd, uint32_t A, uint32_t B, const char *who)
 		if (a[n] == b[n])
 			continue;
 
-		if (ignore_register(offset))
+		if (ignore_register(offset, mmio_base))
 			continue;
 
 		mask = ~0u;
-		r = lookup_register(offset);
+		r = lookup_register(offset, mmio_base);
 		if (r && r->masked)
 			mask >>= 16;
 		if (r && r->ignore_bits)
@@ -547,7 +582,7 @@ static void compare_regs(int fd, uint32_t A, uint32_t B, const char *who)
 
 		igt_warn("Register 0x%04x (%s): A=%08x B=%08x\n",
 			 offset,
-			 register_name(offset, buf, sizeof(buf)),
+			 register_name(offset, mmio_base, buf, sizeof(buf)),
 			 a[n] & mask, b[n] & mask);
 		num_errors++;
 	}
@@ -638,7 +673,7 @@ static void nonpriv(int fd,
 
 		igt_spin_free(fd, spin);
 
-		compare_regs(fd, tmpl, regs[1], "nonpriv read/writes");
+		compare_regs(fd, e, tmpl, regs[1], "nonpriv read/writes");
 
 		for (int n = 0; n < ARRAY_SIZE(regs); n++)
 			gem_close(fd, regs[n]);
@@ -708,8 +743,9 @@ static void isolation(int fd,
 		igt_spin_free(fd, spin);
 
 		if (!(flags & DIRTY1))
-			compare_regs(fd, regs[0], tmp, "two reads of the same ctx");
-		compare_regs(fd, regs[0], regs[1], "two virgin contexts");
+			compare_regs(fd, e, regs[0], tmp,
+				     "two reads of the same ctx");
+		compare_regs(fd, e, regs[0], regs[1], "two virgin contexts");
 
 		for (int n = 0; n < ARRAY_SIZE(ctx); n++) {
 			gem_close(fd, regs[n]);
@@ -829,13 +865,13 @@ static void preservation(int fd,
 		char buf[80];
 
 		snprintf(buf, sizeof(buf), "dirty %x context\n", values[v]);
-		compare_regs(fd, regs[v][0], regs[v][1], buf);
+		compare_regs(fd, e, regs[v][0], regs[v][1], buf);
 
 		gem_close(fd, regs[v][0]);
 		gem_close(fd, regs[v][1]);
 		gem_context_destroy(fd, ctx[v]);
 	}
-	compare_regs(fd, regs[num_values][0], regs[num_values][1], "clean");
+	compare_regs(fd, e, regs[num_values][0], regs[num_values][1], "clean");
 	gem_context_destroy(fd, ctx[num_values]);
 }
 
-- 
2.25.0


* [Intel-gfx] [PATCH i-g-t 3/5] i915: Exercise preemption timeout controls in sysfs
  2020-01-27 12:18 [Intel-gfx] [PATCH i-g-t 1/5] i915: Start putting the mmio_base to wider use Chris Wilson
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 2/5] i915/gem_ctx_isolation: Check engine relative registers Chris Wilson
@ 2020-01-27 12:18 ` Chris Wilson
  2020-02-12 14:32   ` [Intel-gfx] [igt-dev] " Petri Latvala
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls Chris Wilson
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 5/5] i915: Exercise timeslice sysfs property Chris Wilson
  3 siblings, 1 reply; 11+ messages in thread
From: Chris Wilson @ 2020-01-27 12:18 UTC (permalink / raw)
  To: igt-dev; +Cc: intel-gfx

We [will] expose various per-engine scheduling controls. One of which,
'preempt_timeout_ms', defines how long we wait for a preemption request to be
honoured by the currently executing context. If it fails to relieve the
GPU within the required timeout, the engine is reset and the miscreant
forcibly evicted.
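
For reference, a rough userspace sketch of poking the control directly. It is
illustrative only and assumes the card0 sysfs layout; the test added below
discovers the per-engine directory via igt_sysfs_open() instead.

  #include <stdio.h>

  static void set_preempt_timeout_ms(const char *engine, unsigned int ms)
  {
          char path[128];
          unsigned int cur;
          FILE *f;

          snprintf(path, sizeof(path),
                   "/sys/class/drm/card0/engine/%s/preempt_timeout_ms",
                   engine);

          f = fopen(path, "r");
          if (f) {
                  if (fscanf(f, "%u", &cur) == 1)
                          printf("%s: preempt_timeout_ms was %ums\n",
                                 engine, cur);
                  fclose(f);
          }

          f = fopen(path, "w");
          if (f) {
                  fprintf(f, "%u", ms); /* 0 disables the reset on failed preemption */
                  fclose(f);
          }
  }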

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 lib/i915/gem_context.c             |  41 ++++
 lib/i915/gem_context.h             |   2 +
 lib/i915/gem_engine_topology.c     |  48 +++++
 lib/i915/gem_engine_topology.h     |   3 +
 tests/Makefile.sources             |   3 +
 tests/i915/sysfs_preempt_timeout.c | 309 +++++++++++++++++++++++++++++
 tests/meson.build                  |   1 +
 7 files changed, 407 insertions(+)
 create mode 100644 tests/i915/sysfs_preempt_timeout.c

diff --git a/lib/i915/gem_context.c b/lib/i915/gem_context.c
index 0b6a554df..fc874a187 100644
--- a/lib/i915/gem_context.c
+++ b/lib/i915/gem_context.c
@@ -462,3 +462,44 @@ bool gem_context_has_engine(int fd, uint32_t ctx, uint64_t engine)
 
 	return __gem_execbuf(fd, &execbuf) == -ENOENT;
 }
+
+static int create_ext_ioctl(int i915,
+			    struct drm_i915_gem_context_create_ext *arg)
+{
+	int err;
+
+	err = 0;
+	if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, arg)) {
+		err = -errno;
+		igt_assume(err);
+	}
+
+	errno = 0;
+	return err;
+}
+
+uint32_t gem_context_create_for_engine(int i915, unsigned int class, unsigned int inst)
+{
+	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
+		.engines = { { .engine_class = class, .engine_instance = inst } }
+	};
+	struct drm_i915_gem_context_create_ext_setparam p_engines = {
+		.base = {
+			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+			.next_extension = 0, /* end of chain */
+		},
+		.param = {
+			.param = I915_CONTEXT_PARAM_ENGINES,
+			.value = to_user_pointer(&engines),
+			.size = sizeof(engines),
+		},
+	};
+	struct drm_i915_gem_context_create_ext create = {
+		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
+		.extensions = to_user_pointer(&p_engines),
+	};
+
+	igt_assert_eq(create_ext_ioctl(i915, &create), 0);
+	igt_assert_neq(create.ctx_id, 0);
+	return create.ctx_id;
+}
diff --git a/lib/i915/gem_context.h b/lib/i915/gem_context.h
index cf2ba33fe..ded75bb9c 100644
--- a/lib/i915/gem_context.h
+++ b/lib/i915/gem_context.h
@@ -34,6 +34,8 @@ int __gem_context_create(int fd, uint32_t *ctx_id);
 void gem_context_destroy(int fd, uint32_t ctx_id);
 int __gem_context_destroy(int fd, uint32_t ctx_id);
 
+uint32_t gem_context_create_for_engine(int fd, unsigned int class, unsigned int inst);
+
 int __gem_context_clone(int i915,
 			uint32_t src, unsigned int share,
 			unsigned int flags,
diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
index 058983123..81faf3c15 100644
--- a/lib/i915/gem_engine_topology.c
+++ b/lib/i915/gem_engine_topology.c
@@ -22,6 +22,8 @@
  */
 
 #include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
 #include <unistd.h>
 
 #include "drmtest.h"
@@ -415,3 +417,49 @@ uint32_t gem_engine_mmio_base(int i915, const char *engine)
 
 	return mmio;
 }
+
+void dyn_sysfs_engines(int i915, int engines, const char *file,
+		       void (*test)(int, int))
+{
+	char buf[512];
+	int len;
+
+	lseek(engines, 0, SEEK_SET);
+	while ((len = syscall(SYS_getdents64, engines, buf, sizeof(buf))) > 0) {
+		void *ptr = buf;
+
+		while (len) {
+			struct linux_dirent64 {
+				ino64_t        d_ino;
+				off64_t        d_off;
+				unsigned short d_reclen;
+				unsigned char  d_type;
+				char           d_name[];
+			} *de = ptr;
+			char *name;
+			int engine;
+
+			ptr += de->d_reclen;
+			len -= de->d_reclen;
+
+			engine = openat(engines, de->d_name, O_RDONLY);
+			name = igt_sysfs_get(engine, "name");
+			if (!name) {
+				close(engine);
+				continue;
+			}
+
+			igt_dynamic(name) {
+				if (file) {
+					struct stat st;
+
+					igt_require(fstatat(engine, file, &st, 0) == 0);
+				}
+
+				test(i915, engine);
+			}
+
+			close(engine);
+		}
+	}
+}
diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
index 7a2e21f66..456c806f5 100644
--- a/lib/i915/gem_engine_topology.h
+++ b/lib/i915/gem_engine_topology.h
@@ -77,4 +77,7 @@ int gem_engine_property_scanf(int i915, const char *engine, const char *attr,
 			      const char *fmt, ...);
 uint32_t gem_engine_mmio_base(int i915, const char *engine);
 
+void dyn_sysfs_engines(int i915, int engines, const char *file,
+		       void (*test)(int i915, int engine));
+
 #endif /* GEM_ENGINE_TOPOLOGY_H */
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 7c5693457..fc9e04e97 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -102,6 +102,9 @@ TESTS_progs = \
 	vgem_slow \
 	$(NULL)
 
+TESTS_progs += sysfs_preempt_timeout
+sysfs_preempt_timeout_SOURCES = i915/sysfs_preempt_timeout.c
+
 TESTS_progs += gem_bad_reloc
 gem_bad_reloc_SOURCES = i915/gem_bad_reloc.c
 
diff --git a/tests/i915/sysfs_preempt_timeout.c b/tests/i915/sysfs_preempt_timeout.c
new file mode 100644
index 000000000..f465c8a71
--- /dev/null
+++ b/tests/i915/sysfs_preempt_timeout.c
@@ -0,0 +1,309 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "drmtest.h" /* gem_quiescent_gpu()! */
+#include "i915/gem_engine_topology.h"
+#include "igt_dummyload.h"
+#include "igt_sysfs.h"
+#include "ioctl_wrappers.h" /* igt_require_gem()! */
+#include "sw_sync.h"
+
+#include "igt_debugfs.h"
+
+static bool __enable_hangcheck(int dir, bool state)
+{
+	return igt_sysfs_set(dir, "enable_hangcheck", state ? "1" : "0");
+}
+
+static bool enable_hangcheck(int i915, bool state)
+{
+	bool success;
+	int dir;
+
+	dir = igt_sysfs_open_parameters(i915);
+	if (dir < 0) /* no parameters, must be default! */
+		return false;
+
+	success = __enable_hangcheck(dir, state);
+	close(dir);
+
+	return success;
+}
+
+static void set_preempt_timeout(int engine, unsigned int value)
+{
+	unsigned int delay;
+
+	igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", value);
+	igt_sysfs_scanf(engine, "preempt_timeout_ms", "%u", &delay);
+	igt_assert_eq(delay, value);
+}
+
+static void test_idempotent(int i915, int engine)
+{
+	unsigned int delays[] = { 0, 1, 1000, 1234, 654321 };
+	unsigned int saved;
+
+	/* Quick test that store/show reports the same values */
+
+	igt_assert(igt_sysfs_scanf(engine, "preempt_timeout_ms", "%u", &saved) == 1);
+	igt_debug("Initial preempt_timeout_ms:%u\n", saved);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++)
+		set_preempt_timeout(engine, delays[i]);
+
+	set_preempt_timeout(engine, saved);
+}
+
+static void test_invalid(int i915, int engine)
+{
+	unsigned int saved, delay;
+
+	/* Quick test that values that are not representable are rejected */
+
+	igt_assert(igt_sysfs_scanf(engine, "preempt_timeout_ms", "%u", &saved) == 1);
+	igt_debug("Initial preempt_timeout_ms:%u\n", saved);
+
+	igt_sysfs_printf(engine, "preempt_timeout_ms", PRIu64, -1);
+	igt_sysfs_scanf(engine, "preempt_timeout_ms", "%u", &delay);
+	igt_assert_eq(delay, saved);
+
+	igt_sysfs_printf(engine, "preempt_timeout_ms", "%d", -1);
+	igt_sysfs_scanf(engine, "preempt_timeout_ms", "%u", &delay);
+	igt_assert_eq(delay, saved);
+
+	igt_sysfs_printf(engine, "preempt_timeout_ms", PRIu64, 40ull << 32);
+	igt_sysfs_scanf(engine, "preempt_timeout_ms", "%u", &delay);
+	igt_assert_eq(delay, saved);
+}
+
+static void set_unbannable(int i915, uint32_t ctx)
+{
+	struct drm_i915_gem_context_param p = {
+		.ctx_id = ctx,
+		.param = I915_CONTEXT_PARAM_BANNABLE,
+	};
+
+	igt_assert_eq(__gem_context_set_param(i915, &p), 0);
+}
+
+static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
+{
+	uint32_t ctx;
+
+	ctx = gem_context_create_for_engine(i915, class, inst);
+	set_unbannable(i915, ctx);
+	gem_context_set_priority(i915, ctx, prio);
+
+	return ctx;
+}
+
+static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
+{
+	unsigned int class, inst;
+	struct timespec ts = {};
+	igt_spin_t *spin[2];
+	uint64_t elapsed;
+	uint32_t ctx[2];
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_preempt_timeout(engine, timeout);
+
+	ctx[0] = create_context(i915, class, inst, -1023);
+	spin[0] = igt_spin_new(i915, ctx[0],
+			       .flags = (IGT_SPIN_NO_PREEMPTION |
+					 IGT_SPIN_POLL_RUN |
+					 IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin[0]);
+
+	ctx[1] = create_context(i915, class, inst, 1023);
+	igt_nsec_elapsed(&ts);
+	spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+	igt_spin_busywait_until_started(spin[1]);
+	elapsed = igt_nsec_elapsed(&ts);
+
+	igt_spin_free(i915, spin[1]);
+
+	igt_assert_eq(sync_fence_wait(spin[0]->out_fence, 1), 0);
+	igt_assert_eq(sync_fence_status(spin[0]->out_fence), -EIO);
+
+	igt_spin_free(i915, spin[0]);
+
+	gem_context_destroy(i915, ctx[1]);
+	gem_context_destroy(i915, ctx[0]);
+	gem_quiescent_gpu(i915);
+
+	return elapsed;
+}
+
+static void test_timeout(int i915, int engine)
+{
+	int delays[] = { 1, 50, 100, 500 };
+	unsigned int saved;
+
+	/*
+	 * Send down some non-preemptable workloads and then request a
+	 * switch to a higher priority context. The HW will not be able to
+	 * respond, so the kernel will be forced to reset the hog. This
+	 * timeout should match our specification, and so we can measure
+	 * the delay from requesting the preemption to its completion.
+	 */
+
+	igt_assert(igt_sysfs_scanf(engine, "preempt_timeout_ms", "%u", &saved) == 1);
+	igt_debug("Initial preempt_timeout_ms:%u\n", saved);
+
+	gem_quiescent_gpu(i915);
+	igt_require(enable_hangcheck(i915, false));
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++) {
+		uint64_t elapsed;
+
+		elapsed = __test_timeout(i915, engine, delays[i]);
+		igt_info("preempt_timeout_ms:%d, elapsed=%.3fms\n",
+			 delays[i], elapsed * 1e-6);
+
+		/*
+		 * We need to give a couple of jiffies slack for the scheduler timeouts
+		 * and then a little more slack for the overhead in submitting and
+		 * measuring. 50ms should cover all of our sins and be useful
+		 * tolerance.
+		 */
+		igt_assert_f(elapsed / 1000 / 1000 < delays[i] + 50,
+			     "Forced preemption timeout exceeded request!\n");
+	}
+
+	igt_assert(enable_hangcheck(i915, true));
+	gem_quiescent_gpu(i915);
+	set_preempt_timeout(engine, saved);
+}
+
+static void test_off(int i915, int engine)
+{
+	unsigned int class, inst;
+	igt_spin_t *spin[2];
+	unsigned int saved;
+	uint32_t ctx[2];
+
+	/*
+	 * We support setting the timeout to 0 to disable the reset on
+	 * preemption failure. Having established that we can do forced
+	 * preemption on demand, we use the same setup (non-preemptable hog
+	 * followed by a high priority context) and verify that the hog is
+	 * never reset. Never is a long time, so we settle for 150s.
+	 */
+
+	igt_assert(igt_sysfs_scanf(engine, "preempt_timeout_ms", "%u", &saved) == 1);
+	igt_debug("Initial preempt_timeout_ms:%u\n", saved);
+
+	gem_quiescent_gpu(i915);
+	igt_require(enable_hangcheck(i915, false));
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_preempt_timeout(engine, 0);
+
+	ctx[0] = create_context(i915, class, inst, -1023);
+	spin[0] = igt_spin_new(i915, ctx[0],
+			       .flags = (IGT_SPIN_NO_PREEMPTION |
+					 IGT_SPIN_POLL_RUN |
+					 IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin[0]);
+
+	ctx[1] = create_context(i915, class, inst, 1023);
+	spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+
+	for (int i = 0; i < 150; i++) {
+		igt_assert_eq(sync_fence_status(spin[0]->out_fence), 0);
+		sleep(1);
+	}
+
+	set_preempt_timeout(engine, 1);
+
+	igt_spin_busywait_until_started(spin[1]);
+	igt_spin_free(i915, spin[1]);
+
+	igt_assert_eq(sync_fence_wait(spin[0]->out_fence, 1), 0);
+	igt_assert_eq(sync_fence_status(spin[0]->out_fence), -EIO);
+
+	igt_spin_free(i915, spin[0]);
+
+	gem_context_destroy(i915, ctx[1]);
+	gem_context_destroy(i915, ctx[0]);
+
+	igt_assert(enable_hangcheck(i915, true));
+	gem_quiescent_gpu(i915);
+
+	set_preempt_timeout(engine, saved);
+}
+
+igt_main
+{
+	int i915 = -1, engines = -1;
+
+	igt_fixture {
+		int sys;
+
+		i915 = drm_open_driver(DRIVER_INTEL);
+		igt_require_gem(i915);
+		igt_allow_hang(i915, 0, 0);
+
+		sys = igt_sysfs_open(i915);
+		igt_require(sys != -1);
+
+		engines = openat(sys, "engine", O_RDONLY);
+		igt_require(engines != -1);
+
+		close(sys);
+	}
+
+	igt_subtest_with_dynamic("idempotent")
+		dyn_sysfs_engines(i915, engines, "preempt_timeout_ms",
+				  test_idempotent);
+
+	igt_subtest_with_dynamic("invalid")
+		dyn_sysfs_engines(i915, engines, "preempt_timeout_ms",
+				  test_invalid);
+
+	igt_subtest_with_dynamic("timeout")
+		dyn_sysfs_engines(i915, engines, "preempt_timeout_ms",
+				  test_timeout);
+
+	igt_subtest_with_dynamic("off")
+		dyn_sysfs_engines(i915, engines, "preempt_timeout_ms",
+				  test_off);
+
+	igt_fixture {
+		close(engines);
+		close(i915);
+	}
+}
diff --git a/tests/meson.build b/tests/meson.build
index 62783e2cf..1893f2c34 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -238,6 +238,7 @@ i915_progs = [
 	'i915_query',
 	'i915_selftest',
 	'i915_suspend',
+	'sysfs_preempt_timeout',
 ]
 
 test_deps = [ igt_deps ]
-- 
2.25.0


* [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls
  2020-01-27 12:18 [Intel-gfx] [PATCH i-g-t 1/5] i915: Start putting the mmio_base to wider use Chris Wilson
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 2/5] i915/gem_ctx_isolation: Check engine relative registers Chris Wilson
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 3/5] i915: Exercise preemption timeout controls in sysfs Chris Wilson
@ 2020-01-27 12:18 ` Chris Wilson
  2020-02-12 14:33   ` [Intel-gfx] [igt-dev] " Petri Latvala
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 5/5] i915: Exercise timeslice sysfs property Chris Wilson
  3 siblings, 1 reply; 11+ messages in thread
From: Chris Wilson @ 2020-01-27 12:18 UTC (permalink / raw)
  To: igt-dev; +Cc: intel-gfx

We [will] expose various per-engine scheduling controls. One of which,
'heartbeat_interval_ms', defines how often we send a heartbeat down the
engine to check upon its health. If a heartbeat does not complete within
the interval (or two), the engine is declared hung.
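
As a rough illustration of the bound the 'precise' subtest below asserts
(using a 1ms forced-preemption timeout): a hog should be cleared within a
few intervals plus scheduling slack. The helper is invented; the numbers
come from the test.

  #include <stdio.h>

  static unsigned int worst_case_detect_ms(unsigned int heartbeat_ms)
  {
          /* ~3 intervals (pulse, priority bump, terminate) + 150ms slack */
          return 3 * heartbeat_ms + 150;
  }

  int main(void)
  {
          const unsigned int intervals[] = { 1, 50, 100, 500 };

          for (unsigned int i = 0; i < sizeof(intervals) / sizeof(*intervals); i++)
                  printf("heartbeat_interval_ms=%u -> reset expected within ~%ums\n",
                         intervals[i], worst_case_detect_ms(intervals[i]));

          return 0;
  }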

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/Makefile.sources                |   3 +
 tests/i915/sysfs_heartbeat_interval.c | 466 ++++++++++++++++++++++++++
 tests/meson.build                     |   1 +
 3 files changed, 470 insertions(+)
 create mode 100644 tests/i915/sysfs_heartbeat_interval.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index fc9e04e97..fd6f67a73 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -102,6 +102,9 @@ TESTS_progs = \
 	vgem_slow \
 	$(NULL)
 
+TESTS_progs += sysfs_heartbeat_interval
+sysfs_heartbeat_interval_SOURCES = i915/sysfs_heartbeat_interval.c
+
 TESTS_progs += sysfs_preempt_timeout
 sysfs_preempt_timeout_SOURCES = i915/sysfs_preempt_timeout.c
 
diff --git a/tests/i915/sysfs_heartbeat_interval.c b/tests/i915/sysfs_heartbeat_interval.c
new file mode 100644
index 000000000..9ce7b7b85
--- /dev/null
+++ b/tests/i915/sysfs_heartbeat_interval.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "drmtest.h" /* gem_quiescent_gpu()! */
+#include "i915/gem_engine_topology.h"
+#include "igt_dummyload.h"
+#include "igt_sysfs.h"
+#include "ioctl_wrappers.h" /* igt_require_gem()! */
+#include "sw_sync.h"
+
+#include "igt_debugfs.h"
+
+static bool __enable_hangcheck(int dir, bool state)
+{
+	return igt_sysfs_set(dir, "enable_hangcheck", state ? "1" : "0");
+}
+
+static void enable_hangcheck(int i915, bool state)
+{
+	int dir;
+
+	dir = igt_sysfs_open_parameters(i915);
+	if (dir < 0) /* no parameters, must be default! */
+		return;
+
+	__enable_hangcheck(dir, state);
+	close(dir);
+}
+
+static void set_heartbeat(int engine, unsigned int value)
+{
+	unsigned int delay = ~value;
+
+	igt_sysfs_printf(engine, "heartbeat_interval_ms", "%u", value);
+	igt_sysfs_scanf(engine, "heartbeat_interval_ms", "%u", &delay);
+	igt_assert_eq(delay, value);
+}
+
+static void test_idempotent(int i915, int engine)
+{
+	unsigned int delays[] = { 1, 1000, 5000, 50000, 123456789 };
+	unsigned int saved;
+
+	/* Quick test that the property reports the values we set */
+
+	igt_assert(igt_sysfs_scanf(engine, "heartbeat_interval_ms", "%u", &saved) == 1);
+	igt_debug("Initial heartbeat_interval_ms:%u\n", saved);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++)
+		set_heartbeat(engine, delays[i]);
+
+	set_heartbeat(engine, saved);
+}
+
+static void test_invalid(int i915, int engine)
+{
+	unsigned int saved, delay;
+
+	/* Quick test that we reject any unrepresentable intervals */
+
+	igt_assert(igt_sysfs_scanf(engine, "heartbeat_interval_ms", "%u", &saved) == 1);
+	igt_debug("Initial heartbeat_interval_ms:%u\n", saved);
+
+	igt_sysfs_printf(engine, "heartbeat_interval_ms", PRIu64, -1);
+	igt_sysfs_scanf(engine, "heartbeat_interval_ms", "%u", &delay);
+	igt_assert_eq(delay, saved);
+
+	igt_sysfs_printf(engine, "heartbeat_interval_ms", PRIu64, 10ull << 32);
+	igt_sysfs_scanf(engine, "heartbeat_interval_ms", "%u", &delay);
+	igt_assert_eq(delay, saved);
+}
+
+static void set_unbannable(int i915, uint32_t ctx)
+{
+	struct drm_i915_gem_context_param p = {
+		.ctx_id = ctx,
+		.param = I915_CONTEXT_PARAM_BANNABLE,
+	};
+
+	igt_assert_eq(__gem_context_set_param(i915, &p), 0);
+}
+
+static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
+{
+	uint32_t ctx;
+
+	ctx = gem_context_create_for_engine(i915, class, inst);
+	set_unbannable(i915, ctx);
+	gem_context_set_priority(i915, ctx, prio);
+
+	return ctx;
+}
+
+static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
+{
+	unsigned int class, inst;
+	struct timespec ts = {};
+	igt_spin_t *spin[2];
+	uint64_t elapsed;
+	uint32_t ctx[2];
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_heartbeat(engine, timeout);
+
+	ctx[0] = create_context(i915, class, inst, 1023);
+	spin[0] = igt_spin_new(i915, ctx[0],
+			       .flags = (IGT_SPIN_NO_PREEMPTION |
+					 IGT_SPIN_POLL_RUN |
+					 IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin[0]);
+
+	ctx[1] = create_context(i915, class, inst, -1023);
+	igt_nsec_elapsed(&ts);
+	spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+	igt_spin_busywait_until_started(spin[1]);
+	elapsed = igt_nsec_elapsed(&ts);
+
+	igt_spin_free(i915, spin[1]);
+
+	igt_assert_eq(sync_fence_wait(spin[0]->out_fence, 1), 0);
+	igt_assert_eq(sync_fence_status(spin[0]->out_fence), -EIO);
+
+	igt_spin_free(i915, spin[0]);
+
+	gem_context_destroy(i915, ctx[1]);
+	gem_context_destroy(i915, ctx[0]);
+	gem_quiescent_gpu(i915);
+
+	return elapsed;
+}
+
+static void test_precise(int i915, int engine)
+{
+	int delays[] = { 1, 50, 100, 500 };
+	unsigned int saved;
+
+	/*
+	 * The heartbeat interval defines how long the kernel waits between
+	 * checking on the status of the engines. It first sends down a
+	 * heartbeat pulse, waits the interval and sees if the system managed
+	 * to complete the pulse. If not, it gives a priority bump to the pulse
+	 * and waits again. This is repeated until the priority cannot be bumped
+	 * any more, and the system is declared hung.
+	 *
+	 * If we combine the preemptive pulse with forced preemption, we instead
+	 * get a much faster hang detection. Thus in combination we can measure
+	 * the system response time to resetting a hog as a measure of the
+	 * heartbeat interval, and so confirm it matches our specification.
+	 */
+
+	igt_require(igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 1) == 1);
+
+	igt_assert(igt_sysfs_scanf(engine, "heartbeat_interval_ms", "%u", &saved) == 1);
+	igt_debug("Initial heartbeat_interval_ms:%u\n", saved);
+	gem_quiescent_gpu(i915);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++) {
+		uint64_t elapsed;
+
+		elapsed = __test_timeout(i915, engine, delays[i]);
+		igt_info("heartbeat_interval_ms:%d, elapsed=%.3fms[%d]\n",
+			 delays[i], elapsed * 1e-6,
+				(int)(elapsed / 1000 / 1000)
+			 );
+
+		/*
+		 * It takes a couple of missed heartbeats before we start
+		 * terminating hogs, and a little bit of jiffie slack for
+		 * scheduling at each step. 150ms should cover all of our
+		 * sins and be useful tolerance.
+		 */
+		igt_assert_f(elapsed / 1000 / 1000 < 3 * delays[i] + 150,
+			     "Heartbeat interval (and CPR) exceeded request!\n");
+	}
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+static void test_nopreempt(int i915, int engine)
+{
+	int delays[] = { 1, 50, 100, 500 };
+	unsigned int saved;
+
+	/*
+	 * The same principle as test_precise(), except that forced preemption
+	 * is disabled (or simply not supported by the platform). This time,
+	 * it waits until the system misses a few heartbeats before doing a
+	 * per-engine/full-gpu reset. As such it is less precise, but we
+	 * can still estimate an upper bound for our specified heartbeat
+	 * interval and verify the system conforms.
+	 */
+
+	/* Test heartbeats with forced preemption disabled */
+	igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 0);
+
+	igt_assert(igt_sysfs_scanf(engine, "heartbeat_interval_ms", "%u", &saved) == 1);
+	igt_debug("Initial heartbeat_interval_ms:%u\n", saved);
+	gem_quiescent_gpu(i915);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++) {
+		uint64_t elapsed;
+
+		elapsed = __test_timeout(i915, engine, delays[i]);
+		igt_info("heartbeat_interval_ms:%d, elapsed=%.3fms[%d]\n",
+			 delays[i], elapsed * 1e-6,
+				(int)(elapsed / 1000 / 1000)
+			 );
+
+		/*
+		 * It takes several missed heartbeats before we start
+		 * terminating hogs, and a little bit of jiffie slack for
+		 * scheduling at each step. 250ms should cover all of our
+		 * sins and be useful tolerance.
+		 */
+		igt_assert_f(elapsed / 1000 / 1000 < 5 * delays[i] + 150,
+			     "Heartbeat interval (and CPR) exceeded request!\n");
+	}
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+static void client(int i915, int engine, int *ctl, int duration, int expect)
+{
+	unsigned int class, inst;
+	unsigned long count = 0;
+	uint32_t ctx;
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	ctx = create_context(i915, class, inst, 0);
+
+	while (!READ_ONCE(*ctl)) {
+		igt_spin_t *spin;
+
+		spin = igt_spin_new(i915, ctx,
+				    .flags = (IGT_SPIN_NO_PREEMPTION |
+					      IGT_SPIN_POLL_RUN |
+					      IGT_SPIN_FENCE_OUT));
+		igt_spin_busywait_until_started(spin);
+
+		igt_spin_set_timeout(spin, (uint64_t)duration * 1000 * 1000);
+		sync_fence_wait(spin->out_fence, -1);
+
+		igt_assert_eq(sync_fence_status(spin->out_fence), expect);
+		count++;
+	}
+
+	gem_context_destroy(i915, ctx);
+	igt_info("%s client completed %lu spins\n",
+		 expect < 0 ? "Bad" : "Good", count);
+}
+
+static void sigign(int sig)
+{
+}
+
+static void wait_until(int duration)
+{
+	signal(SIGCHLD, sigign);
+	sleep(duration);
+	signal(SIGCHLD, SIG_IGN);
+}
+
+static void __test_mixed(int i915, int engine,
+			 int heartbeat,
+			 int good,
+			 int bad,
+			 int duration)
+{
+	unsigned int saved;
+	int *shared;
+
+	/*
+	 * Given two clients of which one is a hog, be sure we cleanly
+	 * terminate the hog leaving the good client to run.
+	 */
+
+	igt_assert(igt_sysfs_scanf(engine, "heartbeat_interval_ms", "%u", &saved) == 1);
+	igt_debug("Initial heartbeat_interval_ms:%u\n", saved);
+	gem_quiescent_gpu(i915);
+
+	shared = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+	igt_assert(shared != MAP_FAILED);
+
+	set_heartbeat(engine, heartbeat);
+
+	igt_fork(child, 1) /* good client */
+		client(i915, engine, shared, good, 1);
+	igt_fork(child, 1) /* bad client */
+		client(i915, engine, shared, bad, -EIO);
+
+	wait_until(duration);
+
+	*shared = true;
+	igt_waitchildren();
+	munmap(shared, 4096);
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+static void test_mixed(int i915, int engine)
+{
+	/*
+	 * Hogs rarely run alone. Our hang detection must carefully weed
+	 * out the hogs from the innocent clients. Thus we run a mixed workload
+	 * with non-preemptable hogs that exceed the heartbeat, and quicker
+	 * innocents. We inspect the fence status of each to verify that
+	 * only the hogs are reset.
+	 */
+	igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 1);
+	__test_mixed(i915, engine, 10, 10, 100, 5);
+}
+
+static void test_long(int i915, int engine)
+{
+	/*
+	 * Some clients relish being hogs, and demand that the system
+	 * never do hangchecking. Never is hard to test, so instead we
+	 * run over a day and verify that only the super hogs are reset.
+	 */
+	igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 0);
+	__test_mixed(i915, engine,
+		     60 * 1000, /* 60s */
+		     60 * 1000, /* 60s */
+		     300 * 1000, /* 5min */
+		     24 * 3600 /* 24hours */);
+}
+
+static void test_off(int i915, int engine)
+{
+	unsigned int class, inst;
+	unsigned int saved;
+	igt_spin_t *spin;
+	uint32_t ctx;
+
+	/*
+	 * Some other clients request that there is never any interruption
+	 * or jitter in their workload and so demand that the kernel never
+	 * sends a heartbeat to steal precious cycles from their workload.
+	 * Turn off the heartbeat and check that the workload is uninterrupted
+	 * for 150s.
+	 */
+
+	igt_assert(igt_sysfs_scanf(engine, "heartbeat_interval_ms", "%u", &saved) == 1);
+	igt_debug("Initial heartbeat_interval_ms:%u\n", saved);
+	gem_quiescent_gpu(i915);
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_heartbeat(engine, 0);
+
+	ctx = create_context(i915, class, inst, 0);
+
+	spin = igt_spin_new(i915, ctx,
+			    .flags = (IGT_SPIN_POLL_RUN |
+				      IGT_SPIN_NO_PREEMPTION |
+				      IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin);
+
+	for (int i = 0; i < 150; i++) {
+		igt_assert_eq(sync_fence_status(spin->out_fence), 0);
+		sleep(1);
+	}
+
+	set_heartbeat(engine, 1);
+
+	igt_assert_eq(sync_fence_wait(spin->out_fence, 250), 0);
+	igt_assert_eq(sync_fence_status(spin->out_fence), -EIO);
+
+	igt_spin_free(i915, spin);
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+igt_main
+{
+	int i915 = -1, engines = -1;
+
+	igt_fixture {
+		int sys;
+
+		i915 = drm_open_driver(DRIVER_INTEL);
+		igt_require_gem(i915);
+		igt_allow_hang(i915, 0, 0);
+
+		sys = igt_sysfs_open(i915);
+		igt_require(sys != -1);
+
+		engines = openat(sys, "engine", O_RDONLY);
+		igt_require(engines != -1);
+		close(sys);
+
+		enable_hangcheck(i915, true);
+	}
+
+	igt_subtest_with_dynamic("idempotent")
+		dyn_sysfs_engines(i915, engines, "heartbeat_interval_ms",
+				  test_idempotent);
+
+	igt_subtest_with_dynamic("invalid")
+		dyn_sysfs_engines(i915, engines, "heartbeat_interval_ms",
+				  test_invalid);
+
+	igt_subtest_with_dynamic("precise")
+		dyn_sysfs_engines(i915, engines, "heartbeat_interval_ms",
+				  test_precise);
+
+	igt_subtest_with_dynamic("nopreempt")
+		dyn_sysfs_engines(i915, engines, "heartbeat_interval_ms",
+				  test_nopreempt);
+
+	igt_subtest_with_dynamic("mixed")
+		dyn_sysfs_engines(i915, engines, "heartbeat_interval_ms",
+				  test_mixed);
+
+	igt_subtest_with_dynamic("off")
+		dyn_sysfs_engines(i915, engines, "heartbeat_interval_ms",
+				  test_off);
+
+	igt_subtest_with_dynamic("long")
+		dyn_sysfs_engines(i915, engines, "heartbeat_interval_ms",
+				  test_long);
+
+	igt_fixture {
+		close(engines);
+		close(i915);
+	}
+}
diff --git a/tests/meson.build b/tests/meson.build
index 1893f2c34..8795dfe1b 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -238,6 +238,7 @@ i915_progs = [
 	'i915_query',
 	'i915_selftest',
 	'i915_suspend',
+	'sysfs_heartbeat_interval',
 	'sysfs_preempt_timeout',
 ]
 
-- 
2.25.0


* [Intel-gfx] [PATCH i-g-t 5/5] i915: Exercise timeslice sysfs property
  2020-01-27 12:18 [Intel-gfx] [PATCH i-g-t 1/5] i915: Start putting the mmio_base to wider use Chris Wilson
                   ` (2 preceding siblings ...)
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls Chris Wilson
@ 2020-01-27 12:18 ` Chris Wilson
  3 siblings, 0 replies; 11+ messages in thread
From: Chris Wilson @ 2020-01-27 12:18 UTC (permalink / raw)
  To: igt-dev; +Cc: intel-gfx

We [will] expose various per-engine scheduling controls. One of which,
'timeslice_duration_ms', defines the scheduling quantum. If a context
exhausts its timeslice, it will be preempted in favour of running one of
its compatriots.
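
Illustrative only: a sketch that applies one quantum to every engine listed
under the (assumed) card0 sysfs directory, similar in spirit to the
dyn_sysfs_engines() iteration these tests use.

  #include <dirent.h>
  #include <stdio.h>

  static void set_all_timeslices(unsigned int ms)
  {
          const char *root = "/sys/class/drm/card0/engine";
          struct dirent *de;
          DIR *dir;

          dir = opendir(root);
          if (!dir)
                  return;

          while ((de = readdir(dir))) {
                  char path[512];
                  FILE *f;

                  if (de->d_name[0] == '.')
                          continue;

                  snprintf(path, sizeof(path), "%s/%s/timeslice_duration_ms",
                           root, de->d_name);
                  f = fopen(path, "w");
                  if (!f)
                          continue;

                  /* a running context is preempted after ~ms if others are queued */
                  fprintf(f, "%u", ms);
                  fclose(f);
          }

          closedir(dir);
  }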

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/Makefile.sources                |   3 +
 tests/i915/sysfs_timeslice_duration.c | 503 ++++++++++++++++++++++++++
 tests/meson.build                     |   1 +
 3 files changed, 507 insertions(+)
 create mode 100644 tests/i915/sysfs_timeslice_duration.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index fd6f67a73..41682040d 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -108,6 +108,9 @@ sysfs_heartbeat_interval_SOURCES = i915/sysfs_heartbeat_interval.c
 TESTS_progs += sysfs_preempt_timeout
 sysfs_preempt_timeout_SOURCES = i915/sysfs_preempt_timeout.c
 
+TESTS_progs += sysfs_timeslice_duration
+sysfs_timeslice_duration_SOURCES = i915/sysfs_timeslice_duration.c
+
 TESTS_progs += gem_bad_reloc
 gem_bad_reloc_SOURCES = i915/gem_bad_reloc.c
 
diff --git a/tests/i915/sysfs_timeslice_duration.c b/tests/i915/sysfs_timeslice_duration.c
new file mode 100644
index 000000000..42fff43e2
--- /dev/null
+++ b/tests/i915/sysfs_timeslice_duration.c
@@ -0,0 +1,503 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "drmtest.h" /* gem_quiescent_gpu()! */
+#include "i915/gem_engine_topology.h"
+#include "i915/gem_mman.h"
+#include "igt_dummyload.h"
+#include "igt_sysfs.h"
+#include "ioctl_wrappers.h" /* igt_require_gem()! */
+#include "intel_chipset.h"
+#include "intel_reg.h"
+#include "sw_sync.h"
+
+#define MI_SEMAPHORE_WAIT		(0x1c << 23)
+#define   MI_SEMAPHORE_POLL             (1 << 15)
+#define   MI_SEMAPHORE_SAD_GT_SDD       (0 << 12)
+#define   MI_SEMAPHORE_SAD_GTE_SDD      (1 << 12)
+#define   MI_SEMAPHORE_SAD_LT_SDD       (2 << 12)
+#define   MI_SEMAPHORE_SAD_LTE_SDD      (3 << 12)
+#define   MI_SEMAPHORE_SAD_EQ_SDD       (4 << 12)
+#define   MI_SEMAPHORE_SAD_NEQ_SDD      (5 << 12)
+
+static bool __enable_hangcheck(int dir, bool state)
+{
+	return igt_sysfs_set(dir, "enable_hangcheck", state ? "1" : "0");
+}
+
+static bool enable_hangcheck(int i915, bool state)
+{
+	bool success;
+	int dir;
+
+	dir = igt_sysfs_open_parameters(i915);
+	if (dir < 0) /* no parameters, must be default! */
+		return false;
+
+	success = __enable_hangcheck(dir, state);
+	close(dir);
+
+	return success;
+}
+
+static void set_timeslice(int engine, unsigned int value)
+{
+	unsigned int delay;
+
+	igt_sysfs_printf(engine, "timeslice_duration_ms", "%u", value);
+	igt_sysfs_scanf(engine, "timeslice_duration_ms", "%u", &delay);
+	igt_assert_eq(delay, value);
+}
+
+static void test_idempotent(int i915, int engine)
+{
+	const unsigned int delays[] = { 0, 1, 1234, 654321 };
+	unsigned int saved;
+
+	/* Quick test to verify the kernel reports the same values as we write */
+
+	igt_assert(igt_sysfs_scanf(engine, "timeslice_duration_ms", "%u", &saved) == 1);
+	igt_debug("Initial timeslice_duration_ms:%u\n", saved);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++)
+		set_timeslice(engine, delays[i]);
+
+	set_timeslice(engine, saved);
+}
+
+static void test_invalid(int i915, int engine)
+{
+	unsigned int saved, delay;
+
+	/* Quick test that non-representable delays are rejected */
+
+	igt_assert(igt_sysfs_scanf(engine, "timeslice_duration_ms", "%u", &saved) == 1);
+	igt_debug("Initial timeslice_duration_ms:%u\n", saved);
+
+	igt_sysfs_printf(engine, "timeslice_duration_ms", PRIu64, -1);
+	igt_sysfs_scanf(engine, "timeslice_duration_ms", "%u", &delay);
+	igt_assert_eq(delay, saved);
+
+	igt_sysfs_printf(engine, "timeslice_duration_ms", "%d", -1);
+	igt_sysfs_scanf(engine, "timeslice_duration_ms", "%u", &delay);
+	igt_assert_eq(delay, saved);
+
+	igt_sysfs_printf(engine, "timeslice_duration_ms", PRIu64, 123ull << 32);
+	igt_sysfs_scanf(engine, "timeslice_duration_ms", "%u", &delay);
+	igt_assert_eq(delay, saved);
+}
+
+static void set_unbannable(int i915, uint32_t ctx)
+{
+	struct drm_i915_gem_context_param p = {
+		.ctx_id = ctx,
+		.param = I915_CONTEXT_PARAM_BANNABLE,
+	};
+
+	igt_assert_eq(__gem_context_set_param(i915, &p), 0);
+}
+
+static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
+{
+	uint32_t ctx;
+
+	ctx = gem_context_create_for_engine(i915, class, inst);
+	set_unbannable(i915, ctx);
+	gem_context_set_priority(i915, ctx, prio);
+
+	return ctx;
+}
+
+static int cmp_u32(const void *_a, const void *_b)
+{
+	const uint32_t *a = _a, *b = _b;
+
+	return *a - *b;
+}
+
+static double clockrate(int i915)
+{
+	int freq;
+	drm_i915_getparam_t gp = {
+		.value = &freq,
+		.param = I915_PARAM_CS_TIMESTAMP_FREQUENCY,
+	};
+
+	igt_require(igt_ioctl(i915, DRM_IOCTL_I915_GETPARAM, &gp) == 0);
+	return 1e9 / freq;
+}
+
+static uint64_t __test_duration(int i915, int engine, unsigned int timeout)
+{
+	struct drm_i915_gem_exec_object2 obj[3] = {
+		{
+			.handle = gem_create(i915, 4096),
+			.offset = 0,
+			.flags = EXEC_OBJECT_PINNED,
+		},
+		{
+			.handle = gem_create(i915, 4096),
+			.offset = 4096,
+			.flags = EXEC_OBJECT_PINNED,
+		},
+		{ gem_create(i915, 4096) }
+	};
+	struct drm_i915_gem_execbuffer2 eb = {
+		.buffer_count = ARRAY_SIZE(obj),
+		.buffers_ptr = to_user_pointer(obj),
+	};
+	const int gen = intel_gen(intel_get_drm_devid(i915));
+	double duration = clockrate(i915);
+	unsigned int class, inst, mmio;
+	uint32_t *cs, *map;
+	uint32_t ctx[2];
+	int start;
+	int i;
+
+	igt_require(gem_scheduler_has_preemption(i915));
+	igt_require(gen >= 8); /* MI_SEMAPHORE_WAIT */
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+	igt_require(igt_sysfs_scanf(engine, "mmio_base", "%x", &mmio) == 1);
+
+	set_timeslice(engine, timeout);
+
+	ctx[0] = create_context(i915, class, inst, 0);
+	ctx[1] = create_context(i915, class, inst, 0);
+
+	map = gem_mmap__cpu(i915, obj[2].handle, 0, 4096, PROT_WRITE);
+
+	cs = map;
+	for (i = 0; i < 10; i++) {
+		*cs++ = MI_SEMAPHORE_WAIT |
+			MI_SEMAPHORE_POLL |
+			MI_SEMAPHORE_SAD_NEQ_SDD |
+			(4 - 2 + (gen >= 12));
+		*cs++ = 0;
+		*cs++ = obj[0].offset + sizeof(uint32_t) * i;
+		*cs++ = 0;
+		if (gen >= 12)
+			*cs++ = 0;
+
+		*cs++ = 0x24 << 23 | 2; /* SRM */
+		*cs++ = mmio + 0x358;
+		*cs++ = obj[1].offset + sizeof(uint32_t) * i;
+		*cs++ = 0;
+
+		*cs++ = MI_STORE_DWORD_IMM;
+		*cs++ = obj[0].offset +
+			4096 - sizeof(uint32_t) * i - sizeof(uint32_t);
+		*cs++ = 0;
+		*cs++ = 1;
+	}
+	*cs++ = MI_BATCH_BUFFER_END;
+
+	cs += 16 - ((cs - map) & 15);
+	start = (cs - map) * sizeof(*cs);
+	for (i = 0; i < 10; i++) {
+		*cs++ = MI_STORE_DWORD_IMM;
+		*cs++ = obj[0].offset + sizeof(uint32_t) * i;
+		*cs++ = 0;
+		*cs++ = 1;
+
+		*cs++ = MI_SEMAPHORE_WAIT |
+			MI_SEMAPHORE_POLL |
+			MI_SEMAPHORE_SAD_NEQ_SDD |
+			(4 - 2 + (gen >= 12));
+		*cs++ = 0;
+		*cs++ = obj[0].offset +
+			4096 - sizeof(uint32_t) * i - sizeof(uint32_t);
+		*cs++ = 0;
+		if (gen >= 12)
+			*cs++ = 0;
+	}
+	*cs++ = MI_BATCH_BUFFER_END;
+	igt_assert(cs - map < 4096 / sizeof(*cs));
+	munmap(map, 4096);
+
+	eb.rsvd1 = ctx[0];
+	gem_execbuf(i915, &eb);
+
+	eb.rsvd1 = ctx[1];
+	eb.batch_start_offset = start;
+	gem_execbuf(i915, &eb);
+
+	gem_sync(i915, obj[2].handle);
+
+	gem_set_domain(i915, obj[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	map = gem_mmap__cpu(i915, obj[1].handle, 0, 4096, PROT_WRITE);
+	for (i = 0; i < 9; i++)
+		map[i] = map[i + 1] - map[i];
+	qsort(map, 9, sizeof(*map), cmp_u32);
+	duration *= map[4] / 2; /* 2 sema-waits between timestamp updates */
+	munmap(map, 4096);
+
+	for (i = 0; i < ARRAY_SIZE(ctx); i++)
+		gem_context_destroy(i915, ctx[i]);
+
+	for (i = 0; i < ARRAY_SIZE(obj); i++)
+		gem_close(i915, obj[i].handle);
+
+	return duration;
+}
+
+static void test_duration(int i915, int engine)
+{
+	int delays[] = { 1, 50, 100, 500 };
+	unsigned int saved;
+
+	/*
+	 * Timeslicing at its very basic level is sharing the GPU by
+	 * running one context for an interval before running another. After
+	 * each interval the running context is swapped for another runnable
+	 * context.
+	 *
+	 * We can measure this directly by watching the xCS_TIMESTAMP and
+	 * recording its value every time we switch into the context, using
+	 * a couple of semaphores to busyspin for the timeslice.
+	 */
+
+	igt_assert(igt_sysfs_scanf(engine, "timeslice_duration_ms", "%u", &saved) == 1);
+	igt_debug("Initial timeslice_duration_ms:%u\n", saved);
+
+	gem_quiescent_gpu(i915);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++) {
+		uint64_t elapsed;
+
+		elapsed = __test_duration(i915, engine, delays[i]);
+		igt_info("timeslice_duration_ms:%d, elapsed=%.3fms\n",
+			 delays[i], elapsed * 1e-6);
+
+		/*
+		 * We need to give a couple of jiffies slack for the scheduler timeouts
+		 * and then a little more slack for the overhead in submitting and
+		 * measuring. 50ms should cover all of our sins and be a useful
+		 * tolerance.
+		 */
+		igt_assert_f(elapsed / 1000 / 1000 < delays[i] + 50,
+			     "Timeslice exceeded request!\n");
+	}
+
+	gem_quiescent_gpu(i915);
+	set_timeslice(engine, saved);
+}
+
+static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
+{
+	unsigned int class, inst;
+	struct timespec ts = {};
+	igt_spin_t *spin[2];
+	uint64_t elapsed;
+	uint32_t ctx[2];
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_timeslice(engine, timeout);
+
+	ctx[0] = create_context(i915, class, inst, 0);
+	spin[0] = igt_spin_new(i915, ctx[0],
+			       .flags = (IGT_SPIN_NO_PREEMPTION |
+					 IGT_SPIN_POLL_RUN |
+					 IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin[0]);
+
+	ctx[1] = create_context(i915, class, inst, 0);
+	igt_nsec_elapsed(&ts);
+	spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+	igt_spin_busywait_until_started(spin[1]);
+	elapsed = igt_nsec_elapsed(&ts);
+
+	igt_spin_free(i915, spin[1]);
+
+	igt_assert_eq(sync_fence_wait(spin[0]->out_fence, 1), 0);
+	igt_assert_eq(sync_fence_status(spin[0]->out_fence), -EIO);
+
+	igt_spin_free(i915, spin[0]);
+
+	gem_context_destroy(i915, ctx[1]);
+	gem_context_destroy(i915, ctx[0]);
+	gem_quiescent_gpu(i915);
+
+	return elapsed;
+}
+
+static void test_timeout(int i915, int engine)
+{
+	int delays[] = { 1, 50, 100, 500 };
+	unsigned int saved;
+
+	/*
+	 * Timeslicing requires us to preempt the running context in order to
+	 * switch into its contemporary. If we couple an unpreemptable hog
+	 * with a fast forced reset, we can measure the timeslice by how long
+	 * it takes for the hog to be reset and the high priority context
+	 * to complete.
+	 */
+
+	igt_require(igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 1) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "timeslice_duration_ms", "%u", &saved) == 1);
+	igt_debug("Initial timeslice_duration_ms:%u\n", saved);
+
+	gem_quiescent_gpu(i915);
+	igt_require(enable_hangcheck(i915, false));
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++) {
+		uint64_t elapsed;
+
+		elapsed = __test_timeout(i915, engine, delays[i]);
+		igt_info("timeslice_duration_ms:%d, elapsed=%.3fms\n",
+			 delays[i], elapsed * 1e-6);
+
+		/*
+		 * We need to give a couple of jiffies slack for the scheduler timeouts
+		 * and then a little more slack for the overhead in submitting and
+		 * measuring. 50ms should cover all of our sins and be a useful
+		 * tolerance.
+		 */
+		igt_assert_f(elapsed / 1000 / 1000 < delays[i] + 50,
+			     "Timeslice exceeded request!\n");
+	}
+
+	igt_assert(enable_hangcheck(i915, true));
+	gem_quiescent_gpu(i915);
+	set_timeslice(engine, saved);
+}
+
+static void test_off(int i915, int engine)
+{
+	unsigned int class, inst;
+	unsigned int saved;
+	igt_spin_t *spin[2];
+	uint32_t ctx[2];
+
+	/*
+	 * As always, there are some who must run uninterrupted and simply do
+	 * not want to share the GPU even for a microsecond. Those greedy
+	 * clients can disable timeslicing entirely, and so set the timeslice
+	 * to 0. We test that a hog is not preempted for 150s, our
+	 * boredom threshold.
+	 */
+
+	igt_require(igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 1) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "timeslice_duration_ms", "%u", &saved) == 1);
+	igt_debug("Initial timeslice_duration_ms:%u\n", saved);
+
+	gem_quiescent_gpu(i915);
+	igt_require(enable_hangcheck(i915, false));
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_timeslice(engine, 0);
+
+	ctx[0] = create_context(i915, class, inst, 0);
+	spin[0] = igt_spin_new(i915, ctx[0],
+			       .flags = (IGT_SPIN_NO_PREEMPTION |
+					 IGT_SPIN_POLL_RUN |
+					 IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin[0]);
+
+	ctx[1] = create_context(i915, class, inst, 0);
+	spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+
+	for (int i = 0; i < 150; i++) {
+		igt_assert_eq(sync_fence_status(spin[0]->out_fence), 0);
+		sleep(1);
+	}
+
+	set_timeslice(engine, 1);
+
+	igt_spin_busywait_until_started(spin[1]);
+	igt_spin_free(i915, spin[1]);
+
+	igt_assert_eq(sync_fence_wait(spin[0]->out_fence, 1), 0);
+	igt_assert_eq(sync_fence_status(spin[0]->out_fence), -EIO);
+
+	igt_spin_free(i915, spin[0]);
+
+	gem_context_destroy(i915, ctx[1]);
+	gem_context_destroy(i915, ctx[0]);
+
+	igt_assert(enable_hangcheck(i915, true));
+	gem_quiescent_gpu(i915);
+
+	set_timeslice(engine, saved);
+}
+
+igt_main
+{
+	int i915 = -1, engines = -1;
+
+	igt_fixture {
+		int sys;
+
+		i915 = drm_open_driver(DRIVER_INTEL);
+		igt_require_gem(i915);
+		igt_allow_hang(i915, 0, 0);
+
+		sys = igt_sysfs_open(i915);
+		igt_require(sys != -1);
+
+		engines = openat(sys, "engine", O_RDONLY);
+		igt_require(engines != -1);
+
+		close(sys);
+	}
+
+	igt_subtest_with_dynamic("idempotent")
+		dyn_sysfs_engines(i915, engines, "timeslice_duration_ms",
+				  test_idempotent);
+
+	igt_subtest_with_dynamic("invalid")
+		dyn_sysfs_engines(i915, engines, "timeslice_duration_ms",
+				  test_invalid);
+
+	igt_subtest_with_dynamic("duration")
+		dyn_sysfs_engines(i915, engines, "timeslice_duration_ms",
+				  test_duration);
+
+	igt_subtest_with_dynamic("timeout")
+		dyn_sysfs_engines(i915, engines, "timeslice_duration_ms",
+				  test_timeout);
+
+	igt_subtest_with_dynamic("off")
+		dyn_sysfs_engines(i915, engines, "timeslice_duration_ms",
+				  test_off);
+
+	igt_fixture {
+		close(engines);
+		close(i915);
+	}
+}
diff --git a/tests/meson.build b/tests/meson.build
index 8795dfe1b..cd3f25e48 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -240,6 +240,7 @@ i915_progs = [
 	'i915_suspend',
 	'sysfs_heartbeat_interval',
 	'sysfs_preempt_timeout',
+	'sysfs_timeslice_duration',
 ]
 
 test_deps = [ igt_deps ]
-- 
2.25.0

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [Intel-gfx] [igt-dev] [PATCH i-g-t 3/5] i915: Exercise preemption timeout controls in sysfs
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 3/5] i915: Exercise preemption timeout controls in sysfs Chris Wilson
@ 2020-02-12 14:32   ` Petri Latvala
  0 siblings, 0 replies; 11+ messages in thread
From: Petri Latvala @ 2020-02-12 14:32 UTC (permalink / raw)
  To: Chris Wilson; +Cc: igt-dev, intel-gfx

On Mon, Jan 27, 2020 at 12:18:16PM +0000, Chris Wilson wrote:
> We [will] expose various per-engine scheduling controls. One of which,
> 'preempt_timeout_ms', defines how we wait for a preemption request to be
> honoured by the currently executing context. If it fails to relieve the
> GPU within the required timeout, the engine is reset and the miscreant
> forcibly evicted.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  lib/i915/gem_context.c             |  41 ++++
>  lib/i915/gem_context.h             |   2 +
>  lib/i915/gem_engine_topology.c     |  48 +++++
>  lib/i915/gem_engine_topology.h     |   3 +
>  tests/Makefile.sources             |   3 +
>  tests/i915/sysfs_preempt_timeout.c | 309 +++++++++++++++++++++++++++++
>  tests/meson.build                  |   1 +
>  7 files changed, 407 insertions(+)
>  create mode 100644 tests/i915/sysfs_preempt_timeout.c
> 
> diff --git a/lib/i915/gem_context.c b/lib/i915/gem_context.c
> index 0b6a554df..fc874a187 100644
> --- a/lib/i915/gem_context.c
> +++ b/lib/i915/gem_context.c
> @@ -462,3 +462,44 @@ bool gem_context_has_engine(int fd, uint32_t ctx, uint64_t engine)
>  
>  	return __gem_execbuf(fd, &execbuf) == -ENOENT;
>  }
> +
> +static int create_ext_ioctl(int i915,
> +			    struct drm_i915_gem_context_create_ext *arg)
> +{
> +	int err;
> +
> +	err = 0;
> +	if (igt_ioctl(i915, DRM_IOCTL_I915_GEM_CONTEXT_CREATE_EXT, arg)) {
> +		err = -errno;
> +		igt_assume(err);
> +	}
> +
> +	errno = 0;
> +	return err;
> +}
> +
> +uint32_t gem_context_create_for_engine(int i915, unsigned int class, unsigned int inst)
> +{
> +	I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
> +		.engines = { { .engine_class = class, .engine_instance = inst } }
> +	};
> +	struct drm_i915_gem_context_create_ext_setparam p_engines = {
> +		.base = {
> +			.name = I915_CONTEXT_CREATE_EXT_SETPARAM,
> +			.next_extension = 0, /* end of chain */
> +		},
> +		.param = {
> +			.param = I915_CONTEXT_PARAM_ENGINES,
> +			.value = to_user_pointer(&engines),
> +			.size = sizeof(engines),
> +		},
> +	};
> +	struct drm_i915_gem_context_create_ext create = {
> +		.flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
> +		.extensions = to_user_pointer(&p_engines),
> +	};
> +
> +	igt_assert_eq(create_ext_ioctl(i915, &create), 0);
> +	igt_assert_neq(create.ctx_id, 0);
> +	return create.ctx_id;
> +}
> diff --git a/lib/i915/gem_context.h b/lib/i915/gem_context.h
> index cf2ba33fe..ded75bb9c 100644
> --- a/lib/i915/gem_context.h
> +++ b/lib/i915/gem_context.h
> @@ -34,6 +34,8 @@ int __gem_context_create(int fd, uint32_t *ctx_id);
>  void gem_context_destroy(int fd, uint32_t ctx_id);
>  int __gem_context_destroy(int fd, uint32_t ctx_id);
>  
> +uint32_t gem_context_create_for_engine(int fd, unsigned int class, unsigned int inst);
> +
>  int __gem_context_clone(int i915,
>  			uint32_t src, unsigned int share,
>  			unsigned int flags,
> diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
> index 058983123..81faf3c15 100644
> --- a/lib/i915/gem_engine_topology.c
> +++ b/lib/i915/gem_engine_topology.c
> @@ -22,6 +22,8 @@
>   */
>  
>  #include <fcntl.h>
> +#include <sys/stat.h>
> +#include <sys/syscall.h>
>  #include <unistd.h>
>  
>  #include "drmtest.h"
> @@ -415,3 +417,49 @@ uint32_t gem_engine_mmio_base(int i915, const char *engine)
>  
>  	return mmio;
>  }
> +
> +void dyn_sysfs_engines(int i915, int engines, const char *file,
> +		       void (*test)(int, int))
> +{
> +	char buf[512];
> +	int len;
> +
> +	lseek(engines, 0, SEEK_SET);
> +	while ((len = syscall(SYS_getdents64, engines, buf, sizeof(buf))) > 0) {
> +		void *ptr = buf;
> +
> +		while (len) {
> +			struct linux_dirent64 {
> +				ino64_t        d_ino;
> +				off64_t        d_off;
> +				unsigned short d_reclen;
> +				unsigned char  d_type;
> +				char           d_name[];
> +			} *de = ptr;
> +			char *name;
> +			int engine;
> +
> +			ptr += de->d_reclen;
> +			len -= de->d_reclen;
> +
> +			engine = openat(engines, de->d_name, O_RDONLY);
> +			name = igt_sysfs_get(engine, "name");
> +			if (!name) {
> +				close(engine);
> +				continue;
> +			}
> +
> +			igt_dynamic(name) {
> +				if (file) {
> +					struct stat st;
> +
> +					igt_require(fstatat(engine, file, &st, 0) == 0);
> +				}
> +
> +				test(i915, engine);
> +			}
> +
> +			close(engine);
> +		}
> +	}
> +}
> diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
> index 7a2e21f66..456c806f5 100644
> --- a/lib/i915/gem_engine_topology.h
> +++ b/lib/i915/gem_engine_topology.h
> @@ -77,4 +77,7 @@ int gem_engine_property_scanf(int i915, const char *engine, const char *attr,
>  			      const char *fmt, ...);
>  uint32_t gem_engine_mmio_base(int i915, const char *engine);
>  
> +void dyn_sysfs_engines(int i915, int engines, const char *file,
> +		       void (*test)(int i915, int engine));
> +
>  #endif /* GEM_ENGINE_TOPOLOGY_H */
> diff --git a/tests/Makefile.sources b/tests/Makefile.sources
> index 7c5693457..fc9e04e97 100644
> --- a/tests/Makefile.sources
> +++ b/tests/Makefile.sources
> @@ -102,6 +102,9 @@ TESTS_progs = \
>  	vgem_slow \
>  	$(NULL)
>  
> +TESTS_progs += sysfs_preempt_timeout
> +sysfs_preempt_timeout_SOURCES = i915/sysfs_preempt_timeout

Your .c dropped off.


-- 
Petri Latvala
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Intel-gfx] [igt-dev] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls
  2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls Chris Wilson
@ 2020-02-12 14:33   ` Petri Latvala
  0 siblings, 0 replies; 11+ messages in thread
From: Petri Latvala @ 2020-02-12 14:33 UTC (permalink / raw)
  To: Chris Wilson; +Cc: igt-dev, intel-gfx

On Mon, Jan 27, 2020 at 12:18:17PM +0000, Chris Wilson wrote:
> We [will] expose various per-engine scheduling controls. One of which,
> 'heartbeat_duration_ms', defines how often we send a heartbeat down the
> engine to check upon the health of the engine. If a heartbeat does not
> complete within the interval (or two), the engine is declared hung.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  tests/Makefile.sources                |   3 +
>  tests/i915/sysfs_heartbeat_interval.c | 466 ++++++++++++++++++++++++++
>  tests/meson.build                     |   1 +
>  3 files changed, 470 insertions(+)
>  create mode 100644 tests/i915/sysfs_heartbeat_interval.c
> 
> diff --git a/tests/Makefile.sources b/tests/Makefile.sources
> index fc9e04e97..fd6f67a73 100644
> --- a/tests/Makefile.sources
> +++ b/tests/Makefile.sources
> @@ -102,6 +102,9 @@ TESTS_progs = \
>  	vgem_slow \
>  	$(NULL)
>  
> +TESTS_progs += sysfs_heartbeat_interval
> +sysfs_heartbeat_interval_SOURCES = i915/sysfs_heartbeat_interval

Another missing .c


-- 
Petri Latvala
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls
  2020-03-11  9:34 [Intel-gfx] [PATCH i-g-t 1/5] lib/i915: Create a context wrapping one specific engine Chris Wilson
@ 2020-03-11  9:34 ` Chris Wilson
  0 siblings, 0 replies; 11+ messages in thread
From: Chris Wilson @ 2020-03-11  9:34 UTC (permalink / raw)
  To: intel-gfx; +Cc: igt-dev

We [will] expose various per-engine scheduling controls. One of which,
'heartbeat_duration_ms', defines how often we send a heartbeat down the
engine to check upon the health of the engine. If a heartbeat does not
complete within the interval (or two), the engine is declared hung.
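
As a rough illustration, a client that cannot tolerate heartbeat jitter
could park the interval around its critical section along these lines
(a sketch only; the helper is illustrative and assumes the engine's
sysfs directory is already open, as in the test below):

  static void run_without_heartbeat(int i915, int engine_dir,
                                    void (*workload)(int))
  {
          unsigned int saved = 0;

          igt_sysfs_scanf(engine_dir, "heartbeat_interval_ms", "%u", &saved);
          igt_sysfs_printf(engine_dir, "heartbeat_interval_ms", "%u", 0);

          /* no hangcheck protection on this engine while this runs */
          workload(i915);

          igt_sysfs_printf(engine_dir, "heartbeat_interval_ms", "%u", saved);
  }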

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/Makefile.sources                |   3 +
 tests/i915/sysfs_heartbeat_interval.c | 446 ++++++++++++++++++++++++++
 tests/intel-ci/blacklist.txt          |   1 +
 tests/meson.build                     |   1 +
 4 files changed, 451 insertions(+)
 create mode 100644 tests/i915/sysfs_heartbeat_interval.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index c1711b336..7b1f8194d 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -104,6 +104,9 @@ TESTS_progs = \
 	vgem_slow \
 	$(NULL)
 
+TESTS_progs += sysfs_heartbeat_interval
+sysfs_heartbeat_interval_SOURCES = i915/sysfs_heartbeat_interval.c
+
 TESTS_progs += sysfs_preempt_timeout
 sysfs_preempt_timeout_SOURCES = i915/sysfs_preempt_timeout.c
 
diff --git a/tests/i915/sysfs_heartbeat_interval.c b/tests/i915/sysfs_heartbeat_interval.c
new file mode 100644
index 000000000..0ba19b263
--- /dev/null
+++ b/tests/i915/sysfs_heartbeat_interval.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "drmtest.h" /* gem_quiescent_gpu()! */
+#include "i915/gem_engine_topology.h"
+#include "igt_dummyload.h"
+#include "igt_sysfs.h"
+#include "ioctl_wrappers.h" /* igt_require_gem()! */
+#include "sw_sync.h"
+
+#include "igt_debugfs.h"
+
+#define ATTR "heartbeat_interval_ms"
+#define RESET_TIMEOUT 20 /* milliseconds, at least one jiffie for kworker */
+
+static bool __enable_hangcheck(int dir, bool state)
+{
+	return igt_sysfs_set(dir, "enable_hangcheck", state ? "1" : "0");
+}
+
+static void enable_hangcheck(int i915, bool state)
+{
+	int dir;
+
+	dir = igt_sysfs_open_parameters(i915);
+	if (dir < 0) /* no parameters, must be default! */
+		return;
+
+	__enable_hangcheck(dir, state);
+	close(dir);
+}
+
+static void set_heartbeat(int engine, unsigned int value)
+{
+	unsigned int delay = ~value;
+
+	igt_debug("set %s:%d\n", ATTR, value);
+	igt_require(igt_sysfs_printf(engine, ATTR, "%u", value) > 0);
+	igt_sysfs_scanf(engine, ATTR, "%u", &delay);
+	igt_assert_eq(delay, value);
+}
+
+static void test_idempotent(int i915, int engine)
+{
+	unsigned int delays[] = { 1, 1000, 5000, 50000, 123456789 };
+	unsigned int saved;
+
+	/* Quick test that the property reports the values we set */
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++)
+		set_heartbeat(engine, delays[i]);
+
+	set_heartbeat(engine, saved);
+}
+
+static void test_invalid(int i915, int engine)
+{
+	unsigned int saved, delay;
+
+	/* Quick test that we reject any unrepresentable intervals */
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+
+	igt_sysfs_printf(engine, ATTR, "%" PRIu64, (uint64_t)-1);
+	igt_sysfs_scanf(engine, ATTR, "%u", &delay);
+	igt_assert_eq(delay, saved);
+
+	igt_sysfs_printf(engine, ATTR, "%" PRIu64, 10ull << 32);
+	igt_sysfs_scanf(engine, ATTR, "%u", &delay);
+	igt_assert_eq(delay, saved);
+}
+
+static void set_unbannable(int i915, uint32_t ctx)
+{
+	struct drm_i915_gem_context_param p = {
+		.ctx_id = ctx,
+		.param = I915_CONTEXT_PARAM_BANNABLE,
+	};
+
+	gem_context_set_param(i915, &p);
+}
+
+static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
+{
+	uint32_t ctx;
+
+	ctx = gem_context_create_for_engine(i915, class, inst);
+	set_unbannable(i915, ctx);
+	gem_context_set_priority(i915, ctx, prio);
+
+	return ctx;
+}
+
+static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
+{
+	unsigned int class, inst;
+	struct timespec ts = {};
+	igt_spin_t *spin[2];
+	uint64_t elapsed;
+	uint32_t ctx[2];
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_heartbeat(engine, timeout);
+
+	ctx[0] = create_context(i915, class, inst, 1023);
+	spin[0] = igt_spin_new(i915, ctx[0],
+			       .flags = (IGT_SPIN_NO_PREEMPTION |
+					 IGT_SPIN_POLL_RUN |
+					 IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin[0]);
+
+	ctx[1] = create_context(i915, class, inst, -1023);
+	igt_nsec_elapsed(&ts);
+	spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+	igt_spin_busywait_until_started(spin[1]);
+	elapsed = igt_nsec_elapsed(&ts);
+
+	igt_spin_free(i915, spin[1]);
+
+	igt_assert_eq(sync_fence_wait(spin[0]->out_fence, RESET_TIMEOUT), 0);
+	igt_assert_eq(sync_fence_status(spin[0]->out_fence), -EIO);
+
+	igt_spin_free(i915, spin[0]);
+
+	gem_context_destroy(i915, ctx[1]);
+	gem_context_destroy(i915, ctx[0]);
+	gem_quiescent_gpu(i915);
+
+	return elapsed;
+}
+
+static void test_precise(int i915, int engine)
+{
+	int delays[] = { 1, 50, 100, 500 };
+	unsigned int saved;
+
+	/*
+	 * The heartbeat interval defines how long the kernel waits between
+	 * checking on the status of the engines. It first sends down a
+	 * heartbeat pulse, waits the interval and sees if the system managed
+	 * to complete the pulse. If not, it gives a priority bump to the pulse
+	 * and waits again. This is repeated until the priority cannot be bumped
+	 * any more, and the system is declared hung.
+	 *
+	 * If we combine the preemptive pulse with forced preemption, we instead
+	 * get a much faster hang detection. Thus in combination we can measure
+	 * the system response time to resetting a hog as a measure of the
+	 * heartbeat interval, and so confirm it matches our specification.
+	 */
+
+	igt_require(igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 1) == 1);
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+	gem_quiescent_gpu(i915);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++) {
+		uint64_t elapsed;
+
+		elapsed = __test_timeout(i915, engine, delays[i]);
+		igt_info("%s:%d, elapsed=%.3fms[%d]\n", ATTR,
+			 delays[i], elapsed * 1e-6,
+			 (int)(elapsed / 1000 / 1000));
+
+		/*
+		 * It takes a couple of missed heartbeats before we start
+		 * terminating hogs, and a little bit of jiffie slack for
+		 * scheduling at each step. 150ms should cover all of our
+		 * sins and be a useful tolerance.
+		 */
+		igt_assert_f(elapsed / 1000 / 1000 < 3 * delays[i] + 150,
+			     "Heartbeat interval (and CPR) exceeded request!\n");
+	}
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+static void test_nopreempt(int i915, int engine)
+{
+	int delays[] = { 1, 50, 100, 500 };
+	unsigned int saved;
+
+	/*
+	 * The same principle as test_precise(), except that forced preemption
+	 * is disabled (or simply not supported by the platform). This time,
+	 * it waits until the system misses a few heartbeats before doing a
+	 * per-engine/full-gpu reset. As such it is less precise, but we
+	 * can still estimate an upper bound for our specified heartbeat
+	 * interval and verify the system conforms.
+	 */
+
+	/* Test heartbeats with forced preemption disabled */
+	igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 0);
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+	gem_quiescent_gpu(i915);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++) {
+		uint64_t elapsed;
+
+		elapsed = __test_timeout(i915, engine, delays[i]);
+		igt_info("%s:%d, elapsed=%.3fms[%d]\n", ATTR,
+			 delays[i], elapsed * 1e-6,
+			 (int)(elapsed / 1000 / 1000));
+
+		/*
+		 * It takes a few missed heartbeats before we start
+		 * terminating hogs, and a little bit of jiffie slack for
+		 * scheduling at each step. 250ms should cover all of our
+		 * sins and be a useful tolerance.
+		 */
+		igt_assert_f(elapsed / 1000 / 1000 < 5 * delays[i] + 250,
+			     "Heartbeat interval (and CPR) exceeded request!\n");
+	}
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+static void client(int i915, int engine, int *ctl, int duration, int expect)
+{
+	unsigned int class, inst;
+	unsigned long count = 0;
+	uint32_t ctx;
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	ctx = create_context(i915, class, inst, 0);
+
+	while (!READ_ONCE(*ctl)) {
+		igt_spin_t *spin;
+
+		spin = igt_spin_new(i915, ctx,
+				    .flags = (IGT_SPIN_NO_PREEMPTION |
+					      IGT_SPIN_POLL_RUN |
+					      IGT_SPIN_FENCE_OUT));
+		igt_spin_busywait_until_started(spin);
+
+		igt_spin_set_timeout(spin, (uint64_t)duration * 1000 * 1000);
+		sync_fence_wait(spin->out_fence, -1);
+
+		igt_assert_eq(sync_fence_status(spin->out_fence), expect);
+		count++;
+	}
+
+	gem_context_destroy(i915, ctx);
+	igt_info("%s client completed %lu spins\n",
+		 expect < 0 ? "Bad" : "Good", count);
+}
+
+static void __test_mixed(int i915, int engine,
+			 int heartbeat,
+			 int good,
+			 int bad,
+			 int duration)
+{
+	unsigned int saved;
+	int *shared;
+
+	/*
+	 * Given two clients of which one is a hog, be sure we cleanly
+	 * terminate the hog leaving the good client to run.
+	 */
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+	gem_quiescent_gpu(i915);
+
+	shared = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+	igt_assert(shared != MAP_FAILED);
+
+	set_heartbeat(engine, heartbeat);
+
+	igt_fork(child, 1) /* good client */
+		client(i915, engine, shared, good, 1);
+	igt_fork(child, 1) /* bad client */
+		client(i915, engine, shared, bad, -EIO);
+
+	sleep(duration);
+
+	*shared = true;
+	igt_waitchildren();
+	munmap(shared, 4096);
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+static void test_mixed(int i915, int engine)
+{
+	/*
+	 * Hogs rarely run alone. Our hang detection must carefully wean
+	 * out the hogs from the innocent clients. Thus we run a mixed workload
+	 * with non-preemptable hogs that exceed the heartbeat, and quicker
+	 * innocents. We inspect the fence status of each to verify that
+	 * only the hogs are reset.
+	 */
+	igt_require(igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 1) == 1);
+	__test_mixed(i915, engine, 10, 10, 100, 5);
+}
+
+static void test_long(int i915, int engine)
+{
+	/*
+	 * Some clients relish being hogs, and demand that the system
+	 * never do hangchecking. Never is hard to test, so instead we
+	 * run over a day and verify that only the super hogs are reset.
+	 */
+	igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 0);
+	__test_mixed(i915, engine,
+		     60 * 1000, /* 60s */
+		     60 * 1000, /* 60s */
+		     300 * 1000, /* 5min */
+		     24 * 3600 /* 24hours */);
+}
+
+static void test_off(int i915, int engine)
+{
+	unsigned int class, inst;
+	unsigned int saved;
+	igt_spin_t *spin;
+	uint32_t ctx;
+
+	/*
+	 * Some other clients request that there is never any interruption
+	 * or jitter in their workload and so demand that the kernel never
+	 * sends a heartbeat to steal precious cycles from their workload.
+	 * Turn off the heartbeat and check that the workload is uninterrupted
+	 * for 150s.
+	 */
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+	gem_quiescent_gpu(i915);
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_heartbeat(engine, 0);
+
+	ctx = create_context(i915, class, inst, 0);
+
+	spin = igt_spin_new(i915, ctx,
+			    .flags = (IGT_SPIN_POLL_RUN |
+				      IGT_SPIN_NO_PREEMPTION |
+				      IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin);
+
+	for (int i = 0; i < 150; i++) {
+		igt_assert_eq(sync_fence_status(spin->out_fence), 0);
+		sleep(1);
+	}
+
+	set_heartbeat(engine, 1);
+
+	igt_assert_eq(sync_fence_wait(spin->out_fence, 250), 0);
+	igt_assert_eq(sync_fence_status(spin->out_fence), -EIO);
+
+	igt_spin_free(i915, spin);
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+igt_main
+{
+	static const struct {
+		const char *name;
+		void (*fn)(int, int);
+	} tests[] = {
+		{ "idempotent", test_idempotent },
+		{ "invalid", test_invalid },
+		{ "precise", test_precise },
+		{ "nopreempt", test_nopreempt },
+		{ "mixed", test_mixed },
+		{ "off", test_off },
+		{ "long", test_long },
+		{ }
+	};
+	int i915 = -1, engines = -1;
+
+	igt_fixture {
+		int sys;
+
+		i915 = drm_open_driver(DRIVER_INTEL);
+		igt_require_gem(i915);
+		igt_allow_hang(i915, 0, 0);
+
+		sys = igt_sysfs_open(i915);
+		igt_require(sys != -1);
+
+		engines = openat(sys, "engine", O_RDONLY);
+		igt_require(engines != -1);
+		close(sys);
+
+		enable_hangcheck(i915, true);
+	}
+
+	for (typeof(*tests) *t = tests; t->name; t++)
+		igt_subtest_with_dynamic(t->name)
+			dyn_sysfs_engines(i915, engines, ATTR, t->fn);
+
+	igt_fixture {
+		close(engines);
+		close(i915);
+	}
+}
diff --git a/tests/intel-ci/blacklist.txt b/tests/intel-ci/blacklist.txt
index 1b5442278..a02ef67e6 100644
--- a/tests/intel-ci/blacklist.txt
+++ b/tests/intel-ci/blacklist.txt
@@ -60,6 +60,7 @@ igt@gem_sync@(?!.*basic).*
 igt@gem_tiled_swapping@(?!non-threaded).*
 igt@gem_userptr_blits@(major|minor|forked|mlocked|swapping).*
 igt@gem_wait@.*hang.*
+igt@sysfs_heartbeat_interval@long.*
 igt@sysfs_preemption_timeout@off.*
 ###############################################
 # GEM: Not worth fixing
diff --git a/tests/meson.build b/tests/meson.build
index bafd00513..3e5952101 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -227,6 +227,7 @@ i915_progs = [
 	'i915_query',
 	'i915_selftest',
 	'i915_suspend',
+	'sysfs_heartbeat_interval',
 	'sysfs_preempt_timeout',
 ]
 
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls
  2020-02-28 23:34   ` Andi Shyti
@ 2020-02-28 23:37     ` Chris Wilson
  0 siblings, 0 replies; 11+ messages in thread
From: Chris Wilson @ 2020-02-28 23:37 UTC (permalink / raw)
  To: Andi Shyti; +Cc: igt-dev, intel-gfx

Quoting Andi Shyti (2020-02-28 23:34:48)
> On Fri, Feb 28, 2020 at 10:43:39AM +0000, Chris Wilson wrote:
> > We [will] expose various per-engine scheduling controls. One of which,
> > 'heartbeat_duration_ms', defines how often we send a heartbeat down the
> > engine to check upon the health of the engine. If a heartbeat does not
> > complete within the interval (or two), the engine is declared hung.
> > 
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> 
> Someone without a good eye might swear to have read this patch
> once already, and at patch 5/5 he will ask the same question again.
> 
> Why don't we put the things that patches 3/4/5 have in common
> together in a library?

They are. It's basically a repeating pattern of testing with local
assumptions. For the sole reason that I'm not very inventive.
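
Roughly, the shared part would boil down to something like this (a
sketch; sweep_attr() and its callback are illustrative names):

  static void sweep_attr(int i915, int engine, const char *attr,
                         const unsigned int *values, unsigned int count,
                         void (*exercise)(int, int, unsigned int))
  {
          unsigned int saved;

          igt_assert(igt_sysfs_scanf(engine, attr, "%u", &saved) == 1);

          for (unsigned int i = 0; i < count; i++)
                  exercise(i915, engine, values[i]); /* set, measure, assert */

          igt_sysfs_printf(engine, attr, "%u", saved);
  }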
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls
  2020-02-28 10:43 ` [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls Chris Wilson
@ 2020-02-28 23:34   ` Andi Shyti
  2020-02-28 23:37     ` Chris Wilson
  0 siblings, 1 reply; 11+ messages in thread
From: Andi Shyti @ 2020-02-28 23:34 UTC (permalink / raw)
  To: Chris Wilson; +Cc: igt-dev, intel-gfx

On Fri, Feb 28, 2020 at 10:43:39AM +0000, Chris Wilson wrote:
> We [will] expose various per-engine scheduling controls. One of which,
> 'heartbeat_duration_ms', defines how often we send a heartbeat down the
> engine to check upon the health of the engine. If a heartbeat does not
> complete within the interval (or two), the engine is declared hung.
> 
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>

Someone without a good eye might swear to have read this patch
once already, and at patch 5/5 he will ask the same question again.

Why don't we put the things that patches 3/4/5 have in common
together in a library?

Andi
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls
  2020-02-28 10:43 [Intel-gfx] [PATCH i-g-t 1/5] i915: Start putting the mmio_base to wider use Chris Wilson
@ 2020-02-28 10:43 ` Chris Wilson
  2020-02-28 23:34   ` Andi Shyti
  0 siblings, 1 reply; 11+ messages in thread
From: Chris Wilson @ 2020-02-28 10:43 UTC (permalink / raw)
  To: intel-gfx; +Cc: igt-dev

We [will] expose various per-engine scheduling controls. One of which,
'heartbeat_duration_ms', defines how often we send a heartbeat down the
engine to check upon the health of the engine. If a heartbeat does not
complete within the interval (or two), the engine is declared hung.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/Makefile.sources                |   3 +
 tests/i915/sysfs_heartbeat_interval.c | 456 ++++++++++++++++++++++++++
 tests/intel-ci/blacklist.txt          |   1 +
 tests/meson.build                     |   1 +
 4 files changed, 461 insertions(+)
 create mode 100644 tests/i915/sysfs_heartbeat_interval.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index cc1d1fdd1..cc3ef5355 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -103,6 +103,9 @@ TESTS_progs = \
 	vgem_slow \
 	$(NULL)
 
+TESTS_progs += sysfs_heartbeat_interval
+sysfs_heartbeat_interval_SOURCES = i915/sysfs_heartbeat_interval.c
+
 TESTS_progs += sysfs_preempt_timeout
 sysfs_preempt_timeout_SOURCES = i915/sysfs_preempt_timeout.c
 
diff --git a/tests/i915/sysfs_heartbeat_interval.c b/tests/i915/sysfs_heartbeat_interval.c
new file mode 100644
index 000000000..131105437
--- /dev/null
+++ b/tests/i915/sysfs_heartbeat_interval.c
@@ -0,0 +1,456 @@
+/*
+ * Copyright © 2019 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <signal.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#include "drmtest.h" /* gem_quiescent_gpu()! */
+#include "i915/gem_engine_topology.h"
+#include "igt_dummyload.h"
+#include "igt_sysfs.h"
+#include "ioctl_wrappers.h" /* igt_require_gem()! */
+#include "sw_sync.h"
+
+#include "igt_debugfs.h"
+
+#define ATTR "heartbeat_interval_ms"
+
+static bool __enable_hangcheck(int dir, bool state)
+{
+	return igt_sysfs_set(dir, "enable_hangcheck", state ? "1" : "0");
+}
+
+static void enable_hangcheck(int i915, bool state)
+{
+	int dir;
+
+	dir = igt_sysfs_open_parameters(i915);
+	if (dir < 0) /* no parameters, must be default! */
+		return;
+
+	__enable_hangcheck(dir, state);
+	close(dir);
+}
+
+static void set_heartbeat(int engine, unsigned int value)
+{
+	unsigned int delay = ~value;
+
+	igt_debug("set %s:%d\n", ATTR, value);
+	igt_require(igt_sysfs_printf(engine, ATTR, "%u", value) > 0);
+	igt_sysfs_scanf(engine, ATTR, "%u", &delay);
+	igt_assert_eq(delay, value);
+}
+
+static void test_idempotent(int i915, int engine)
+{
+	unsigned int delays[] = { 1, 1000, 5000, 50000, 123456789 };
+	unsigned int saved;
+
+	/* Quick test that the property reports the values we set */
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++)
+		set_heartbeat(engine, delays[i]);
+
+	set_heartbeat(engine, saved);
+}
+
+static void test_invalid(int i915, int engine)
+{
+	unsigned int saved, delay;
+
+	/* Quick test that we reject any unrepresentable intervals */
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+
+	igt_sysfs_printf(engine, ATTR, "%" PRIu64, (uint64_t)-1);
+	igt_sysfs_scanf(engine, ATTR, "%u", &delay);
+	igt_assert_eq(delay, saved);
+
+	igt_sysfs_printf(engine, ATTR, "%" PRIu64, 10ull << 32);
+	igt_sysfs_scanf(engine, ATTR, "%u", &delay);
+	igt_assert_eq(delay, saved);
+}
+
+static void set_unbannable(int i915, uint32_t ctx)
+{
+	struct drm_i915_gem_context_param p = {
+		.ctx_id = ctx,
+		.param = I915_CONTEXT_PARAM_BANNABLE,
+	};
+
+	igt_assert_eq(__gem_context_set_param(i915, &p), 0);
+}
+
+static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
+{
+	uint32_t ctx;
+
+	ctx = gem_context_create_for_engine(i915, class, inst);
+	set_unbannable(i915, ctx);
+	gem_context_set_priority(i915, ctx, prio);
+
+	return ctx;
+}
+
+static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
+{
+	unsigned int class, inst;
+	struct timespec ts = {};
+	igt_spin_t *spin[2];
+	uint64_t elapsed;
+	uint32_t ctx[2];
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_heartbeat(engine, timeout);
+
+	ctx[0] = create_context(i915, class, inst, 1023);
+	spin[0] = igt_spin_new(i915, ctx[0],
+			       .flags = (IGT_SPIN_NO_PREEMPTION |
+					 IGT_SPIN_POLL_RUN |
+					 IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin[0]);
+
+	ctx[1] = create_context(i915, class, inst, -1023);
+	igt_nsec_elapsed(&ts);
+	spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+	igt_spin_busywait_until_started(spin[1]);
+	elapsed = igt_nsec_elapsed(&ts);
+
+	igt_spin_free(i915, spin[1]);
+
+	igt_assert_eq(sync_fence_wait(spin[0]->out_fence, 1), 0);
+	igt_assert_eq(sync_fence_status(spin[0]->out_fence), -EIO);
+
+	igt_spin_free(i915, spin[0]);
+
+	gem_context_destroy(i915, ctx[1]);
+	gem_context_destroy(i915, ctx[0]);
+	gem_quiescent_gpu(i915);
+
+	return elapsed;
+}
+
+static void test_precise(int i915, int engine)
+{
+	int delays[] = { 1, 50, 100, 500 };
+	unsigned int saved;
+
+	/*
+	 * The heartbeat interval defines how long the kernel waits between
+	 * checking on the status of the engines. It first sends down a
+	 * heartbeat pulse, waits the interval and sees if the system managed
+	 * to complete the pulse. If not, it gives a priority bump to the pulse
+	 * and waits again. This is repeated until the priority cannot be bumped
+	 * any more, and the system is declared hung.
+	 *
+	 * If we combine the preemptive pulse with forced preemption, we instead
+	 * get a much faster hang detection. Thus in combination we can measure
+	 * the system response time to resetting a hog as a measure of the
+	 * heartbeat interval, and so confirm it matches our specification.
+	 */
+
+	igt_require(igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 1) == 1);
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+	gem_quiescent_gpu(i915);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++) {
+		uint64_t elapsed;
+
+		elapsed = __test_timeout(i915, engine, delays[i]);
+		igt_info("%s:%d, elapsed=%.3fms[%d]\n", ATTR,
+			 delays[i], elapsed * 1e-6,
+			 (int)(elapsed / 1000 / 1000));
+
+		/*
+		 * It takes a couple of missed heartbeats before we start
+		 * terminating hogs, and a little bit of jiffie slack for
+		 * scheduling at each step. 150ms should cover all of our
+		 * sins and be a useful tolerance.
+		 */
+		igt_assert_f(elapsed / 1000 / 1000 < 3 * delays[i] + 150,
+			     "Heartbeat interval (and CPR) exceeded request!\n");
+	}
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+static void test_nopreempt(int i915, int engine)
+{
+	int delays[] = { 1, 50, 100, 500 };
+	unsigned int saved;
+
+	/*
+	 * The same principle as test_precise(), except that forced preemption
+	 * is disabled (or simply not supported by the platform). This time,
+	 * it waits until the system misses a few heartbeats before doing a
+	 * per-engine/full-gpu reset. As such it is less precise, but we
+	 * can still estimate an upper bound for our specified heartbeat
+	 * interval and verify the system conforms.
+	 */
+
+	/* Test heartbeats with forced preemption disabled */
+	igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 0);
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+	gem_quiescent_gpu(i915);
+
+	for (int i = 0; i < ARRAY_SIZE(delays); i++) {
+		uint64_t elapsed;
+
+		elapsed = __test_timeout(i915, engine, delays[i]);
+		igt_info("%s:%d, elapsed=%.3fms[%d]\n", ATTR,
+			 delays[i], elapsed * 1e-6,
+			 (int)(elapsed / 1000 / 1000));
+
+		/*
+		 * It takes a few missed heartbeats before we start
+		 * terminating hogs, and a little bit of jiffie slack for
+		 * scheduling at each step. 250ms should cover all of our
+		 * sins and be a useful tolerance.
+		 */
+		igt_assert_f(elapsed / 1000 / 1000 < 5 * delays[i] + 250,
+			     "Heartbeat interval (and CPR) exceeded request!\n");
+	}
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+static void client(int i915, int engine, int *ctl, int duration, int expect)
+{
+	unsigned int class, inst;
+	unsigned long count = 0;
+	uint32_t ctx;
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	ctx = create_context(i915, class, inst, 0);
+
+	while (!READ_ONCE(*ctl)) {
+		igt_spin_t *spin;
+
+		spin = igt_spin_new(i915, ctx,
+				    .flags = (IGT_SPIN_NO_PREEMPTION |
+					      IGT_SPIN_POLL_RUN |
+					      IGT_SPIN_FENCE_OUT));
+		igt_spin_busywait_until_started(spin);
+
+		igt_spin_set_timeout(spin, (uint64_t)duration * 1000 * 1000);
+		sync_fence_wait(spin->out_fence, -1);
+
+		igt_assert_eq(sync_fence_status(spin->out_fence), expect);
+		count++;
+	}
+
+	gem_context_destroy(i915, ctx);
+	igt_info("%s client completed %lu spins\n",
+		 expect < 0 ? "Bad" : "Good", count);
+}
+
+static void sigign(int sig)
+{
+}
+
+static void wait_until(int duration)
+{
+	signal(SIGCHLD, sigign);
+	sleep(duration);
+	signal(SIGCHLD, SIG_IGN);
+}
+
+static void __test_mixed(int i915, int engine,
+			 int heartbeat,
+			 int good,
+			 int bad,
+			 int duration)
+{
+	unsigned int saved;
+	int *shared;
+
+	/*
+	 * Given two clients of which one is a hog, be sure we cleanly
+	 * terminate the hog leaving the good client to run.
+	 */
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+	gem_quiescent_gpu(i915);
+
+	shared = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+	igt_assert(shared != MAP_FAILED);
+
+	set_heartbeat(engine, heartbeat);
+
+	igt_fork(child, 1) /* good client */
+		client(i915, engine, shared, good, 1);
+	igt_fork(child, 1) /* bad client */
+		client(i915, engine, shared, bad, -EIO);
+
+	wait_until(duration);
+
+	*shared = true;
+	igt_waitchildren();
+	munmap(shared, 4096);
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+static void test_mixed(int i915, int engine)
+{
+	/*
+	 * Hogs rarely run alone. Our hang detection must carefully wean
+	 * out the hogs from the innocent clients. Thus we run a mixed workload
+	 * with non-preemptable hogs that exceed the heartbeat, and quicker
+	 * innocents. We inspect the fence status of each to verify that
+	 * only the hogs are reset.
+	 */
+	igt_require(igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 1) == 1);
+	__test_mixed(i915, engine, 10, 10, 100, 5);
+}
+
+static void test_long(int i915, int engine)
+{
+	/*
+	 * Some clients relish being hogs, and demand that the system
+	 * never do hangchecking. Never is hard to test, so instead we
+	 * run over a day and verify that only the super hogs are reset.
+	 */
+	igt_sysfs_printf(engine, "preempt_timeout_ms", "%u", 0);
+	__test_mixed(i915, engine,
+		     60 * 1000, /* 60s */
+		     60 * 1000, /* 60s */
+		     300 * 1000, /* 5min */
+		     24 * 3600 /* 24hours */);
+}
+
+static void test_off(int i915, int engine)
+{
+	unsigned int class, inst;
+	unsigned int saved;
+	igt_spin_t *spin;
+	uint32_t ctx;
+
+	/*
+	 * Some other clients request that there is never any interruption
+	 * or jitter in their workload and so demand that the kernel never
+	 * sends a heartbeat to steal precious cycles from their workload.
+	 * Turn off the heartbeat and check that the workload is uninterrupted
+	 * for 150s.
+	 */
+
+	igt_assert(igt_sysfs_scanf(engine, ATTR, "%u", &saved) == 1);
+	igt_debug("Initial %s:%u\n", ATTR, saved);
+	gem_quiescent_gpu(i915);
+
+	igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
+	igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
+
+	set_heartbeat(engine, 0);
+
+	ctx = create_context(i915, class, inst, 0);
+
+	spin = igt_spin_new(i915, ctx,
+			    .flags = (IGT_SPIN_POLL_RUN |
+				      IGT_SPIN_NO_PREEMPTION |
+				      IGT_SPIN_FENCE_OUT));
+	igt_spin_busywait_until_started(spin);
+
+	for (int i = 0; i < 150; i++) {
+		igt_assert_eq(sync_fence_status(spin->out_fence), 0);
+		sleep(1);
+	}
+
+	set_heartbeat(engine, 1);
+
+	igt_assert_eq(sync_fence_wait(spin->out_fence, 250), 0);
+	igt_assert_eq(sync_fence_status(spin->out_fence), -EIO);
+
+	igt_spin_free(i915, spin);
+
+	gem_quiescent_gpu(i915);
+	set_heartbeat(engine, saved);
+}
+
+igt_main
+{
+	static const struct {
+		const char *name;
+		void (*fn)(int, int);
+	} tests[] = {
+		{ "idempotent", test_idempotent },
+		{ "invalid", test_invalid },
+		{ "precise", test_precise },
+		{ "nopreempt", test_nopreempt },
+		{ "mixed", test_mixed },
+		{ "off", test_off },
+		{ "long", test_long },
+		{ }
+	};
+	int i915 = -1, engines = -1;
+
+	igt_fixture {
+		int sys;
+
+		i915 = drm_open_driver(DRIVER_INTEL);
+		igt_require_gem(i915);
+		igt_allow_hang(i915, 0, 0);
+
+		sys = igt_sysfs_open(i915);
+		igt_require(sys != -1);
+
+		engines = openat(sys, "engine", O_RDONLY);
+		igt_require(engines != -1);
+		close(sys);
+
+		enable_hangcheck(i915, true);
+	}
+
+	for (typeof(*tests) *t = tests; t->name; t++)
+		igt_subtest_with_dynamic(t->name)
+			dyn_sysfs_engines(i915, engines, ATTR, t->fn);
+
+	igt_fixture {
+		close(engines);
+		close(i915);
+	}
+}
diff --git a/tests/intel-ci/blacklist.txt b/tests/intel-ci/blacklist.txt
index 01cd176ee..eb5c9226a 100644
--- a/tests/intel-ci/blacklist.txt
+++ b/tests/intel-ci/blacklist.txt
@@ -59,6 +59,7 @@ igt@gem_sync@(?!.*basic).*
 igt@gem_tiled_swapping@(?!non-threaded).*
 igt@gem_userptr_blits@(major|minor|forked|mlocked|swapping).*
 igt@gem_wait@.*hang.*
+igt@sysfs_heartbeat_interval@long.*
 igt@sysfs_preemption_timeout@off.*
 ###############################################
 # GEM: Not worth fixing
diff --git a/tests/meson.build b/tests/meson.build
index 0af1ed1e1..bc1af9b1e 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -226,6 +226,7 @@ i915_progs = [
 	'i915_query',
 	'i915_selftest',
 	'i915_suspend',
+	'sysfs_heartbeat_interval',
 	'sysfs_preempt_timeout',
 ]
 
-- 
2.25.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2020-03-11  9:35 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-01-27 12:18 [Intel-gfx] [PATCH i-g-t 1/5] i915: Start putting the mmio_base to wider use Chris Wilson
2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 2/5] i915/gem_ctx_isolation: Check engine relative registers Chris Wilson
2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 3/5] i915: Exercise preemption timeout controls in sysfs Chris Wilson
2020-02-12 14:32   ` [Intel-gfx] [igt-dev] " Petri Latvala
2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls Chris Wilson
2020-02-12 14:33   ` [Intel-gfx] [igt-dev] " Petri Latvala
2020-01-27 12:18 ` [Intel-gfx] [PATCH i-g-t 5/5] i915: Exercise timeslice sysfs property Chris Wilson
2020-02-28 10:43 [Intel-gfx] [PATCH i-g-t 1/5] i915: Start putting the mmio_base to wider use Chris Wilson
2020-02-28 10:43 ` [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls Chris Wilson
2020-02-28 23:34   ` Andi Shyti
2020-02-28 23:37     ` Chris Wilson
2020-03-11  9:34 [Intel-gfx] [PATCH i-g-t 1/5] lib/i915: Create a context wrapping one specific engine Chris Wilson
2020-03-11  9:34 ` [Intel-gfx] [PATCH i-g-t 4/5] i915: Exercise sysfs heartbeat controls Chris Wilson
