From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: Matthew Auld <matthew.auld@intel.com>
Subject: [PATCH] drm/i915/selftests: Spin on all engines simultaneously
Date: Thu, 31 Oct 2019 21:23:36 +0000
Message-ID: <20191031212336.14311-1-chris@chris-wilson.co.uk>
In-Reply-To: <20191031211747.13892-1-chris@chris-wilson.co.uk>

Vanshidhar Konda asked for the simplest test "to verify that the kernel
can submit and hardware can execute batch buffers on all the command
streamers in parallel." We have a number of tests in userspace that
submit load to each engine and verify that it is present, but strictly
speaking we have no selftest to prove that the kernel can _simultaneously_
execute on all known engines. (We have tests to demonstrate that we can
submit to HW in parallel, but we don't insist that they execute in
parallel.)
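
The heart of the new subtest is a rendezvous: each engine starts an
unpreemptable spinner and then blocks until every other engine has done
the same, so the test can only pass if all engines were executing at
once. As a userspace sketch of that barrier pattern (illustrative only;
the kernel code below uses atomic_dec_and_test() and
wait_var_event_timeout() rather than pthreads):

  /*
   * Illustrative userspace analogue of the wait_for_all() barrier in
   * this patch: each thread decrements a shared counter, and the last
   * one to arrive wakes all the others.
   */
  #include <pthread.h>
  #include <stdio.h>

  #define NENGINES 4 /* stand-in for num_uabi_engines() */

  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
  static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
  static int counter = NENGINES;

  static void *spin_on_engine(void *arg)
  {
          /* ...start a spinner on this engine here... */

          pthread_mutex_lock(&lock);
          if (--counter == 0)
                  pthread_cond_broadcast(&cond); /* last to arrive */
          else
                  while (counter)
                          pthread_cond_wait(&cond, &lock);
          pthread_mutex_unlock(&lock);

          /* every "engine" is now known to be spinning at once */
          return NULL;
  }

  int main(void)
  {
          pthread_t tsk[NENGINES];
          int i;

          for (i = 0; i < NENGINES; i++)
                  pthread_create(&tsk[i], NULL, spin_on_engine, NULL);
          for (i = 0; i < NENGINES; i++)
                  pthread_join(tsk[i], NULL);
          printf("all %d threads passed the barrier\n", NENGINES);
          return 0;
  }

In the kernel version the timeout passed to wait_var_event_timeout()
additionally bounds the wait, turning a missing engine into -ETIME
rather than a hang.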

Suggested-by: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Vanshidhar Konda <vanshidhar.r.konda@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
---
 drivers/gpu/drm/i915/i915_drv.h               |  6 ++
 drivers/gpu/drm/i915/selftests/i915_request.c | 63 +++++++++++++++++++
 2 files changed, 69 insertions(+)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a22d969cb352..0c3ab6020bc6 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -891,6 +891,10 @@ struct intel_cdclk_state {
 	u8 voltage_level;
 };
 
+struct i915_selftest_stash {
+	atomic_t counter;
+};
+
 struct drm_i915_private {
 	struct drm_device drm;
 
@@ -1286,6 +1290,8 @@ struct drm_i915_private {
 	/* Mutex to protect the above hdcp component related values. */
 	struct mutex hdcp_comp_mutex;
 
+	I915_SELFTEST_DECLARE(struct i915_selftest_stash selftest;)
+
 	/*
 	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
 	 * will be rejected. Instead look for a better place.
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index 30ae34f62176..6181b327b4ac 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -32,6 +32,7 @@
 #include "i915_random.h"
 #include "i915_selftest.h"
 #include "igt_live_test.h"
+#include "igt_spinner.h"
 #include "lib_sw_fence.h"
 
 #include "mock_drm.h"
@@ -1115,12 +1116,72 @@ static int __live_parallel_engineN(void *arg)
 	return 0;
 }
 
+static int wait_for_all(struct drm_i915_private *i915)
+{
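+	/*
+	 * A one-shot barrier: each engine's thread decrements the shared
+	 * counter; the last to arrive wakes the rest, who sleep until the
+	 * counter drains to zero or the selftest timeout expires.
+	 */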
+	if (atomic_dec_and_test(&i915->selftest.counter)) {
+		wake_up_var(&i915->selftest.counter);
+		return 0;
+	}
+
+	if (wait_var_event_timeout(&i915->selftest.counter,
+				   !atomic_read(&i915->selftest.counter),
+				   i915_selftest.timeout_jiffies))
+		return 0;
+
+	return -ETIME;
+}
+
+static int __live_parallel_spin(void *arg)
+{
+	struct intel_engine_cs *engine = arg;
+	struct igt_spinner spin;
+	struct i915_request *rq;
+	int err = 0;
+
+	/*
+	 * Create a spinner running for eternity on each engine. If a second
+	 * spinner is incorrectly placed on the same engine, it will not be
+	 * able to start in time.
+	 */
+
+	if (igt_spinner_init(&spin, engine->gt))
+		return -ENOMEM;
+
+	rq = igt_spinner_create_request(&spin,
+					engine->kernel_context,
+					MI_NOOP); /* no preemption */
+	if (IS_ERR(rq)) {
+		err = PTR_ERR(rq);
+		goto out_spin;
+	}
+
+	i915_request_get(rq);
+	i915_request_add(rq);
+	if (igt_wait_for_spinner(&spin, rq)) {
+		/* Occupy this engine for the whole test */
+		err = wait_for_all(engine->i915);
+	} else {
+		pr_err("Failed to start spinner on %s\n", engine->name);
+		err = -EINVAL;
+	}
+	igt_spinner_end(&spin);
+
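+	/* With the spinner released, the request should retire promptly. */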
+	if (err == 0 && i915_request_wait(rq, 0, HZ / 5) < 0)
+		err = -EIO;
+	i915_request_put(rq);
+
+out_spin:
+	igt_spinner_fini(&spin);
+	return err;
+}
+
 static int live_parallel_engines(void *arg)
 {
 	struct drm_i915_private *i915 = arg;
 	static int (* const func[])(void *arg) = {
 		__live_parallel_engine1,
 		__live_parallel_engineN,
+		__live_parallel_spin,
 		NULL,
 	};
 	const unsigned int nengines = num_uabi_engines(i915);
@@ -1146,6 +1207,8 @@ static int live_parallel_engines(void *arg)
 		if (err)
 			break;
 
+		atomic_set(&i915->selftest.counter, nengines);
+
 		idx = 0;
 		for_each_uabi_engine(engine, i915) {
 			tsk[idx] = kthread_run(*fn, engine,
-- 
2.24.0.rc2

Thread overview: 8+ messages

2019-10-31 21:17 [PATCH] drm/i915/selftests: Spin on all engines simultaneously Chris Wilson
2019-10-31 21:23 ` Chris Wilson [this message]
2019-10-31 22:36   ` Vanshidhar Konda
2019-10-31 23:10 ` ✗ Fi.CI.BAT: failure for drm/i915/selftests: Spin on all engines simultaneously (rev2) Patchwork
