From: Andi Shyti <andi.shyti@intel.com>
To: IGT dev <igt-dev@lists.freedesktop.org>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>, Andi Shyti <andi@etezian.org>
Subject: [igt-dev] [PATCH v24 06/14] test: perf_pmu: use the gem_engine_topology library
Date: Mon, 13 May 2019 20:56:05 +0300
Message-ID: <20190513175613.2507-7-andi.shyti@intel.com>
In-Reply-To: <20190513175613.2507-1-andi.shyti@intel.com>

Replace the legacy for_each_engine* defines with the ones
implemented in the gem_engine_topology library.

Where possible, use gem_engine_can_store_dword(), which checks the
engine class instead of the execbuf flags.

The __for_each_engine_class_instance and
for_each_engine_class_instance macros are now unused; remove them.
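
For reference, the resulting pattern looks roughly like this (an
illustrative sketch only, not part of the diff below; gem_fd is an
open i915 fd and the spinner helpers are the ones already used by
perf_pmu.c):

	struct intel_execution_engine2 *e;

	__for_each_physical_engine(gem_fd, e) {
		struct igt_spin_factory opts = {
			.ctx = 0,
			.engine = e->flags, /* eb flags now travel with the engine */
		};
		igt_spin_t *spin;

		/* A pollable spinner needs store-dword: check the class, not the flags. */
		if (gem_class_can_store_dword(gem_fd, e->class))
			opts.flags |= IGT_SPIN_POLL_RUN;

		spin = __igt_spin_factory(gem_fd, &opts);
		igt_spin_free(gem_fd, spin);
	}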

Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Signed-off-by: Andi Shyti <andi.shyti@intel.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 lib/igt_gt.h     |   7 ----
 tests/perf_pmu.c | 102 ++++++++++++++++++++++++++---------------------
 2 files changed, 56 insertions(+), 53 deletions(-)

diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index 0b5c7fcb4c3c..77318e2a82b8 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -119,11 +119,4 @@ void gem_require_engine(int gem_fd,
 	igt_require(gem_has_engine(gem_fd, class, instance));
 }
 
-#define __for_each_engine_class_instance(e__) \
-	for ((e__) = intel_execution_engines2; (e__)->name; (e__)++)
-
-#define for_each_engine_class_instance(fd__, e__) \
-	for ((e__) = intel_execution_engines2; (e__)->name; (e__)++) \
-		for_if (gem_has_engine((fd__), (e__)->class, (e__)->instance))
-
 #endif /* IGT_GT_H */
diff --git a/tests/perf_pmu.c b/tests/perf_pmu.c
index e719a292a3d1..97aa1f57fe04 100644
--- a/tests/perf_pmu.c
+++ b/tests/perf_pmu.c
@@ -82,7 +82,7 @@ init(int gem_fd, const struct intel_execution_engine2 *e, uint8_t sample)
 	if (fd < 0)
 		err = errno;
 
-	exists = gem_has_engine(gem_fd, e->class, e->instance);
+	exists = gem_context_has_engine(gem_fd, 0, e->flags);
 	if (intel_gen(intel_get_drm_devid(gem_fd)) < 6 &&
 	    sample == I915_SAMPLE_SEMA)
 		exists = false;
@@ -158,11 +158,6 @@ static unsigned int measured_usleep(unsigned int usec)
 	return igt_nsec_elapsed(&ts);
 }
 
-static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
-{
-	return gem_class_instance_to_eb_flags(gem_fd, e->class, e->instance);
-}
-
 #define TEST_BUSY (1)
 #define FLAG_SYNC (2)
 #define TEST_TRAILING_IDLE (4)
@@ -170,14 +165,15 @@ static unsigned int e2ring(int gem_fd, const struct intel_execution_engine2 *e)
 #define FLAG_LONG (16)
 #define FLAG_HANG (32)
 
-static igt_spin_t * __spin_poll(int fd, uint32_t ctx, unsigned long flags)
+static igt_spin_t * __spin_poll(int fd, uint32_t ctx,
+				const struct intel_execution_engine2 *e)
 {
 	struct igt_spin_factory opts = {
 		.ctx = ctx,
-		.engine = flags,
+		.engine = e->flags,
 	};
 
-	if (gem_can_store_dword(fd, flags))
+	if (gem_class_can_store_dword(fd, e->class))
 		opts.flags |= IGT_SPIN_POLL_RUN;
 
 	return __igt_spin_factory(fd, &opts);
@@ -209,20 +205,34 @@ static unsigned long __spin_wait(int fd, igt_spin_t *spin)
 	return igt_nsec_elapsed(&start);
 }
 
-static igt_spin_t * __spin_sync(int fd, uint32_t ctx, unsigned long flags)
+static igt_spin_t * __spin_sync(int fd, uint32_t ctx,
+				const struct intel_execution_engine2 *e)
 {
-	igt_spin_t *spin = __spin_poll(fd, ctx, flags);
+	igt_spin_t *spin = __spin_poll(fd, ctx, e);
 
 	__spin_wait(fd, spin);
 
 	return spin;
 }
 
-static igt_spin_t * spin_sync(int fd, uint32_t ctx, unsigned long flags)
+static igt_spin_t * spin_sync(int fd, uint32_t ctx,
+			      const struct intel_execution_engine2 *e)
 {
 	igt_require_gem(fd);
 
-	return __spin_sync(fd, ctx, flags);
+	return __spin_sync(fd, ctx, e);
+}
+
+static igt_spin_t * spin_sync_flags(int fd, uint32_t ctx, unsigned int flags)
+{
+	struct intel_execution_engine2 e = { };
+
+	e.class = gem_execbuf_flags_to_engine_class(flags);
+	e.instance = (flags & (I915_EXEC_BSD_MASK | I915_EXEC_RING_MASK)) ==
+		     (I915_EXEC_BSD | I915_EXEC_BSD_RING2) ? 1 : 0;
+	e.flags = flags;
+
+	return spin_sync(fd, ctx, &e);
 }
 
 static void end_spin(int fd, igt_spin_t *spin, unsigned int flags)
@@ -267,7 +277,7 @@ single(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
 	fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
 
 	if (flags & TEST_BUSY)
-		spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+		spin = spin_sync(gem_fd, 0, e);
 	else
 		spin = NULL;
 
@@ -316,7 +326,7 @@ busy_start(int gem_fd, const struct intel_execution_engine2 *e)
 	 */
 	sleep(2);
 
-	spin = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+	spin = __spin_sync(gem_fd, 0, e);
 
 	fd = open_pmu(I915_PMU_ENGINE_BUSY(e->class, e->instance));
 
@@ -347,6 +357,7 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
 	int fd;
 
 	ctx = gem_context_create(gem_fd);
+	gem_context_set_all_engines(gem_fd, ctx);
 
 	/*
 	 * Defeat the busy stats delayed disable, we need to guarantee we are
@@ -359,11 +370,11 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
 	 * re-submission in execlists mode. Make sure busyness is correctly
 	 * reported with the engine busy, and after the engine went idle.
 	 */
-	spin[0] = __spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+	spin[0] = __spin_sync(gem_fd, 0, e);
 	usleep(500e3);
 	spin[1] = __igt_spin_new(gem_fd,
 				 .ctx = ctx,
-				 .engine = e2ring(gem_fd, e));
+				 .engine = e->flags);
 
 	/*
 	 * Open PMU as fast as possible after the second spin batch in attempt
@@ -424,7 +435,7 @@ static void
 busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 	       const unsigned int num_engines, unsigned int flags)
 {
-	const struct intel_execution_engine2 *e_;
+	struct intel_execution_engine2 *e_;
 	uint64_t tval[2][num_engines];
 	unsigned int busy_idx = 0, i;
 	uint64_t val[num_engines];
@@ -434,8 +445,8 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 
 	i = 0;
 	fd[0] = -1;
-	for_each_engine_class_instance(gem_fd, e_) {
-		if (e == e_)
+	__for_each_physical_engine(gem_fd, e_) {
+		if (e->class == e_->class && e->instance == e_->instance)
 			busy_idx = i;
 
 		fd[i++] = open_group(I915_PMU_ENGINE_BUSY(e_->class,
@@ -445,7 +456,7 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 
 	igt_assert_eq(i, num_engines);
 
-	spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+	spin = spin_sync(gem_fd, 0, e);
 	pmu_read_multi(fd[0], num_engines, tval[0]);
 	slept = measured_usleep(batch_duration_ns / 1000);
 	if (flags & TEST_TRAILING_IDLE)
@@ -478,7 +489,7 @@ __submit_spin(int gem_fd, igt_spin_t *spin,
 	struct drm_i915_gem_execbuffer2 eb = spin->execbuf;
 
 	eb.flags &= ~(0x3f | I915_EXEC_BSD_MASK);
-	eb.flags |= e2ring(gem_fd, e) | I915_EXEC_NO_RELOC;
+	eb.flags |= e->flags | I915_EXEC_NO_RELOC;
 	eb.batch_start_offset += offset;
 
 	gem_execbuf(gem_fd, &eb);
@@ -488,7 +499,7 @@ static void
 most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 		    const unsigned int num_engines, unsigned int flags)
 {
-	const struct intel_execution_engine2 *e_;
+	struct intel_execution_engine2 *e_;
 	uint64_t tval[2][num_engines];
 	uint64_t val[num_engines];
 	int fd[num_engines];
@@ -497,13 +508,13 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
 	unsigned int idle_idx, i;
 
 	i = 0;
-	for_each_engine_class_instance(gem_fd, e_) {
-		if (e == e_)
+	__for_each_physical_engine(gem_fd, e_) {
+		if (e->class == e_->class && e->instance == e_->instance)
 			idle_idx = i;
 		else if (spin)
 			__submit_spin(gem_fd, spin, e_, 64);
 		else
-			spin = __spin_poll(gem_fd, 0, e2ring(gem_fd, e_));
+			spin = __spin_poll(gem_fd, 0, e_);
 
 		val[i++] = I915_PMU_ENGINE_BUSY(e_->class, e_->instance);
 	}
@@ -545,7 +556,7 @@ static void
 all_busy_check_all(int gem_fd, const unsigned int num_engines,
 		   unsigned int flags)
 {
-	const struct intel_execution_engine2 *e;
+	struct intel_execution_engine2 *e;
 	uint64_t tval[2][num_engines];
 	uint64_t val[num_engines];
 	int fd[num_engines];
@@ -554,11 +565,11 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines,
 	unsigned int i;
 
 	i = 0;
-	for_each_engine_class_instance(gem_fd, e) {
+	__for_each_physical_engine(gem_fd, e) {
 		if (spin)
 			__submit_spin(gem_fd, spin, e, 64);
 		else
-			spin = __spin_poll(gem_fd, 0, e2ring(gem_fd, e));
+			spin = __spin_poll(gem_fd, 0, e);
 
 		val[i++] = I915_PMU_ENGINE_BUSY(e->class, e->instance);
 	}
@@ -602,7 +613,7 @@ no_sema(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
 	open_group(I915_PMU_ENGINE_WAIT(e->class, e->instance), fd);
 
 	if (flags & TEST_BUSY)
-		spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+		spin = spin_sync(gem_fd, 0, e);
 	else
 		spin = NULL;
 
@@ -689,7 +700,7 @@ sema_wait(int gem_fd, const struct intel_execution_engine2 *e,
 
 	eb.buffer_count = 2;
 	eb.buffers_ptr = to_user_pointer(obj);
-	eb.flags = e2ring(gem_fd, e);
+	eb.flags = e->flags;
 
 	/**
 	 * Start the semaphore wait PMU and after some known time let the above
@@ -845,7 +856,7 @@ event_wait(int gem_fd, const struct intel_execution_engine2 *e)
 
 	eb.buffer_count = 1;
 	eb.buffers_ptr = to_user_pointer(&obj);
-	eb.flags = e2ring(gem_fd, e) | I915_EXEC_SECURE;
+	eb.flags = e->flags | I915_EXEC_SECURE;
 
 	for_each_pipe_with_valid_output(&data.display, p, output) {
 		struct igt_helper_process waiter = { };
@@ -936,7 +947,7 @@ multi_client(int gem_fd, const struct intel_execution_engine2 *e)
 	 */
 	fd[1] = open_pmu(config);
 
-	spin = spin_sync(gem_fd, 0, e2ring(gem_fd, e));
+	spin = spin_sync(gem_fd, 0, e);
 
 	val[0] = val[1] = __pmu_read_single(fd[0], &ts[0]);
 	slept[1] = measured_usleep(batch_duration_ns / 1000);
@@ -1301,7 +1312,7 @@ test_frequency(int gem_fd)
 	igt_require(igt_sysfs_get_u32(sysfs, "gt_boost_freq_mhz") == min_freq);
 
 	gem_quiescent_gpu(gem_fd); /* Idle to be sure the change takes effect */
-	spin = spin_sync(gem_fd, 0, I915_EXEC_RENDER);
+	spin = spin_sync_flags(gem_fd, 0, I915_EXEC_RENDER);
 
 	slept = pmu_read_multi(fd, 2, start);
 	measured_usleep(batch_duration_ns / 1000);
@@ -1327,7 +1338,7 @@ test_frequency(int gem_fd)
 	igt_require(igt_sysfs_get_u32(sysfs, "gt_min_freq_mhz") == max_freq);
 
 	gem_quiescent_gpu(gem_fd);
-	spin = spin_sync(gem_fd, 0, I915_EXEC_RENDER);
+	spin = spin_sync_flags(gem_fd, 0, I915_EXEC_RENDER);
 
 	slept = pmu_read_multi(fd, 2, start);
 	measured_usleep(batch_duration_ns / 1000);
@@ -1458,14 +1469,14 @@ test_enable_race(int gem_fd, const struct intel_execution_engine2 *e)
 	int fd;
 
 	igt_require(gem_has_execlists(gem_fd));
-	igt_require(gem_has_engine(gem_fd, e->class, e->instance));
+	igt_require(gem_context_has_engine(gem_fd, 0, e->flags));
 
 	obj.handle = gem_create(gem_fd, 4096);
 	gem_write(gem_fd, obj.handle, 0, &bbend, sizeof(bbend));
 
 	eb.buffer_count = 1;
 	eb.buffers_ptr = to_user_pointer(&obj);
-	eb.flags = e2ring(gem_fd, e);
+	eb.flags = e->flags;
 
 	/*
 	 * This test is probabilistic so run in a few times to increase the
@@ -1562,7 +1573,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
 		igt_spin_t *spin;
 
 		/* Allocate our spin batch and idle it. */
-		spin = igt_spin_new(gem_fd, .engine = e2ring(gem_fd, e));
+		spin = igt_spin_new(gem_fd, .engine = e->flags);
 		igt_spin_end(spin);
 		gem_sync(gem_fd, spin->handle);
 
@@ -1666,7 +1677,7 @@ igt_main
 				I915_PMU_LAST - __I915_PMU_OTHER(0) + 1;
 	unsigned int num_engines = 0;
 	int fd = -1;
-	const struct intel_execution_engine2 *e;
+	struct intel_execution_engine2 *e;
 	unsigned int i;
 
 	igt_fixture {
@@ -1675,7 +1686,7 @@ igt_main
 		igt_require_gem(fd);
 		igt_require(i915_type_id() > 0);
 
-		for_each_engine_class_instance(fd, e)
+		__for_each_physical_engine(fd, e)
 			num_engines++;
 	}
 
@@ -1685,7 +1696,7 @@ igt_main
 	igt_subtest("invalid-init")
 		invalid_init();
 
-	__for_each_engine_class_instance(e) {
+	__for_each_physical_engine(fd, e) {
 		const unsigned int pct[] = { 2, 50, 98 };
 
 		/**
@@ -1703,7 +1714,7 @@ igt_main
 
 		igt_subtest_group {
 			igt_fixture {
-				gem_require_engine(fd, e->class, e->instance);
+				igt_require(gem_context_has_engine(fd, 0, e->flags));
 			}
 
 			/**
@@ -1889,12 +1900,11 @@ igt_main
 			gem_quiescent_gpu(fd);
 		}
 
-		__for_each_engine_class_instance(e) {
+		__for_each_physical_engine(render_fd, e) {
 			igt_subtest_group {
 				igt_fixture {
-					gem_require_engine(render_fd,
-							   e->class,
-							   e->instance);
+					igt_require(gem_context_has_engine(render_fd,
+									   0, e->flags));
 				}
 
 				igt_subtest_f("render-node-busy-%s", e->name)
-- 
2.20.1
