* [igt-dev] [PATCH i-g-t 01/81] lib/i915/gem_submission_measure: Take an optional intel_ctx_cfg_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 02/81] tests/i915/gem_exec_fence: Move the engine data into inter_engine_context (v3) Jason Ekstrand
` (82 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
If provided, the engine (or ALL_ENGINES) is relative to the given
context config. This is intended to be transitional. We'll get rid of
all the __for_each_physical_engine stuff later.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
lib/i915/gem_submission.c | 39 ++++++++++++++++++++++++++++------
lib/i915/gem_submission.h | 5 ++++-
tests/i915/gem_busy.c | 2 +-
tests/i915/gem_exec_await.c | 2 +-
tests/i915/gem_exec_fence.c | 2 +-
tests/i915/gem_exec_latency.c | 2 +-
tests/i915/gem_exec_schedule.c | 6 +++---
tests/i915/perf_pmu.c | 2 +-
8 files changed, 44 insertions(+), 16 deletions(-)
diff --git a/lib/i915/gem_submission.c b/lib/i915/gem_submission.c
index bd4bbb3ef..7c305d6d6 100644
--- a/lib/i915/gem_submission.c
+++ b/lib/i915/gem_submission.c
@@ -306,7 +306,7 @@ static void alarm_handler(int sig)
}
static unsigned int
-__measure_ringsize(int i915, unsigned int engine)
+__measure_ringsize(int i915, uint32_t ctx_id, unsigned int engine)
{
struct sigaction old_sa, sa = { .sa_handler = alarm_handler };
struct drm_i915_gem_exec_object2 obj[2];
@@ -323,6 +323,7 @@ __measure_ringsize(int i915, unsigned int engine)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj[1]);
execbuf.buffer_count = 1;
+ execbuf.rsvd1 = ctx_id;
execbuf.flags = engine;
gem_execbuf(i915, &execbuf);
@@ -372,8 +373,10 @@ __measure_ringsize(int i915, unsigned int engine)
return count / 2 - 2;
}
-unsigned int gem_submission_measure(int i915, unsigned int engine)
+unsigned int gem_submission_measure(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
+ const intel_ctx_t *ctx = NULL;
unsigned int size;
bool nonblock;
@@ -381,19 +384,41 @@ unsigned int gem_submission_measure(int i915, unsigned int engine)
if (!nonblock)
fcntl(i915, F_SETFL, fcntl(i915, F_GETFL) | O_NONBLOCK);
+ if (cfg) {
+ if (gem_has_contexts(i915))
+ ctx = intel_ctx_create(i915, cfg);
+ else
+ ctx = intel_ctx_0(i915);
+ }
+
if (engine == ALL_ENGINES) {
struct intel_execution_engine2 *e;
size = -1;
- __for_each_physical_engine(i915, e) {
- unsigned int this = __measure_ringsize(i915, e->flags);
- if (this < size)
- size = this;
+ if (ctx) {
+ for_each_ctx_engine(i915, ctx, e) {
+ unsigned int this = __measure_ringsize(i915, ctx->id, e->flags);
+ if (this < size)
+ size = this;
+ }
+ } else {
+ __for_each_physical_engine(i915, e) {
+ unsigned int this = __measure_ringsize(i915, 0, e->flags);
+ if (this < size)
+ size = this;
+ }
}
} else {
- size = __measure_ringsize(i915, engine);
+ if (ctx)
+ size = __measure_ringsize(i915, ctx->id, engine);
+ else
+ size = __measure_ringsize(i915, 0, engine);
}
+ if (ctx)
+ intel_ctx_destroy(i915, ctx);
+
+
if (!nonblock)
fcntl(i915, F_SETFL, fcntl(i915, F_GETFL) & ~O_NONBLOCK);
diff --git a/lib/i915/gem_submission.h b/lib/i915/gem_submission.h
index 0faba6a3e..a5497a5e2 100644
--- a/lib/i915/gem_submission.h
+++ b/lib/i915/gem_submission.h
@@ -26,6 +26,8 @@
#include <stdint.h>
+#include "intel_ctx.h"
+
#define GEM_SUBMISSION_SEMAPHORES (1 << 0)
#define GEM_SUBMISSION_EXECLISTS (1 << 1)
#define GEM_SUBMISSION_GUC (1 << 2)
@@ -46,7 +48,8 @@ static inline bool gem_has_cmdparser(int i915, uint32_t engine)
bool gem_has_blitter(int i915);
void gem_require_blitter(int i915);
-unsigned int gem_submission_measure(int i915, unsigned int engine);
+unsigned int gem_submission_measure(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine);
void gem_test_engine(int fd, unsigned int engine);
bool gem_has_relocations(int fd);
diff --git a/tests/i915/gem_busy.c b/tests/i915/gem_busy.c
index dc481f3c5..7e2b220aa 100644
--- a/tests/i915/gem_busy.c
+++ b/tests/i915/gem_busy.c
@@ -232,7 +232,7 @@ static void xchg_u32(void *array, unsigned i, unsigned j)
static void close_race(int fd)
{
const unsigned int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
- const unsigned int nhandles = gem_submission_measure(fd, ALL_ENGINES);
+ const unsigned int nhandles = gem_submission_measure(fd, NULL, ALL_ENGINES);
unsigned int engines[I915_EXEC_RING_MASK + 1], nengine;
const struct intel_execution_engine2 *e;
unsigned long *control;
diff --git a/tests/i915/gem_exec_await.c b/tests/i915/gem_exec_await.c
index 6db30695f..ba8325ce3 100644
--- a/tests/i915/gem_exec_await.c
+++ b/tests/i915/gem_exec_await.c
@@ -237,7 +237,7 @@ igt_main
igt_require_gem(device);
gem_submission_print_method(device);
- ring_size = gem_submission_measure(device, ALL_ENGINES);
+ ring_size = gem_submission_measure(device, NULL, ALL_ENGINES);
igt_info("Ring size: %d batches\n", ring_size);
igt_require(ring_size > 0);
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index c3a650d89..6c4d3035f 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -3103,7 +3103,7 @@ igt_main
long ring_size = 0;
igt_fixture {
- ring_size = gem_submission_measure(i915, ALL_ENGINES);
+ ring_size = gem_submission_measure(i915, NULL, ALL_ENGINES);
igt_info("Ring size: %ld batches\n", ring_size);
igt_require(ring_size);
diff --git a/tests/i915/gem_exec_latency.c b/tests/i915/gem_exec_latency.c
index d31e82bc1..62bad6171 100644
--- a/tests/i915/gem_exec_latency.c
+++ b/tests/i915/gem_exec_latency.c
@@ -897,7 +897,7 @@ igt_main
gem_submission_print_method(device);
- ring_size = gem_submission_measure(device, ALL_ENGINES);
+ ring_size = gem_submission_measure(device, NULL, ALL_ENGINES);
igt_info("Ring size: %d batches\n", ring_size);
igt_require(ring_size > 8);
ring_size -= 8; /* leave some spare */
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index fe3b8d29b..64a693ba5 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -1805,7 +1805,7 @@ static void deep(int fd, unsigned ring)
ctx[n] = gem_context_clone_with_engines(fd, 0);
}
- nreq = gem_submission_measure(fd, ring) / (3 * XS) * MAX_CONTEXTS;
+ nreq = gem_submission_measure(fd, NULL, ring) / (3 * XS) * MAX_CONTEXTS;
if (nreq > max_req)
nreq = max_req;
igt_info("Using %d requests (prio range %d)\n", nreq, max_req);
@@ -1950,7 +1950,7 @@ static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
static void wide(int fd, unsigned ring)
{
- const unsigned int ring_size = gem_submission_measure(fd, ring);
+ const unsigned int ring_size = gem_submission_measure(fd, NULL, ring);
struct timespec tv = {};
IGT_CORK_FENCE(cork);
uint32_t result;
@@ -1995,7 +1995,7 @@ static void wide(int fd, unsigned ring)
static void reorder_wide(int fd, unsigned ring)
{
- const unsigned int ring_size = gem_submission_measure(fd, ring);
+ const unsigned int ring_size = gem_submission_measure(fd, NULL, ring);
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
const int priorities[] = { MIN_PRIO, MAX_PRIO };
struct drm_i915_gem_relocation_entry reloc;
diff --git a/tests/i915/perf_pmu.c b/tests/i915/perf_pmu.c
index aa297bf19..f92f73919 100644
--- a/tests/i915/perf_pmu.c
+++ b/tests/i915/perf_pmu.c
@@ -1341,7 +1341,7 @@ static void cpu_hotplug(int gem_fd)
static int target_num_interrupts(int i915)
{
- return min(gem_submission_measure(i915, I915_EXEC_DEFAULT), 30);
+ return min(gem_submission_measure(i915, NULL, I915_EXEC_DEFAULT), 30);
}
static void
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 02/81] tests/i915/gem_exec_fence: Move the engine data into inter_engine_context (v3)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 01/81] lib/i915/gem_submission_measure: Take an optional intel_ctx_cfg_t Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 03/81] tests/i915/gem_exec_fence: Convert to intel_ctx_t (v2) Jason Ekstrand
` (81 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
This will make iteration easier when we switch to intel_ctx_t.
v2 (Zbigniew Kempczyński):
- Require nengines > 0 in setup_timeline_chain_engines()
v3 (Zbigniew Kempczyński):
- Should have been nengines > 1 in setup_timeline_chain_engines()
v3 (Jason Ekstrand):
- Delete more intel_init_engine_list() in this patch
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_fence.c | 84 ++++++++++++++++++-------------------
1 file changed, 41 insertions(+), 43 deletions(-)
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index 6c4d3035f..a86be17b1 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -2347,7 +2347,7 @@ struct inter_engine_context {
uint32_t context;
} iterations[9];
- struct intel_engine_data *engines;
+ struct intel_engine_data engines;
struct inter_engine_batches {
void *increment_bb;
@@ -2415,7 +2415,7 @@ static void submit_timeline_execbuf(struct inter_engine_context *context,
execbuf->cliprects_ptr = to_user_pointer(&fence_list);
}
- execbuf->flags |= context->engines->engines[run_engine_idx].flags;
+ execbuf->flags |= context->engines.engines[run_engine_idx].flags;
gem_execbuf(context->fd, execbuf);
}
@@ -2660,12 +2660,13 @@ get_cs_timestamp_frequency(int fd)
igt_skip("Kernel with PARAM_CS_TIMESTAMP_FREQUENCY support required\n");
}
-static void setup_timeline_chain_engines(struct inter_engine_context *context, int fd, struct intel_engine_data *engines)
+static void setup_timeline_chain_engines(struct inter_engine_context *context, int fd)
{
memset(context, 0, sizeof(*context));
context->fd = fd;
- context->engines = engines;
+ context->engines = intel_init_engine_list(fd, 0);
+ igt_require(context->engines.nengines > 1);
context->wait_context = gem_context_clone_with_engines(fd, 0);
context->wait_timeline = syncobj_create(fd, 0);
@@ -2685,15 +2686,16 @@ static void setup_timeline_chain_engines(struct inter_engine_context *context, i
gem_write(fd, context->wait_bb_handle, 0,
context->wait_bb, context->wait_bb_len);
- context->batches = calloc(engines->nengines, sizeof(*context->batches));
- for (uint32_t e = 0; e < engines->nengines; e++) {
+ context->batches = calloc(context->engines.nengines,
+ sizeof(*context->batches));
+ for (uint32_t e = 0; e < context->engines.nengines; e++) {
struct inter_engine_batches *batches = &context->batches[e];
batches->timeline = syncobj_create(fd, 0);
build_increment_engine_bb(
batches,
- gem_engine_mmio_base(fd, engines->engines[e].name));
+ gem_engine_mmio_base(fd, context->engines.engines[e].name));
batches->increment_bb_handle = gem_create(fd, 4096);
gem_write(fd, batches->increment_bb_handle, 0,
batches->increment_bb, batches->increment_bb_len);
@@ -2706,7 +2708,7 @@ static void setup_timeline_chain_engines(struct inter_engine_context *context, i
{
uint64_t dword = 1;
gem_write(fd, context->engine_counter_object.handle,
- sizeof(dword) * (context->engines->nengines - 1),
+ sizeof(dword) * (context->engines.nengines - 1),
&dword, sizeof(dword));
}
}
@@ -2724,7 +2726,7 @@ static void teardown_timeline_chain_engines(struct inter_engine_context *context
gem_close(context->fd, context->wait_bb_handle);
free(context->wait_bb);
- for (uint32_t e = 0; e < context->engines->nengines; e++) {
+ for (uint32_t e = 0; e < context->engines.nengines; e++) {
struct inter_engine_batches *batches = &context->batches[e];
syncobj_destroy(context->fd, batches->timeline);
@@ -2734,12 +2736,12 @@ static void teardown_timeline_chain_engines(struct inter_engine_context *context
free(context->batches);
}
-static void test_syncobj_timeline_chain_engines(int fd, struct intel_engine_data *engines)
+static void test_syncobj_timeline_chain_engines(int fd)
{
struct inter_engine_context ctx;
uint64_t *counter_output;
- setup_timeline_chain_engines(&ctx, fd, engines);
+ setup_timeline_chain_engines(&ctx, fd);
/*
* Delay all the other operations by making them depend on an
@@ -2748,11 +2750,11 @@ static void test_syncobj_timeline_chain_engines(int fd, struct intel_engine_data
wait_engine(&ctx, 0, ctx.wait_timeline, 1);
for (uint32_t iter = 0; iter < ARRAY_SIZE(ctx.iterations); iter++) {
- for (uint32_t engine = 0; engine < engines->nengines; engine++) {
+ for (uint32_t engine = 0; engine < ctx.engines.nengines; engine++) {
uint32_t prev_prev_engine =
- (engines->nengines + engine - 2) % engines->nengines;
+ (ctx.engines.nengines + engine - 2) % ctx.engines.nengines;
uint32_t prev_engine =
- (engines->nengines + engine - 1) % engines->nengines;
+ (ctx.engines.nengines + engine - 1) % ctx.engines.nengines;
/*
* Pick up the wait engine semaphore for the
* first increment, then pick up the previous
@@ -2778,28 +2780,28 @@ static void test_syncobj_timeline_chain_engines(int fd, struct intel_engine_data
counter_output = gem_mmap__wc(fd, ctx.engine_counter_object.handle, 0, 4096, PROT_READ);
- for (uint32_t i = 0; i < ctx.engines->nengines; i++)
+ for (uint32_t i = 0; i < ctx.engines.nengines; i++)
igt_debug("engine %i (%s)\t= %016"PRIx64"\n", i,
- ctx.engines->engines[i].name, counter_output[i]);
+ ctx.engines.engines[i].name, counter_output[i]);
/*
* Verify that we get the fibonacci number expected (we start
* at the sequence on the second number : 1).
*/
- igt_assert_eq(counter_output[engines->nengines - 1],
- fib(ARRAY_SIZE(ctx.iterations) * engines->nengines + 1));
+ igt_assert_eq(counter_output[ctx.engines.nengines - 1],
+ fib(ARRAY_SIZE(ctx.iterations) * ctx.engines.nengines + 1));
munmap(counter_output, 4096);
teardown_timeline_chain_engines(&ctx);
}
-static void test_syncobj_stationary_timeline_chain_engines(int fd, struct intel_engine_data *engines)
+static void test_syncobj_stationary_timeline_chain_engines(int fd)
{
struct inter_engine_context ctx;
uint64_t *counter_output;
- setup_timeline_chain_engines(&ctx, fd, engines);
+ setup_timeline_chain_engines(&ctx, fd);
/*
* Delay all the other operations by making them depend on an
@@ -2808,11 +2810,11 @@ static void test_syncobj_stationary_timeline_chain_engines(int fd, struct intel_
wait_engine(&ctx, 0, ctx.wait_timeline, 1);
for (uint32_t iter = 0; iter < ARRAY_SIZE(ctx.iterations); iter++) {
- for (uint32_t engine = 0; engine < engines->nengines; engine++) {
+ for (uint32_t engine = 0; engine < ctx.engines.nengines; engine++) {
uint32_t prev_prev_engine =
- (engines->nengines + engine - 2) % engines->nengines;
+ (ctx.engines.nengines + engine - 2) % ctx.engines.nengines;
uint32_t prev_engine =
- (engines->nengines + engine - 1) % engines->nengines;
+ (ctx.engines.nengines + engine - 1) % ctx.engines.nengines;
/*
* Pick up the wait engine semaphore for the
* first increment, then pick up the previous
@@ -2844,23 +2846,23 @@ static void test_syncobj_stationary_timeline_chain_engines(int fd, struct intel_
counter_output = gem_mmap__wc(fd, ctx.engine_counter_object.handle, 0, 4096, PROT_READ);
- for (uint32_t i = 0; i < ctx.engines->nengines; i++)
+ for (uint32_t i = 0; i < ctx.engines.nengines; i++)
igt_debug("engine %i (%s)\t= %016"PRIx64"\n", i,
- ctx.engines->engines[i].name, counter_output[i]);
- igt_assert_eq(counter_output[engines->nengines - 1],
- fib(ARRAY_SIZE(ctx.iterations) * engines->nengines + 1));
+ ctx.engines.engines[i].name, counter_output[i]);
+ igt_assert_eq(counter_output[ctx.engines.nengines - 1],
+ fib(ARRAY_SIZE(ctx.iterations) * ctx.engines.nengines + 1));
munmap(counter_output, 4096);
teardown_timeline_chain_engines(&ctx);
}
-static void test_syncobj_backward_timeline_chain_engines(int fd, struct intel_engine_data *engines)
+static void test_syncobj_backward_timeline_chain_engines(int fd)
{
struct inter_engine_context ctx;
uint64_t *counter_output;
- setup_timeline_chain_engines(&ctx, fd, engines);
+ setup_timeline_chain_engines(&ctx, fd);
/*
* Delay all the other operations by making them depend on an
@@ -2869,11 +2871,11 @@ static void test_syncobj_backward_timeline_chain_engines(int fd, struct intel_en
wait_engine(&ctx, 0, ctx.wait_timeline, 1);
for (uint32_t iter = 0; iter < ARRAY_SIZE(ctx.iterations); iter++) {
- for (uint32_t engine = 0; engine < engines->nengines; engine++) {
+ for (uint32_t engine = 0; engine < ctx.engines.nengines; engine++) {
uint32_t prev_prev_engine =
- (engines->nengines + engine - 2) % engines->nengines;
+ (ctx.engines.nengines + engine - 2) % ctx.engines.nengines;
uint32_t prev_engine =
- (engines->nengines + engine - 1) % engines->nengines;
+ (ctx.engines.nengines + engine - 1) % ctx.engines.nengines;
/*
* Pick up the wait engine semaphore for the
* first increment, then pick up the previous
@@ -2905,11 +2907,11 @@ static void test_syncobj_backward_timeline_chain_engines(int fd, struct intel_en
counter_output = gem_mmap__wc(fd, ctx.engine_counter_object.handle, 0, 4096, PROT_READ);
- for (uint32_t i = 0; i < ctx.engines->nengines; i++)
+ for (uint32_t i = 0; i < ctx.engines.nengines; i++)
igt_debug("engine %i (%s)\t= %016"PRIx64"\n", i,
- ctx.engines->engines[i].name, counter_output[i]);
- igt_assert_eq(counter_output[engines->nengines - 1],
- fib(ARRAY_SIZE(ctx.iterations) * engines->nengines + 1));
+ ctx.engines.engines[i].name, counter_output[i]);
+ igt_assert_eq(counter_output[ctx.engines.nengines - 1],
+ fib(ARRAY_SIZE(ctx.iterations) * ctx.engines.nengines + 1));
munmap(counter_output, 4096);
@@ -3203,8 +3205,6 @@ igt_main
test_syncobj_timeline_multiple_ext_nodes(i915);
igt_subtest_group { /* syncobj timeline engine chaining */
- struct intel_engine_data engines;
-
igt_fixture {
/*
* We need support for MI_ALU on all
@@ -3212,18 +3212,16 @@ igt_main
* only on Gen8+
*/
igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8);
- engines = intel_init_engine_list(i915, 0);
- igt_require(engines.nengines > 1);
}
igt_subtest("syncobj-timeline-chain-engines")
- test_syncobj_timeline_chain_engines(i915, &engines);
+ test_syncobj_timeline_chain_engines(i915);
igt_subtest("syncobj-stationary-timeline-chain-engines")
- test_syncobj_stationary_timeline_chain_engines(i915, &engines);
+ test_syncobj_stationary_timeline_chain_engines(i915);
igt_subtest("syncobj-backward-timeline-chain-engines")
- test_syncobj_backward_timeline_chain_engines(i915, &engines);
+ test_syncobj_backward_timeline_chain_engines(i915);
}
igt_fixture {
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 03/81] tests/i915/gem_exec_fence: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 01/81] lib/i915/gem_submission_measure: Take an optional intel_ctx_cfg_t Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 02/81] tests/i915/gem_exec_fence: Move the engine data into inter_engine_context (v3) Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 04/81] tests/i915/gem_exec_schedule: Convert to intel_ctx_t (v3) Jason Ekstrand
` (80 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
v2 (Jason Ekstrand):
- Pass the context config to gem_submission_measure()
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_fence.c | 244 +++++++++++++++++++++---------------
1 file changed, 141 insertions(+), 103 deletions(-)
diff --git a/tests/i915/gem_exec_fence.c b/tests/i915/gem_exec_fence.c
index a86be17b1..ef1bb0ca9 100644
--- a/tests/i915/gem_exec_fence.c
+++ b/tests/i915/gem_exec_fence.c
@@ -31,6 +31,7 @@
#include "igt_syncobj.h"
#include "igt_sysfs.h"
#include "igt_vgem.h"
+#include "intel_ctx.h"
#include "sw_sync.h"
IGT_TEST_DESCRIPTION("Check that execbuf waits for explicit fences");
@@ -56,7 +57,8 @@ struct sync_merge_data {
#define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12)
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
-static void store(int fd, const struct intel_execution_engine2 *e,
+static void store(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
int fence, uint32_t target, unsigned offset_value)
{
const int SCRATCH = 0;
@@ -72,6 +74,7 @@ static void store(int fd, const struct intel_execution_engine2 *e,
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
execbuf.flags = e->flags | I915_EXEC_FENCE_IN;
+ execbuf.rsvd1 = ctx->id;
execbuf.rsvd2 = fence;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
@@ -119,7 +122,8 @@ static bool fence_busy(int fence)
#define NONBLOCK 0x2
#define WAIT 0x4
-static void test_fence_busy(int fd, const struct intel_execution_engine2 *e,
+static void test_fence_busy(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
unsigned flags)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
@@ -139,6 +143,7 @@ static void test_fence_busy(int fd, const struct intel_execution_engine2 *e,
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
execbuf.flags = e->flags | I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = ctx->id;
memset(&obj, 0, sizeof(obj));
obj.handle = gem_create(fd, 4096);
@@ -215,7 +220,7 @@ static void test_fence_busy(int fd, const struct intel_execution_engine2 *e,
gem_quiescent_gpu(fd);
}
-static void test_fence_busy_all(int fd, unsigned flags)
+static void test_fence_busy_all(int fd, const intel_ctx_t *ctx, unsigned flags)
{
const struct intel_execution_engine2 *e;
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
@@ -273,7 +278,7 @@ static void test_fence_busy_all(int fd, unsigned flags)
i++;
all = -1;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
int fence, new;
if ((flags & HANG) == 0 &&
@@ -281,6 +286,7 @@ static void test_fence_busy_all(int fd, unsigned flags)
continue;
execbuf.flags = e->flags | I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = ctx->id;
execbuf.rsvd2 = -1;
gem_execbuf_wr(fd, &execbuf);
fence = execbuf.rsvd2 >> 32;
@@ -337,7 +343,8 @@ static unsigned int spin_hang(unsigned int flags)
return IGT_SPIN_NO_PREEMPTION | IGT_SPIN_INVALID_CS;
}
-static void test_fence_await(int fd, const struct intel_execution_engine2 *e,
+static void test_fence_await(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
unsigned flags)
{
const struct intel_execution_engine2 *e2;
@@ -351,20 +358,21 @@ static void test_fence_await(int fd, const struct intel_execution_engine2 *e,
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
spin = igt_spin_new(fd,
+ .ctx = ctx,
.engine = e->flags,
.flags = IGT_SPIN_FENCE_OUT | spin_hang(flags));
igt_assert(spin->out_fence != -1);
i = 0;
- __for_each_physical_engine(fd, e2) {
+ for_each_ctx_engine(fd, ctx, e2) {
if (!gem_class_can_store_dword(fd, e->class))
continue;
if (flags & NONBLOCK) {
- store(fd, e2, spin->out_fence, scratch, i);
+ store(fd, ctx, e2, spin->out_fence, scratch, i);
} else {
igt_fork(child, 1)
- store(fd, e2, spin->out_fence, scratch, i);
+ store(fd, ctx, e2, spin->out_fence, scratch, i);
}
i++;
@@ -440,9 +448,10 @@ static uint32_t timeslicing_batches(int i915, uint32_t *offset)
return handle;
}
-static void test_submit_fence(int i915, unsigned int engine)
+static void test_submit_fence(int i915, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
- const struct intel_execution_engine2 *e;
+ const struct intel_execution_engine2 *e2;
/*
* Create a pair of interlocking batches, that ping pong
@@ -451,8 +460,9 @@ static void test_submit_fence(int i915, unsigned int engine)
* switch to the other batch in order to advance.
*/
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e2) {
unsigned int offset = 24 << 20;
+ const intel_ctx_t *tmp_ctx;
struct drm_i915_gem_exec_object2 obj = {
.offset = offset,
.flags = EXEC_OBJECT_PINNED,
@@ -468,17 +478,19 @@ static void test_submit_fence(int i915, unsigned int engine)
result = gem_mmap__device_coherent(i915, obj.handle,
0, 4096, PROT_READ);
- execbuf.flags = engine | I915_EXEC_FENCE_OUT;
+ execbuf.flags = e->flags | I915_EXEC_FENCE_OUT;
execbuf.batch_start_offset = 0;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf_wr(i915, &execbuf);
- execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ tmp_ctx = intel_ctx_create(i915, &ctx->cfg);
+ execbuf.rsvd1 = tmp_ctx->id;
execbuf.rsvd2 >>= 32;
- execbuf.flags = e->flags;
+ execbuf.flags = e2->flags;
execbuf.flags |= I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.batch_start_offset = offset;
gem_execbuf_wr(i915, &execbuf);
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, tmp_ctx);
gem_sync(i915, obj.handle);
gem_close(i915, obj.handle);
@@ -533,7 +545,9 @@ static uint32_t submitN_batches(int i915, uint32_t offset, int count)
return handle;
}
-static void test_submitN(int i915, unsigned int engine, int count)
+static void test_submitN(int i915, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
+ int count)
{
unsigned int offset = 24 << 20;
unsigned int sz = ALIGN((count + 1) * 1024, 4096);
@@ -545,7 +559,8 @@ static void test_submitN(int i915, unsigned int engine, int count)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .flags = engine | I915_EXEC_FENCE_OUT,
+ .flags = e->flags | I915_EXEC_FENCE_OUT,
+ .rsvd1 = ctx->id,
};
uint32_t *result =
gem_mmap__device_coherent(i915, obj.handle, 0, sz, PROT_READ);
@@ -556,10 +571,11 @@ static void test_submitN(int i915, unsigned int engine, int count)
igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8);
for (int i = 0; i < count; i++) {
- execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ const intel_ctx_t *tmp_ctx = intel_ctx_create(i915, &ctx->cfg);
+ execbuf.rsvd1 = tmp_ctx->id;
execbuf.batch_start_offset = (i + 1) * 1024;
gem_execbuf_wr(i915, &execbuf);
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, tmp_ctx);
execbuf.flags |= I915_EXEC_FENCE_SUBMIT;
execbuf.rsvd2 >>= 32;
@@ -595,7 +611,8 @@ static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
return err;
}
-static void test_parallel(int i915, const struct intel_execution_engine2 *e)
+static void test_parallel(int i915, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
const struct intel_execution_engine2 *e2;
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
@@ -609,6 +626,7 @@ static void test_parallel(int i915, const struct intel_execution_engine2 *e)
fence = igt_cork_plug(&cork, i915),
spin = igt_spin_new(i915,
+ .ctx = ctx,
.engine = e->flags,
.fence = fence,
.flags = (IGT_SPIN_FENCE_OUT |
@@ -616,7 +634,7 @@ static void test_parallel(int i915, const struct intel_execution_engine2 *e)
close(fence);
/* Queue all secondaries */
- __for_each_physical_engine(i915, e2) {
+ for_each_ctx_engine(i915, ctx, e2) {
struct drm_i915_gem_relocation_entry reloc = {
.target_handle = scratch,
.offset = sizeof(uint32_t),
@@ -633,6 +651,7 @@ static void test_parallel(int i915, const struct intel_execution_engine2 *e)
.buffers_ptr = to_user_pointer(obj),
.buffer_count = ARRAY_SIZE(obj),
.flags = e2->flags | I915_EXEC_FENCE_SUBMIT,
+ .rsvd1 = ctx->id,
.rsvd2 = spin->out_fence,
};
uint32_t batch[16];
@@ -702,7 +721,8 @@ static void test_parallel(int i915, const struct intel_execution_engine2 *e)
igt_spin_free(i915, spin);
}
-static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
+static void test_concurrent(int i915, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
struct drm_i915_gem_relocation_entry reloc = {
@@ -722,10 +742,12 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
.buffers_ptr = to_user_pointer(obj),
.buffer_count = ARRAY_SIZE(obj),
.flags = e->flags | I915_EXEC_FENCE_SUBMIT,
+ .rsvd1 = ctx->id,
};
IGT_CORK_FENCE(cork);
uint32_t batch[16];
igt_spin_t *spin;
+ const intel_ctx_t *tmp_ctx;
uint32_t result;
int fence;
int i;
@@ -738,6 +760,7 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
fence = igt_cork_plug(&cork, i915),
spin = igt_spin_new(i915,
+ .ctx = ctx,
.engine = e->flags,
.fence = fence,
.flags = (IGT_SPIN_FENCE_OUT |
@@ -761,13 +784,14 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
batch[++i] = MI_BATCH_BUFFER_END;
gem_write(i915, obj[1].handle, 0, batch, sizeof(batch));
- execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ tmp_ctx = intel_ctx_create(i915, &ctx->cfg);
+ execbuf.rsvd1 = tmp_ctx->id;
execbuf.rsvd2 = spin->out_fence;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
gem_execbuf(i915, &execbuf);
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, tmp_ctx);
gem_close(i915, obj[1].handle);
/*
@@ -796,7 +820,7 @@ static void test_concurrent(int i915, const struct intel_execution_engine2 *e)
igt_spin_free(i915, spin);
}
-static void test_submit_chain(int i915)
+static void test_submit_chain(int i915, const intel_ctx_t *ctx)
{
const struct intel_execution_engine2 *e;
igt_spin_t *spin, *sn;
@@ -807,8 +831,9 @@ static void test_submit_chain(int i915)
/* Check that we can simultaneously launch spinners on each engine */
fence = igt_cork_plug(&cork, i915);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
spin = igt_spin_new(i915,
+ .ctx = ctx,
.engine = e->flags,
.fence = fence,
.flags = (IGT_SPIN_POLL_RUN |
@@ -848,7 +873,8 @@ static uint32_t batch_create(int fd)
return handle;
}
-static void test_keep_in_fence(int fd, const struct intel_execution_engine2 *e)
+static void test_keep_in_fence(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
struct sigaction sa = { .sa_handler = alarm_handler };
struct drm_i915_gem_exec_object2 obj = {
@@ -858,13 +884,14 @@ static void test_keep_in_fence(int fd, const struct intel_execution_engine2 *e)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = e->flags | I915_EXEC_FENCE_OUT,
+ .rsvd1 = ctx->id,
};
unsigned long count, last;
struct itimerval itv;
igt_spin_t *spin;
int fence;
- spin = igt_spin_new(fd, .engine = e->flags);
+ spin = igt_spin_new(fd, .ctx = ctx, .engine = e->flags);
gem_execbuf_wr(fd, &execbuf);
fence = upper_32_bits(execbuf.rsvd2);
@@ -916,7 +943,8 @@ static void test_keep_in_fence(int fd, const struct intel_execution_engine2 *e)
}
#define EXPIRED 0x10000
-static void test_long_history(int fd, long ring_size, unsigned flags)
+static void test_long_history(int fd, const intel_ctx_t *ctx,
+ long ring_size, unsigned flags)
{
const uint32_t sz = 1 << 20;
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -933,7 +961,7 @@ static void test_long_history(int fd, long ring_size, unsigned flags)
limit = ring_size / 3;
nengine = 0;
- __for_each_physical_engine(fd, e)
+ for_each_ctx_engine(fd, ctx, e)
engines[nengine++] = e->flags;
igt_require(nengine);
@@ -947,6 +975,7 @@ static void test_long_history(int fd, long ring_size, unsigned flags)
execbuf.buffers_ptr = to_user_pointer(&obj[1]);
execbuf.buffer_count = 1;
execbuf.flags = I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf_wr(fd, &execbuf);
all_fences = execbuf.rsvd2 >> 32;
@@ -957,7 +986,8 @@ static void test_long_history(int fd, long ring_size, unsigned flags)
obj[0].handle = igt_cork_plug(&c, fd);
igt_until_timeout(5) {
- execbuf.rsvd1 = gem_context_clone_with_engines(fd, 0);
+ const intel_ctx_t *tmp_ctx = intel_ctx_create(fd, &ctx->cfg);
+ execbuf.rsvd1 = tmp_ctx->id;
for (n = 0; n < nengine; n++) {
struct sync_merge_data merge;
@@ -978,7 +1008,7 @@ static void test_long_history(int fd, long ring_size, unsigned flags)
all_fences = merge.fence;
}
- gem_context_destroy(fd, execbuf.rsvd1);
+ intel_ctx_destroy(fd, tmp_ctx);
if (!--limit)
break;
}
@@ -992,7 +1022,7 @@ static void test_long_history(int fd, long ring_size, unsigned flags)
execbuf.buffers_ptr = to_user_pointer(&obj[1]);
execbuf.buffer_count = 1;
execbuf.rsvd2 = all_fences;
- execbuf.rsvd1 = 0;
+ execbuf.rsvd1 = ctx->id;
for (s = 0; s < ring_size; s++) {
for (n = 0; n < nengine; n++) {
@@ -1258,7 +1288,7 @@ static void test_syncobj_signal(int fd)
syncobj_destroy(fd, fence.handle);
}
-static void test_syncobj_wait(int fd)
+static void test_syncobj_wait(int fd, const intel_ctx_t *ctx)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj;
@@ -1300,12 +1330,13 @@ static void test_syncobj_wait(int fd)
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
n = 0;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
obj.handle = gem_create(fd, 4096);
gem_write(fd, obj.handle, 0, &bbe, sizeof(bbe));
/* Now wait upon the blocked engine */
execbuf.flags = I915_EXEC_FENCE_ARRAY | e->flags;
+ execbuf.rsvd1 = ctx->id;
execbuf.cliprects_ptr = to_user_pointer(&fence);
execbuf.num_cliprects = 1;
fence.flags = I915_EXEC_FENCE_WAIT;
@@ -1998,7 +2029,7 @@ static void test_syncobj_timeline_signal(int fd)
static const char *test_syncobj_timeline_wait_desc =
"Verifies that waiting on a timeline syncobj point between engines"
" works";
-static void test_syncobj_timeline_wait(int fd)
+static void test_syncobj_timeline_wait(int fd, const intel_ctx_t *ctx)
{
const uint32_t bbe[2] = {
MI_BATCH_BUFFER_END,
@@ -2025,7 +2056,7 @@ static void test_syncobj_timeline_wait(int fd)
gem_quiescent_gpu(fd);
- spin = igt_spin_new(fd, .engine = ALL_ENGINES);
+ spin = igt_spin_new(fd, .ctx = ctx, .engine = ALL_ENGINES);
memset(&timeline_fences, 0, sizeof(timeline_fences));
timeline_fences.base.name = DRM_I915_GEM_EXECBUFFER_EXT_TIMELINE_FENCES;
@@ -2047,12 +2078,13 @@ static void test_syncobj_timeline_wait(int fd)
gem_close(fd, obj.handle);
n = 0;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
obj.handle = gem_create(fd, 4096);
gem_write(fd, obj.handle, 0, bbe, sizeof(bbe));
/* Now wait upon the blocked engine */
execbuf.flags = I915_EXEC_USE_EXTENSIONS | e->flags;
+ execbuf.rsvd1 = ctx->id;
execbuf.cliprects_ptr = to_user_pointer(&timeline_fences);
execbuf.num_cliprects = 0;
fence.flags = I915_EXEC_FENCE_WAIT;
@@ -2342,9 +2374,10 @@ static void test_syncobj_timeline_multiple_ext_nodes(int fd)
struct inter_engine_context {
int fd;
+ const intel_ctx_cfg_t *cfg;
struct {
- uint32_t context;
+ const intel_ctx_t *ctx;
} iterations[9];
struct intel_engine_data engines;
@@ -2368,7 +2401,7 @@ struct inter_engine_context {
void *jump_ptr;
void *timestamp2_ptr;
- uint32_t wait_context;
+ const intel_ctx_t *wait_ctx;
uint32_t wait_timeline;
struct drm_i915_gem_exec_object2 engine_counter_object;
@@ -2496,7 +2529,7 @@ static void wait_engine(struct inter_engine_context *context,
.buffers_ptr = to_user_pointer(&objects[0]),
.buffer_count = 2,
.flags = I915_EXEC_HANDLE_LUT,
- .rsvd1 = context->wait_context,
+ .rsvd1 = context->wait_ctx->id,
.batch_len = context->wait_bb_len,
};
@@ -2563,7 +2596,7 @@ static void build_increment_engine_bb(struct inter_engine_batches *batch,
}
static void increment_engine(struct inter_engine_context *context,
- uint32_t gem_context,
+ const intel_ctx_t *ctx,
uint32_t read0_engine_idx,
uint32_t read1_engine_idx,
uint32_t write_engine_idx,
@@ -2586,7 +2619,7 @@ static void increment_engine(struct inter_engine_context *context,
.buffers_ptr = to_user_pointer(&objects[0]),
.buffer_count = ARRAY_SIZE(objects),
.flags = I915_EXEC_HANDLE_LUT,
- .rsvd1 = gem_context,
+ .rsvd1 = ctx->id,
.batch_len = batch->increment_bb_len,
};
@@ -2660,24 +2693,26 @@ get_cs_timestamp_frequency(int fd)
igt_skip("Kernel with PARAM_CS_TIMESTAMP_FREQUENCY support required\n");
}
-static void setup_timeline_chain_engines(struct inter_engine_context *context, int fd)
+static void setup_timeline_chain_engines(struct inter_engine_context *context, int fd,
+ const intel_ctx_cfg_t *cfg)
{
memset(context, 0, sizeof(*context));
context->fd = fd;
- context->engines = intel_init_engine_list(fd, 0);
+ context->cfg = cfg;
+ context->engines = intel_engine_list_for_ctx_cfg(fd, cfg);
igt_require(context->engines.nengines > 1);
- context->wait_context = gem_context_clone_with_engines(fd, 0);
+ context->wait_ctx = intel_ctx_create(fd, cfg);
context->wait_timeline = syncobj_create(fd, 0);
context->engine_counter_object.handle = gem_create(fd, 4096);
for (uint32_t i = 0; i < ARRAY_SIZE(context->iterations); i++) {
- context->iterations[i].context = gem_context_clone_with_engines(fd, 0);
+ context->iterations[i].ctx = intel_ctx_create(fd, context->cfg);
/* Give a different priority to all contexts. */
- gem_context_set_priority(fd, context->iterations[i].context,
+ gem_context_set_priority(fd, context->iterations[i].ctx->id,
I915_CONTEXT_MAX_USER_PRIORITY - ARRAY_SIZE(context->iterations) + i);
}
@@ -2718,10 +2753,10 @@ static void teardown_timeline_chain_engines(struct inter_engine_context *context
gem_close(context->fd, context->engine_counter_object.handle);
for (uint32_t i = 0; i < ARRAY_SIZE(context->iterations); i++) {
- gem_context_destroy(context->fd, context->iterations[i].context);
+ intel_ctx_destroy(context->fd, context->iterations[i].ctx);
}
- gem_context_destroy(context->fd, context->wait_context);
+ intel_ctx_destroy(context->fd, context->wait_ctx);
syncobj_destroy(context->fd, context->wait_timeline);
gem_close(context->fd, context->wait_bb_handle);
free(context->wait_bb);
@@ -2736,12 +2771,12 @@ static void teardown_timeline_chain_engines(struct inter_engine_context *context
free(context->batches);
}
-static void test_syncobj_timeline_chain_engines(int fd)
+static void test_syncobj_timeline_chain_engines(int fd, const intel_ctx_cfg_t *cfg)
{
struct inter_engine_context ctx;
uint64_t *counter_output;
- setup_timeline_chain_engines(&ctx, fd);
+ setup_timeline_chain_engines(&ctx, fd, cfg);
/*
* Delay all the other operations by making them depend on an
@@ -2767,7 +2802,7 @@ static void test_syncobj_timeline_chain_engines(int fd)
iter == 0 && engine == 0 ?
1 : (engine == 0 ? iter : (iter + 1));
- increment_engine(&ctx, ctx.iterations[iter].context,
+ increment_engine(&ctx, ctx.iterations[iter].ctx,
prev_prev_engine /* read0 engine */,
prev_engine /* read1 engine */,
engine /* write engine */,
@@ -2796,12 +2831,12 @@ static void test_syncobj_timeline_chain_engines(int fd)
teardown_timeline_chain_engines(&ctx);
}
-static void test_syncobj_stationary_timeline_chain_engines(int fd)
+static void test_syncobj_stationary_timeline_chain_engines(int fd, const intel_ctx_cfg_t *cfg)
{
struct inter_engine_context ctx;
uint64_t *counter_output;
- setup_timeline_chain_engines(&ctx, fd);
+ setup_timeline_chain_engines(&ctx, fd, cfg);
/*
* Delay all the other operations by making them depend on an
@@ -2833,7 +2868,7 @@ static void test_syncobj_stationary_timeline_chain_engines(int fd)
iter == 0 && engine == 0 ?
1 : 10;
- increment_engine(&ctx, ctx.iterations[iter].context,
+ increment_engine(&ctx, ctx.iterations[iter].ctx,
prev_prev_engine /* read0 engine */,
prev_engine /* read1 engine */,
engine /* write engine */,
@@ -2857,12 +2892,12 @@ static void test_syncobj_stationary_timeline_chain_engines(int fd)
teardown_timeline_chain_engines(&ctx);
}
-static void test_syncobj_backward_timeline_chain_engines(int fd)
+static void test_syncobj_backward_timeline_chain_engines(int fd, const intel_ctx_cfg_t *cfg)
{
struct inter_engine_context ctx;
uint64_t *counter_output;
- setup_timeline_chain_engines(&ctx, fd);
+ setup_timeline_chain_engines(&ctx, fd, cfg);
/*
* Delay all the other operations by making them depend on an
@@ -2894,7 +2929,7 @@ static void test_syncobj_backward_timeline_chain_engines(int fd)
iter == 0 && engine == 0 ?
1 : 1;
- increment_engine(&ctx, ctx.iterations[iter].context,
+ increment_engine(&ctx, ctx.iterations[iter].ctx,
prev_prev_engine /* read0 engine */,
prev_engine /* read1 engine */,
engine /* write engine */,
@@ -2921,6 +2956,7 @@ static void test_syncobj_backward_timeline_chain_engines(int fd)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int i915 = -1;
igt_fixture {
@@ -2928,6 +2964,7 @@ igt_main
igt_require_gem(i915);
igt_require(gem_has_exec_fence(i915));
gem_require_mmap_wc(i915);
+ ctx = intel_ctx_create_all_physical(i915);
gem_submission_print_method(i915);
}
@@ -2940,19 +2977,19 @@ igt_main
}
igt_subtest("basic-busy-all")
- test_fence_busy_all(i915, 0);
+ test_fence_busy_all(i915, ctx, 0);
igt_subtest("basic-wait-all")
- test_fence_busy_all(i915, WAIT);
+ test_fence_busy_all(i915, ctx, WAIT);
igt_fixture {
igt_stop_hang_detector();
- hang = igt_allow_hang(i915, 0, 0);
+ hang = igt_allow_hang(i915, ctx->id, 0);
}
igt_subtest("busy-hang-all")
- test_fence_busy_all(i915, HANG);
+ test_fence_busy_all(i915, ctx, HANG);
igt_subtest("wait-hang-all")
- test_fence_busy_all(i915, WAIT | HANG);
+ test_fence_busy_all(i915, ctx, WAIT | HANG);
igt_fixture {
igt_disallow_hang(i915, hang);
@@ -2960,7 +2997,7 @@ igt_main
}
igt_subtest_group {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_fixture {
igt_require(gem_class_can_store_dword(i915, e->class));
}
@@ -2971,42 +3008,42 @@ igt_main
}
igt_subtest_with_dynamic("basic-busy") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_fence_busy(i915, e, 0);
+ test_fence_busy(i915, ctx, e, 0);
}
}
igt_subtest_with_dynamic("basic-wait") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_fence_busy(i915, e, WAIT);
+ test_fence_busy(i915, ctx, e, WAIT);
}
}
igt_subtest_with_dynamic("basic-await") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_fence_await(i915, e, 0);
+ test_fence_await(i915, ctx, e, 0);
}
}
igt_subtest_with_dynamic("nb-await") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_fence_await(i915,
- e, NONBLOCK);
+ test_fence_await(i915, ctx, e,
+ NONBLOCK);
}
}
igt_subtest_with_dynamic("keep-in-fence") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_keep_in_fence(i915, e);
+ test_keep_in_fence(i915, ctx, e);
}
}
igt_subtest_with_dynamic("parallel") {
igt_require(has_submit_fence(i915));
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name) {
igt_until_timeout(2)
- test_parallel(i915, e);
+ test_parallel(i915, ctx, e);
}
}
}
@@ -3015,9 +3052,9 @@ igt_main
igt_require(has_submit_fence(i915));
igt_require(gem_scheduler_has_semaphores(i915));
igt_require(gem_scheduler_has_preemption(i915));
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_concurrent(i915, e);
+ test_concurrent(i915, ctx, e);
}
}
@@ -3026,9 +3063,9 @@ igt_main
igt_require(gem_scheduler_has_preemption(i915));
igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_submit_fence(i915, e->flags);
+ test_submit_fence(i915, ctx, e);
}
}
@@ -3037,9 +3074,9 @@ igt_main
igt_require(gem_scheduler_has_preemption(i915));
igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_submitN(i915, e->flags, 3);
+ test_submitN(i915, ctx, e, 3);
}
}
@@ -3048,15 +3085,15 @@ igt_main
igt_require(gem_scheduler_has_preemption(i915));
igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_submitN(i915, e->flags, 67);
+ test_submitN(i915, ctx, e, 67);
}
}
igt_subtest("submit-chain") {
igt_require(has_submit_fence(i915));
- test_submit_chain(i915);
+ test_submit_chain(i915, ctx);
}
igt_fixture {
@@ -3072,27 +3109,27 @@ igt_main
}
igt_subtest_with_dynamic("busy-hang") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_fence_busy(i915, e, HANG);
+ test_fence_busy(i915, ctx, e, HANG);
}
}
igt_subtest_with_dynamic("wait-hang") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_fence_busy(i915, e, HANG | WAIT);
+ test_fence_busy(i915, ctx, e, HANG | WAIT);
}
}
igt_subtest_with_dynamic("await-hang") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_fence_await(i915, e, HANG);
+ test_fence_await(i915, ctx, e, HANG);
}
}
igt_subtest_with_dynamic("nb-await-hang") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_fence_await(i915, e, NONBLOCK | HANG);
+ test_fence_await(i915, ctx, e, NONBLOCK | HANG);
}
}
igt_fixture {
@@ -3105,7 +3142,8 @@ igt_main
long ring_size = 0;
igt_fixture {
- ring_size = gem_submission_measure(i915, NULL, ALL_ENGINES);
+ ring_size = gem_submission_measure(i915, &ctx->cfg,
+ ALL_ENGINES);
igt_info("Ring size: %ld batches\n", ring_size);
igt_require(ring_size);
@@ -3113,10 +3151,10 @@ igt_main
}
igt_subtest("long-history")
- test_long_history(i915, ring_size, 0);
+ test_long_history(i915, ctx, ring_size, 0);
igt_subtest("expired-history")
- test_long_history(i915, ring_size, EXPIRED);
+ test_long_history(i915, ctx, ring_size, EXPIRED);
}
igt_subtest_group { /* syncobj */
@@ -3142,7 +3180,7 @@ igt_main
test_syncobj_signal(i915);
igt_subtest("syncobj-wait")
- test_syncobj_wait(i915);
+ test_syncobj_wait(i915, ctx);
igt_subtest("syncobj-export")
test_syncobj_export(i915);
@@ -3190,7 +3228,7 @@ igt_main
igt_describe(test_syncobj_timeline_wait_desc);
igt_subtest("syncobj-timeline-wait")
- test_syncobj_timeline_wait(i915);
+ test_syncobj_timeline_wait(i915, ctx);
igt_describe(test_syncobj_timeline_export_desc);
igt_subtest("syncobj-timeline-export")
@@ -3215,13 +3253,13 @@ igt_main
}
igt_subtest("syncobj-timeline-chain-engines")
- test_syncobj_timeline_chain_engines(i915);
+ test_syncobj_timeline_chain_engines(i915, &ctx->cfg);
igt_subtest("syncobj-stationary-timeline-chain-engines")
- test_syncobj_stationary_timeline_chain_engines(i915);
+ test_syncobj_stationary_timeline_chain_engines(i915, &ctx->cfg);
igt_subtest("syncobj-backward-timeline-chain-engines")
- test_syncobj_backward_timeline_chain_engines(i915);
+ test_syncobj_backward_timeline_chain_engines(i915, &ctx->cfg);
}
igt_fixture {
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 04/81] tests/i915/gem_exec_schedule: Convert to intel_ctx_t (v3)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (2 preceding siblings ...)
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 03/81] tests/i915/gem_exec_fence: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 05/81] tests/i915/perf_pmu: " Jason Ekstrand
` (79 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
v2 (Jason Ekstrand):
- Pass the context config to gem_submission_measure()
v3 (Jason Ekstrand):
- Use a shared VM in the noreorder test
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_schedule.c | 902 +++++++++++++++++----------------
1 file changed, 478 insertions(+), 424 deletions(-)
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index 64a693ba5..3a51b51da 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -36,11 +36,13 @@
#include "i915/gem.h"
#include "i915/gem_create.h"
+#include "i915/gem_vm.h"
#include "igt.h"
#include "igt_rand.h"
#include "igt_rapl.h"
#include "igt_sysfs.h"
#include "igt_vgem.h"
+#include "intel_ctx.h"
#include "sw_sync.h"
#define LO 0
@@ -52,7 +54,6 @@
#define MAX_CONTEXTS 1024
#define MAX_ELSP_QLEN 16
-#define MAX_ENGINES (I915_EXEC_RING_MASK + 1)
#define MI_SEMAPHORE_WAIT (0x1c << 23)
#define MI_SEMAPHORE_POLL (1 << 15)
@@ -90,7 +91,7 @@ void __sync_read_u32_count(int fd, uint32_t handle, uint32_t *dst, uint64_t size
gem_read(fd, handle, 0, dst, size);
}
-static uint32_t __store_dword(int fd, uint32_t ctx, unsigned ring,
+static uint32_t __store_dword(int fd, const intel_ctx_t *ctx, unsigned ring,
uint32_t target, uint32_t offset, uint32_t value,
uint32_t cork, int fence, unsigned write_domain)
{
@@ -107,7 +108,7 @@ static uint32_t __store_dword(int fd, uint32_t ctx, unsigned ring,
execbuf.flags = ring;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
- execbuf.rsvd1 = ctx;
+ execbuf.rsvd1 = ctx->id;
if (fence != -1) {
execbuf.flags |= I915_EXEC_FENCE_IN;
@@ -154,7 +155,7 @@ static uint32_t __store_dword(int fd, uint32_t ctx, unsigned ring,
return obj[2].handle;
}
-static void store_dword(int fd, uint32_t ctx, unsigned ring,
+static void store_dword(int fd, const intel_ctx_t *ctx, unsigned ring,
uint32_t target, uint32_t offset, uint32_t value,
unsigned write_domain)
{
@@ -163,7 +164,7 @@ static void store_dword(int fd, uint32_t ctx, unsigned ring,
0, -1, write_domain));
}
-static void store_dword_plug(int fd, uint32_t ctx, unsigned ring,
+static void store_dword_plug(int fd, const intel_ctx_t *ctx, unsigned ring,
uint32_t target, uint32_t offset, uint32_t value,
uint32_t cork, unsigned write_domain)
{
@@ -172,7 +173,7 @@ static void store_dword_plug(int fd, uint32_t ctx, unsigned ring,
cork, -1, write_domain));
}
-static void store_dword_fenced(int fd, uint32_t ctx, unsigned ring,
+static void store_dword_fenced(int fd, const intel_ctx_t *ctx, unsigned ring,
uint32_t target, uint32_t offset, uint32_t value,
int fence, unsigned write_domain)
{
@@ -181,21 +182,24 @@ static void store_dword_fenced(int fd, uint32_t ctx, unsigned ring,
0, fence, write_domain));
}
-static uint32_t create_highest_priority(int fd)
+static const intel_ctx_t *
+create_highest_priority(int fd, const intel_ctx_cfg_t *cfg)
{
- uint32_t ctx = gem_context_clone_with_engines(fd, 0);
+ const intel_ctx_t *ctx = intel_ctx_create(fd, cfg);
/*
* If there is no priority support, all contexts will have equal
* priority (and therefore the max user priority), so no context
* can overtake us, and we effectively can form a plug.
*/
- __gem_context_set_priority(fd, ctx, MAX_PRIO);
+ __gem_context_set_priority(fd, ctx->id, MAX_PRIO);
return ctx;
}
-static void unplug_show_queue(int fd, struct igt_cork *c, unsigned int engine)
+static void unplug_show_queue(int fd, struct igt_cork *c,
+ const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
igt_spin_t *spin[MAX_ELSP_QLEN];
int max = MAX_ELSP_QLEN;
@@ -205,12 +209,9 @@ static void unplug_show_queue(int fd, struct igt_cork *c, unsigned int engine)
max = 1;
for (int n = 0; n < max; n++) {
- const struct igt_spin_factory opts = {
- .ctx_id = create_highest_priority(fd),
- .engine = engine,
- };
- spin[n] = __igt_spin_factory(fd, &opts);
- gem_context_destroy(fd, opts.ctx_id);
+ const intel_ctx_t *ctx = create_highest_priority(fd, cfg);
+ spin[n] = __igt_spin_new(fd, .ctx = ctx, .engine = engine);
+ intel_ctx_destroy(fd, ctx);
}
igt_cork_unplug(c); /* batches will now be queued on the engine */
@@ -221,7 +222,7 @@ static void unplug_show_queue(int fd, struct igt_cork *c, unsigned int engine)
}
-static void fifo(int fd, unsigned ring)
+static void fifo(int fd, const intel_ctx_t *ctx, unsigned ring)
{
IGT_CORK_FENCE(cork);
uint32_t scratch;
@@ -233,10 +234,10 @@ static void fifo(int fd, unsigned ring)
fence = igt_cork_plug(&cork, fd);
/* Same priority, same timeline, final result will be the second eb */
- store_dword_fenced(fd, 0, ring, scratch, 0, 1, fence, 0);
- store_dword_fenced(fd, 0, ring, scratch, 0, 2, fence, 0);
+ store_dword_fenced(fd, ctx, ring, scratch, 0, 1, fence, 0);
+ store_dword_fenced(fd, ctx, ring, scratch, 0, 2, fence, 0);
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, &ctx->cfg, ring);
close(fence);
result = __sync_read_u32(fd, scratch, 0);
@@ -250,7 +251,8 @@ enum implicit_dir {
WRITE_READ = 0x2,
};
-static void implicit_rw(int i915, unsigned ring, enum implicit_dir dir)
+static void implicit_rw(int i915, const intel_ctx_t *ctx, unsigned int ring,
+ enum implicit_dir dir)
{
const struct intel_execution_engine2 *e;
IGT_CORK_FENCE(cork);
@@ -260,7 +262,7 @@ static void implicit_rw(int i915, unsigned ring, enum implicit_dir dir)
int fence;
count = 0;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (e->flags == ring)
continue;
@@ -275,28 +277,28 @@ static void implicit_rw(int i915, unsigned ring, enum implicit_dir dir)
fence = igt_cork_plug(&cork, i915);
if (dir & WRITE_READ)
- store_dword_fenced(i915, 0,
+ store_dword_fenced(i915, ctx,
ring, scratch, 0, ~ring,
fence, I915_GEM_DOMAIN_RENDER);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (e->flags == ring)
continue;
if (!gem_class_can_store_dword(i915, e->class))
continue;
- store_dword_fenced(i915, 0,
+ store_dword_fenced(i915, ctx,
e->flags, scratch, 0, e->flags,
fence, 0);
}
if (dir & READ_WRITE)
- store_dword_fenced(i915, 0,
+ store_dword_fenced(i915, ctx,
ring, scratch, 0, ring,
fence, I915_GEM_DOMAIN_RENDER);
- unplug_show_queue(i915, &cork, ring);
+ unplug_show_queue(i915, &cork, &ctx->cfg, ring);
close(fence);
result = __sync_read_u32(i915, scratch, 0);
@@ -308,7 +310,8 @@ static void implicit_rw(int i915, unsigned ring, enum implicit_dir dir)
igt_assert_eq_u32(result, ring);
}
-static void independent(int fd, unsigned int engine, unsigned long flags)
+static void independent(int fd, const intel_ctx_t *ctx, unsigned int engine,
+ unsigned long flags)
{
const struct intel_execution_engine2 *e;
IGT_CORK_FENCE(cork);
@@ -324,7 +327,7 @@ static void independent(int fd, unsigned int engine, unsigned long flags)
fence = igt_cork_plug(&cork, fd);
/* Check that we can submit to engine while all others are blocked */
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (e->flags == engine)
continue;
@@ -333,6 +336,7 @@ static void independent(int fd, unsigned int engine, unsigned long flags)
if (spin == NULL) {
spin = __igt_spin_new(fd,
+ .ctx = ctx,
.engine = e->flags,
.flags = flags);
} else {
@@ -344,14 +348,14 @@ static void independent(int fd, unsigned int engine, unsigned long flags)
gem_execbuf(fd, &eb);
}
- store_dword_fenced(fd, 0, e->flags, scratch, 0, e->flags, fence, 0);
+ store_dword_fenced(fd, ctx, e->flags, scratch, 0, e->flags, fence, 0);
}
igt_require(spin);
/* Same priority, but different timeline (as different engine) */
- batch = __store_dword(fd, 0, engine, scratch, 0, engine, 0, fence, 0);
+ batch = __store_dword(fd, ctx, engine, scratch, 0, engine, 0, fence, 0);
- unplug_show_queue(fd, &cork, engine);
+ unplug_show_queue(fd, &cork, &ctx->cfg, engine);
close(fence);
gem_sync(fd, batch);
@@ -374,11 +378,12 @@ static void independent(int fd, unsigned int engine, unsigned long flags)
gem_close(fd, scratch);
}
-static void smoketest(int fd, unsigned ring, unsigned timeout)
+static void smoketest(int fd, const intel_ctx_cfg_t *cfg,
+ unsigned ring, unsigned timeout)
{
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
const struct intel_execution_engine2 *e;
- unsigned engines[MAX_ENGINES];
+ unsigned engines[GEM_MAX_ENGINES];
unsigned nengine;
unsigned engine;
uint32_t scratch;
@@ -386,7 +391,7 @@ static void smoketest(int fd, unsigned ring, unsigned timeout)
nengine = 0;
if (ring == ALL_ENGINES) {
- __for_each_physical_engine(fd, e)
+ for_each_ctx_cfg_engine(fd, cfg, e)
if (gem_class_can_store_dword(fd, e->class))
engines[nengine++] = e->flags;
} else {
@@ -397,16 +402,16 @@ static void smoketest(int fd, unsigned ring, unsigned timeout)
scratch = gem_create(fd, 4096);
igt_fork(child, ncpus) {
unsigned long count = 0;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
hars_petruska_f54_1_random_perturb(child);
- ctx = gem_context_clone_with_engines(fd, 0);
+ ctx = intel_ctx_create(fd, cfg);
igt_until_timeout(timeout) {
int prio;
prio = hars_petruska_f54_1_random_unsafe_max(MAX_PRIO - MIN_PRIO) + MIN_PRIO;
- gem_context_set_priority(fd, ctx, prio);
+ gem_context_set_priority(fd, ctx->id, prio);
engine = engines[hars_petruska_f54_1_random_unsafe_max(nengine)];
store_dword(fd, ctx, engine, scratch,
@@ -417,7 +422,7 @@ static void smoketest(int fd, unsigned ring, unsigned timeout)
8*child + 4, count++,
0);
}
- gem_context_destroy(fd, ctx);
+ intel_ctx_destroy(fd, ctx);
}
igt_waitchildren();
@@ -484,7 +489,8 @@ static uint32_t timeslicing_batches(int i915, uint32_t *offset)
return handle;
}
-static void timeslice(int i915, unsigned int engine)
+static void timeslice(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
unsigned int offset = 24 << 20;
struct drm_i915_gem_exec_object2 obj = {
@@ -495,6 +501,7 @@ static void timeslice(int i915, unsigned int engine)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
};
+ const intel_ctx_t *ctx;
uint32_t *result;
int out;
@@ -517,12 +524,13 @@ static void timeslice(int i915, unsigned int engine)
/* No coupling between requests; free to timeslice */
- execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ ctx = intel_ctx_create(i915, cfg);
+ execbuf.rsvd1 = ctx->id;
execbuf.rsvd2 >>= 32;
execbuf.flags = engine | I915_EXEC_FENCE_OUT;
execbuf.batch_start_offset = offset;
gem_execbuf_wr(i915, &execbuf);
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, ctx);
gem_sync(i915, obj.handle);
gem_close(i915, obj.handle);
@@ -576,7 +584,8 @@ static uint32_t timesliceN_batches(int i915, uint32_t offset, int count)
return handle;
}
-static void timesliceN(int i915, unsigned int engine, int count)
+static void timesliceN(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine, int count)
{
const unsigned int sz = ALIGN((count + 1) * 1024, 4096);
unsigned int offset = 24 << 20;
@@ -592,6 +601,7 @@ static void timesliceN(int i915, unsigned int engine, int count)
};
uint32_t *result =
gem_mmap__device_coherent(i915, obj.handle, 0, sz, PROT_READ);
+ const intel_ctx_t *ctx;
int fence[count];
/*
@@ -607,10 +617,11 @@ static void timesliceN(int i915, unsigned int engine, int count)
/* No coupling between requests; free to timeslice */
for (int i = 0; i < count; i++) {
- execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ ctx = intel_ctx_create(i915, cfg);
+ execbuf.rsvd1 = ctx->id;
execbuf.batch_start_offset = (i + 1) * 1024;;
gem_execbuf_wr(i915, &execbuf);
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, ctx);
fence[i] = execbuf.rsvd2 >> 32;
}
@@ -628,30 +639,31 @@ static void timesliceN(int i915, unsigned int engine, int count)
munmap(result, sz);
}
-static void lateslice(int i915, unsigned int engine, unsigned long flags)
+static void lateslice(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine, unsigned long flags)
{
+ const intel_ctx_t *ctx;
igt_spin_t *spin[3];
- uint32_t ctx;
igt_require(gem_scheduler_has_timeslicing(i915));
igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8);
- ctx = gem_context_create(i915);
- spin[0] = igt_spin_new(i915, .ctx_id = ctx, .engine = engine,
+ ctx = intel_ctx_create(i915, cfg);
+ spin[0] = igt_spin_new(i915, .ctx = ctx, .engine = engine,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT |
flags));
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_spin_busywait_until_started(spin[0]);
- ctx = gem_context_create(i915);
- spin[1] = igt_spin_new(i915, .ctx_id = ctx, .engine = engine,
+ ctx = intel_ctx_create(i915, cfg);
+ spin[1] = igt_spin_new(i915, .ctx = ctx, .engine = engine,
.fence = spin[0]->out_fence,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_IN |
flags));
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
usleep(5000); /* give some time for the new spinner to be scheduled */
@@ -662,10 +674,10 @@ static void lateslice(int i915, unsigned int engine, unsigned long flags)
* third spinner we then expect timeslicing to be real enabled.
*/
- ctx = gem_context_create(i915);
- spin[2] = igt_spin_new(i915, .ctx_id = ctx, .engine = engine,
+ ctx = intel_ctx_create(i915, cfg);
+ spin[2] = igt_spin_new(i915, .ctx = ctx, .engine = engine,
.flags = IGT_SPIN_POLL_RUN | flags);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_spin_busywait_until_started(spin[2]);
@@ -687,7 +699,7 @@ static void lateslice(int i915, unsigned int engine, unsigned long flags)
}
static void cancel_spinner(int i915,
- uint32_t ctx, unsigned int engine,
+ const intel_ctx_t *ctx, unsigned int engine,
igt_spin_t *spin)
{
struct drm_i915_gem_exec_object2 obj = {
@@ -697,7 +709,7 @@ static void cancel_spinner(int i915,
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = engine | I915_EXEC_FENCE_SUBMIT,
- .rsvd1 = ctx, /* same vm */
+ .rsvd1 = ctx->id, /* same vm */
.rsvd2 = spin->out_fence,
};
uint32_t *map, *cs;
@@ -718,21 +730,18 @@ static void cancel_spinner(int i915,
gem_close(i915, obj.handle);
}
-static void submit_slice(int i915,
+static void submit_slice(int i915, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
unsigned int flags)
#define EARLY_SUBMIT 0x1
#define LATE_SUBMIT 0x2
#define USERPTR 0x4
{
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines , 1) = {};
const struct intel_execution_engine2 *cancel;
- struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_create(i915),
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&engines),
- .size = sizeof(engines),
+ intel_ctx_cfg_t engine_cfg = {
+ .num_engines = 1,
};
+ const intel_ctx_t *ctx;
/*
* When using a submit fence, we do not want to block concurrent work,
@@ -742,7 +751,7 @@ static void submit_slice(int i915,
igt_require(gem_scheduler_has_timeslicing(i915));
igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8);
- __for_each_physical_engine(i915, cancel) {
+ for_each_ctx_cfg_engine(i915, cfg, cancel) {
igt_spin_t *bg, *spin;
int timeline = -1;
int fence = -1;
@@ -759,10 +768,10 @@ static void submit_slice(int i915,
fence = sw_sync_timeline_create_fence(timeline, 1);
}
- engines.engines[0].engine_class = e->class;
- engines.engines[0].engine_instance = e->instance;
- gem_context_set_param(i915, &param);
- spin = igt_spin_new(i915, .ctx_id = param.ctx_id,
+ engine_cfg.engines[0].engine_class = e->class;
+ engine_cfg.engines[0].engine_instance = e->instance;
+ ctx = intel_ctx_create(i915, &engine_cfg);
+ spin = igt_spin_new(i915, .ctx = ctx,
.fence = fence,
.flags =
IGT_SPIN_POLL_RUN |
@@ -775,10 +784,13 @@ static void submit_slice(int i915,
if (flags & EARLY_SUBMIT)
igt_spin_busywait_until_started(spin);
- engines.engines[0].engine_class = cancel->class;
- engines.engines[0].engine_instance = cancel->instance;
- gem_context_set_param(i915, &param);
- cancel_spinner(i915, param.ctx_id, 0, spin);
+ intel_ctx_destroy(i915, ctx);
+
+ engine_cfg.engines[0].engine_class = cancel->class;
+ engine_cfg.engines[0].engine_instance = cancel->instance;
+ ctx = intel_ctx_create(i915, &engine_cfg);
+
+ cancel_spinner(i915, ctx, 0, spin);
if (timeline != -1)
close(timeline);
@@ -786,9 +798,9 @@ static void submit_slice(int i915,
gem_sync(i915, spin->handle);
igt_spin_free(i915, spin);
igt_spin_free(i915, bg);
- }
- gem_context_destroy(i915, param.ctx_id);
+ intel_ctx_destroy(i915, ctx);
+ }
}
static uint32_t __batch_create(int i915, uint32_t offset)
@@ -807,7 +819,8 @@ static uint32_t batch_create(int i915)
return __batch_create(i915, 0);
}
-static void semaphore_userlock(int i915, unsigned long flags)
+static void semaphore_userlock(int i915, const intel_ctx_t *ctx,
+ unsigned long flags)
{
const struct intel_execution_engine2 *e;
struct drm_i915_gem_exec_object2 obj = {
@@ -815,6 +828,7 @@ static void semaphore_userlock(int i915, unsigned long flags)
};
igt_spin_t *spin = NULL;
uint32_t scratch;
+ const intel_ctx_t *tmp_ctx;
igt_require(gem_scheduler_has_timeslicing(i915));
@@ -826,9 +840,10 @@ static void semaphore_userlock(int i915, unsigned long flags)
*/
scratch = gem_create(i915, 4096);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (!spin) {
spin = igt_spin_new(i915,
+ .ctx = ctx,
.dependency = scratch,
.engine = e->flags,
.flags = flags);
@@ -851,13 +866,13 @@ static void semaphore_userlock(int i915, unsigned long flags)
* on a HW semaphore) but it should not prevent any real work from
* taking precedence.
*/
- scratch = gem_context_clone_with_engines(i915, 0);
- __for_each_physical_engine(i915, e) {
+ tmp_ctx = intel_ctx_create(i915, &ctx->cfg);
+ for_each_ctx_engine(i915, ctx, e) {
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = e->flags,
- .rsvd1 = scratch,
+ .rsvd1 = tmp_ctx->id,
};
if (e->flags == (spin->execbuf.flags & I915_EXEC_RING_MASK))
@@ -865,14 +880,15 @@ static void semaphore_userlock(int i915, unsigned long flags)
gem_execbuf(i915, &execbuf);
}
- gem_context_destroy(i915, scratch);
+ intel_ctx_destroy(i915, tmp_ctx);
gem_sync(i915, obj.handle); /* to hang unless we can preempt */
gem_close(i915, obj.handle);
igt_spin_free(i915, spin);
}
-static void semaphore_codependency(int i915, unsigned long flags)
+static void semaphore_codependency(int i915, const intel_ctx_t *ctx,
+ unsigned long flags)
{
const struct intel_execution_engine2 *e;
struct {
@@ -891,8 +907,8 @@ static void semaphore_codependency(int i915, unsigned long flags)
*/
i = 0;
- __for_each_physical_engine(i915, e) {
- uint32_t ctx;
+ for_each_ctx_engine(i915, ctx, e) {
+ const intel_ctx_t *tmp_ctx;
if (!e->flags) {
igt_require(gem_class_can_store_dword(i915, e->class));
@@ -902,11 +918,11 @@ static void semaphore_codependency(int i915, unsigned long flags)
if (!gem_class_can_store_dword(i915, e->class))
continue;
- ctx = gem_context_clone_with_engines(i915, 0);
+ tmp_ctx = intel_ctx_create(i915, &ctx->cfg);
task[i].xcs =
__igt_spin_new(i915,
- .ctx_id = ctx,
+ .ctx = tmp_ctx,
.engine = e->flags,
.flags = IGT_SPIN_POLL_RUN | flags);
igt_spin_busywait_until_started(task[i].xcs);
@@ -914,11 +930,11 @@ static void semaphore_codependency(int i915, unsigned long flags)
/* Common rcs tasks will be queued in FIFO */
task[i].rcs =
__igt_spin_new(i915,
- .ctx_id = ctx,
+ .ctx = tmp_ctx,
.engine = 0,
.dependency = task[i].xcs->handle);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, tmp_ctx);
if (++i == ARRAY_SIZE(task))
break;
@@ -941,11 +957,13 @@ static void semaphore_codependency(int i915, unsigned long flags)
}
}
-static void semaphore_resolve(int i915, unsigned long flags)
+static void semaphore_resolve(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned long flags)
{
const struct intel_execution_engine2 *e;
const uint32_t SEMAPHORE_ADDR = 64 << 10;
- uint32_t semaphore, outer, inner, *sema;
+ uint32_t semaphore, *sema;
+ const intel_ctx_t *outer, *inner;
/*
* Userspace may submit batches that wait upon unresolved
@@ -959,13 +977,13 @@ static void semaphore_resolve(int i915, unsigned long flags)
igt_require(gem_scheduler_has_preemption(i915));
igt_require(intel_get_drm_devid(i915) >= 8); /* for MI_SEMAPHORE_WAIT */
- outer = gem_context_clone_with_engines(i915, 0);
- inner = gem_context_clone_with_engines(i915, 0);
+ outer = intel_ctx_create(i915, cfg);
+ inner = intel_ctx_create(i915, cfg);
semaphore = gem_create(i915, 4096);
sema = gem_mmap__wc(i915, semaphore, 0, 4096, PROT_WRITE);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, cfg, e) {
struct drm_i915_gem_exec_object2 obj[3];
struct drm_i915_gem_execbuffer2 eb;
uint32_t handle, cancel;
@@ -1020,7 +1038,7 @@ static void semaphore_resolve(int i915, unsigned long flags)
obj[2].handle = handle;
eb.buffer_count = 3;
eb.buffers_ptr = to_user_pointer(obj);
- eb.rsvd1 = outer;
+ eb.rsvd1 = outer->id;
gem_execbuf(i915, &eb);
/* Then add the GPU hang intermediatory */
@@ -1051,7 +1069,7 @@ static void semaphore_resolve(int i915, unsigned long flags)
obj[0].flags = EXEC_OBJECT_PINNED;
obj[1].handle = cancel;
eb.buffer_count = 2;
- eb.rsvd1 = inner;
+ eb.rsvd1 = inner->id;
gem_execbuf(i915, &eb);
gem_wait(i915, cancel, &poke); /* match sync's WAIT_PRIORITY */
gem_close(i915, cancel);
@@ -1066,22 +1084,23 @@ static void semaphore_resolve(int i915, unsigned long flags)
munmap(sema, 4096);
gem_close(i915, semaphore);
- gem_context_destroy(i915, inner);
- gem_context_destroy(i915, outer);
+ intel_ctx_destroy(i915, inner);
+ intel_ctx_destroy(i915, outer);
}
-static void semaphore_noskip(int i915, unsigned long flags)
+static void semaphore_noskip(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned long flags)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
const struct intel_execution_engine2 *outer, *inner;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
igt_require(gen >= 6); /* MI_STORE_DWORD_IMM convenience */
- ctx = gem_context_clone_with_engines(i915, 0);
+ ctx = intel_ctx_create(i915, cfg);
- __for_each_physical_engine(i915, outer) {
- __for_each_physical_engine(i915, inner) {
+ for_each_ctx_engine(i915, ctx, outer) {
+ for_each_ctx_engine(i915, ctx, inner) {
struct drm_i915_gem_exec_object2 obj[3];
struct drm_i915_gem_execbuffer2 eb;
uint32_t handle, *cs, *map;
@@ -1091,9 +1110,11 @@ static void semaphore_noskip(int i915, unsigned long flags)
!gem_class_can_store_dword(i915, inner->class))
continue;
- chain = __igt_spin_new(i915, .engine = outer->flags, .flags = flags);
+ chain = __igt_spin_new(i915, .ctx = ctx,
+ .engine = outer->flags, .flags = flags);
- spin = __igt_spin_new(i915, .engine = inner->flags, .flags = flags);
+ spin = __igt_spin_new(i915, .ctx = ctx,
+ .engine = inner->flags, .flags = flags);
igt_spin_end(spin); /* we just want its address for later */
gem_sync(i915, spin->handle);
igt_spin_reset(spin);
@@ -1126,7 +1147,7 @@ static void semaphore_noskip(int i915, unsigned long flags)
memset(&eb, 0, sizeof(eb));
eb.buffer_count = 3;
eb.buffers_ptr = to_user_pointer(obj);
- eb.rsvd1 = ctx;
+ eb.rsvd1 = ctx->id;
eb.flags = inner->flags;
gem_execbuf(i915, &eb);
@@ -1150,11 +1171,12 @@ static void semaphore_noskip(int i915, unsigned long flags)
}
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
static void
-noreorder(int i915, unsigned int engine, int prio, unsigned int flags)
+noreorder(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine, int prio, unsigned int flags)
#define CORKED 0x1
{
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
@@ -1166,24 +1188,25 @@ noreorder(int i915, unsigned int engine, int prio, unsigned int flags)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = engine,
- .rsvd1 = gem_context_clone_with_engines(i915, 0),
};
+ intel_ctx_cfg_t vm_cfg = *cfg;
+ const intel_ctx_t *ctx;
IGT_CORK_FENCE(cork);
uint32_t *map, *cs;
igt_spin_t *slice;
igt_spin_t *spin;
int fence = -1;
uint64_t addr;
- uint32_t ctx;
if (flags & CORKED)
fence = igt_cork_plug(&cork, i915);
- ctx = gem_context_clone(i915, execbuf.rsvd1,
- I915_CONTEXT_CLONE_ENGINES |
- I915_CONTEXT_CLONE_VM,
- 0);
- spin = igt_spin_new(i915, ctx,
+ if (gem_uses_full_ppgtt(i915))
+ vm_cfg.vm = gem_vm_create(i915);
+
+ ctx = intel_ctx_create(i915, &vm_cfg);
+
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.fence = fence,
.flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_FENCE_IN);
@@ -1192,7 +1215,7 @@ noreorder(int i915, unsigned int engine, int prio, unsigned int flags)
/* Loop around the engines, creating a chain of fences */
spin->execbuf.rsvd2 = (uint64_t)dup(spin->out_fence) << 32;
spin->execbuf.rsvd2 |= 0xffffffff;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (e->flags == engine)
continue;
@@ -1205,7 +1228,7 @@ noreorder(int i915, unsigned int engine, int prio, unsigned int flags)
}
close(spin->execbuf.rsvd2);
spin->execbuf.rsvd2 >>= 32;
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
/*
* Wait upon the fence chain, and try to terminate the spinner.
@@ -1238,11 +1261,13 @@ noreorder(int i915, unsigned int engine, int prio, unsigned int flags)
execbuf.rsvd2 = spin->execbuf.rsvd2;
execbuf.flags |= I915_EXEC_FENCE_IN;
- gem_context_set_priority(i915, execbuf.rsvd1, prio);
+ ctx = intel_ctx_create(i915, &vm_cfg);
+ gem_context_set_priority(i915, ctx->id, prio);
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(i915, &execbuf);
gem_close(i915, obj.handle);
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, ctx);
if (cork.fd != -1)
igt_cork_unplug(&cork);
@@ -1255,7 +1280,9 @@ noreorder(int i915, unsigned int engine, int prio, unsigned int flags)
*
* Without timeslices, fallback to waiting a second.
*/
+ ctx = intel_ctx_create(i915, &vm_cfg);
slice = igt_spin_new(i915,
+ .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_POLL_RUN);
igt_until_timeout(1) {
@@ -1263,6 +1290,10 @@ noreorder(int i915, unsigned int engine, int prio, unsigned int flags)
break;
}
igt_spin_free(i915, slice);
+ intel_ctx_destroy(i915, ctx);
+
+ if (vm_cfg.vm)
+ gem_vm_destroy(i915, vm_cfg.vm);
/* Check the store did not run before the spinner */
igt_assert_eq(sync_fence_status(spin->out_fence), 0);
@@ -1270,20 +1301,21 @@ noreorder(int i915, unsigned int engine, int prio, unsigned int flags)
gem_quiescent_gpu(i915);
}
-static void reorder(int fd, unsigned ring, unsigned flags)
+static void reorder(int fd, const intel_ctx_cfg_t *cfg,
+ unsigned ring, unsigned flags)
#define EQUAL 1
{
IGT_CORK_FENCE(cork);
uint32_t scratch;
uint32_t result;
- uint32_t ctx[2];
+ const intel_ctx_t *ctx[2];
int fence;
- ctx[LO] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
+ ctx[LO] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[LO]->id, MIN_PRIO);
- ctx[HI] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[HI], flags & EQUAL ? MIN_PRIO : 0);
+ ctx[HI] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[HI]->id, flags & EQUAL ? MIN_PRIO : 0);
scratch = gem_create(fd, 4096);
fence = igt_cork_plug(&cork, fd);
@@ -1291,40 +1323,40 @@ static void reorder(int fd, unsigned ring, unsigned flags)
/* We expect the high priority context to be executed first, and
* so the final result will be value from the low priority context.
*/
- store_dword_fenced(fd, ctx[LO], ring, scratch, 0, ctx[LO], fence, 0);
- store_dword_fenced(fd, ctx[HI], ring, scratch, 0, ctx[HI], fence, 0);
+ store_dword_fenced(fd, ctx[LO], ring, scratch, 0, ctx[LO]->id, fence, 0);
+ store_dword_fenced(fd, ctx[HI], ring, scratch, 0, ctx[HI]->id, fence, 0);
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, cfg, ring);
close(fence);
- gem_context_destroy(fd, ctx[LO]);
- gem_context_destroy(fd, ctx[HI]);
-
result = __sync_read_u32(fd, scratch, 0);
gem_close(fd, scratch);
if (flags & EQUAL) /* equal priority, result will be fifo */
- igt_assert_eq_u32(result, ctx[HI]);
+ igt_assert_eq_u32(result, ctx[HI]->id);
else
- igt_assert_eq_u32(result, ctx[LO]);
+ igt_assert_eq_u32(result, ctx[LO]->id);
+
+ intel_ctx_destroy(fd, ctx[LO]);
+ intel_ctx_destroy(fd, ctx[HI]);
}
-static void promotion(int fd, unsigned ring)
+static void promotion(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
{
IGT_CORK_FENCE(cork);
uint32_t result, dep;
uint32_t result_read, dep_read;
- uint32_t ctx[3];
+ const intel_ctx_t *ctx[3];
int fence;
- ctx[LO] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
+ ctx[LO] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[LO]->id, MIN_PRIO);
- ctx[HI] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[HI], 0);
+ ctx[HI] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[HI]->id, 0);
- ctx[NOISE] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[NOISE], MIN_PRIO/2);
+ ctx[NOISE] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[NOISE]->id, MIN_PRIO/2);
result = gem_create(fd, 4096);
dep = gem_create(fd, 4096);
@@ -1336,30 +1368,30 @@ static void promotion(int fd, unsigned ring)
* fifo would be NOISE, LO, HI.
* strict priority would be HI, NOISE, LO
*/
- store_dword_fenced(fd, ctx[NOISE], ring, result, 0, ctx[NOISE], fence, 0);
- store_dword_fenced(fd, ctx[LO], ring, result, 0, ctx[LO], fence, 0);
+ store_dword_fenced(fd, ctx[NOISE], ring, result, 0, ctx[NOISE]->id, fence, 0);
+ store_dword_fenced(fd, ctx[LO], ring, result, 0, ctx[LO]->id, fence, 0);
/* link LO <-> HI via a dependency on another buffer */
- store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO], I915_GEM_DOMAIN_INSTRUCTION);
- store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI], 0);
+ store_dword(fd, ctx[LO], ring, dep, 0, ctx[LO]->id, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, ctx[HI], ring, dep, 0, ctx[HI]->id, 0);
- store_dword(fd, ctx[HI], ring, result, 0, ctx[HI], 0);
+ store_dword(fd, ctx[HI], ring, result, 0, ctx[HI]->id, 0);
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, cfg, ring);
close(fence);
- gem_context_destroy(fd, ctx[NOISE]);
- gem_context_destroy(fd, ctx[LO]);
- gem_context_destroy(fd, ctx[HI]);
-
dep_read = __sync_read_u32(fd, dep, 0);
gem_close(fd, dep);
result_read = __sync_read_u32(fd, result, 0);
gem_close(fd, result);
- igt_assert_eq_u32(dep_read, ctx[HI]);
- igt_assert_eq_u32(result_read, ctx[NOISE]);
+ igt_assert_eq_u32(dep_read, ctx[HI]->id);
+ igt_assert_eq_u32(result_read, ctx[NOISE]->id);
+
+ intel_ctx_destroy(fd, ctx[NOISE]);
+ intel_ctx_destroy(fd, ctx[LO]);
+ intel_ctx_destroy(fd, ctx[HI]);
}
static bool set_preempt_timeout(int i915,
@@ -1373,34 +1405,35 @@ static bool set_preempt_timeout(int i915,
#define NEW_CTX (0x1 << 0)
#define HANG_LP (0x1 << 1)
-static void preempt(int fd, const struct intel_execution_engine2 *e, unsigned flags)
+static void preempt(int fd, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *e, unsigned flags)
{
uint32_t result = gem_create(fd, 4096);
uint32_t result_read;
igt_spin_t *spin[MAX_ELSP_QLEN];
- uint32_t ctx[2];
+ const intel_ctx_t *ctx[2];
igt_hang_t hang;
/* Set a fast timeout to speed the test up (if available) */
set_preempt_timeout(fd, e, 150);
- ctx[LO] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
+ ctx[LO] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[LO]->id, MIN_PRIO);
- ctx[HI] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
+ ctx[HI] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[HI]->id, MAX_PRIO);
if (flags & HANG_LP)
- hang = igt_hang_ctx(fd, ctx[LO], e->flags, 0);
+ hang = igt_hang_ctx(fd, ctx[LO]->id, e->flags, 0);
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
if (flags & NEW_CTX) {
- gem_context_destroy(fd, ctx[LO]);
- ctx[LO] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
+ intel_ctx_destroy(fd, ctx[LO]);
+ ctx[LO] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[LO]->id, MIN_PRIO);
}
spin[n] = __igt_spin_new(fd,
- .ctx_id = ctx[LO],
+ .ctx = ctx[LO],
.engine = e->flags,
.flags = flags & USERPTR ? IGT_SPIN_USERPTR : 0);
igt_debug("spin[%d].handle=%d\n", n, spin[n]->handle);
@@ -1418,8 +1451,8 @@ static void preempt(int fd, const struct intel_execution_engine2 *e, unsigned fl
if (flags & HANG_LP)
igt_post_hang_ring(fd, hang);
- gem_context_destroy(fd, ctx[LO]);
- gem_context_destroy(fd, ctx[HI]);
+ intel_ctx_destroy(fd, ctx[LO]);
+ intel_ctx_destroy(fd, ctx[HI]);
gem_close(fd, result);
}
@@ -1427,22 +1460,23 @@ static void preempt(int fd, const struct intel_execution_engine2 *e, unsigned fl
#define CHAIN 0x1
#define CONTEXTS 0x2
-static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
+static igt_spin_t *__noise(int fd, const intel_ctx_t *ctx,
+ int prio, igt_spin_t *spin)
{
const struct intel_execution_engine2 *e;
- gem_context_set_priority(fd, ctx, prio);
+ gem_context_set_priority(fd, ctx->id, prio);
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (spin == NULL) {
spin = __igt_spin_new(fd,
- .ctx_id = ctx,
+ .ctx = ctx,
.engine = e->flags);
} else {
struct drm_i915_gem_execbuffer2 eb = {
.buffer_count = 1,
.buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
.flags = e->flags,
};
gem_execbuf(fd, &eb);
@@ -1453,7 +1487,7 @@ static igt_spin_t *__noise(int fd, uint32_t ctx, int prio, igt_spin_t *spin)
}
static void __preempt_other(int fd,
- uint32_t *ctx,
+ const intel_ctx_t **ctx,
unsigned int target, unsigned int primary,
unsigned flags)
{
@@ -1469,7 +1503,7 @@ static void __preempt_other(int fd,
n++;
if (flags & CHAIN) {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx[LO], e) {
store_dword(fd, ctx[LO], e->flags,
result, (n + 1)*sizeof(uint32_t), n + 1,
I915_GEM_DOMAIN_RENDER);
@@ -1493,11 +1527,12 @@ static void __preempt_other(int fd,
gem_close(fd, result);
}
-static void preempt_other(int fd, unsigned ring, unsigned int flags)
+static void preempt_other(int fd, const intel_ctx_cfg_t *cfg,
+ unsigned ring, unsigned int flags)
{
const struct intel_execution_engine2 *e;
igt_spin_t *spin = NULL;
- uint32_t ctx[3];
+ const intel_ctx_t *ctx[3];
/* On each engine, insert
* [NOISE] spinner,
@@ -1509,16 +1544,16 @@ static void preempt_other(int fd, unsigned ring, unsigned int flags)
* can cross engines.
*/
- ctx[LO] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
+ ctx[LO] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[LO]->id, MIN_PRIO);
- ctx[NOISE] = gem_context_clone_with_engines(fd, 0);
+ ctx[NOISE] = intel_ctx_create(fd, cfg);
spin = __noise(fd, ctx[NOISE], 0, NULL);
- ctx[HI] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
+ ctx[HI] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[HI]->id, MAX_PRIO);
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_cfg_engine(fd, cfg, e) {
igt_debug("Primary engine: %s\n", e->name);
__preempt_other(fd, ctx, ring, e->flags, flags);
@@ -1527,12 +1562,12 @@ static void preempt_other(int fd, unsigned ring, unsigned int flags)
igt_assert(gem_bo_busy(fd, spin->handle));
igt_spin_free(fd, spin);
- gem_context_destroy(fd, ctx[LO]);
- gem_context_destroy(fd, ctx[NOISE]);
- gem_context_destroy(fd, ctx[HI]);
+ intel_ctx_destroy(fd, ctx[LO]);
+ intel_ctx_destroy(fd, ctx[NOISE]);
+ intel_ctx_destroy(fd, ctx[HI]);
}
-static void __preempt_queue(int fd,
+static void __preempt_queue(int fd, const intel_ctx_cfg_t *cfg,
unsigned target, unsigned primary,
unsigned depth, unsigned flags)
{
@@ -1540,33 +1575,33 @@ static void __preempt_queue(int fd,
uint32_t result = gem_create(fd, 4096);
uint32_t result_read[4096 / sizeof(uint32_t)];
igt_spin_t *above = NULL, *below = NULL;
- uint32_t ctx[3] = {
- gem_context_clone_with_engines(fd, 0),
- gem_context_clone_with_engines(fd, 0),
- gem_context_clone_with_engines(fd, 0),
+ const intel_ctx_t *ctx[3] = {
+ intel_ctx_create(fd, cfg),
+ intel_ctx_create(fd, cfg),
+ intel_ctx_create(fd, cfg),
};
int prio = MAX_PRIO;
unsigned int n, i;
for (n = 0; n < depth; n++) {
if (flags & CONTEXTS) {
- gem_context_destroy(fd, ctx[NOISE]);
- ctx[NOISE] = gem_context_clone_with_engines(fd, 0);
+ intel_ctx_destroy(fd, ctx[NOISE]);
+ ctx[NOISE] = intel_ctx_create(fd, cfg);
}
above = __noise(fd, ctx[NOISE], prio--, above);
}
- gem_context_set_priority(fd, ctx[HI], prio--);
+ gem_context_set_priority(fd, ctx[HI]->id, prio--);
for (; n < MAX_ELSP_QLEN; n++) {
if (flags & CONTEXTS) {
- gem_context_destroy(fd, ctx[NOISE]);
- ctx[NOISE] = gem_context_clone_with_engines(fd, 0);
+ intel_ctx_destroy(fd, ctx[NOISE]);
+ ctx[NOISE] = intel_ctx_create(fd, cfg);
}
below = __noise(fd, ctx[NOISE], prio--, below);
}
- gem_context_set_priority(fd, ctx[LO], prio--);
+ gem_context_set_priority(fd, ctx[LO]->id, prio--);
n = 0;
store_dword(fd, ctx[LO], primary,
@@ -1575,7 +1610,7 @@ static void __preempt_queue(int fd,
n++;
if (flags & CHAIN) {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx[LO], e) {
store_dword(fd, ctx[LO], e->flags,
result, (n + 1)*sizeof(uint32_t), n + 1,
I915_GEM_DOMAIN_RENDER);
@@ -1607,25 +1642,26 @@ static void __preempt_queue(int fd,
igt_spin_free(fd, below);
}
- gem_context_destroy(fd, ctx[LO]);
- gem_context_destroy(fd, ctx[NOISE]);
- gem_context_destroy(fd, ctx[HI]);
+ intel_ctx_destroy(fd, ctx[LO]);
+ intel_ctx_destroy(fd, ctx[NOISE]);
+ intel_ctx_destroy(fd, ctx[HI]);
gem_close(fd, result);
}
-static void preempt_queue(int fd, unsigned ring, unsigned int flags)
+static void preempt_queue(int fd, const intel_ctx_cfg_t *cfg,
+ unsigned ring, unsigned int flags)
{
const struct intel_execution_engine2 *e;
for (unsigned depth = 1; depth <= MAX_ELSP_QLEN; depth *= 4)
- __preempt_queue(fd, ring, ring, depth, flags);
+ __preempt_queue(fd, cfg, ring, ring, depth, flags);
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_cfg_engine(fd, cfg, e) {
if (ring == e->flags)
continue;
- __preempt_queue(fd, ring, e->flags, MAX_ELSP_QLEN, flags);
+ __preempt_queue(fd, cfg, ring, e->flags, MAX_ELSP_QLEN, flags);
}
}
@@ -1642,19 +1678,16 @@ static void preempt_engines(int i915,
const struct intel_execution_engine2 *e,
unsigned int flags)
{
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines , I915_EXEC_RING_MASK + 1);
- struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_create(i915),
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&engines),
- .size = sizeof(engines),
- };
struct pnode {
struct igt_list_head spinners;
struct igt_list_head link;
- } pnode[I915_EXEC_RING_MASK + 1], *p;
+ } pnode[GEM_MAX_ENGINES], *p;
+ struct intel_ctx_cfg cfg = {
+ .num_engines = GEM_MAX_ENGINES,
+ };
IGT_LIST_HEAD(plist);
igt_spin_t *spin, *sn;
+ const intel_ctx_t *ctx;
/*
* A quick test that each engine within a context is an independent
@@ -1663,19 +1696,19 @@ static void preempt_engines(int i915,
igt_require(has_context_engines(i915));
- for (int n = 0; n <= I915_EXEC_RING_MASK; n++) {
- engines.engines[n].engine_class = e->class;
- engines.engines[n].engine_instance = e->instance;
+ for (int n = 0; n < GEM_MAX_ENGINES; n++) {
+ cfg.engines[n].engine_class = e->class;
+ cfg.engines[n].engine_instance = e->instance;
IGT_INIT_LIST_HEAD(&pnode[n].spinners);
igt_list_add(&pnode[n].link, &plist);
}
- gem_context_set_param(i915, &param);
+ ctx = intel_ctx_create(i915, &cfg);
- for (int n = -I915_EXEC_RING_MASK; n <= I915_EXEC_RING_MASK; n++) {
+ for (int n = -(GEM_MAX_ENGINES - 1); n < GEM_MAX_ENGINES; n++) {
unsigned int engine = n & I915_EXEC_RING_MASK;
- gem_context_set_priority(i915, param.ctx_id, n);
- spin = igt_spin_new(i915, param.ctx_id, .engine = engine);
+ gem_context_set_priority(i915, ctx->id, n);
+ spin = igt_spin_new(i915, .ctx = ctx, .engine = engine);
igt_list_move_tail(&spin->link, &pnode[engine].spinners);
igt_list_move(&pnode[engine].link, &plist);
@@ -1688,17 +1721,18 @@ static void preempt_engines(int i915,
igt_spin_free(i915, spin);
}
}
- gem_context_destroy(i915, param.ctx_id);
+ intel_ctx_destroy(i915, ctx);
}
-static void preempt_self(int fd, unsigned ring)
+static void preempt_self(int fd, const intel_ctx_cfg_t *cfg,
+ unsigned ring)
{
const struct intel_execution_engine2 *e;
uint32_t result = gem_create(fd, 4096);
uint32_t result_read[4096 / sizeof(uint32_t)];
igt_spin_t *spin[MAX_ELSP_QLEN];
unsigned int n, i;
- uint32_t ctx[3];
+ const intel_ctx_t *ctx[3];
/* On each engine, insert
* [NOISE] spinner,
@@ -1708,21 +1742,21 @@ static void preempt_self(int fd, unsigned ring)
* preempt its own lower priority task on any engine.
*/
- ctx[NOISE] = gem_context_clone_with_engines(fd, 0);
- ctx[HI] = gem_context_clone_with_engines(fd, 0);
+ ctx[NOISE] = intel_ctx_create(fd, cfg);
+ ctx[HI] = intel_ctx_create(fd, cfg);
n = 0;
- gem_context_set_priority(fd, ctx[HI], MIN_PRIO);
- __for_each_physical_engine(fd, e) {
+ gem_context_set_priority(fd, ctx[HI]->id, MIN_PRIO);
+ for_each_ctx_cfg_engine(fd, cfg, e) {
spin[n] = __igt_spin_new(fd,
- .ctx_id = ctx[NOISE],
+ .ctx = ctx[NOISE],
.engine = e->flags);
store_dword(fd, ctx[HI], e->flags,
result, (n + 1)*sizeof(uint32_t), n + 1,
I915_GEM_DOMAIN_RENDER);
n++;
}
- gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
+ gem_context_set_priority(fd, ctx[HI]->id, MAX_PRIO);
store_dword(fd, ctx[HI], ring,
result, (n + 1)*sizeof(uint32_t), n + 1,
I915_GEM_DOMAIN_RENDER);
@@ -1740,36 +1774,37 @@ static void preempt_self(int fd, unsigned ring)
for (i = 0; i <= n; i++)
igt_assert_eq_u32(result_read[i], i);
- gem_context_destroy(fd, ctx[NOISE]);
- gem_context_destroy(fd, ctx[HI]);
+ intel_ctx_destroy(fd, ctx[NOISE]);
+ intel_ctx_destroy(fd, ctx[HI]);
gem_close(fd, result);
}
-static void preemptive_hang(int fd, const struct intel_execution_engine2 *e)
+static void preemptive_hang(int fd, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *e)
{
igt_spin_t *spin[MAX_ELSP_QLEN];
igt_hang_t hang;
- uint32_t ctx[2];
+ const intel_ctx_t *ctx[2];
/* Set a fast timeout to speed the test up (if available) */
set_preempt_timeout(fd, e, 150);
- ctx[HI] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[HI], MAX_PRIO);
+ ctx[HI] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[HI]->id, MAX_PRIO);
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
- ctx[LO] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[LO], MIN_PRIO);
+ ctx[LO] = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx[LO]->id, MIN_PRIO);
spin[n] = __igt_spin_new(fd,
- .ctx_id = ctx[LO],
+ .ctx = ctx[LO],
.engine = e->flags);
- gem_context_destroy(fd, ctx[LO]);
+ intel_ctx_destroy(fd, ctx[LO]);
}
- hang = igt_hang_ctx(fd, ctx[HI], e->flags, 0);
+ hang = igt_hang_ctx(fd, ctx[HI]->id, e->flags, 0);
igt_post_hang_ring(fd, hang);
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
@@ -1781,10 +1816,11 @@ static void preemptive_hang(int fd, const struct intel_execution_engine2 *e)
igt_spin_free(fd, spin[n]);
}
- gem_context_destroy(fd, ctx[HI]);
+ intel_ctx_destroy(fd, ctx[HI]);
}
-static void deep(int fd, unsigned ring)
+static void deep(int fd, const intel_ctx_cfg_t *cfg,
+ unsigned ring)
{
#define XS 8
const unsigned int max_req = MAX_PRIO - MIN_PRIO;
@@ -1796,16 +1832,16 @@ static void deep(int fd, unsigned ring)
uint32_t result, dep[XS];
uint32_t read_buf[size / sizeof(uint32_t)];
uint32_t expected = 0;
- uint32_t *ctx;
+ const intel_ctx_t **ctx;
int dep_nreq;
int n;
ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
for (n = 0; n < MAX_CONTEXTS; n++) {
- ctx[n] = gem_context_clone_with_engines(fd, 0);
+ ctx[n] = intel_ctx_create(fd, cfg);
}
- nreq = gem_submission_measure(fd, NULL, ring) / (3 * XS) * MAX_CONTEXTS;
+ nreq = gem_submission_measure(fd, cfg, ring) / (3 * XS) * MAX_CONTEXTS;
if (nreq > max_req)
nreq = max_req;
igt_info("Using %d requests (prio range %d)\n", nreq, max_req);
@@ -1832,7 +1868,7 @@ static void deep(int fd, unsigned ring)
execbuf.buffer_count = XS + 2;
execbuf.flags = ring;
for (n = 0; n < MAX_CONTEXTS; n++) {
- execbuf.rsvd1 = ctx[n];
+ execbuf.rsvd1 = ctx[n]->id;
gem_execbuf(fd, &execbuf);
}
gem_close(fd, obj[XS+1].handle);
@@ -1850,7 +1886,7 @@ static void deep(int fd, unsigned ring)
.buffers_ptr = to_user_pointer(obj),
.buffer_count = 3,
.flags = ring | (gen < 6 ? I915_EXEC_SECURE : 0),
- .rsvd1 = ctx[n % MAX_CONTEXTS],
+ .rsvd1 = ctx[n % MAX_CONTEXTS]->id,
};
uint32_t batch[16];
int i;
@@ -1898,33 +1934,33 @@ static void deep(int fd, unsigned ring)
dep_nreq = n;
for (n = 0; n < nreq && igt_seconds_elapsed(&tv) < 4; n++) {
- uint32_t context = ctx[n % MAX_CONTEXTS];
- gem_context_set_priority(fd, context, MAX_PRIO - nreq + n);
+ const intel_ctx_t *context = ctx[n % MAX_CONTEXTS];
+ gem_context_set_priority(fd, context->id, MAX_PRIO - nreq + n);
+ expected = context->id;
for (int m = 0; m < XS; m++) {
- store_dword_plug(fd, context, ring, result, 4*n, context, dep[m], 0);
- store_dword(fd, context, ring, result, 4*m, context, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword_plug(fd, context, ring, result, 4*n, expected, dep[m], 0);
+ store_dword(fd, context, ring, result, 4*m, expected, I915_GEM_DOMAIN_INSTRUCTION);
}
- expected = context;
}
igt_info("Second deptree: %d requests [%.3fs]\n",
n * XS, 1e-9*igt_nsec_elapsed(&tv));
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, cfg, ring);
gem_close(fd, plug);
igt_require(expected); /* too slow */
- for (n = 0; n < MAX_CONTEXTS; n++)
- gem_context_destroy(fd, ctx[n]);
-
for (int m = 0; m < XS; m++) {
__sync_read_u32_count(fd, dep[m], read_buf, sizeof(read_buf));
gem_close(fd, dep[m]);
for (n = 0; n < dep_nreq; n++)
- igt_assert_eq_u32(read_buf[n], ctx[n % MAX_CONTEXTS]);
+ igt_assert_eq_u32(read_buf[n], ctx[n % MAX_CONTEXTS]->id);
}
+ for (n = 0; n < MAX_CONTEXTS; n++)
+ intel_ctx_destroy(fd, ctx[n]);
+
__sync_read_u32_count(fd, result, read_buf, sizeof(read_buf));
gem_close(fd, result);
@@ -1948,20 +1984,20 @@ static int __execbuf(int fd, struct drm_i915_gem_execbuffer2 *execbuf)
return err;
}
-static void wide(int fd, unsigned ring)
+static void wide(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
{
- const unsigned int ring_size = gem_submission_measure(fd, NULL, ring);
+ const unsigned int ring_size = gem_submission_measure(fd, cfg, ring);
struct timespec tv = {};
IGT_CORK_FENCE(cork);
uint32_t result;
uint32_t result_read[MAX_CONTEXTS];
- uint32_t *ctx;
+ const intel_ctx_t **ctx;
unsigned int count;
int fence;
ctx = malloc(sizeof(*ctx)*MAX_CONTEXTS);
for (int n = 0; n < MAX_CONTEXTS; n++)
- ctx[n] = gem_context_clone_with_engines(fd, 0);
+ ctx[n] = intel_ctx_create(fd, cfg);
result = gem_create(fd, 4*MAX_CONTEXTS);
@@ -1972,30 +2008,30 @@ static void wide(int fd, unsigned ring)
igt_seconds_elapsed(&tv) < 5 && count < ring_size;
count++) {
for (int n = 0; n < MAX_CONTEXTS; n++) {
- store_dword_fenced(fd, ctx[n], ring, result, 4*n, ctx[n],
+ store_dword_fenced(fd, ctx[n], ring, result, 4*n, ctx[n]->id,
fence, I915_GEM_DOMAIN_INSTRUCTION);
}
}
igt_info("Submitted %d requests over %d contexts in %.1fms\n",
count, MAX_CONTEXTS, igt_nsec_elapsed(&tv) * 1e-6);
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, cfg, ring);
close(fence);
+ __sync_read_u32_count(fd, result, result_read, sizeof(result_read));
for (int n = 0; n < MAX_CONTEXTS; n++)
- gem_context_destroy(fd, ctx[n]);
+ igt_assert_eq_u32(result_read[n], ctx[n]->id);
- __sync_read_u32_count(fd, result, result_read, sizeof(result_read));
for (int n = 0; n < MAX_CONTEXTS; n++)
- igt_assert_eq_u32(result_read[n], ctx[n]);
+ intel_ctx_destroy(fd, ctx[n]);
gem_close(fd, result);
free(ctx);
}
-static void reorder_wide(int fd, unsigned ring)
+static void reorder_wide(int fd, const intel_ctx_cfg_t *cfg, unsigned ring)
{
- const unsigned int ring_size = gem_submission_measure(fd, NULL, ring);
+ const unsigned int ring_size = gem_submission_measure(fd, cfg, ring);
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
const int priorities[] = { MIN_PRIO, MAX_PRIO };
struct drm_i915_gem_relocation_entry reloc;
@@ -2037,9 +2073,11 @@ static void reorder_wide(int fd, unsigned ring)
for (int n = 0, x = 1; n < ARRAY_SIZE(priorities); n++, x++) {
unsigned int sz = ALIGN(ring_size * 64, 4096);
uint32_t *batch;
+ const intel_ctx_t *tmp_ctx;
- execbuf.rsvd1 = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, execbuf.rsvd1, priorities[n]);
+ tmp_ctx = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, tmp_ctx->id, priorities[n]);
+ execbuf.rsvd1 = tmp_ctx->id;
obj[1].handle = gem_create(fd, sz);
batch = gem_mmap__device_coherent(fd, obj[1].handle, 0, sz, PROT_WRITE);
@@ -2079,10 +2117,10 @@ static void reorder_wide(int fd, unsigned ring)
munmap(batch, sz);
gem_close(fd, obj[1].handle);
- gem_context_destroy(fd, execbuf.rsvd1);
+ intel_ctx_destroy(fd, tmp_ctx);
}
- unplug_show_queue(fd, &cork, ring);
+ unplug_show_queue(fd, &cork, cfg, ring);
close(fence);
__sync_read_u32_count(fd, result, result_read, sizeof(result_read));
@@ -2108,17 +2146,18 @@ static void bind_to_cpu(int cpu)
igt_assert(sched_setaffinity(getpid(), sizeof(cpu_set_t), &allowed) == 0);
}
-static void test_pi_ringfull(int fd, unsigned int engine, unsigned int flags)
+static void test_pi_ringfull(int fd, const intel_ctx_cfg_t *cfg,
+ unsigned int engine, unsigned int flags)
#define SHARED BIT(0)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct sigaction sa = { .sa_handler = alarm_handler };
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[2];
+ const intel_ctx_t *ctx, *vip;
unsigned int last, count;
struct itimerval itv;
IGT_CORK_HANDLE(c);
- uint32_t vip;
bool *result;
/*
@@ -2150,17 +2189,18 @@ static void test_pi_ringfull(int fd, unsigned int engine, unsigned int flags)
execbuf.buffers_ptr = to_user_pointer(&obj[1]);
execbuf.buffer_count = 1;
- execbuf.flags = engine;
/* Warm up both (hi/lo) contexts */
- execbuf.rsvd1 = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, execbuf.rsvd1, MAX_PRIO);
+ ctx = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx->id, MAX_PRIO);
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
gem_sync(fd, obj[1].handle);
- vip = execbuf.rsvd1;
+ vip = ctx;
- execbuf.rsvd1 = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, execbuf.rsvd1, MIN_PRIO);
+ ctx = intel_ctx_create(fd, cfg);
+ gem_context_set_priority(fd, ctx->id, MIN_PRIO);
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
gem_sync(fd, obj[1].handle);
@@ -2210,7 +2250,7 @@ static void test_pi_ringfull(int fd, unsigned int engine, unsigned int flags)
gem_write(fd, obj[1].handle, 0, &bbe, sizeof(bbe));
}
- result[0] = vip != execbuf.rsvd1;
+ result[0] = vip->id != execbuf.rsvd1;
igt_debug("Waking parent\n");
kill(getppid(), SIGALRM);
@@ -2227,7 +2267,7 @@ static void test_pi_ringfull(int fd, unsigned int engine, unsigned int flags)
* able to add ourselves to *our* ring without interruption.
*/
igt_debug("HP child executing\n");
- execbuf.rsvd1 = vip;
+ execbuf.rsvd1 = vip->id;
err = __execbuf(fd, &execbuf);
igt_debug("HP execbuf returned %d\n", err);
@@ -2258,8 +2298,8 @@ static void test_pi_ringfull(int fd, unsigned int engine, unsigned int flags)
igt_cork_unplug(&c);
igt_waitchildren();
- gem_context_destroy(fd, execbuf.rsvd1);
- gem_context_destroy(fd, vip);
+ intel_ctx_destroy(fd, ctx);
+ intel_ctx_destroy(fd, vip);
gem_close(fd, obj[1].handle);
gem_close(fd, obj[0].handle);
munmap(result, 4096);
@@ -2274,8 +2314,8 @@ struct ufd_thread {
uint32_t batch;
uint32_t scratch;
uint32_t *page;
+ const intel_ctx_cfg_t *cfg;
unsigned int engine;
- unsigned int flags;
int i915;
pthread_mutex_t mutex;
@@ -2298,11 +2338,12 @@ static void *ufd_thread(void *arg)
{ .handle = create_userptr(t->i915, t->page) },
{ .handle = t->batch },
};
+ const intel_ctx_t *ctx = intel_ctx_create(t->i915, t->cfg);
struct drm_i915_gem_execbuffer2 eb = {
.buffers_ptr = to_user_pointer(obj),
.buffer_count = ARRAY_SIZE(obj),
.flags = t->engine,
- .rsvd1 = gem_context_clone_with_engines(t->i915, 0),
+ .rsvd1 = ctx->id,
};
gem_context_set_priority(t->i915, eb.rsvd1, MIN_PRIO);
@@ -2311,13 +2352,15 @@ static void *ufd_thread(void *arg)
gem_sync(t->i915, obj[0].handle);
gem_close(t->i915, obj[0].handle);
- gem_context_destroy(t->i915, eb.rsvd1);
+ intel_ctx_destroy(t->i915, ctx);
t->i915 = -1;
return NULL;
}
-static void test_pi_userfault(int i915, unsigned int engine)
+static void test_pi_userfault(int i915,
+ const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct uffdio_api api = { .api = UFFD_API };
@@ -2350,6 +2393,7 @@ static void test_pi_userfault(int i915, unsigned int engine)
"userfaultfd API v%lld:%lld\n", UFFD_API, api.api);
t.i915 = i915;
+ t.cfg = cfg;
t.engine = engine;
t.page = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, 0, 0);
@@ -2380,11 +2424,12 @@ static void test_pi_userfault(int i915, unsigned int engine)
.handle = gem_create(i915, 4096),
};
struct pollfd pfd;
+ const intel_ctx_t *ctx = intel_ctx_create(i915, cfg);
struct drm_i915_gem_execbuffer2 eb = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = engine | I915_EXEC_FENCE_OUT,
- .rsvd1 = gem_context_clone_with_engines(i915, 0),
+ .rsvd1 = ctx->id,
};
gem_context_set_priority(i915, eb.rsvd1, MAX_PRIO);
gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
@@ -2398,7 +2443,7 @@ static void test_pi_userfault(int i915, unsigned int engine)
igt_assert_eq(sync_fence_status(pfd.fd), 1);
close(pfd.fd);
- gem_context_destroy(i915, eb.rsvd1);
+ intel_ctx_destroy(i915, ctx);
}
/* Confirm the low priority context is still waiting */
@@ -2422,15 +2467,10 @@ static void test_pi_userfault(int i915, unsigned int engine)
static void *iova_thread(struct ufd_thread *t, int prio)
{
- unsigned int clone;
- uint32_t ctx;
-
- clone = I915_CONTEXT_CLONE_ENGINES;
- if (t->flags & SHARED)
- clone |= I915_CONTEXT_CLONE_VM;
+ const intel_ctx_t *ctx;
- ctx = gem_context_clone(t->i915, 0, clone, 0);
- gem_context_set_priority(t->i915, ctx, prio);
+ ctx = intel_ctx_create(t->i915, t->cfg);
+ gem_context_set_priority(t->i915, ctx->id, prio);
store_dword_plug(t->i915, ctx, t->engine,
t->scratch, 0, prio,
@@ -2441,7 +2481,7 @@ static void *iova_thread(struct ufd_thread *t, int prio)
pthread_cond_signal(&t->cond);
pthread_mutex_unlock(&t->mutex);
- gem_context_destroy(t->i915, ctx);
+ intel_ctx_destroy(t->i915, ctx);
return NULL;
}
@@ -2455,8 +2495,10 @@ static void *iova_high(void *arg)
return iova_thread(arg, MAX_PRIO);
}
-static void test_pi_iova(int i915, unsigned int engine, unsigned int flags)
+static void test_pi_iova(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine, unsigned int flags)
{
+ intel_ctx_cfg_t ufd_cfg = *cfg;
struct uffdio_api api = { .api = UFFD_API };
struct uffdio_register reg;
struct uffdio_copy copy;
@@ -2490,9 +2532,12 @@ static void test_pi_iova(int i915, unsigned int engine, unsigned int flags)
igt_require_f(ioctl(ufd, UFFDIO_API, &api) == 0 && api.api == UFFD_API,
"userfaultfd API v%lld:%lld\n", UFFD_API, api.api);
+ if ((flags & SHARED) && gem_uses_full_ppgtt(i915))
+ ufd_cfg.vm = gem_vm_create(i915);
+
t.i915 = i915;
+ t.cfg = &ufd_cfg;
t.engine = engine;
- t.flags = flags;
t.count = 2;
pthread_cond_init(&t.cond, NULL);
@@ -2531,9 +2576,10 @@ static void test_pi_iova(int i915, unsigned int engine, unsigned int flags)
*/
spin = igt_spin_new(i915, .engine = engine);
for (int i = 0; i < MAX_ELSP_QLEN; i++) {
- spin->execbuf.rsvd1 = create_highest_priority(i915);
+ const intel_ctx_t *ctx = create_highest_priority(i915, cfg);
+ spin->execbuf.rsvd1 = ctx->id;
gem_execbuf(i915, &spin->execbuf);
- gem_context_destroy(i915, spin->execbuf.rsvd1);
+ intel_ctx_destroy(i915, ctx);
}
/* Kick off the submission threads */
@@ -2570,10 +2616,14 @@ static void test_pi_iova(int i915, unsigned int engine, unsigned int flags)
gem_close(i915, t.scratch);
munmap(t.page, 4096);
+
+ if (ufd_cfg.vm)
+ gem_vm_destroy(i915, ufd_cfg.vm);
+
close(ufd);
}
-static void measure_semaphore_power(int i915)
+static void measure_semaphore_power(int i915, const intel_ctx_t *ctx)
{
const struct intel_execution_engine2 *signaler, *e;
struct rapl gpu, pkg;
@@ -2581,7 +2631,7 @@ static void measure_semaphore_power(int i915)
igt_require(gpu_power_open(&gpu) == 0);
pkg_power_open(&pkg);
- __for_each_physical_engine(i915, signaler) {
+ for_each_ctx_engine(i915, ctx, signaler) {
struct {
struct power_sample pkg, gpu;
} s_spin[2], s_sema[2];
@@ -2593,6 +2643,7 @@ static void measure_semaphore_power(int i915)
continue;
spin = __igt_spin_new(i915,
+ .ctx = ctx,
.engine = signaler->flags,
.flags = IGT_SPIN_POLL_RUN);
gem_wait(i915, spin->handle, &jiffie); /* waitboost */
@@ -2605,13 +2656,14 @@ static void measure_semaphore_power(int i915)
rapl_read(&pkg, &s_spin[1].pkg);
/* Add a waiter to each engine */
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_spin_t *sema;
if (e->flags == signaler->flags)
continue;
sema = __igt_spin_new(i915,
+ .ctx = ctx,
.engine = e->flags,
.dependency = spin->handle);
@@ -2683,8 +2735,7 @@ static int cmp_u32(const void *A, const void *B)
return 0;
}
-static uint32_t read_ctx_timestamp(int i915,
- uint32_t ctx,
+static uint32_t read_ctx_timestamp(int i915, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e)
{
const int use_64b = intel_gen(intel_get_drm_devid(i915)) >= 8;
@@ -2700,7 +2751,7 @@ static uint32_t read_ctx_timestamp(int i915,
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = e->flags,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
#define RUNTIME (base + 0x3a8)
uint32_t *map, *cs;
@@ -2733,7 +2784,7 @@ static uint32_t read_ctx_timestamp(int i915,
return ts;
}
-static void fairslice(int i915,
+static void fairslice(int i915, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
unsigned long flags,
int duration)
@@ -2741,14 +2792,14 @@ static void fairslice(int i915,
const double timeslice_duration_ns = 1e6;
igt_spin_t *spin = NULL;
double threshold;
- uint32_t ctx[3];
+ const intel_ctx_t *ctx[3];
uint32_t ts[3];
for (int i = 0; i < ARRAY_SIZE(ctx); i++) {
- ctx[i] = gem_context_clone_with_engines(i915, 0);
+ ctx[i] = intel_ctx_create(i915, cfg);
if (spin == NULL) {
spin = __igt_spin_new(i915,
- .ctx_id = ctx[i],
+ .ctx = ctx[i],
.engine = e->flags,
.flags = flags);
} else {
@@ -2756,7 +2807,7 @@ static void fairslice(int i915,
.buffer_count = 1,
.buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
.flags = e->flags,
- .rsvd1 = ctx[i],
+ .rsvd1 = ctx[i]->id,
};
gem_execbuf(i915, &eb);
}
@@ -2770,7 +2821,7 @@ static void fairslice(int i915,
ts[i] = read_ctx_timestamp(i915, ctx[i], e);
for (int i = 0; i < ARRAY_SIZE(ctx); i++)
- gem_context_destroy(i915, ctx[i]);
+ intel_ctx_destroy(i915, ctx[i]);
igt_spin_free(i915, spin);
/*
@@ -2797,18 +2848,19 @@ static void fairslice(int i915,
1e-6 * threshold * 2);
}
-#define test_each_engine(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_engine(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
igt_dynamic_f("%s", e->name)
-#define test_each_engine_store(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_engine_store(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
for_each_if(gem_class_can_store_dword(fd, e->class)) \
igt_dynamic_f("%s", e->name)
igt_main
{
int fd = -1;
+ const intel_ctx_t *ctx = NULL;
igt_fixture {
igt_require_sw_sync();
@@ -2820,6 +2872,7 @@ igt_main
igt_require_gem(fd);
gem_require_mmap_wc(fd);
gem_require_contexts(fd);
+ ctx = intel_ctx_create_all_physical(fd);
igt_fork_hang_detector(fd);
}
@@ -2827,22 +2880,22 @@ igt_main
igt_subtest_group {
const struct intel_execution_engine2 *e;
- test_each_engine_store("fifo", fd, e)
- fifo(fd, e->flags);
+ test_each_engine_store("fifo", fd, ctx, e)
+ fifo(fd, ctx, e->flags);
- test_each_engine_store("implicit-read-write", fd, e)
- implicit_rw(fd, e->flags, READ_WRITE);
+ test_each_engine_store("implicit-read-write", fd, ctx, e)
+ implicit_rw(fd, ctx, e->flags, READ_WRITE);
- test_each_engine_store("implicit-write-read", fd, e)
- implicit_rw(fd, e->flags, WRITE_READ);
+ test_each_engine_store("implicit-write-read", fd, ctx, e)
+ implicit_rw(fd, ctx, e->flags, WRITE_READ);
- test_each_engine_store("implicit-boths", fd, e)
- implicit_rw(fd, e->flags, READ_WRITE | WRITE_READ);
+ test_each_engine_store("implicit-boths", fd, ctx, e)
+ implicit_rw(fd, ctx, e->flags, READ_WRITE | WRITE_READ);
- test_each_engine_store("independent", fd, e)
- independent(fd, e->flags, 0);
- test_each_engine_store("u-independent", fd, e)
- independent(fd, e->flags, IGT_SPIN_USERPTR);
+ test_each_engine_store("independent", fd, ctx, e)
+ independent(fd, ctx, e->flags, 0);
+ test_each_engine_store("u-independent", fd, ctx, e)
+ independent(fd, ctx, e->flags, IGT_SPIN_USERPTR);
}
igt_subtest_group {
@@ -2853,19 +2906,19 @@ igt_main
igt_require(gem_scheduler_has_ctx_priority(fd));
}
- test_each_engine("timeslicing", fd, e)
- timeslice(fd, e->flags);
+ test_each_engine("timeslicing", fd, ctx, e)
+ timeslice(fd, &ctx->cfg, e->flags);
- test_each_engine("thriceslice", fd, e)
- timesliceN(fd, e->flags, 3);
+ test_each_engine("thriceslice", fd, ctx, e)
+ timesliceN(fd, &ctx->cfg, e->flags, 3);
- test_each_engine("manyslice", fd, e)
- timesliceN(fd, e->flags, 67);
+ test_each_engine("manyslice", fd, ctx, e)
+ timesliceN(fd, &ctx->cfg, e->flags, 67);
- test_each_engine("lateslice", fd, e)
- lateslice(fd, e->flags, 0);
- test_each_engine("u-lateslice", fd, e)
- lateslice(fd, e->flags, IGT_SPIN_USERPTR);
+ test_each_engine("lateslice", fd, ctx, e)
+ lateslice(fd, &ctx->cfg, e->flags, 0);
+ test_each_engine("u-lateslice", fd, ctx, e)
+ lateslice(fd, &ctx->cfg, e->flags, IGT_SPIN_USERPTR);
igt_subtest_group {
igt_fixture {
@@ -2873,23 +2926,23 @@ igt_main
igt_require(intel_gen(intel_get_drm_devid(fd)) >= 8);
}
- test_each_engine("fairslice", fd, e)
- fairslice(fd, e, 0, 2);
+ test_each_engine("fairslice", fd, ctx, e)
+ fairslice(fd, &ctx->cfg, e, 0, 2);
- test_each_engine("u-fairslice", fd, e)
- fairslice(fd, e, IGT_SPIN_USERPTR, 2);
+ test_each_engine("u-fairslice", fd, ctx, e)
+ fairslice(fd, &ctx->cfg, e, IGT_SPIN_USERPTR, 2);
igt_subtest("fairslice-all") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_fork(child, 1)
- fairslice(fd, e, 0, 2);
+ fairslice(fd, &ctx->cfg, e, 0, 2);
}
igt_waitchildren();
}
igt_subtest("u-fairslice-all") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_fork(child, 1)
- fairslice(fd, e,
+ fairslice(fd, &ctx->cfg, e,
IGT_SPIN_USERPTR,
2);
}
@@ -2897,84 +2950,84 @@ igt_main
}
}
- test_each_engine("submit-early-slice", fd, e)
- submit_slice(fd, e, EARLY_SUBMIT);
- test_each_engine("u-submit-early-slice", fd, e)
- submit_slice(fd, e, EARLY_SUBMIT | USERPTR);
- test_each_engine("submit-golden-slice", fd, e)
- submit_slice(fd, e, 0);
- test_each_engine("u-submit-golden-slice", fd, e)
- submit_slice(fd, e, USERPTR);
- test_each_engine("submit-late-slice", fd, e)
- submit_slice(fd, e, LATE_SUBMIT);
- test_each_engine("u-submit-late-slice", fd, e)
- submit_slice(fd, e, LATE_SUBMIT | USERPTR);
+ test_each_engine("submit-early-slice", fd, ctx, e)
+ submit_slice(fd, &ctx->cfg, e, EARLY_SUBMIT);
+ test_each_engine("u-submit-early-slice", fd, ctx, e)
+ submit_slice(fd, &ctx->cfg, e, EARLY_SUBMIT | USERPTR);
+ test_each_engine("submit-golden-slice", fd, ctx, e)
+ submit_slice(fd, &ctx->cfg, e, 0);
+ test_each_engine("u-submit-golden-slice", fd, ctx, e)
+ submit_slice(fd, &ctx->cfg, e, USERPTR);
+ test_each_engine("submit-late-slice", fd, ctx, e)
+ submit_slice(fd, &ctx->cfg, e, LATE_SUBMIT);
+ test_each_engine("u-submit-late-slice", fd, ctx, e)
+ submit_slice(fd, &ctx->cfg, e, LATE_SUBMIT | USERPTR);
igt_subtest("semaphore-user")
- semaphore_userlock(fd, 0);
+ semaphore_userlock(fd, ctx, 0);
igt_subtest("semaphore-codependency")
- semaphore_codependency(fd, 0);
+ semaphore_codependency(fd, ctx, 0);
igt_subtest("semaphore-resolve")
- semaphore_resolve(fd, 0);
+ semaphore_resolve(fd, &ctx->cfg, 0);
igt_subtest("semaphore-noskip")
- semaphore_noskip(fd, 0);
+ semaphore_noskip(fd, &ctx->cfg, 0);
igt_subtest("u-semaphore-user")
- semaphore_userlock(fd, IGT_SPIN_USERPTR);
+ semaphore_userlock(fd, ctx, IGT_SPIN_USERPTR);
igt_subtest("u-semaphore-codependency")
- semaphore_codependency(fd, IGT_SPIN_USERPTR);
+ semaphore_codependency(fd, ctx, IGT_SPIN_USERPTR);
igt_subtest("u-semaphore-resolve")
- semaphore_resolve(fd, IGT_SPIN_USERPTR);
+ semaphore_resolve(fd, &ctx->cfg, IGT_SPIN_USERPTR);
igt_subtest("u-semaphore-noskip")
- semaphore_noskip(fd, IGT_SPIN_USERPTR);
+ semaphore_noskip(fd, &ctx->cfg, IGT_SPIN_USERPTR);
igt_subtest("smoketest-all")
- smoketest(fd, ALL_ENGINES, 30);
+ smoketest(fd, &ctx->cfg, ALL_ENGINES, 30);
- test_each_engine_store("in-order", fd, e)
- reorder(fd, e->flags, EQUAL);
+ test_each_engine_store("in-order", fd, ctx, e)
+ reorder(fd, &ctx->cfg, e->flags, EQUAL);
- test_each_engine_store("out-order", fd, e)
- reorder(fd, e->flags, 0);
+ test_each_engine_store("out-order", fd, ctx, e)
+ reorder(fd, &ctx->cfg, e->flags, 0);
- test_each_engine_store("promotion", fd, e)
- promotion(fd, e->flags);
+ test_each_engine_store("promotion", fd, ctx, e)
+ promotion(fd, &ctx->cfg, e->flags);
igt_subtest_group {
igt_fixture {
igt_require(gem_scheduler_has_preemption(fd));
}
- test_each_engine_store("preempt", fd, e)
- preempt(fd, e, 0);
+ test_each_engine_store("preempt", fd, ctx, e)
+ preempt(fd, &ctx->cfg, e, 0);
- test_each_engine_store("preempt-contexts", fd, e)
- preempt(fd, e, NEW_CTX);
+ test_each_engine_store("preempt-contexts", fd, ctx, e)
+ preempt(fd, &ctx->cfg, e, NEW_CTX);
- test_each_engine_store("preempt-user", fd, e)
- preempt(fd, e, USERPTR);
+ test_each_engine_store("preempt-user", fd, ctx, e)
+ preempt(fd, &ctx->cfg, e, USERPTR);
- test_each_engine_store("preempt-self", fd, e)
- preempt_self(fd, e->flags);
+ test_each_engine_store("preempt-self", fd, ctx, e)
+ preempt_self(fd, &ctx->cfg, e->flags);
- test_each_engine_store("preempt-other", fd, e)
- preempt_other(fd, e->flags, 0);
+ test_each_engine_store("preempt-other", fd, ctx, e)
+ preempt_other(fd, &ctx->cfg, e->flags, 0);
- test_each_engine_store("preempt-other-chain", fd, e)
- preempt_other(fd, e->flags, CHAIN);
+ test_each_engine_store("preempt-other-chain", fd, ctx, e)
+ preempt_other(fd, &ctx->cfg, e->flags, CHAIN);
- test_each_engine_store("preempt-queue", fd, e)
- preempt_queue(fd, e->flags, 0);
+ test_each_engine_store("preempt-queue", fd, ctx, e)
+ preempt_queue(fd, &ctx->cfg, e->flags, 0);
- test_each_engine_store("preempt-queue-chain", fd, e)
- preempt_queue(fd, e->flags, CHAIN);
- test_each_engine_store("preempt-queue-contexts", fd, e)
- preempt_queue(fd, e->flags, CONTEXTS);
+ test_each_engine_store("preempt-queue-chain", fd, ctx, e)
+ preempt_queue(fd, &ctx->cfg, e->flags, CHAIN);
+ test_each_engine_store("preempt-queue-contexts", fd, ctx, e)
+ preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS);
- test_each_engine_store("preempt-queue-contexts-chain", fd, e)
- preempt_queue(fd, e->flags, CONTEXTS | CHAIN);
+ test_each_engine_store("preempt-queue-contexts-chain", fd, ctx, e)
+ preempt_queue(fd, &ctx->cfg, e->flags, CONTEXTS | CHAIN);
- test_each_engine_store("preempt-engines", fd, e)
+ test_each_engine_store("preempt-engines", fd, ctx, e)
preempt_engines(fd, e, 0);
igt_subtest_group {
@@ -2985,11 +3038,11 @@ igt_main
hang = igt_allow_hang(fd, 0, 0);
}
- test_each_engine_store("preempt-hang", fd, e)
- preempt(fd, e, NEW_CTX | HANG_LP);
+ test_each_engine_store("preempt-hang", fd, ctx, e)
+ preempt(fd, &ctx->cfg, e, NEW_CTX | HANG_LP);
- test_each_engine_store("preemptive-hang", fd, e)
- preemptive_hang(fd, e);
+ test_each_engine_store("preemptive-hang", fd, ctx, e)
+ preemptive_hang(fd, &ctx->cfg, e);
igt_fixture {
igt_disallow_hang(fd, hang);
@@ -2998,30 +3051,30 @@ igt_main
}
}
- test_each_engine_store("noreorder", fd, e)
- noreorder(fd, e->flags, 0, 0);
+ test_each_engine_store("noreorder", fd, ctx, e)
+ noreorder(fd, &ctx->cfg, e->flags, 0, 0);
- test_each_engine_store("noreorder-priority", fd, e) {
+ test_each_engine_store("noreorder-priority", fd, ctx, e) {
igt_require(gem_scheduler_enabled(fd));
- noreorder(fd, e->flags, MAX_PRIO, 0);
+ noreorder(fd, &ctx->cfg, e->flags, MAX_PRIO, 0);
}
- test_each_engine_store("noreorder-corked", fd, e) {
+ test_each_engine_store("noreorder-corked", fd, ctx, e) {
igt_require(gem_scheduler_enabled(fd));
- noreorder(fd, e->flags, MAX_PRIO, CORKED);
+ noreorder(fd, &ctx->cfg, e->flags, MAX_PRIO, CORKED);
}
- test_each_engine_store("deep", fd, e)
- deep(fd, e->flags);
+ test_each_engine_store("deep", fd, ctx, e)
+ deep(fd, &ctx->cfg, e->flags);
- test_each_engine_store("wide", fd, e)
- wide(fd, e->flags);
+ test_each_engine_store("wide", fd, ctx, e)
+ wide(fd, &ctx->cfg, e->flags);
- test_each_engine_store("reorder-wide", fd, e)
- reorder_wide(fd, e->flags);
+ test_each_engine_store("reorder-wide", fd, ctx, e)
+ reorder_wide(fd, &ctx->cfg, e->flags);
- test_each_engine_store("smoketest", fd, e)
- smoketest(fd, e->flags, 5);
+ test_each_engine_store("smoketest", fd, ctx, e)
+ smoketest(fd, &ctx->cfg, e->flags, 5);
}
igt_subtest_group {
@@ -3033,20 +3086,20 @@ igt_main
igt_require(gem_scheduler_has_preemption(fd));
}
- test_each_engine("pi-ringfull", fd, e)
- test_pi_ringfull(fd, e->flags, 0);
+ test_each_engine("pi-ringfull", fd, ctx, e)
+ test_pi_ringfull(fd, &ctx->cfg, e->flags, 0);
- test_each_engine("pi-common", fd, e)
- test_pi_ringfull(fd, e->flags, SHARED);
+ test_each_engine("pi-common", fd, ctx, e)
+ test_pi_ringfull(fd, &ctx->cfg, e->flags, SHARED);
- test_each_engine("pi-userfault", fd, e)
- test_pi_userfault(fd, e->flags);
+ test_each_engine("pi-userfault", fd, ctx, e)
+ test_pi_userfault(fd, &ctx->cfg, e->flags);
- test_each_engine("pi-distinct-iova", fd, e)
- test_pi_iova(fd, e->flags, 0);
+ test_each_engine("pi-distinct-iova", fd, ctx, e)
+ test_pi_iova(fd, &ctx->cfg, e->flags, 0);
- test_each_engine("pi-shared-iova", fd, e)
- test_pi_iova(fd, e->flags, SHARED);
+ test_each_engine("pi-shared-iova", fd, ctx, e)
+ test_pi_iova(fd, &ctx->cfg, e->flags, SHARED);
}
igt_subtest_group {
@@ -3056,11 +3109,12 @@ igt_main
}
igt_subtest("semaphore-power")
- measure_semaphore_power(fd);
+ measure_semaphore_power(fd, ctx);
}
igt_fixture {
igt_stop_hang_detector();
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 05/81] tests/i915/perf_pmu: Convert to intel_ctx_t (v3)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (3 preceding siblings ...)
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 04/81] tests/i915/gem_exec_schedule: Convert to intel_ctx_t (v3) Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 06/81] tests/i915/gem_exec_nop: Convert to intel_ctx_t Jason Ekstrand
` (78 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
v2 (Ashutosh Dixit):
- Pass the context to the init() tests
- Iterate over render_ctx when using render_fd
v3 (Jason Ekstrand):
- Pass a context config to gem_submission_measure()
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/perf_pmu.c | 243 ++++++++++++++++++++++++------------------
1 file changed, 137 insertions(+), 106 deletions(-)
diff --git a/tests/i915/perf_pmu.c b/tests/i915/perf_pmu.c
index f92f73919..73f378604 100644
--- a/tests/i915/perf_pmu.c
+++ b/tests/i915/perf_pmu.c
@@ -47,6 +47,7 @@
#include "igt_perf.h"
#include "igt_sysfs.h"
#include "igt_pm.h"
+#include "intel_ctx.h"
#include "sw_sync.h"
IGT_TEST_DESCRIPTION("Test the i915 pmu perf interface");
@@ -77,7 +78,8 @@ static int open_group(int i915, uint64_t config, int group)
}
static void
-init(int gem_fd, const struct intel_execution_engine2 *e, uint8_t sample)
+init(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e, uint8_t sample)
{
int fd, err = 0;
bool exists;
@@ -88,7 +90,7 @@ init(int gem_fd, const struct intel_execution_engine2 *e, uint8_t sample)
if (fd < 0)
err = errno;
- exists = gem_context_has_engine(gem_fd, 0, e->flags);
+ exists = gem_context_has_engine(gem_fd, ctx->id, e->flags);
if (intel_gen(intel_get_drm_devid(gem_fd)) < 6 &&
sample == I915_SAMPLE_SEMA)
exists = false;
@@ -172,11 +174,11 @@ static unsigned int measured_usleep(unsigned int usec)
#define FLAG_HANG (32)
#define TEST_S3 (64)
-static igt_spin_t * __spin_poll(int fd, uint32_t ctx,
+static igt_spin_t * __spin_poll(int fd, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e)
{
struct igt_spin_factory opts = {
- .ctx_id = ctx,
+ .ctx = ctx,
.engine = e->flags,
};
@@ -215,7 +217,7 @@ static unsigned long __spin_wait(int fd, igt_spin_t *spin)
return igt_nsec_elapsed(&start);
}
-static igt_spin_t * __spin_sync(int fd, uint32_t ctx,
+static igt_spin_t * __spin_sync(int fd, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e)
{
igt_spin_t *spin = __spin_poll(fd, ctx, e);
@@ -225,7 +227,7 @@ static igt_spin_t * __spin_sync(int fd, uint32_t ctx,
return spin;
}
-static igt_spin_t * spin_sync(int fd, uint32_t ctx,
+static igt_spin_t * spin_sync(int fd, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e)
{
igt_require_gem(fd);
@@ -233,7 +235,7 @@ static igt_spin_t * spin_sync(int fd, uint32_t ctx,
return __spin_sync(fd, ctx, e);
}
-static igt_spin_t * spin_sync_flags(int fd, uint32_t ctx, unsigned int flags)
+static igt_spin_t * spin_sync_flags(int fd, const intel_ctx_t *ctx, unsigned int flags)
{
struct intel_execution_engine2 e = { };
@@ -277,7 +279,8 @@ static void end_spin(int fd, igt_spin_t *spin, unsigned int flags)
}
static void
-single(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
+single(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e, unsigned int flags)
{
unsigned long slept;
igt_spin_t *spin;
@@ -287,7 +290,7 @@ single(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
fd = open_pmu(gem_fd, I915_PMU_ENGINE_BUSY(e->class, e->instance));
if (flags & TEST_BUSY)
- spin = spin_sync(gem_fd, 0, e);
+ spin = spin_sync(gem_fd, ctx, e);
else
spin = NULL;
@@ -323,7 +326,8 @@ single(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
}
static void
-busy_start(int gem_fd, const struct intel_execution_engine2 *e)
+busy_start(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
unsigned long slept;
uint64_t val, ts[2];
@@ -336,7 +340,7 @@ busy_start(int gem_fd, const struct intel_execution_engine2 *e)
*/
sleep(2);
- spin = __spin_sync(gem_fd, 0, e);
+ spin = __spin_sync(gem_fd, ctx, e);
fd = open_pmu(gem_fd, I915_PMU_ENGINE_BUSY(e->class, e->instance));
@@ -358,15 +362,16 @@ busy_start(int gem_fd, const struct intel_execution_engine2 *e)
* will depend on the CI systems running it a lot to detect issues.
*/
static void
-busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
+busy_double_start(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
unsigned long slept;
uint64_t val, val2, ts[2];
igt_spin_t *spin[2];
- uint32_t ctx;
+ const intel_ctx_t *tmp_ctx;
int fd;
- ctx = gem_context_clone_with_engines(gem_fd, 0);
+ tmp_ctx = intel_ctx_create(gem_fd, &ctx->cfg);
/*
* Defeat the busy stats delayed disable, we need to guarantee we are
@@ -379,10 +384,10 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
* re-submission in execlists mode. Make sure busyness is correctly
* reported with the engine busy, and after the engine went idle.
*/
- spin[0] = __spin_sync(gem_fd, 0, e);
+ spin[0] = __spin_sync(gem_fd, ctx, e);
usleep(500e3);
spin[1] = __igt_spin_new(gem_fd,
- .ctx_id = ctx,
+ .ctx = tmp_ctx,
.engine = e->flags);
/*
@@ -413,7 +418,7 @@ busy_double_start(int gem_fd, const struct intel_execution_engine2 *e)
close(fd);
- gem_context_destroy(gem_fd, ctx);
+ intel_ctx_destroy(gem_fd, tmp_ctx);
assert_within_epsilon(val, ts[1] - ts[0], tolerance);
igt_assert_eq(val2, 0);
@@ -441,7 +446,8 @@ static void log_busy(unsigned int num_engines, uint64_t *val)
}
static void
-busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
+busy_check_all(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
const unsigned int num_engines, unsigned int flags)
{
struct intel_execution_engine2 *e_;
@@ -454,7 +460,7 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
i = 0;
fd[0] = -1;
- __for_each_physical_engine(gem_fd, e_) {
+ for_each_ctx_engine(gem_fd, ctx, e_) {
if (e->class == e_->class && e->instance == e_->instance)
busy_idx = i;
@@ -466,7 +472,7 @@ busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
igt_assert_eq(i, num_engines);
- spin = spin_sync(gem_fd, 0, e);
+ spin = spin_sync(gem_fd, ctx, e);
pmu_read_multi(fd[0], num_engines, tval[0]);
slept = measured_usleep(batch_duration_ns / 1000);
if (flags & TEST_TRAILING_IDLE)
@@ -507,7 +513,8 @@ __submit_spin(int gem_fd, igt_spin_t *spin,
}
static void
-most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
+most_busy_check_all(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
const unsigned int num_engines, unsigned int flags)
{
struct intel_execution_engine2 *e_;
@@ -519,13 +526,13 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
unsigned int idle_idx, i;
i = 0;
- __for_each_physical_engine(gem_fd, e_) {
+ for_each_ctx_engine(gem_fd, ctx, e_) {
if (e->class == e_->class && e->instance == e_->instance)
idle_idx = i;
else if (spin)
__submit_spin(gem_fd, spin, e_, 64);
else
- spin = __spin_poll(gem_fd, 0, e_);
+ spin = __spin_poll(gem_fd, ctx, e_);
val[i++] = I915_PMU_ENGINE_BUSY(e_->class, e_->instance);
}
@@ -565,7 +572,8 @@ most_busy_check_all(int gem_fd, const struct intel_execution_engine2 *e,
}
static void
-all_busy_check_all(int gem_fd, const unsigned int num_engines,
+all_busy_check_all(int gem_fd, const intel_ctx_t *ctx,
+ const unsigned int num_engines,
unsigned int flags)
{
struct intel_execution_engine2 *e;
@@ -577,11 +585,11 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines,
unsigned int i;
i = 0;
- __for_each_physical_engine(gem_fd, e) {
+ for_each_ctx_engine(gem_fd, ctx, e) {
if (spin)
__submit_spin(gem_fd, spin, e, 64);
else
- spin = __spin_poll(gem_fd, 0, e);
+ spin = __spin_poll(gem_fd, ctx, e);
val[i++] = I915_PMU_ENGINE_BUSY(e->class, e->instance);
}
@@ -616,7 +624,9 @@ all_busy_check_all(int gem_fd, const unsigned int num_engines,
}
static void
-no_sema(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
+no_sema(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
+ unsigned int flags)
{
igt_spin_t *spin;
uint64_t val[2][2];
@@ -628,7 +638,7 @@ no_sema(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
fd[0]);
if (flags & TEST_BUSY)
- spin = spin_sync(gem_fd, 0, e);
+ spin = spin_sync(gem_fd, ctx, e);
else
spin = NULL;
@@ -659,7 +669,8 @@ no_sema(int gem_fd, const struct intel_execution_engine2 *e, unsigned int flags)
#define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12)
static void
-sema_wait(int gem_fd, const struct intel_execution_engine2 *e,
+sema_wait(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
unsigned int flags)
{
struct drm_i915_gem_relocation_entry reloc[2] = {};
@@ -718,6 +729,7 @@ sema_wait(int gem_fd, const struct intel_execution_engine2 *e,
eb.buffer_count = 2;
eb.buffers_ptr = to_user_pointer(obj);
eb.flags = e->flags;
+ eb.rsvd1 = ctx->id;
/**
* Start the semaphore wait PMU and after some known time let the above
@@ -789,7 +801,7 @@ create_sema(int gem_fd, struct drm_i915_gem_relocation_entry *reloc)
}
static void
-__sema_busy(int gem_fd, int pmu,
+__sema_busy(int gem_fd, int pmu, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
int sema_pct,
int busy_pct)
@@ -811,6 +823,7 @@ __sema_busy(int gem_fd, int pmu,
.buffer_count = 1,
.buffers_ptr = to_user_pointer(&obj),
.flags = e->flags,
+ .rsvd1 = ctx->id,
};
igt_spin_t *spin;
uint32_t *map;
@@ -822,7 +835,7 @@ __sema_busy(int gem_fd, int pmu,
map = gem_mmap__wc(gem_fd, obj.handle, 0, 4096, PROT_WRITE);
gem_execbuf(gem_fd, &eb);
- spin = igt_spin_new(gem_fd, .engine = e->flags);
+ spin = igt_spin_new(gem_fd, .ctx = ctx, .engine = e->flags);
/* Wait until the batch is executed and the semaphore is busy-waiting */
while (!READ_ONCE(*map) && gem_bo_busy(gem_fd, obj.handle))
@@ -862,7 +875,7 @@ __sema_busy(int gem_fd, int pmu,
}
static void
-sema_busy(int gem_fd,
+sema_busy(int gem_fd, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
unsigned int flags)
{
@@ -875,15 +888,15 @@ sema_busy(int gem_fd,
fd[1] = open_group(gem_fd, I915_PMU_ENGINE_BUSY(e->class, e->instance),
fd[0]);
- __sema_busy(gem_fd, fd[0], e, 50, 100);
- __sema_busy(gem_fd, fd[0], e, 25, 50);
- __sema_busy(gem_fd, fd[0], e, 75, 75);
+ __sema_busy(gem_fd, fd[0], ctx, e, 50, 100);
+ __sema_busy(gem_fd, fd[0], ctx, e, 25, 50);
+ __sema_busy(gem_fd, fd[0], ctx, e, 75, 75);
close(fd[0]);
close(fd[1]);
}
-static void test_awake(int i915)
+static void test_awake(int i915, const intel_ctx_t *ctx)
{
const struct intel_execution_engine2 *e;
unsigned long slept;
@@ -894,8 +907,8 @@ static void test_awake(int i915)
igt_skip_on(fd < 0);
/* Check that each engine is captured by the GT wakeref */
- __for_each_physical_engine(i915, e) {
- igt_spin_new(i915, .engine = e->flags);
+ for_each_ctx_engine(i915, ctx, e) {
+ igt_spin_new(i915, .ctx = ctx, .engine = e->flags);
val = pmu_read_single(fd);
slept = measured_usleep(batch_duration_ns / 1000);
@@ -906,8 +919,8 @@ static void test_awake(int i915)
}
/* And that the total GT wakeref matches walltime not summation */
- __for_each_physical_engine(i915, e)
- igt_spin_new(i915, .engine = e->flags);
+ for_each_ctx_engine(i915, ctx, e)
+ igt_spin_new(i915, .ctx = ctx, .engine = e->flags);
val = pmu_read_single(fd);
slept = measured_usleep(batch_duration_ns / 1000);
@@ -996,7 +1009,8 @@ static int has_secure_batches(const int fd)
}
static void
-event_wait(int gem_fd, const struct intel_execution_engine2 *e)
+event_wait(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
struct drm_i915_gem_exec_object2 obj = { };
struct drm_i915_gem_execbuffer2 eb = { };
@@ -1052,6 +1066,7 @@ event_wait(int gem_fd, const struct intel_execution_engine2 *e)
eb.buffer_count = 1;
eb.buffers_ptr = to_user_pointer(&obj);
eb.flags = e->flags | I915_EXEC_SECURE;
+ eb.rsvd1 = ctx->id;
for_each_pipe_with_valid_output(&data.display, p, output) {
struct igt_helper_process waiter = { };
@@ -1124,7 +1139,8 @@ event_wait(int gem_fd, const struct intel_execution_engine2 *e)
}
static void
-multi_client(int gem_fd, const struct intel_execution_engine2 *e)
+multi_client(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
uint64_t config = I915_PMU_ENGINE_BUSY(e->class, e->instance);
unsigned long slept[2];
@@ -1143,7 +1159,7 @@ multi_client(int gem_fd, const struct intel_execution_engine2 *e)
*/
fd[1] = open_pmu(gem_fd, config);
- spin = spin_sync(gem_fd, 0, e);
+ spin = spin_sync(gem_fd, ctx, e);
val[0] = val[1] = __pmu_read_single(fd[0], &ts[0]);
slept[1] = measured_usleep(batch_duration_ns / 1000);
@@ -1341,7 +1357,9 @@ static void cpu_hotplug(int gem_fd)
static int target_num_interrupts(int i915)
{
- return min(gem_submission_measure(i915, NULL, I915_EXEC_DEFAULT), 30);
+ const intel_ctx_cfg_t cfg = intel_ctx_cfg_all_physical(i915);
+
+ return min(gem_submission_measure(i915, &cfg, I915_EXEC_DEFAULT), 30);
}
static void
@@ -1706,7 +1724,8 @@ test_rc6(int gem_fd, unsigned int flags)
}
static void
-test_enable_race(int gem_fd, const struct intel_execution_engine2 *e)
+test_enable_race(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
uint64_t config = I915_PMU_ENGINE_BUSY(e->class, e->instance);
struct igt_helper_process engine_load = { };
@@ -1724,6 +1743,7 @@ test_enable_race(int gem_fd, const struct intel_execution_engine2 *e)
eb.buffer_count = 1;
eb.buffers_ptr = to_user_pointer(&obj);
eb.flags = e->flags;
+ eb.rsvd1 = ctx->id;
/*
* This test is probabilistic so run in a few times to increase the
@@ -1770,7 +1790,8 @@ test_enable_race(int gem_fd, const struct intel_execution_engine2 *e)
__assert_within(x, ref, tolerance, tolerance)
static void
-accuracy(int gem_fd, const struct intel_execution_engine2 *e,
+accuracy(int gem_fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
unsigned long target_busy_pct,
unsigned long target_iters)
{
@@ -1820,7 +1841,7 @@ accuracy(int gem_fd, const struct intel_execution_engine2 *e,
igt_spin_t *spin;
/* Allocate our spin batch and idle it. */
- spin = igt_spin_new(gem_fd, .engine = e->flags);
+ spin = igt_spin_new(gem_fd, .ctx = ctx, .engine = e->flags);
igt_spin_end(spin);
gem_sync(gem_fd, spin->handle);
@@ -1979,6 +2000,7 @@ static int unload_i915(void)
static void test_unload(unsigned int num_engines)
{
igt_fork(child, 1) {
+ intel_ctx_cfg_t cfg;
const struct intel_execution_engine2 *e;
int fd[4 + num_engines * 3], i;
uint64_t *buf;
@@ -2004,7 +2026,8 @@ static void test_unload(unsigned int num_engines)
if (fd[count] != -1)
count++;
- __for_each_physical_engine(i915, e) {
+ cfg = intel_ctx_cfg_all_physical(i915);
+ for_each_ctx_cfg_engine(i915, &cfg, e) {
fd[count] = perf_i915_open_group(i915,
I915_PMU_ENGINE_BUSY(e->class, e->instance),
fd[count - 1]);
@@ -2052,12 +2075,12 @@ static void test_unload(unsigned int num_engines)
igt_assert_eq(unload_i915(), 0);
}
-#define test_each_engine(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_engine(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
igt_dynamic_f("%s", e->name)
-#define test_each_rcs(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_rcs(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
for_each_if((e)->class == I915_ENGINE_CLASS_RENDER) \
igt_dynamic_f("%s", e->name)
@@ -2065,6 +2088,7 @@ igt_main
{
const struct intel_execution_engine2 *e;
unsigned int num_engines = 0;
+ const intel_ctx_t *ctx = NULL;
int fd = -1;
/**
@@ -2079,7 +2103,9 @@ igt_main
igt_require_gem(fd);
igt_require(i915_perf_type_id(fd) > 0);
- __for_each_physical_engine(fd, e)
+ ctx = intel_ctx_create_all_physical(fd);
+
+ for_each_ctx_engine(fd, ctx, e)
num_engines++;
igt_require(num_engines);
}
@@ -2107,48 +2133,48 @@ igt_main
* Test that a single engine metric can be initialized or it
* is correctly rejected.
*/
- test_each_engine("init-busy", fd, e)
- init(fd, e, I915_SAMPLE_BUSY);
+ test_each_engine("init-busy", fd, ctx, e)
+ init(fd, ctx, e, I915_SAMPLE_BUSY);
- test_each_engine("init-wait", fd, e)
- init(fd, e, I915_SAMPLE_WAIT);
+ test_each_engine("init-wait", fd, ctx, e)
+ init(fd, ctx, e, I915_SAMPLE_WAIT);
- test_each_engine("init-sema", fd, e)
- init(fd, e, I915_SAMPLE_SEMA);
+ test_each_engine("init-sema", fd, ctx, e)
+ init(fd, ctx, e, I915_SAMPLE_SEMA);
/**
* Test that engines show no load when idle.
*/
- test_each_engine("idle", fd, e)
- single(fd, e, 0);
+ test_each_engine("idle", fd, ctx, e)
+ single(fd, ctx, e, 0);
/**
* Test that a single engine reports load correctly.
*/
- test_each_engine("busy", fd, e)
- single(fd, e, TEST_BUSY);
- test_each_engine("busy-idle", fd, e)
- single(fd, e, TEST_BUSY | TEST_TRAILING_IDLE);
+ test_each_engine("busy", fd, ctx, e)
+ single(fd, ctx, e, TEST_BUSY);
+ test_each_engine("busy-idle", fd, ctx, e)
+ single(fd, ctx, e, TEST_BUSY | TEST_TRAILING_IDLE);
/**
* Test that when one engine is loaded other report no
* load.
*/
- test_each_engine("busy-check-all", fd, e)
- busy_check_all(fd, e, num_engines, TEST_BUSY);
- test_each_engine("busy-idle-check-all", fd, e)
- busy_check_all(fd, e, num_engines,
+ test_each_engine("busy-check-all", fd, ctx, e)
+ busy_check_all(fd, ctx, e, num_engines, TEST_BUSY);
+ test_each_engine("busy-idle-check-all", fd, ctx, e)
+ busy_check_all(fd, ctx, e, num_engines,
TEST_BUSY | TEST_TRAILING_IDLE);
/**
* Test that when all except one engine are loaded all
* loads are correctly reported.
*/
- test_each_engine("most-busy-check-all", fd, e)
- most_busy_check_all(fd, e, num_engines,
+ test_each_engine("most-busy-check-all", fd, ctx, e)
+ most_busy_check_all(fd, ctx, e, num_engines,
TEST_BUSY);
- test_each_engine("most-busy-idle-check-all", fd, e)
- most_busy_check_all(fd, e, num_engines,
+ test_each_engine("most-busy-idle-check-all", fd, ctx, e)
+ most_busy_check_all(fd, ctx, e, num_engines,
TEST_BUSY |
TEST_TRAILING_IDLE);
@@ -2156,40 +2182,40 @@ igt_main
* Test that semphore counters report no activity on
* idle or busy engines.
*/
- test_each_engine("idle-no-semaphores", fd, e)
- no_sema(fd, e, 0);
+ test_each_engine("idle-no-semaphores", fd, ctx, e)
+ no_sema(fd, ctx, e, 0);
- test_each_engine("busy-no-semaphores", fd, e)
- no_sema(fd, e, TEST_BUSY);
+ test_each_engine("busy-no-semaphores", fd, ctx, e)
+ no_sema(fd, ctx, e, TEST_BUSY);
- test_each_engine("busy-idle-no-semaphores", fd, e)
- no_sema(fd, e, TEST_BUSY | TEST_TRAILING_IDLE);
+ test_each_engine("busy-idle-no-semaphores", fd, ctx, e)
+ no_sema(fd, ctx, e, TEST_BUSY | TEST_TRAILING_IDLE);
/**
* Test that semaphore waits are correctly reported.
*/
- test_each_engine("semaphore-wait", fd, e)
- sema_wait(fd, e, TEST_BUSY);
+ test_each_engine("semaphore-wait", fd, ctx, e)
+ sema_wait(fd, ctx, e, TEST_BUSY);
- test_each_engine("semaphore-wait-idle", fd, e)
- sema_wait(fd, e, TEST_BUSY | TEST_TRAILING_IDLE);
+ test_each_engine("semaphore-wait-idle", fd, ctx, e)
+ sema_wait(fd, ctx, e, TEST_BUSY | TEST_TRAILING_IDLE);
- test_each_engine("semaphore-busy", fd, e)
- sema_busy(fd, e, 0);
+ test_each_engine("semaphore-busy", fd, ctx, e)
+ sema_busy(fd, ctx, e, 0);
/**
* Check that two perf clients do not influence each
* others observations.
*/
- test_each_engine("multi-client", fd, e)
- multi_client(fd, e);
+ test_each_engine("multi-client", fd, ctx, e)
+ multi_client(fd, ctx, e);
/**
* Check that reported usage is correct when PMU is
* enabled after the batch is running.
*/
- test_each_engine("busy-start", fd, e)
- busy_start(fd, e);
+ test_each_engine("busy-start", fd, ctx, e)
+ busy_start(fd, ctx, e);
/**
* Check that reported usage is correct when PMU is
@@ -2198,16 +2224,16 @@ igt_main
igt_subtest_group {
igt_fixture gem_require_contexts(fd);
- test_each_engine("busy-double-start", fd, e)
- busy_double_start(fd, e);
+ test_each_engine("busy-double-start", fd, ctx, e)
+ busy_double_start(fd, ctx, e);
}
/**
* Check that the PMU can be safely enabled in face of
* interrupt-heavy engine load.
*/
- test_each_engine("enable-race", fd, e)
- test_enable_race(fd, e);
+ test_each_engine("enable-race", fd, ctx, e)
+ test_enable_race(fd, ctx, e);
igt_subtest_group {
const unsigned int pct[] = { 2, 50, 98 };
@@ -2217,18 +2243,18 @@ igt_main
*/
for (unsigned int i = 0; i < ARRAY_SIZE(pct); i++) {
igt_subtest_with_dynamic_f("busy-accuracy-%u", pct[i]) {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name)
- accuracy(fd, e, pct[i], 10);
+ accuracy(fd, ctx, e, pct[i], 10);
}
}
}
}
- test_each_engine("busy-hang", fd, e) {
+ test_each_engine("busy-hang", fd, ctx, e) {
igt_hang_t hang = igt_allow_hang(fd, 0, 0);
- single(fd, e, TEST_BUSY | FLAG_HANG);
+ single(fd, ctx, e, TEST_BUSY | FLAG_HANG);
igt_disallow_hang(fd, hang);
}
@@ -2236,17 +2262,18 @@ igt_main
/**
* Test that event waits are correctly reported.
*/
- test_each_rcs("event-wait", fd, e)
- event_wait(fd, e);
+ test_each_rcs("event-wait", fd, ctx, e)
+ event_wait(fd, ctx, e);
/**
* Test that when all engines are loaded all loads are
* correctly reported.
*/
igt_subtest("all-busy-check-all")
- all_busy_check_all(fd, num_engines, TEST_BUSY);
+ all_busy_check_all(fd, ctx, num_engines,
+ TEST_BUSY);
igt_subtest("all-busy-idle-check-all")
- all_busy_check_all(fd, num_engines,
+ all_busy_check_all(fd, ctx, num_engines,
TEST_BUSY | TEST_TRAILING_IDLE);
/**
@@ -2291,32 +2318,36 @@ igt_main
* Test GT wakeref tracking (similar to RC0, opposite of RC6)
*/
igt_subtest("gt-awake")
- test_awake(fd);
+ test_awake(fd, ctx);
/**
* Check render nodes are counted.
*/
igt_subtest_group {
int render_fd = -1;
+ const intel_ctx_t *render_ctx = NULL;
igt_fixture {
render_fd = __drm_open_driver_render(DRIVER_INTEL);
igt_require_gem(render_fd);
+ render_ctx = intel_ctx_create_all_physical(render_fd);
gem_quiescent_gpu(fd);
}
- test_each_engine("render-node-busy", render_fd, e)
- single(render_fd, e, TEST_BUSY);
- test_each_engine("render-node-busy-idle", render_fd, e)
- single(render_fd, e, TEST_BUSY | TEST_TRAILING_IDLE);
+ test_each_engine("render-node-busy", render_fd, render_ctx, e)
+ single(render_fd, render_ctx, e, TEST_BUSY);
+ test_each_engine("render-node-busy-idle", render_fd, render_ctx, e)
+ single(render_fd, render_ctx, e, TEST_BUSY | TEST_TRAILING_IDLE);
igt_fixture {
+ intel_ctx_destroy(render_fd, render_ctx);
close(render_fd);
}
}
igt_fixture {
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 06/81] tests/i915/gem_exec_nop: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (4 preceding siblings ...)
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 05/81] tests/i915/perf_pmu: " Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 07/81] tests/i915/gem_exec_reloc: Convert to intel_ctx_t (v3) Jason Ekstrand
` (77 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_nop.c | 156 ++++++++++++++++++++++----------------
1 file changed, 92 insertions(+), 64 deletions(-)
diff --git a/tests/i915/gem_exec_nop.c b/tests/i915/gem_exec_nop.c
index e75ccb566..09a0c29e7 100644
--- a/tests/i915/gem_exec_nop.c
+++ b/tests/i915/gem_exec_nop.c
@@ -46,6 +46,7 @@
#include "igt_device.h"
#include "igt_rand.h"
#include "igt_sysfs.h"
+#include "intel_ctx.h"
#define ENGINE_FLAGS (I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK)
@@ -63,7 +64,7 @@ static double elapsed(const struct timespec *start, const struct timespec *end)
(end->tv_nsec - start->tv_nsec)*1e-9);
}
-static double nop_on_ring(int fd, uint32_t handle,
+static double nop_on_ring(int fd, uint32_t handle, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
int timeout_ms,
unsigned long *out)
@@ -82,6 +83,7 @@ static double nop_on_ring(int fd, uint32_t handle,
execbuf.flags = e->flags;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
+ execbuf.rsvd1 = ctx->id;
if (__gem_execbuf(fd, &execbuf)) {
execbuf.flags = e->flags;
gem_execbuf(fd, &execbuf);
@@ -102,7 +104,8 @@ static double nop_on_ring(int fd, uint32_t handle,
return elapsed(&start, &now);
}
-static void poll_ring(int fd, const struct intel_execution_engine2 *e,
+static void poll_ring(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
int timeout)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
@@ -186,6 +189,7 @@ static void poll_ring(int fd, const struct intel_execution_engine2 *e,
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
execbuf.flags = e->flags | flags;
+ execbuf.rsvd1 = ctx->id;
cycles = 0;
do {
@@ -213,7 +217,8 @@ static void poll_ring(int fd, const struct intel_execution_engine2 *e,
gem_close(fd, obj.handle);
}
-static void poll_sequential(int fd, const char *name, int timeout)
+static void poll_sequential(int fd, const intel_ctx_t *ctx,
+ const char *name, int timeout)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
const struct intel_execution_engine2 *e;
@@ -233,7 +238,7 @@ static void poll_sequential(int fd, const char *name, int timeout)
flags |= I915_EXEC_SECURE;
nengine = 0;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (!gem_class_can_store_dword(fd, e->class) ||
!gem_class_has_mutable_submission(fd, e->class))
continue;
@@ -313,6 +318,7 @@ static void poll_sequential(int fd, const char *name, int timeout)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = ARRAY_SIZE(obj);
+ execbuf.rsvd1 = ctx->id;
cycles = 0;
do {
@@ -343,19 +349,19 @@ static void poll_sequential(int fd, const char *name, int timeout)
gem_close(fd, obj[0].handle);
}
-static void single(int fd, uint32_t handle,
+static void single(int fd, uint32_t handle, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e)
{
double time;
unsigned long count;
- time = nop_on_ring(fd, handle, e, 20000, &count);
+ time = nop_on_ring(fd, handle, ctx, e, 20000, &count);
igt_info("%s: %'lu cycles: %.3fus\n",
e->name, count, time*1e6 / count);
}
static double
-stable_nop_on_ring(int fd, uint32_t handle,
+stable_nop_on_ring(int fd, uint32_t handle, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
int timeout_ms,
int reps)
@@ -372,7 +378,7 @@ stable_nop_on_ring(int fd, uint32_t handle,
unsigned long count;
double time;
- time = nop_on_ring(fd, handle, e, timeout_ms, &count);
+ time = nop_on_ring(fd, handle, ctx, e, timeout_ms, &count);
igt_stats_push_float(&s, time / count);
}
@@ -388,7 +394,7 @@ stable_nop_on_ring(int fd, uint32_t handle,
"'%s' != '%s' (%f not within %f%% tolerance of %f)\n",\
#x, #ref, x, tolerance * 100.0, ref)
-static void headless(int fd, uint32_t handle,
+static void headless(int fd, uint32_t handle, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e)
{
unsigned int nr_connected = 0;
@@ -412,11 +418,11 @@ static void headless(int fd, uint32_t handle,
/* set graphics mode to prevent blanking */
kmstest_set_vt_graphics_mode();
- nop_on_ring(fd, handle, e, 10, &count);
+ nop_on_ring(fd, handle, ctx, e, 10, &count);
igt_require_f(count > 100, "submillisecond precision required\n");
/* benchmark nops */
- n_display = stable_nop_on_ring(fd, handle, e, 500, 5);
+ n_display = stable_nop_on_ring(fd, handle, ctx, e, 500, 5);
igt_info("With one display connected: %.2fus\n",
n_display * 1e6);
@@ -424,7 +430,7 @@ static void headless(int fd, uint32_t handle,
kmstest_unset_all_crtcs(fd, res);
/* benchmark nops again */
- n_headless = stable_nop_on_ring(fd, handle, e, 500, 5);
+ n_headless = stable_nop_on_ring(fd, handle, ctx, e, 500, 5);
igt_info("Without a display connected (headless): %.2fus\n",
n_headless * 1e6);
@@ -432,7 +438,8 @@ static void headless(int fd, uint32_t handle,
assert_within_epsilon(n_headless, n_display, 0.1f);
}
-static void parallel(int fd, uint32_t handle, int timeout)
+static void parallel(int fd, uint32_t handle,
+ const intel_ctx_t *ctx, int timeout)
{
const struct intel_execution_engine2 *e;
struct drm_i915_gem_execbuffer2 execbuf;
@@ -446,11 +453,11 @@ static void parallel(int fd, uint32_t handle, int timeout)
sum = 0;
nengine = 0;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
engines[nengine] = e->flags;
names[nengine++] = strdup(e->name);
- time = nop_on_ring(fd, handle, e, 250, &count) / count;
+ time = nop_on_ring(fd, handle, ctx, e, 250, &count) / count;
sum += time;
igt_debug("%s: %.3fus\n", e->name, 1e6*time);
}
@@ -465,6 +472,7 @@ static void parallel(int fd, uint32_t handle, int timeout)
execbuf.buffer_count = 1;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
+ execbuf.rsvd1 = ctx->id;
if (__gem_execbuf(fd, &execbuf)) {
execbuf.flags = 0;
gem_execbuf(fd, &execbuf);
@@ -495,7 +503,8 @@ static void parallel(int fd, uint32_t handle, int timeout)
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
-static void independent(int fd, uint32_t handle, int timeout)
+static void independent(int fd, uint32_t handle,
+ const intel_ctx_t *ctx, int timeout)
{
const struct intel_execution_engine2 *e;
struct drm_i915_gem_execbuffer2 execbuf;
@@ -508,11 +517,11 @@ static void independent(int fd, uint32_t handle, int timeout)
sum = 0;
nengine = 0;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
engines[nengine] = e->flags;
names[nengine++] = strdup(e->name);
- time = nop_on_ring(fd, handle, e, 250, &count) / count;
+ time = nop_on_ring(fd, handle, ctx, e, 250, &count) / count;
sum += time;
igt_debug("%s: %.3fus\n", e->name, 1e6*time);
}
@@ -527,6 +536,7 @@ static void independent(int fd, uint32_t handle, int timeout)
execbuf.buffer_count = 1;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
+ execbuf.rsvd1 = ctx->id;
if (__gem_execbuf(fd, &execbuf)) {
execbuf.flags = 0;
gem_execbuf(fd, &execbuf);
@@ -563,7 +573,7 @@ static void independent(int fd, uint32_t handle, int timeout)
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
-static void multiple(int fd,
+static void multiple(int fd, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
int timeout)
{
@@ -582,6 +592,7 @@ static void multiple(int fd,
execbuf.flags = e->flags;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
+ execbuf.rsvd1 = ctx->id;
if (__gem_execbuf(fd, &execbuf)) {
execbuf.flags = e->flags;
gem_execbuf(fd, &execbuf);
@@ -593,9 +604,11 @@ static void multiple(int fd,
unsigned long count;
double time;
int i915;
+ const intel_ctx_t *child_ctx;
i915 = gem_reopen_driver(fd);
- gem_context_copy_engines(fd, 0, i915, 0);
+ child_ctx = intel_ctx_create(i915, &ctx->cfg);
+ execbuf.rsvd1 = child_ctx->id;
obj.handle = gem_create(i915, 4096);
gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
@@ -610,6 +623,7 @@ static void multiple(int fd,
} while (elapsed(&start, &now) < timeout);
time = elapsed(&start, &now) / count;
igt_info("%d: %ld cycles, %.3fus\n", child, count, 1e6*time);
+ intel_ctx_destroy(i915, child_ctx);
}
igt_waitchildren();
@@ -618,7 +632,8 @@ static void multiple(int fd,
gem_close(fd, obj.handle);
}
-static void series(int fd, uint32_t handle, int timeout)
+static void series(int fd, uint32_t handle,
+ const intel_ctx_t *ctx, int timeout)
{
const struct intel_execution_engine2 *e;
struct drm_i915_gem_execbuffer2 execbuf;
@@ -631,8 +646,8 @@ static void series(int fd, uint32_t handle, int timeout)
const char *name;
nengine = 0;
- __for_each_physical_engine(fd, e) {
- time = nop_on_ring(fd, handle, e, 250, &count) / count;
+ for_each_ctx_engine(fd, ctx, e) {
+ time = nop_on_ring(fd, handle, ctx, e, 250, &count) / count;
if (time > max) {
name = e->name;
max = time;
@@ -654,6 +669,7 @@ static void series(int fd, uint32_t handle, int timeout)
execbuf.buffer_count = 1;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
+ execbuf.rsvd1 = ctx->id;
if (__gem_execbuf(fd, &execbuf)) {
execbuf.flags = 0;
gem_execbuf(fd, &execbuf);
@@ -689,9 +705,11 @@ static void xchg(void *array, unsigned i, unsigned j)
u[j] = tmp;
}
-static void sequential(int fd, uint32_t handle, unsigned flags, int timeout)
+static void sequential(int fd, uint32_t handle,
+ const intel_ctx_t *ctx, unsigned flags, int timeout)
{
const int ncpus = flags & FORKED ? sysconf(_SC_NPROCESSORS_ONLN) : 1;
+ const intel_ctx_t *tmp_ctx = NULL, *child_ctx = NULL;
const struct intel_execution_engine2 *e;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[2];
@@ -708,10 +726,10 @@ static void sequential(int fd, uint32_t handle, unsigned flags, int timeout)
nengine = 0;
sum = 0;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
unsigned long count;
- time = nop_on_ring(fd, handle, e, 250, &count) / count;
+ time = nop_on_ring(fd, handle, ctx, e, 250, &count) / count;
sum += time;
igt_debug("%s: %.3fus\n", e->name, 1e6*time);
@@ -735,7 +753,8 @@ static void sequential(int fd, uint32_t handle, unsigned flags, int timeout)
if (flags & CONTEXT) {
gem_require_contexts(fd);
- execbuf.rsvd1 = gem_context_clone_with_engines(fd, 0);
+ tmp_ctx = intel_ctx_create(fd, &ctx->cfg);
+ execbuf.rsvd1 = tmp_ctx->id;
}
for (n = 0; n < nengine; n++) {
@@ -755,7 +774,8 @@ static void sequential(int fd, uint32_t handle, unsigned flags, int timeout)
if (flags & CONTEXT) {
gem_require_contexts(fd);
- execbuf.rsvd1 = gem_context_clone_with_engines(fd, 0);
+ child_ctx = intel_ctx_create(fd, &ctx->cfg);
+ execbuf.rsvd1 = child_ctx->id;
}
hars_petruska_f54_1_random_perturb(child);
@@ -778,7 +798,7 @@ static void sequential(int fd, uint32_t handle, unsigned flags, int timeout)
results[child] = elapsed(&start, &now) / count;
if (flags & CONTEXT)
- gem_context_destroy(fd, execbuf.rsvd1);
+ intel_ctx_destroy(fd, child_ctx);
gem_close(fd, obj[0].handle);
}
@@ -794,7 +814,7 @@ static void sequential(int fd, uint32_t handle, unsigned flags, int timeout)
nengine, ncpus, 1e6*results[ncpus], 1e6*sum*ncpus);
if (flags & CONTEXT)
- gem_context_destroy(fd, execbuf.rsvd1);
+ intel_ctx_destroy(fd, tmp_ctx);
gem_close(fd, obj[0].handle);
munmap(results, 4096);
@@ -811,6 +831,7 @@ static bool fence_wait(int fence)
}
static void fence_signal(int fd, uint32_t handle,
+ const intel_ctx_t *ctx,
const struct intel_execution_engine2 *ring_id,
const char *ring_name, int timeout)
{
@@ -828,7 +849,7 @@ static void fence_signal(int fd, uint32_t handle,
nengine = 0;
if (!ring_id) {
- __for_each_physical_engine(fd, __e)
+ for_each_ctx_engine(fd, ctx, __e)
engines[nengine++] = __e->flags;
} else {
engines[nengine++] = ring_id->flags;
@@ -846,6 +867,7 @@ static void fence_signal(int fd, uint32_t handle,
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
execbuf.flags = I915_EXEC_FENCE_OUT;
+ execbuf.rsvd1 = ctx->id;
n = 0;
count = 0;
@@ -886,20 +908,21 @@ static void fence_signal(int fd, uint32_t handle,
}
static void preempt(int fd, uint32_t handle,
+ const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj;
struct timespec start, now;
unsigned long count;
- uint32_t ctx[2];
+ const intel_ctx_t *tmp_ctx[2];
igt_spin_t *spin;
- ctx[0] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[0], MIN_PRIO);
+ tmp_ctx[0] = intel_ctx_create(fd, &ctx->cfg);
+ gem_context_set_priority(fd, tmp_ctx[0]->id, MIN_PRIO);
- ctx[1] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[1], MAX_PRIO);
+ tmp_ctx[1] = intel_ctx_create(fd, &ctx->cfg);
+ gem_context_set_priority(fd, tmp_ctx[1]->id, MAX_PRIO);
memset(&obj, 0, sizeof(obj));
obj.handle = handle;
@@ -910,15 +933,16 @@ static void preempt(int fd, uint32_t handle,
execbuf.flags = e->flags;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
+ execbuf.rsvd1 = ctx->id;
if (__gem_execbuf(fd, &execbuf)) {
execbuf.flags = e->flags;
gem_execbuf(fd, &execbuf);
}
- execbuf.rsvd1 = ctx[1];
+ execbuf.rsvd1 = tmp_ctx[1]->id;
intel_detect_and_clear_missed_interrupts(fd);
count = 0;
- spin = __igt_spin_new(fd, .ctx_id = ctx[0], .engine = e->flags);
+ spin = __igt_spin_new(fd, .ctx = tmp_ctx[0], .engine = e->flags);
clock_gettime(CLOCK_MONOTONIC, &start);
do {
gem_execbuf(fd, &execbuf);
@@ -928,8 +952,8 @@ static void preempt(int fd, uint32_t handle,
igt_spin_free(fd, spin);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
- gem_context_destroy(fd, ctx[1]);
- gem_context_destroy(fd, ctx[0]);
+ intel_ctx_destroy(fd, tmp_ctx[1]);
+ intel_ctx_destroy(fd, tmp_ctx[0]);
igt_info("%s: %'lu cycles: %.3fus\n",
e->name, count, elapsed(&start, &now)*1e6 / count);
@@ -938,6 +962,7 @@ static void preempt(int fd, uint32_t handle,
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
uint32_t handle = 0;
int device = -1;
@@ -949,6 +974,8 @@ igt_main
gem_submission_print_method(device);
gem_scheduler_print_capability(device);
+ ctx = intel_ctx_create_all_physical(device);
+
handle = gem_create(device, 4096);
gem_write(device, handle, 0, &bbe, sizeof(bbe));
@@ -956,57 +983,57 @@ igt_main
}
igt_subtest("basic-series")
- series(device, handle, 2);
+ series(device, handle, ctx, 2);
igt_subtest("basic-parallel")
- parallel(device, handle, 2);
+ parallel(device, handle, ctx, 2);
igt_subtest("basic-sequential")
- sequential(device, handle, 0, 2);
+ sequential(device, handle, ctx, 0, 2);
igt_subtest_with_dynamic("single") {
- __for_each_physical_engine(device, e) {
+ for_each_ctx_engine(device, ctx, e) {
igt_dynamic_f("%s", e->name)
- single(device, handle, e);
+ single(device, handle, ctx, e);
}
}
igt_subtest_with_dynamic("signal") {
- __for_each_physical_engine(device, e) {
+ for_each_ctx_engine(device, ctx, e) {
igt_dynamic_f("%s", e->name)
- fence_signal(device, handle, e,
- e->name, 2);
+ fence_signal(device, handle, ctx,
+ e, e->name, 2);
}
}
igt_subtest("signal-all")
/* NULL value means all engines */
- fence_signal(device, handle, NULL, "all", 20);
+ fence_signal(device, handle, ctx, NULL, "all", 20);
igt_subtest("series")
- series(device, handle, 20);
+ series(device, handle, ctx, 20);
igt_subtest("parallel")
- parallel(device, handle, 20);
+ parallel(device, handle, ctx, 20);
igt_subtest("independent")
- independent(device, handle, 20);
+ independent(device, handle, ctx, 20);
igt_subtest_with_dynamic("multiple") {
- __for_each_physical_engine(device, e) {
+ for_each_ctx_engine(device, ctx, e) {
igt_dynamic_f("%s", e->name)
- multiple(device, e, 20);
+ multiple(device, ctx, e, 20);
}
}
igt_subtest("sequential")
- sequential(device, handle, 0, 20);
+ sequential(device, handle, ctx, 0, 20);
igt_subtest("forked-sequential")
- sequential(device, handle, FORKED, 20);
+ sequential(device, handle, ctx, FORKED, 20);
igt_subtest("context-sequential")
- sequential(device, handle, FORKED | CONTEXT, 20);
+ sequential(device, handle, ctx, FORKED | CONTEXT, 20);
igt_subtest_group {
igt_fixture {
@@ -1015,9 +1042,9 @@ igt_main
igt_require(gem_scheduler_has_preemption(device));
}
igt_subtest_with_dynamic("preempt") {
- __for_each_physical_engine(device, e) {
+ for_each_ctx_engine(device, ctx, e) {
igt_dynamic_f("%s", e->name)
- preempt(device, handle, e);
+ preempt(device, handle, ctx, e);
}
}
}
@@ -1028,29 +1055,30 @@ igt_main
}
igt_subtest_with_dynamic("poll") {
- __for_each_physical_engine(device, e) {
+ for_each_ctx_engine(device, ctx, e) {
/* Requires master for STORE_DWORD on gen4/5 */
igt_dynamic_f("%s", e->name)
- poll_ring(device, e, 20);
+ poll_ring(device, ctx, e, 20);
}
}
igt_subtest_with_dynamic("headless") {
- __for_each_physical_engine(device, e) {
+ for_each_ctx_engine(device, ctx, e) {
igt_dynamic_f("%s", e->name)
/* Requires master for changing display modes */
- headless(device, handle, e);
+ headless(device, handle, ctx, e);
}
}
igt_subtest("poll-sequential")
- poll_sequential(device, "Sequential", 20);
+ poll_sequential(device, ctx, "Sequential", 20);
}
igt_fixture {
igt_stop_hang_detector();
gem_close(device, handle);
+ intel_ctx_destroy(device, ctx);
close(device);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 07/81] tests/i915/gem_exec_reloc: Convert to intel_ctx_t (v3)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (5 preceding siblings ...)
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 06/81] tests/i915/gem_exec_nop: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 08/81] tests/i915/gem_busy: " Jason Ekstrand
` (76 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Iterate over tmp_ctx when that's what we're using
- Drop the zero-init of ctx
v3 (Ashutosh Dixit):
- Simplify context creation
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_reloc.c | 98 ++++++++++++++++++++++---------------
1 file changed, 58 insertions(+), 40 deletions(-)
diff --git a/tests/i915/gem_exec_reloc.c b/tests/i915/gem_exec_reloc.c
index c79400fbf..d54473341 100644
--- a/tests/i915/gem_exec_reloc.c
+++ b/tests/i915/gem_exec_reloc.c
@@ -267,7 +267,7 @@ static void check_bo(int fd, uint32_t handle)
munmap(map, 4096);
}
-static void active(int fd, unsigned engine)
+static void active(int fd, const intel_ctx_t *ctx, unsigned engine)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_relocation_entry reloc;
@@ -281,7 +281,7 @@ static void active(int fd, unsigned engine)
if (engine == ALL_ENGINES) {
const struct intel_execution_engine2 *e;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (gem_class_can_store_dword(fd, e->class))
engines[nengine++] = e->flags;
}
@@ -309,6 +309,7 @@ static void active(int fd, unsigned engine)
execbuf.buffer_count = 2;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
for (pass = 0; pass < 1024; pass++) {
uint32_t batch[16];
@@ -368,7 +369,8 @@ static uint64_t many_relocs(unsigned long count, unsigned long *out)
return to_user_pointer(reloc);
}
-static void __many_active(int i915, unsigned engine, unsigned long count)
+static void __many_active(int i915, const intel_ctx_t *ctx, unsigned engine,
+ unsigned long count)
{
unsigned long reloc_sz;
struct drm_i915_gem_exec_object2 obj[2] = {{
@@ -380,10 +382,12 @@ static void __many_active(int i915, unsigned engine, unsigned long count)
.buffers_ptr = to_user_pointer(obj),
.buffer_count = ARRAY_SIZE(obj),
.flags = engine | I915_EXEC_HANDLE_LUT,
+ .rsvd1 = ctx->id,
};
igt_spin_t *spin;
spin = __igt_spin_new(i915,
+ .ctx = ctx,
.engine = engine,
.dependency = obj[0].handle,
.flags = (IGT_SPIN_FENCE_OUT |
@@ -406,7 +410,7 @@ static void __many_active(int i915, unsigned engine, unsigned long count)
gem_close(i915, obj[0].handle);
}
-static void many_active(int i915, unsigned engine)
+static void many_active(int i915, const intel_ctx_t *ctx, unsigned engine)
{
const uint64_t max = 2048;
unsigned long count = 256;
@@ -419,7 +423,7 @@ static void many_active(int i915, unsigned engine)
break;
igt_debug("Testing count:%lu\n", count);
- __many_active(i915, engine, count);
+ __many_active(i915, ctx, engine, count);
count <<= 1;
if (count >= max)
@@ -427,7 +431,8 @@ static void many_active(int i915, unsigned engine)
}
}
-static void __wide_active(int i915, unsigned engine, unsigned long count)
+static void __wide_active(int i915, const intel_ctx_t *ctx, unsigned engine,
+ unsigned long count)
{
struct drm_i915_gem_relocation_entry *reloc =
calloc(count, sizeof(*reloc));
@@ -437,6 +442,7 @@ static void __wide_active(int i915, unsigned engine, unsigned long count)
.buffers_ptr = to_user_pointer(obj),
.buffer_count = count + 1,
.flags = engine | I915_EXEC_HANDLE_LUT,
+ .rsvd1 = ctx->id,
};
igt_spin_t *spin;
@@ -447,6 +453,7 @@ static void __wide_active(int i915, unsigned engine, unsigned long count)
}
spin = __igt_spin_new(i915,
+ .ctx = ctx,
.engine = engine,
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_NO_PREEMPTION));
@@ -476,7 +483,7 @@ static void __wide_active(int i915, unsigned engine, unsigned long count)
free(reloc);
}
-static void wide_active(int i915, unsigned engine)
+static void wide_active(int i915, const intel_ctx_t *ctx, unsigned engine)
{
const uint64_t max = gem_aperture_size(i915) / 4096 / 2;
unsigned long count = 256;
@@ -489,7 +496,7 @@ static void wide_active(int i915, unsigned engine)
break;
igt_debug("Testing count:%lu\n", count);
- __wide_active(i915, engine, count);
+ __wide_active(i915, ctx, engine, count);
count <<= 1;
if (count >= max)
@@ -502,7 +509,7 @@ static unsigned int offset_in_page(void *addr)
return (uintptr_t)addr & 4095;
}
-static void active_spin(int fd, unsigned engine)
+static void active_spin(int fd, const intel_ctx_t *ctx, unsigned engine)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_relocation_entry reloc;
@@ -511,6 +518,7 @@ static void active_spin(int fd, unsigned engine)
igt_spin_t *spin;
spin = igt_spin_new(fd,
+ .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_NO_PREEMPTION);
@@ -530,6 +538,7 @@ static void active_spin(int fd, unsigned engine)
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
execbuf.flags = engine;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
gem_close(fd, obj[1].handle);
@@ -542,7 +551,7 @@ static void active_spin(int fd, unsigned engine)
igt_spin_free(fd, spin);
}
-static void others_spin(int i915, unsigned engine)
+static void others_spin(int i915, const intel_ctx_t *ctx, unsigned engine)
{
struct drm_i915_gem_relocation_entry reloc = {};
struct drm_i915_gem_exec_object2 obj = {
@@ -553,18 +562,20 @@ static void others_spin(int i915, unsigned engine)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = engine,
+ .rsvd1 = ctx->id,
};
const struct intel_execution_engine2 *e;
igt_spin_t *spin = NULL;
uint64_t addr;
int fence;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (e->flags == engine)
continue;
if (!spin) {
spin = igt_spin_new(i915,
+ .ctx = ctx,
.engine = e->flags,
.flags = IGT_SPIN_FENCE_OUT);
fence = dup(spin->out_fence);
@@ -986,12 +997,13 @@ static void sighandler(int sig)
stop = 1;
}
-static void parallel_child(int i915,
+static void parallel_child(int i915, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *engine,
struct drm_i915_gem_relocation_entry *reloc,
uint32_t common)
{
- igt_spin_t *spin = __igt_spin_new(i915, .engine = engine->flags);
+ igt_spin_t *spin = __igt_spin_new(i915, .ctx = ctx,
+ .engine = engine->flags);
struct drm_i915_gem_exec_object2 reloc_target = {
.handle = gem_create(i915, 32 * 1024 * 8),
.relocation_count = 32 * 1024,
@@ -1006,6 +1018,7 @@ static void parallel_child(int i915,
.buffers_ptr = to_user_pointer(obj),
.buffer_count = ARRAY_SIZE(obj),
.flags = engine->flags | I915_EXEC_HANDLE_LUT,
+ .rsvd1 = ctx->id,
};
struct sigaction act = {
.sa_handler = sighandler,
@@ -1033,7 +1046,7 @@ static void kill_children(int sig)
signal(sig, SIG_DFL);
}
-static void parallel(int i915)
+static void parallel(int i915, const intel_ctx_t *ctx)
{
const struct intel_execution_engine2 *e;
struct drm_i915_gem_relocation_entry *reloc;
@@ -1044,16 +1057,16 @@ static void parallel(int i915)
reloc = parallel_relocs(32 * 1024, &reloc_sz);
stop = 0;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_fork(child, 1)
- parallel_child(i915, e, reloc, common);
+ parallel_child(i915, ctx, e, reloc, common);
}
sleep(2);
if (gem_scheduler_has_preemption(i915)) {
- uint32_t ctx = gem_context_clone_with_engines(i915, 0);
+ const intel_ctx_t *tmp_ctx = intel_ctx_create(i915, &ctx->cfg);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, tmp_ctx, e) {
struct drm_i915_gem_exec_object2 obj[2] = {
{ .handle = common },
{ .handle = batch },
@@ -1062,12 +1075,12 @@ static void parallel(int i915)
.buffers_ptr = to_user_pointer(obj),
.buffer_count = ARRAY_SIZE(obj),
.flags = e->flags,
- .rsvd1 = ctx,
+ .rsvd1 = tmp_ctx->id,
};
gem_execbuf(i915, &execbuf);
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, tmp_ctx);
}
gem_sync(i915, batch);
gem_close(i915, batch);
@@ -1121,7 +1134,7 @@ static void xchg_u32(void *array, unsigned i, unsigned j)
u32[j] = tmp;
}
-static void concurrent_child(int i915,
+static void concurrent_child(int i915, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
uint32_t *common, int num_common,
int in, int out)
@@ -1134,6 +1147,7 @@ static void concurrent_child(int i915,
.buffers_ptr = to_user_pointer(obj),
.buffer_count = ARRAY_SIZE(obj),
.flags = e->flags | I915_EXEC_HANDLE_LUT | (gen < 6 ? I915_EXEC_SECURE : 0),
+ .rsvd1 = ctx->id,
};
uint32_t *batch = &obj[num_common + 1].handle;
unsigned long count = 0;
@@ -1214,7 +1228,7 @@ static uint32_t create_concurrent_batch(int i915, unsigned int count)
return handle;
}
-static void concurrent(int i915, int num_common)
+static void concurrent(int i915, const intel_ctx_t *ctx, int num_common)
{
const struct intel_execution_engine2 *e;
int in[2], out[2];
@@ -1240,12 +1254,12 @@ static void concurrent(int i915, int num_common)
common[n] = gem_create(i915, 4 * 4 * CONCURRENT);
nchild = 0;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (!gem_class_can_store_dword(i915, e->class))
continue;
igt_fork(child, 1)
- concurrent_child(i915, e,
+ concurrent_child(i915, ctx, e,
common, num_common,
in[0], out[1]);
@@ -1309,6 +1323,7 @@ pin_scanout(igt_display_t *dpy, igt_output_t *output, struct igt_fb *fb)
static void scanout(int i915,
igt_display_t *dpy,
+ const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e)
{
struct drm_i915_gem_relocation_entry reloc = {};
@@ -1319,6 +1334,7 @@ static void scanout(int i915,
.buffers_ptr = to_user_pointer(obj),
.buffer_count = 2,
.flags = e->flags,
+ .rsvd1 = ctx->id,
};
igt_output_t *output;
struct igt_fb fb;
@@ -1438,6 +1454,7 @@ static void invalid_domains(int fd)
igt_main
{
+ const intel_ctx_t *ctx;
const struct intel_execution_engine2 *e;
const struct mode {
const char *name;
@@ -1481,6 +1498,7 @@ igt_main
igt_require_gem(fd);
/* Check if relocations supported by platform */
igt_require(gem_has_relocations(fd));
+ ctx = intel_ctx_create_all_physical(fd);
}
for (f = flags; f->name; f++) {
@@ -1542,52 +1560,52 @@ igt_main
igt_subtest_with_dynamic("basic-active") {
igt_dynamic("all")
- active(fd, ALL_ENGINES);
+ active(fd, ctx, ALL_ENGINES);
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (!gem_class_can_store_dword(fd, e->class))
continue;
igt_dynamic_f("%s", e->name)
- active(fd, e->flags);
+ active(fd, ctx, e->flags);
}
}
igt_subtest_with_dynamic("basic-spin") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name)
- active_spin(fd, e->flags);
+ active_spin(fd, ctx, e->flags);
}
}
igt_subtest_with_dynamic("basic-spin-others") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name)
- others_spin(fd, e->flags);
+ others_spin(fd, ctx, e->flags);
}
}
igt_subtest_with_dynamic("basic-many-active") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name)
- many_active(fd, e->flags);
+ many_active(fd, ctx, e->flags);
}
}
igt_subtest_with_dynamic("basic-wide-active") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name)
- wide_active(fd, e->flags);
+ wide_active(fd, ctx, e->flags);
}
}
igt_subtest("basic-parallel")
- parallel(fd);
+ parallel(fd, ctx);
igt_subtest("basic-concurrent0")
- concurrent(fd, 0);
+ concurrent(fd, ctx, 0);
igt_subtest("basic-concurrent16")
- concurrent(fd, 16);
+ concurrent(fd, ctx, 16);
igt_subtest("invalid-domains")
invalid_domains(fd);
@@ -1605,9 +1623,9 @@ igt_main
}
igt_subtest_with_dynamic("basic-scanout") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name)
- scanout(fd, &display, e);
+ scanout(fd, &display, ctx, e);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 08/81] tests/i915/gem_busy: Convert to intel_ctx_t (v3)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (6 preceding siblings ...)
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 07/81] tests/i915/gem_exec_reloc: Convert to intel_ctx_t (v3) Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 09/81] tests/i915/gem_ctx_isolation: Convert to intel_ctx_t (v2) Jason Ekstrand
` (75 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't null-init ctx
v3 (Jason Ekstrand):
- Pass the context config to gem_submission_measure()
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_busy.c | 80 +++++++++++++++++++++++++------------------
1 file changed, 47 insertions(+), 33 deletions(-)
diff --git a/tests/i915/gem_busy.c b/tests/i915/gem_busy.c
index 7e2b220aa..f0fca0e8a 100644
--- a/tests/i915/gem_busy.c
+++ b/tests/i915/gem_busy.c
@@ -68,6 +68,7 @@ static void __gem_busy(int fd,
static bool exec_noop(int fd,
uint32_t *handles,
+ const intel_ctx_t *ctx,
unsigned flags,
bool write)
{
@@ -85,6 +86,7 @@ static bool exec_noop(int fd,
execbuf.buffers_ptr = to_user_pointer(exec);
execbuf.buffer_count = 3;
execbuf.flags = flags;
+ execbuf.rsvd1 = ctx->id;
igt_debug("Queuing handle for %s on engine %d\n",
write ? "writing" : "reading", flags);
return __gem_execbuf(fd, &execbuf) == 0;
@@ -97,7 +99,8 @@ static bool still_busy(int fd, uint32_t handle)
return write;
}
-static void semaphore(int fd, const struct intel_execution_engine2 *e)
+static void semaphore(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
struct intel_execution_engine2 *__e;
uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -114,18 +117,19 @@ static void semaphore(int fd, const struct intel_execution_engine2 *e)
/* Create a long running batch which we can use to hog the GPU */
handle[BUSY] = gem_create(fd, 4096);
spin = igt_spin_new(fd,
+ .ctx = ctx,
.engine = e->flags,
.dependency = handle[BUSY]);
/* Queue a batch after the busy, it should block and remain "busy" */
- igt_assert(exec_noop(fd, handle, e->flags, false));
+ igt_assert(exec_noop(fd, handle, ctx, e->flags, false));
igt_assert(still_busy(fd, handle[BUSY]));
__gem_busy(fd, handle[TEST], &read, &write);
igt_assert_eq(read, 1 << e->class);
igt_assert_eq(write, 0);
/* Requeue with a write */
- igt_assert(exec_noop(fd, handle, e->flags, true));
+ igt_assert(exec_noop(fd, handle, ctx, e->flags, true));
igt_assert(still_busy(fd, handle[BUSY]));
__gem_busy(fd, handle[TEST], &read, &write);
igt_assert_eq(read, 1 << e->class);
@@ -133,8 +137,8 @@ static void semaphore(int fd, const struct intel_execution_engine2 *e)
/* Now queue it for a read across all available rings */
active = 0;
- __for_each_physical_engine(fd, __e) {
- if (exec_noop(fd, handle, __e->flags, false))
+ for_each_ctx_engine(fd, ctx, __e) {
+ if (exec_noop(fd, handle, ctx, __e->flags, false))
active |= 1 << __e->class;
}
igt_assert(still_busy(fd, handle[BUSY]));
@@ -158,7 +162,8 @@ static void semaphore(int fd, const struct intel_execution_engine2 *e)
#define PARALLEL 1
#define HANG 2
-static void one(int fd, const struct intel_execution_engine2 *e, unsigned test_flags)
+static void one(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e, unsigned test_flags)
{
uint32_t scratch = gem_create(fd, 4096);
uint32_t read[2], write[2];
@@ -168,6 +173,7 @@ static void one(int fd, const struct intel_execution_engine2 *e, unsigned test_f
int timeout;
spin = igt_spin_new(fd,
+ .ctx = ctx,
.engine = e->flags,
.dependency = scratch,
.flags = (test_flags & HANG) ? IGT_SPIN_NO_PREEMPTION : 0);
@@ -178,13 +184,13 @@ static void one(int fd, const struct intel_execution_engine2 *e, unsigned test_f
if (test_flags & PARALLEL) {
struct intel_execution_engine2 *e2;
- __for_each_physical_engine(fd, e2) {
+ for_each_ctx_engine(fd, ctx, e2) {
if (e2->class == e->class &&
e2->instance == e->instance)
continue;
igt_debug("Testing %s in parallel\n", e2->name);
- one(fd, e2, 0);
+ one(fd, ctx, e2, 0);
}
}
@@ -229,10 +235,11 @@ static void xchg_u32(void *array, unsigned i, unsigned j)
u32[j] = tmp;
}
-static void close_race(int fd)
+static void close_race(int fd, const intel_ctx_t *ctx)
{
const unsigned int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
- const unsigned int nhandles = gem_submission_measure(fd, NULL, ALL_ENGINES);
+ const unsigned int nhandles = gem_submission_measure(fd, &ctx->cfg,
+ ALL_ENGINES);
unsigned int engines[I915_EXEC_RING_MASK + 1], nengine;
const struct intel_execution_engine2 *e;
unsigned long *control;
@@ -248,7 +255,7 @@ static void close_race(int fd)
*/
nengine = 0;
- __for_each_physical_engine(fd, e)
+ for_each_ctx_engine(fd, ctx, e)
engines[nengine++] = e->flags;
igt_require(nengine);
@@ -296,6 +303,7 @@ static void close_race(int fd)
for (i = 0; i < nhandles; i++) {
spin[i] = __igt_spin_new(fd,
+ .ctx = ctx,
.engine = engines[rand() % nengine]);
handles[i] = spin[i]->handle;
}
@@ -304,6 +312,7 @@ static void close_race(int fd)
for (i = 0; i < nhandles; i++) {
igt_spin_free(fd, spin[i]);
spin[i] = __igt_spin_new(fd,
+ .ctx = ctx,
.engine = engines[rand() % nengine]);
handles[i] = spin[i]->handle;
__sync_synchronize();
@@ -355,10 +364,12 @@ static bool has_extended_busy_ioctl(int fd)
return read != 0;
}
-static void basic(int fd, const struct intel_execution_engine2 *e, unsigned flags)
+static void basic(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e, unsigned flags)
{
igt_spin_t *spin =
igt_spin_new(fd,
+ .ctx = ctx,
.engine = e->flags,
.flags = flags & HANG ?
IGT_SPIN_NO_PREEMPTION | IGT_SPIN_INVALID_CS : 0);
@@ -385,32 +396,34 @@ static void basic(int fd, const struct intel_execution_engine2 *e, unsigned flag
igt_spin_free(fd, spin);
}
-static void all(int i915)
+static void all(int i915, const intel_ctx_t *ctx)
{
const struct intel_execution_engine2 *e;
- __for_each_physical_engine(i915, e)
- igt_fork(child, 1) basic(i915, e, 0);
+ for_each_ctx_engine(i915, ctx, e)
+ igt_fork(child, 1) basic(i915, ctx, e, 0);
igt_waitchildren();
}
-#define test_each_engine(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_engine(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
igt_dynamic_f("%s", (e)->name)
-#define test_each_engine_store(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_engine_store(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
for_each_if (gem_class_can_store_dword(i915, (e)->class)) \
igt_dynamic_f("%s", (e)->name)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
fd = drm_open_driver_master(DRIVER_INTEL);
igt_require_gem(fd);
+ ctx = intel_ctx_create_all_physical(fd);
}
igt_subtest_group {
@@ -421,13 +434,13 @@ igt_main
igt_subtest_with_dynamic("busy") {
igt_dynamic("all") {
gem_quiescent_gpu(fd);
- all(fd);
+ all(fd, ctx);
}
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name) {
gem_quiescent_gpu(fd);
- basic(fd, e, 0);
+ basic(fd, ctx, e, 0);
}
}
}
@@ -438,15 +451,15 @@ igt_main
gem_require_mmap_wc(fd);
}
- test_each_engine_store("extended", fd, e) {
+ test_each_engine_store("extended", fd, ctx, e) {
gem_quiescent_gpu(fd);
- one(fd, e, 0);
+ one(fd, ctx, e, 0);
gem_quiescent_gpu(fd);
}
- test_each_engine_store("parallel", fd, e) {
+ test_each_engine_store("parallel", fd, ctx, e) {
gem_quiescent_gpu(fd);
- one(fd, e, PARALLEL);
+ one(fd, ctx, e, PARALLEL);
gem_quiescent_gpu(fd);
}
}
@@ -457,15 +470,15 @@ igt_main
igt_require(has_semaphores(fd));
}
- test_each_engine("semaphore", fd, e) {
+ test_each_engine("semaphore", fd, ctx, e) {
gem_quiescent_gpu(fd);
- semaphore(fd, e);
+ semaphore(fd, ctx, e);
gem_quiescent_gpu(fd);
}
}
igt_subtest("close-race")
- close_race(fd);
+ close_race(fd, ctx);
igt_fixture {
igt_stop_hang_detector();
@@ -479,9 +492,9 @@ igt_main
hang = igt_allow_hang(fd, 0, 0);
}
- test_each_engine("hang", fd, e) {
+ test_each_engine("hang", fd, ctx, e) {
gem_quiescent_gpu(fd);
- basic(fd, e, HANG);
+ basic(fd, ctx, e, HANG);
gem_quiescent_gpu(fd);
}
@@ -491,9 +504,9 @@ igt_main
gem_require_mmap_wc(fd);
}
- test_each_engine_store("hang-extended", fd, e) {
+ test_each_engine_store("hang-extended", fd, ctx, e) {
gem_quiescent_gpu(fd);
- one(fd, e, HANG);
+ one(fd, ctx, e, HANG);
gem_quiescent_gpu(fd);
}
}
@@ -504,6 +517,7 @@ igt_main
}
igt_fixture {
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 09/81] tests/i915/gem_ctx_isolation: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (7 preceding siblings ...)
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 08/81] tests/i915/gem_busy: " Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 10/81] tests/i915/gem_exec_async: " Jason Ekstrand
` (74 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Drop ctx from tmpl_regs
- Fix a whitespace typo
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_ctx_isolation.c | 127 +++++++++++++++++----------------
1 file changed, 66 insertions(+), 61 deletions(-)
diff --git a/tests/i915/gem_ctx_isolation.c b/tests/i915/gem_ctx_isolation.c
index ff5d3718f..24ddde0bc 100644
--- a/tests/i915/gem_ctx_isolation.c
+++ b/tests/i915/gem_ctx_isolation.c
@@ -233,7 +233,6 @@ static bool ignore_register(uint32_t offset, uint32_t mmio_base)
}
static void tmpl_regs(int fd,
- uint32_t ctx,
const struct intel_execution_engine2 *e,
uint32_t handle,
uint32_t value)
@@ -278,7 +277,7 @@ static void tmpl_regs(int fd,
}
static uint32_t read_regs(int fd,
- uint32_t ctx,
+ const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
unsigned int flags)
{
@@ -350,7 +349,7 @@ static uint32_t read_regs(int fd,
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
execbuf.flags = e->flags;
- execbuf.rsvd1 = ctx;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
gem_close(fd, obj[1].handle);
free(reloc);
@@ -359,7 +358,7 @@ static uint32_t read_regs(int fd,
}
static void write_regs(int fd,
- uint32_t ctx,
+ const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
unsigned int flags,
uint32_t value)
@@ -414,13 +413,13 @@ static void write_regs(int fd,
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
execbuf.flags = e->flags;
- execbuf.rsvd1 = ctx;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
gem_close(fd, obj.handle);
}
static void restore_regs(int fd,
- uint32_t ctx,
+ const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
unsigned int flags,
uint32_t regs)
@@ -492,7 +491,7 @@ static void restore_regs(int fd,
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
execbuf.flags = e->flags;
- execbuf.rsvd1 = ctx;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
gem_close(fd, obj[1].handle);
}
@@ -596,7 +595,7 @@ static void compare_regs(int fd, const struct intel_execution_engine2 *e,
num_errors, who);
}
-static void nonpriv(int fd,
+static void nonpriv(int fd, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
unsigned int flags)
{
@@ -621,33 +620,34 @@ static void nonpriv(int fd,
for (int v = 0; v < num_values; v++) {
igt_spin_t *spin = NULL;
- uint32_t ctx, regs[2], tmpl;
+ const intel_ctx_t *ctx;
+ uint32_t regs[2], tmpl;
- ctx = gem_context_clone_with_engines(fd, 0);
+ ctx = intel_ctx_create(fd, cfg);
tmpl = read_regs(fd, ctx, e, flags);
regs[0] = read_regs(fd, ctx, e, flags);
- tmpl_regs(fd, ctx, e, tmpl, values[v]);
+ tmpl_regs(fd, e, tmpl, values[v]);
- spin = igt_spin_new(fd, .ctx_id = ctx, .engine = e->flags);
+ spin = igt_spin_new(fd, .ctx = ctx, .engine = e->flags);
igt_debug("%s[%d]: Setting all registers to 0x%08x\n",
__func__, v, values[v]);
write_regs(fd, ctx, e, flags, values[v]);
if (flags & DIRTY2) {
- uint32_t sw = gem_context_clone_with_engines(fd, 0);
+ const intel_ctx_t *sw = intel_ctx_create(fd, &ctx->cfg);
igt_spin_t *syncpt, *dirt;
/* Explicit sync to keep the switch between write/read */
syncpt = igt_spin_new(fd,
- .ctx_id = ctx,
+ .ctx = ctx,
.engine = e->flags,
.flags = IGT_SPIN_FENCE_OUT);
dirt = igt_spin_new(fd,
- .ctx_id = sw,
+ .ctx = sw,
.engine = e->flags,
.fence = syncpt->out_fence,
.flags = (IGT_SPIN_FENCE_IN |
@@ -655,14 +655,14 @@ static void nonpriv(int fd,
igt_spin_free(fd, syncpt);
syncpt = igt_spin_new(fd,
- .ctx_id = ctx,
+ .ctx = ctx,
.engine = e->flags,
.fence = dirt->out_fence,
.flags = IGT_SPIN_FENCE_IN);
igt_spin_free(fd, dirt);
igt_spin_free(fd, syncpt);
- gem_context_destroy(fd, sw);
+ intel_ctx_destroy(fd, sw);
}
regs[1] = read_regs(fd, ctx, e, flags);
@@ -679,12 +679,12 @@ static void nonpriv(int fd,
for (int n = 0; n < ARRAY_SIZE(regs); n++)
gem_close(fd, regs[n]);
- gem_context_destroy(fd, ctx);
+ intel_ctx_destroy(fd, ctx);
gem_close(fd, tmpl);
}
}
-static void isolation(int fd,
+static void isolation(int fd, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
unsigned int flags)
{
@@ -704,12 +704,13 @@ static void isolation(int fd,
for (int v = 0; v < num_values; v++) {
igt_spin_t *spin = NULL;
- uint32_t ctx[2], regs[2], tmp;
+ const intel_ctx_t *ctx[2];
+ uint32_t regs[2], tmp;
- ctx[0] = gem_context_clone_with_engines(fd, 0);
+ ctx[0] = intel_ctx_create(fd, cfg);
regs[0] = read_regs(fd, ctx[0], e, flags);
- spin = igt_spin_new(fd, .ctx_id = ctx[0], .engine = e->flags);
+ spin = igt_spin_new(fd, .ctx = ctx[0], .engine = e->flags);
if (flags & DIRTY1) {
igt_debug("%s[%d]: Setting all registers of ctx 0 to 0x%08x\n",
@@ -725,7 +726,7 @@ static void isolation(int fd,
* the default values from this context, but if goes badly we
* see the corruption from the previous context instead!
*/
- ctx[1] = gem_context_clone_with_engines(fd, 0);
+ ctx[1] = intel_ctx_create(fd, cfg);
regs[1] = read_regs(fd, ctx[1], e, flags);
if (flags & DIRTY2) {
@@ -750,7 +751,7 @@ static void isolation(int fd,
for (int n = 0; n < ARRAY_SIZE(ctx); n++) {
gem_close(fd, regs[n]);
- gem_context_destroy(fd, ctx[n]);
+ intel_ctx_destroy(fd, ctx[n]);
}
gem_close(fd, tmp);
}
@@ -763,21 +764,25 @@ static void isolation(int fd,
#define S4 (4 << 8)
#define SLEEP_MASK (0xf << 8)
-static uint32_t create_reset_context(int i915)
+static const intel_ctx_t *
+create_reset_context(int i915, const intel_ctx_cfg_t *cfg)
{
+ const intel_ctx_t *ctx = intel_ctx_create(i915, cfg);
struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_clone_with_engines(i915, 0),
+ .ctx_id = ctx->id,
.param = I915_CONTEXT_PARAM_BANNABLE,
};
gem_context_set_param(i915, &param);
- return param.ctx_id;
+ return ctx;
}
-static void inject_reset_context(int fd, const struct intel_execution_engine2 *e)
+static void inject_reset_context(int fd, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *e)
{
+ const intel_ctx_t *ctx = create_reset_context(fd, cfg);
struct igt_spin_factory opts = {
- .ctx_id = create_reset_context(fd),
+ .ctx = ctx,
.engine = e->flags,
.flags = IGT_SPIN_FAST,
};
@@ -802,10 +807,10 @@ static void inject_reset_context(int fd, const struct intel_execution_engine2 *e
igt_force_gpu_reset(fd);
igt_spin_free(fd, spin);
- gem_context_destroy(fd, opts.ctx_id);
+ intel_ctx_destroy(fd, ctx);
}
-static void preservation(int fd,
+static void preservation(int fd, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
unsigned int flags)
{
@@ -819,17 +824,17 @@ static void preservation(int fd,
0xdeadbeef
};
const unsigned int num_values = ARRAY_SIZE(values);
- uint32_t ctx[num_values +1 ];
+ const intel_ctx_t *ctx[num_values + 1];
uint32_t regs[num_values + 1][2];
igt_spin_t *spin;
gem_quiescent_gpu(fd);
- ctx[num_values] = gem_context_clone_with_engines(fd, 0);
- spin = igt_spin_new(fd, .ctx_id = ctx[num_values], .engine = e->flags);
+ ctx[num_values] = intel_ctx_create(fd, cfg);
+ spin = igt_spin_new(fd, .ctx = ctx[num_values], .engine = e->flags);
regs[num_values][0] = read_regs(fd, ctx[num_values], e, flags);
for (int v = 0; v < num_values; v++) {
- ctx[v] = gem_context_clone_with_engines(fd, 0);
+ ctx[v] = intel_ctx_create(fd, cfg);
write_regs(fd, ctx[v], e, flags, values[v]);
regs[v][0] = read_regs(fd, ctx[v], e, flags);
@@ -839,7 +844,7 @@ static void preservation(int fd,
igt_spin_free(fd, spin);
if (flags & RESET)
- inject_reset_context(fd, e);
+ inject_reset_context(fd, cfg, e);
switch (flags & SLEEP_MASK) {
case NOSLEEP:
@@ -866,7 +871,7 @@ static void preservation(int fd,
break;
}
- spin = igt_spin_new(fd, .ctx_id = ctx[num_values], .engine = e->flags);
+ spin = igt_spin_new(fd, .ctx = ctx[num_values], .engine = e->flags);
for (int v = 0; v < num_values; v++)
regs[v][1] = read_regs(fd, ctx[v], e, flags);
regs[num_values][1] = read_regs(fd, ctx[num_values], e, flags);
@@ -880,10 +885,10 @@ static void preservation(int fd,
gem_close(fd, regs[v][0]);
gem_close(fd, regs[v][1]);
- gem_context_destroy(fd, ctx[v]);
+ intel_ctx_destroy(fd, ctx[v]);
}
compare_regs(fd, e, regs[num_values][0], regs[num_values][1], "clean");
- gem_context_destroy(fd, ctx[num_values]);
+ intel_ctx_destroy(fd, ctx[num_values]);
}
static unsigned int __has_context_isolation(int fd)
@@ -901,8 +906,8 @@ static unsigned int __has_context_isolation(int fd)
return value;
}
-#define test_each_engine(e, i915, mask) \
- __for_each_physical_engine(i915, e) \
+#define test_each_engine(e, i915, cfg, mask) \
+ for_each_ctx_cfg_engine(i915, cfg, e) \
for_each_if(mask & (1 << (e)->class)) \
igt_dynamic_f("%s", (e)->name)
@@ -910,6 +915,7 @@ igt_main
{
unsigned int has_context_isolation = 0;
const struct intel_execution_engine2 *e;
+ intel_ctx_cfg_t cfg;
int i915 = -1;
igt_fixture {
@@ -918,6 +924,7 @@ igt_main
i915 = drm_open_driver(DRIVER_INTEL);
igt_require_gem(i915);
igt_require(gem_has_contexts(i915));
+ cfg = intel_ctx_cfg_all_physical(i915);
has_context_isolation = __has_context_isolation(i915);
igt_require(has_context_isolation);
@@ -929,50 +936,48 @@ igt_main
igt_skip_on(gen > LAST_KNOWN_GEN);
}
- /* __for_each_physical_engine switches context to all engines. */
-
igt_fixture {
igt_fork_hang_detector(i915);
}
igt_subtest_with_dynamic("nonpriv") {
- test_each_engine(e, i915, has_context_isolation)
- nonpriv(i915, e, 0);
+ test_each_engine(e, i915, &cfg, has_context_isolation)
+ nonpriv(i915, &cfg, e, 0);
}
igt_subtest_with_dynamic("nonpriv-switch") {
- test_each_engine(e, i915, has_context_isolation)
- nonpriv(i915, e, DIRTY2);
+ test_each_engine(e, i915, &cfg, has_context_isolation)
+ nonpriv(i915, &cfg, e, DIRTY2);
}
igt_subtest_with_dynamic("clean") {
- test_each_engine(e, i915, has_context_isolation)
- isolation(i915, e, 0);
+ test_each_engine(e, i915, &cfg, has_context_isolation)
+ isolation(i915, &cfg, e, 0);
}
igt_subtest_with_dynamic("dirty-create") {
- test_each_engine(e, i915, has_context_isolation)
- isolation(i915, e, DIRTY1);
+ test_each_engine(e, i915, &cfg, has_context_isolation)
+ isolation(i915, &cfg, e, DIRTY1);
}
igt_subtest_with_dynamic("dirty-switch") {
- test_each_engine(e, i915, has_context_isolation)
- isolation(i915, e, DIRTY2);
+ test_each_engine(e, i915, &cfg, has_context_isolation)
+ isolation(i915, &cfg, e, DIRTY2);
}
igt_subtest_with_dynamic("preservation") {
- test_each_engine(e, i915, has_context_isolation)
- preservation(i915, e, 0);
+ test_each_engine(e, i915, &cfg, has_context_isolation)
+ preservation(i915, &cfg, e, 0);
}
igt_subtest_with_dynamic("preservation-S3") {
- test_each_engine(e, i915, has_context_isolation)
- preservation(i915, e, S3);
+ test_each_engine(e, i915, &cfg, has_context_isolation)
+ preservation(i915, &cfg, e, S3);
}
igt_subtest_with_dynamic("preservation-S4") {
- test_each_engine(e, i915, has_context_isolation)
- preservation(i915, e, S4);
+ test_each_engine(e, i915, &cfg, has_context_isolation)
+ preservation(i915, &cfg, e, S4);
}
igt_fixture {
@@ -982,8 +987,8 @@ igt_main
igt_subtest_with_dynamic("preservation-reset") {
igt_hang_t hang = igt_allow_hang(i915, 0, 0);
- test_each_engine(e, i915, has_context_isolation)
- preservation(i915, e, RESET);
+ test_each_engine(e, i915, &cfg, has_context_isolation)
+ preservation(i915, &cfg, e, RESET);
igt_disallow_hang(i915, hang);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 10/81] tests/i915/gem_exec_async: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (8 preceding siblings ...)
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 09/81] tests/i915/gem_ctx_isolation: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 11/81] tests/i915/sysfs_clients: Convert to intel_ctx_t Jason Ekstrand
` (73 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't null-init ctx
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_async.c | 32 ++++++++++++++++++++------------
1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/tests/i915/gem_exec_async.c b/tests/i915/gem_exec_async.c
index bf8a856a6..a3be6b3ee 100644
--- a/tests/i915/gem_exec_async.c
+++ b/tests/i915/gem_exec_async.c
@@ -27,7 +27,7 @@
IGT_TEST_DESCRIPTION("Check that we can issue concurrent writes across the engines.");
-static void store_dword(int fd, unsigned ring,
+static void store_dword(int fd, const intel_ctx_t *ctx, unsigned ring,
uint32_t target, uint32_t offset, uint32_t value)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
@@ -43,6 +43,7 @@ static void store_dword(int fd, unsigned ring,
execbuf.flags = ring;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
memset(obj, 0, sizeof(obj));
obj[0].handle = target;
@@ -80,7 +81,8 @@ static void store_dword(int fd, unsigned ring,
gem_close(fd, obj[1].handle);
}
-static void one(int fd, unsigned engine, unsigned int flags)
+static void one(int fd, const intel_ctx_t *ctx,
+ unsigned engine, unsigned int flags)
#define FORKED (1 << 0)
{
const struct intel_execution_engine2 *e;
@@ -94,10 +96,11 @@ static void one(int fd, unsigned engine, unsigned int flags)
* the scratch for write. Then on the other rings try and
* write into that target. If it blocks we hang the GPU...
*/
- spin = igt_spin_new(fd, .engine = engine, .dependency = scratch);
+ spin = igt_spin_new(fd, .ctx = ctx, .engine = engine,
+ .dependency = scratch);
i = 0;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (e->flags == engine)
continue;
@@ -106,9 +109,9 @@ static void one(int fd, unsigned engine, unsigned int flags)
if (flags & FORKED) {
igt_fork(child, 1)
- store_dword(fd, e->flags, scratch, 4*i, ~i);
+ store_dword(fd, ctx, e->flags, scratch, 4*i, ~i);
} else {
- store_dword(fd, e->flags, scratch, 4*i, ~i);
+ store_dword(fd, ctx, e->flags, scratch, 4*i, ~i);
}
i++;
}
@@ -135,13 +138,14 @@ static bool has_async_execbuf(int fd)
return async > 0;
}
-#define test_each_engine(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_engine(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
igt_dynamic_f("%s", (e)->name)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
@@ -149,17 +153,21 @@ igt_main
igt_require_gem(fd);
gem_require_mmap_wc(fd);
igt_require(has_async_execbuf(fd));
+
+ ctx = intel_ctx_create_all_physical(fd);
+
igt_fork_hang_detector(fd);
}
- test_each_engine("concurrent-writes", fd, e)
- one(fd, e->flags, 0);
+ test_each_engine("concurrent-writes", fd, ctx, e)
+ one(fd, ctx, e->flags, 0);
- test_each_engine("forked-writes", fd, e)
- one(fd, e->flags, FORKED);
+ test_each_engine("forked-writes", fd, ctx, e)
+ one(fd, ctx, e->flags, FORKED);
igt_fixture {
igt_stop_hang_detector();
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 11/81] tests/i915/sysfs_clients: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (9 preceding siblings ...)
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 10/81] tests/i915/gem_exec_async: " Jason Ekstrand
@ 2021-07-07 14:42 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 12/81] tests/i915/gem_exec_fair: " Jason Ekstrand
` (72 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:42 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/sysfs_clients.c | 87 ++++++++++++++++++++------------------
1 file changed, 46 insertions(+), 41 deletions(-)
diff --git a/tests/i915/sysfs_clients.c b/tests/i915/sysfs_clients.c
index 046367778..ab79ff18c 100644
--- a/tests/i915/sysfs_clients.c
+++ b/tests/i915/sysfs_clients.c
@@ -18,13 +18,13 @@
#include "drmtest.h"
#include "i915/gem.h"
-#include "i915/gem_context.h"
#include "i915/gem_create.h"
#include "i915/gem_engine_topology.h"
#include "i915/gem_mman.h"
#include "igt_aux.h"
#include "igt_dummyload.h"
#include "igt_sysfs.h"
+#include "intel_ctx.h"
#include "ioctl_wrappers.h"
#define __require_within_epsilon(x, ref, tol_up, tol_down) \
@@ -393,34 +393,26 @@ static uint64_t measured_usleep(unsigned int usec)
return igt_nsec_elapsed(&tv);
}
-static int reopen_client(int i915)
-{
- int clone;
-
- clone = gem_reopen_driver(i915);
- gem_context_copy_engines(i915, 0, clone, 0);
- close(i915);
-
- return clone;
-}
-
static void
-busy_one(int i915, int clients, const struct intel_execution_engine2 *e)
+busy_one(int i915, int clients, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *e)
{
int64_t active, idle, old, other[MAX_CLASS];
struct timespec tv;
+ const intel_ctx_t *ctx;
igt_spin_t *spin;
uint64_t delay;
int me;
/* Create a fresh client with 0 runtime */
- i915 = reopen_client(i915);
+ i915 = gem_reopen_driver(i915);
me = find_me(clients, getpid());
igt_assert(me != -1);
+ ctx = intel_ctx_create(i915, cfg);
spin = igt_spin_new(i915,
- gem_context_clone_with_engines(i915, 0),
+ .ctx = ctx,
.engine = e->flags,
.flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin);
@@ -472,7 +464,7 @@ busy_one(int i915, int clients, const struct intel_execution_engine2 *e)
igt_assert(idle >= active);
}
- gem_context_destroy(i915, spin->execbuf.rsvd1);
+ intel_ctx_destroy(i915, ctx);
/* And finally after the executing context is no more */
old = read_runtime(me, e->class);
@@ -513,28 +505,29 @@ busy_one(int i915, int clients, const struct intel_execution_engine2 *e)
close(i915);
}
-static void busy_all(int i915, int clients)
+static void busy_all(int i915, int clients, const intel_ctx_cfg_t *cfg)
{
const struct intel_execution_engine2 *e;
int64_t active[MAX_CLASS];
int64_t idle[MAX_CLASS];
int64_t old[MAX_CLASS];
uint64_t classes = 0;
+ const intel_ctx_t *ctx;
igt_spin_t *spin;
int expect = 0;
int64_t delay;
int me;
/* Create a fresh client with 0 runtime */
- i915 = reopen_client(i915);
+ i915 = gem_reopen_driver(i915);
me = find_me(clients, getpid());
igt_assert(me != -1);
- spin = igt_spin_new(i915,
- gem_context_clone_with_engines(i915, 0),
+ ctx = intel_ctx_create(i915, cfg);
+ spin = igt_spin_new(i915, .ctx = ctx,
.flags = IGT_SPIN_POLL_RUN);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (!gem_class_can_store_dword(i915, e->class))
continue;
@@ -580,7 +573,7 @@ static void busy_all(int i915, int clients)
igt_assert(idle[i] >= active[i]);
}
- gem_context_destroy(i915, spin->execbuf.rsvd1);
+ intel_ctx_destroy(i915, ctx);
igt_spin_free(i915, spin);
/* And finally after the executing context is no more */
@@ -597,17 +590,19 @@ static void busy_all(int i915, int clients)
}
static void
-split_child(int i915, int clients,
+split_child(int i915, int clients, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
int sv)
{
int64_t runtime[2] = {};
+ const intel_ctx_t *ctx;
igt_spin_t *spin;
int go = 1;
- i915 = reopen_client(i915);
+ i915 = gem_reopen_driver(i915);
- spin = igt_spin_new(i915, .engine = e->flags);
+ ctx = intel_ctx_create(i915, cfg);
+ spin = igt_spin_new(i915, .ctx = ctx, .engine = e->flags);
igt_spin_end(spin);
gem_sync(i915, spin->handle);
@@ -627,12 +622,14 @@ split_child(int i915, int clients,
igt_spin_free(i915, spin);
runtime[0] = read_runtime(find_me(clients, getpid()), e->class);
+ intel_ctx_destroy(i915, ctx);
write(sv, runtime, sizeof(runtime));
}
static void
-__split(int i915, int clients, const struct intel_execution_engine2 *e, int f,
- void (*fn)(int i915, int clients,
+__split(int i915, int clients, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *e, int f,
+ void (*fn)(int i915, int clients, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
int sv))
{
@@ -653,7 +650,7 @@ __split(int i915, int clients, const struct intel_execution_engine2 *e, int f,
igt_assert(socketpair(AF_UNIX, SOCK_DGRAM, 0, c->sv) == 0);
igt_fork(child, 1)
- fn(i915, clients, e, c->sv[1]);
+ fn(i915, clients, cfg, e, c->sv[1]);
read(c->sv[0], &go, sizeof(go));
}
@@ -721,13 +718,14 @@ __split(int i915, int clients, const struct intel_execution_engine2 *e, int f,
}
static void
-split(int i915, int clients, const struct intel_execution_engine2 *e, int f)
+split(int i915, int clients, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *e, int f)
{
- __split(i915, clients, e, f, split_child);
+ __split(i915, clients, cfg, e, f, split_child);
}
static void
-sema_child(int i915, int clients,
+sema_child(int i915, int clients, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
int sv)
{
@@ -740,9 +738,12 @@ sema_child(int i915, int clients,
.buffer_count = 1,
.flags = e->flags,
};
+ const intel_ctx_t *ctx;
uint32_t *cs, *sema;
- i915 = reopen_client(i915);
+ i915 = gem_reopen_driver(i915);
+ ctx = intel_ctx_create(i915, cfg);
+ execbuf.rsvd1 = ctx->id;
obj.handle = gem_create(i915, 4096);
obj.offset = obj.handle << 12;
@@ -772,6 +773,7 @@ sema_child(int i915, int clients,
*sema = 0;
gem_execbuf(i915, &execbuf);
gem_close(i915, obj.handle);
+ intel_ctx_destroy(i915, ctx);
write(sv, sema, sizeof(*sema));
read(sv, sema, sizeof(*sema));
@@ -795,9 +797,10 @@ sema_child(int i915, int clients,
}
static void
-sema(int i915, int clients, const struct intel_execution_engine2 *e, int f)
+sema(int i915, int clients, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *e, int f)
{
- __split(i915, clients, e, f, sema_child);
+ __split(i915, clients, cfg, e, f, sema_child);
}
static int read_all(int clients, pid_t pid, int class, uint64_t *runtime)
@@ -945,21 +948,23 @@ static bool has_busy(int clients)
static void test_busy(int i915, int clients)
{
const struct intel_execution_engine2 *e;
+ intel_ctx_cfg_t cfg;
const int frac[] = { 10, 25, 50 };
igt_fixture {
igt_require(gem_has_contexts(i915));
igt_require(has_busy(clients));
+ cfg = intel_ctx_cfg_all_physical(i915);
}
igt_subtest_with_dynamic("busy") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, &cfg, e) {
if (!gem_class_can_store_dword(i915, e->class))
continue;
igt_dynamic_f("%s", e->name) {
gem_quiescent_gpu(i915);
igt_fork(child, 1)
- busy_one(i915, clients, e);
+ busy_one(i915, clients, &cfg, e);
igt_waitchildren();
gem_quiescent_gpu(i915);
}
@@ -968,7 +973,7 @@ static void test_busy(int i915, int clients)
igt_dynamic("all") {
gem_quiescent_gpu(i915);
igt_fork(child, 1)
- busy_all(i915, clients);
+ busy_all(i915, clients, &cfg);
igt_waitchildren();
gem_quiescent_gpu(i915);
}
@@ -976,10 +981,10 @@ static void test_busy(int i915, int clients)
for (int i = 0; i < ARRAY_SIZE(frac); i++) {
igt_subtest_with_dynamic_f("split-%d", frac[i]) {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, &cfg, e) {
igt_dynamic_f("%s", e->name) {
gem_quiescent_gpu(i915);
- split(i915, clients, e, frac[i]);
+ split(i915, clients, &cfg, e, frac[i]);
gem_quiescent_gpu(i915);
}
}
@@ -994,13 +999,13 @@ static void test_busy(int i915, int clients)
for (int i = 0; i < ARRAY_SIZE(frac); i++) {
igt_subtest_with_dynamic_f("sema-%d", frac[i]) {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, &cfg, e) {
if (!gem_class_has_mutable_submission(i915, e->class))
continue;
igt_dynamic_f("%s", e->name) {
igt_drop_caches_set(i915, DROP_RESET_ACTIVE);
- sema(i915, clients, e, frac[i]);
+ sema(i915, clients, &cfg, e, frac[i]);
gem_quiescent_gpu(i915);
}
igt_drop_caches_set(i915, DROP_RESET_ACTIVE);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 12/81] tests/i915/gem_exec_fair: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (10 preceding siblings ...)
2021-07-07 14:42 ` [igt-dev] [PATCH i-g-t 11/81] tests/i915/sysfs_clients: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 13/81] tests/i915/gem_spin_batch: Convert to intel_ctx_t (v2) Jason Ekstrand
` (71 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_fair.c | 112 +++++++++++++++++++++----------------
1 file changed, 65 insertions(+), 47 deletions(-)
diff --git a/tests/i915/gem_exec_fair.c b/tests/i915/gem_exec_fair.c
index 452375749..4b5531cc2 100644
--- a/tests/i915/gem_exec_fair.c
+++ b/tests/i915/gem_exec_fair.c
@@ -220,7 +220,7 @@ static void delay(int i915,
}
static struct drm_i915_gem_exec_object2
-delay_create(int i915, uint32_t ctx,
+delay_create(int i915, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
uint64_t target_ns)
{
@@ -231,7 +231,7 @@ delay_create(int i915, uint32_t ctx,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
.flags = e->flags,
};
@@ -321,7 +321,8 @@ static void tslog(int i915,
}
static struct drm_i915_gem_exec_object2
-tslog_create(int i915, uint32_t ctx, const struct intel_execution_engine2 *e)
+tslog_create(int i915, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
struct drm_i915_gem_exec_object2 obj = {
.handle = batch_create(i915),
@@ -330,7 +331,7 @@ tslog_create(int i915, uint32_t ctx, const struct intel_execution_engine2 *e)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
.flags = e->flags,
};
@@ -357,7 +358,8 @@ static int cmp_u32(const void *A, const void *B)
}
static uint32_t
-read_ctx_timestamp(int i915, const struct intel_execution_engine2 *e)
+read_ctx_timestamp(int i915, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_exec_object2 obj = {
@@ -369,6 +371,7 @@ read_ctx_timestamp(int i915, const struct intel_execution_engine2 *e)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
+ .rsvd1 = ctx->id,
.flags = e->flags,
};
const int use_64b = intel_gen(intel_get_drm_devid(i915)) >= 8;
@@ -410,23 +413,31 @@ read_ctx_timestamp(int i915, const struct intel_execution_engine2 *e)
return ts;
}
-static bool has_ctx_timestamp(int i915, const struct intel_execution_engine2 *e)
+static bool has_ctx_timestamp(int i915, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *e)
{
const int gen = intel_gen(intel_get_drm_devid(i915));
+ const intel_ctx_t *tmp_ctx;
+ uint32_t timestamp;
if (gen == 8 && e->class == I915_ENGINE_CLASS_VIDEO)
return false; /* looks fubar */
- return read_ctx_timestamp(i915, e);
+ tmp_ctx = intel_ctx_create(i915, cfg);
+ timestamp = read_ctx_timestamp(i915, tmp_ctx, e);
+ intel_ctx_destroy(i915, tmp_ctx);
+
+ return timestamp;
}
static struct intel_execution_engine2
-pick_random_engine(int i915, const struct intel_execution_engine2 *not)
+pick_random_engine(int i915, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *not)
{
const struct intel_execution_engine2 *e;
unsigned int count = 0;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, cfg, e) {
if (e->flags == not->flags)
continue;
if (!gem_class_has_mutable_submission(i915, e->class))
@@ -437,7 +448,7 @@ pick_random_engine(int i915, const struct intel_execution_engine2 *not)
return *not;
count = rand() % count;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, cfg, e) {
if (e->flags == not->flags)
continue;
if (!gem_class_has_mutable_submission(i915, e->class))
@@ -449,7 +460,7 @@ pick_random_engine(int i915, const struct intel_execution_engine2 *not)
return *e;
}
-static void fair_child(int i915, uint32_t ctx,
+static void fair_child(int i915, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
uint64_t frame_ns,
int timeline,
@@ -490,7 +501,7 @@ static void fair_child(int i915, uint32_t ctx,
srandom(getpid());
if (flags & F_PING)
- ping = pick_random_engine(i915, e);
+ ping = pick_random_engine(i915, &ctx->cfg, e);
obj[0] = tslog_create(i915, ctx, &ping);
/* Synchronize with other children/parent upon construction */
@@ -510,7 +521,7 @@ static void fair_child(int i915, uint32_t ctx,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(obj),
.buffer_count = 3,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
.rsvd2 = -1,
.flags = aux_flags,
};
@@ -632,7 +643,7 @@ static void timeline_advance(int timeline, int delay_ns)
sw_sync_timeline_inc(timeline, 1);
}
-static void fairness(int i915,
+static void fairness(int i915, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
int duration, unsigned int flags)
{
@@ -645,7 +656,7 @@ static void fairness(int i915,
int parent[2];
} lnk;
- igt_require(has_ctx_timestamp(i915, e));
+ igt_require(has_ctx_timestamp(i915, cfg, e));
igt_require(gem_class_has_mutable_submission(i915, e->class));
if (flags & (F_ISOLATE | F_PING))
igt_require(intel_gen(intel_get_drm_devid(i915)) >= 8);
@@ -709,12 +720,12 @@ static void fairness(int i915,
if (flags & F_PING) { /* fill the others with light bg load */
struct intel_execution_engine2 *ping;
- __for_each_physical_engine(i915, ping) {
+ for_each_ctx_cfg_engine(i915, cfg, ping) {
if (ping->flags == e->flags)
continue;
igt_fork(child, 1) {
- uint32_t ctx = gem_context_clone_with_engines(i915, 0);
+ const intel_ctx_t *ctx = intel_ctx_create(i915, cfg);
fair_child(i915, ctx, ping,
child_ns / 8,
@@ -723,7 +734,7 @@ static void fairness(int i915,
&result[nchild],
NULL, NULL, -1, -1);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
}
}
@@ -731,26 +742,24 @@ static void fairness(int i915,
getrusage(RUSAGE_CHILDREN, &old_usage);
igt_nsec_elapsed(memset(&tv, 0, sizeof(tv)));
igt_fork(child, nchild) {
- uint32_t ctx;
+ const intel_ctx_t *ctx;
if (flags & F_ISOLATE) {
- int clone, dmabuf = -1;
+ int dmabuf = -1;
if (common)
dmabuf = prime_handle_to_fd(i915, common);
- clone = gem_reopen_driver(i915);
- gem_context_copy_engines(i915, 0, clone, 0);
- i915 = clone;
+ i915 = gem_reopen_driver(i915);
if (dmabuf != -1)
common = prime_fd_to_handle(i915, dmabuf);
}
- ctx = gem_context_clone_with_engines(i915, 0);
+ ctx = intel_ctx_create(i915, cfg);
if (flags & F_VIP && child == 0) {
- gem_context_set_priority(i915, ctx, 1023);
+ gem_context_set_priority(i915, ctx->id, 1023);
flags |= F_FLOW;
}
if (flags & F_RRUL && child == 0)
@@ -762,7 +771,7 @@ static void fairness(int i915,
&result[child], &iqr[child],
lnk.child[1], lnk.parent[0]);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
{
@@ -907,7 +916,7 @@ static void fairness(int i915,
}
static void deadline_child(int i915,
- uint32_t ctx,
+ const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
uint32_t handle,
int timeline,
@@ -927,7 +936,7 @@ static void deadline_child(int i915,
.buffers_ptr = to_user_pointer(obj),
.buffer_count = ARRAY_SIZE(obj),
.flags = I915_EXEC_FENCE_OUT | e->flags,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
unsigned int seq = 1;
int prev = -1, next = -1;
@@ -972,11 +981,12 @@ static void deadline_child(int i915,
close(prev);
}
-static struct intel_execution_engine2 pick_default(int i915)
+static struct intel_execution_engine2
+pick_default(int i915, const intel_ctx_cfg_t *cfg)
{
const struct intel_execution_engine2 *e;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, cfg, e) {
if (!e->flags)
return *e;
}
@@ -984,11 +994,12 @@ static struct intel_execution_engine2 pick_default(int i915)
return (struct intel_execution_engine2){};
}
-static struct intel_execution_engine2 pick_engine(int i915, const char *name)
+static struct intel_execution_engine2
+pick_engine(int i915, const intel_ctx_cfg_t *cfg, const char *name)
{
const struct intel_execution_engine2 *e;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, cfg, e) {
if (!strcmp(e->name, name))
return *e;
}
@@ -1025,15 +1036,16 @@ static uint64_t time_get_mono_ns(void)
return tv.tv_sec * NSEC64 + tv.tv_nsec;
}
-static void deadline(int i915, int duration, unsigned int flags)
+static void deadline(int i915, const intel_ctx_cfg_t *cfg,
+ int duration, unsigned int flags)
{
const int64_t frame_ns = 33670 * 1000; /* 29.7fps */
const int64_t parent_ns = 400 * 1000;
const int64_t switch_ns = 50 * 1000;
const int64_t overhead_ns = /* estimate timeslicing overhead */
(frame_ns / 1000 / 1000 + 2) * switch_ns + parent_ns;
- struct intel_execution_engine2 pe = pick_default(i915);
- struct intel_execution_engine2 ve = pick_engine(i915, "vcs0");
+ struct intel_execution_engine2 pe = pick_default(i915, cfg);
+ struct intel_execution_engine2 ve = pick_engine(i915, cfg, "vcs0");
struct drm_i915_gem_exec_fence *fences = calloc(sizeof(*fences), 32);
struct drm_i915_gem_exec_object2 *obj = calloc(sizeof(*obj), 32);
struct drm_i915_gem_execbuffer2 execbuf = {
@@ -1044,14 +1056,15 @@ static void deadline(int i915, int duration, unsigned int flags)
I915_EXEC_FENCE_ARRAY |
I915_EXEC_FENCE_OUT
};
+ const intel_ctx_t *delay_ctx;
int *ctl;
igt_require(has_syncobj(i915));
igt_require(has_fence_array(i915));
igt_require(has_mi_math(i915, &pe));
- igt_require(has_ctx_timestamp(i915, &pe));
+ igt_require(has_ctx_timestamp(i915, cfg, &pe));
igt_require(has_mi_math(i915, &ve));
- igt_require(has_ctx_timestamp(i915, &ve));
+ igt_require(has_ctx_timestamp(i915, cfg, &ve));
igt_assert(obj && fences);
if (flags & DL_PRIO)
igt_require(gem_scheduler_has_preemption(i915));
@@ -1059,9 +1072,10 @@ static void deadline(int i915, int duration, unsigned int flags)
ctl = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
igt_assert(ctl != MAP_FAILED);
- obj[0] = delay_create(i915, 0, &pe, parent_ns);
+ delay_ctx = intel_ctx_create(i915, cfg);
+ obj[0] = delay_create(i915, delay_ctx, &pe, parent_ns);
if (flags & DL_PRIO)
- gem_context_set_priority(i915, 0, 1023);
+ gem_context_set_priority(i915, delay_ctx->id, 1023);
if (intel_gen(intel_get_drm_devid(i915)) < 8)
execbuf.flags |= I915_EXEC_SECURE;
for (int n = 1; n <= 5; n++) {
@@ -1088,7 +1102,7 @@ static void deadline(int i915, int duration, unsigned int flags)
*ctl = 0;
igt_fork(child, num_children) {
- uint32_t ctx = gem_context_clone_with_engines(i915, 0);
+ const intel_ctx_t *ctx = intel_ctx_create(i915, cfg);
deadline_child(i915, ctx, &ve, obj[child + 1].handle,
timeline, child_ns,
@@ -1096,7 +1110,7 @@ static void deadline(int i915, int duration, unsigned int flags)
link[child].parent[0],
ctl, flags);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
for (int i = 0; i < num_children; i++)
@@ -1170,6 +1184,7 @@ static void deadline(int i915, int duration, unsigned int flags)
gem_quiescent_gpu(i915);
}
+ intel_ctx_destroy(i915, delay_ctx);
gem_close(i915, obj[0].handle);
free(obj);
free(fences);
@@ -1277,6 +1292,7 @@ igt_main
{}
};
const struct intel_execution_engine2 *e;
+ intel_ctx_cfg_t cfg;
int i915 = -1;
igt_fixture {
@@ -1292,6 +1308,8 @@ igt_main
igt_require(gem_scheduler_enabled(i915));
igt_require(gem_scheduler_has_ctx_priority(i915));
+ cfg = intel_ctx_cfg_all_physical(i915);
+
igt_info("CS timestamp frequency: %d\n",
read_timestamp_frequency(i915));
igt_require(has_mi_math(i915, NULL));
@@ -1305,7 +1323,7 @@ igt_main
continue;
igt_subtest_with_dynamic_f("basic-%s", f->name) {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, &cfg, e) {
if (!has_mi_math(i915, e))
continue;
@@ -1316,19 +1334,19 @@ igt_main
continue;
igt_dynamic_f("%s", e->name)
- fairness(i915, e, 1, f->flags);
+ fairness(i915, &cfg, e, 1, f->flags);
}
}
}
igt_subtest("basic-deadline")
- deadline(i915, 2, 0);
+ deadline(i915, &cfg, 2, 0);
igt_subtest("deadline-prio")
- deadline(i915, 2, DL_PRIO);
+ deadline(i915, &cfg, 2, DL_PRIO);
for (typeof(*fair) *f = fair; f->name; f++) {
igt_subtest_with_dynamic_f("fair-%s", f->name) {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, &cfg, e) {
if (!has_mi_math(i915, e))
continue;
@@ -1339,7 +1357,7 @@ igt_main
continue;
igt_dynamic_f("%s", e->name)
- fairness(i915, e, 5, f->flags);
+ fairness(i915, &cfg, e, 5, f->flags);
}
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 13/81] tests/i915/gem_spin_batch: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (11 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 12/81] tests/i915/gem_exec_fair: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 14/81] tests/i915/gem_exec_store: " Jason Ekstrand
` (70 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't null-init ctx
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_spin_batch.c | 81 +++++++++++++++++++++----------------
1 file changed, 46 insertions(+), 35 deletions(-)
diff --git a/tests/i915/gem_spin_batch.c b/tests/i915/gem_spin_batch.c
index db0af018e..4a9d6c2df 100644
--- a/tests/i915/gem_spin_batch.c
+++ b/tests/i915/gem_spin_batch.c
@@ -34,7 +34,7 @@
"'%s' != '%s' (%lld not within %d%% tolerance of %lld)\n",\
#x, #ref, (long long)x, tolerance, (long long)ref)
-static void spin(int fd,
+static void spin(int fd, const intel_ctx_t *ctx_id,
unsigned int engine,
unsigned int flags,
unsigned int timeout_sec)
@@ -46,10 +46,12 @@ static void spin(int fd,
struct timespec itv = { };
uint64_t elapsed;
- spin = __igt_spin_new(fd, .engine = engine, .flags = flags);
+ spin = __igt_spin_new(fd, .ctx = ctx_id, .engine = engine,
+ .flags = flags);
while ((elapsed = igt_nsec_elapsed(&tv)) >> 30 < timeout_sec) {
igt_spin_t *next =
- __igt_spin_new(fd, .engine = engine, .flags = flags);
+ __igt_spin_new(fd, .ctx = ctx_id, .engine = engine,
+ .flags = flags);
igt_spin_set_timeout(spin,
timeout_100ms - igt_nsec_elapsed(&itv));
@@ -75,21 +77,25 @@ static void spin(int fd,
#define RESUBMIT_NEW_CTX (1 << 0)
#define RESUBMIT_ALL_ENGINES (1 << 1)
-static void spin_resubmit(int fd, unsigned int engine, unsigned int flags)
+static void spin_resubmit(int fd, const intel_ctx_t *ctx,
+ unsigned int engine, unsigned int flags)
{
+ const intel_ctx_t *new_ctx = NULL;
igt_spin_t *spin;
if (flags & RESUBMIT_NEW_CTX)
igt_require(gem_has_contexts(fd));
- spin = __igt_spin_new(fd, .engine = engine);
- if (flags & RESUBMIT_NEW_CTX)
- spin->execbuf.rsvd1 = gem_context_clone_with_engines(fd, 0);
+ spin = __igt_spin_new(fd, .ctx = ctx, .engine = engine);
+ if (flags & RESUBMIT_NEW_CTX) {
+ new_ctx = intel_ctx_create(fd, &ctx->cfg);
+ spin->execbuf.rsvd1 = new_ctx->id;
+ }
if (flags & RESUBMIT_ALL_ENGINES) {
const struct intel_execution_engine2 *other;
- for_each_context_engine(fd, spin->execbuf.rsvd1, other) {
+ for_each_ctx_engine(fd, ctx, other) {
spin->execbuf.flags &= ~0x3f;
spin->execbuf.flags |= other->flags;
gem_execbuf(fd, &spin->execbuf);
@@ -100,8 +106,8 @@ static void spin_resubmit(int fd, unsigned int engine, unsigned int flags)
igt_spin_end(spin);
gem_sync(fd, spin->handle);
- if (spin->execbuf.rsvd1)
- gem_context_destroy(fd, spin->execbuf.rsvd1);
+ if (flags & RESUBMIT_NEW_CTX)
+ intel_ctx_destroy(fd, new_ctx);
igt_spin_free(fd, spin);
}
@@ -112,45 +118,44 @@ static void spin_exit_handler(int sig)
}
static void
-spin_on_all_engines(int fd, unsigned long flags, unsigned int timeout_sec)
+spin_on_all_engines(int fd, const intel_ctx_t *ctx,
+ unsigned long flags, unsigned int timeout_sec)
{
const struct intel_execution_engine2 *e2;
- __for_each_physical_engine(fd, e2) {
+ for_each_ctx_engine(fd, ctx, e2) {
igt_fork(child, 1) {
igt_install_exit_handler(spin_exit_handler);
- spin(fd, e2->flags, flags, timeout_sec);
+ spin(fd, ctx, e2->flags, flags, timeout_sec);
}
}
igt_waitchildren();
}
-static void spin_all(int i915, unsigned int flags)
+static void spin_all(int i915, const intel_ctx_t *ctx, unsigned int flags)
#define PARALLEL_SPIN_NEW_CTX BIT(0)
{
const struct intel_execution_engine2 *e;
+ intel_ctx_cfg_t cfg = ctx->cfg;
struct igt_spin *spin, *n;
IGT_LIST_HEAD(list);
- __for_each_physical_engine(i915, e) {
- uint32_t ctx;
-
+ for_each_ctx_cfg_engine(i915, &cfg, e) {
if (!gem_class_can_store_dword(i915, e->class))
continue;
- ctx = 0;
if (flags & PARALLEL_SPIN_NEW_CTX)
- ctx = gem_context_clone_with_engines(i915, 0);
+ ctx = intel_ctx_create(i915, &cfg);
/* Prevent preemption so only one is allowed on each engine */
spin = igt_spin_new(i915,
- .ctx_id = ctx,
+ .ctx = ctx,
.engine = e->flags,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_NO_PREEMPTION));
- if (ctx)
- gem_context_destroy(i915, ctx);
+ if (flags & PARALLEL_SPIN_NEW_CTX)
+ intel_ctx_destroy(i915, ctx);
igt_spin_busywait_until_started(spin);
igt_list_move(&spin->link, &list);
@@ -187,11 +192,14 @@ igt_main
{
const struct intel_execution_engine2 *e2;
const struct intel_execution_ring *e;
+ const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
+ ctx = intel_ctx_create_all_physical(fd);
+
igt_fork_hang_detector(fd);
}
@@ -202,53 +210,56 @@ igt_main
igt_dynamic_f("%s", e->name)
test_each_legacy_ring("legacy")
- spin(fd, eb_ring(e), 0, 3);
+ spin(fd, intel_ctx_0(fd), eb_ring(e), 0, 3);
test_each_legacy_ring("legacy-resubmit")
- spin_resubmit(fd, eb_ring(e), 0);
+ spin_resubmit(fd, intel_ctx_0(fd), eb_ring(e), 0);
test_each_legacy_ring("legacy-resubmit-new")
- spin_resubmit(fd, eb_ring(e), RESUBMIT_NEW_CTX);
+ spin_resubmit(fd, intel_ctx_0(fd), eb_ring(e), RESUBMIT_NEW_CTX);
#undef test_each_legcy_ring
igt_subtest("spin-all")
- spin_all(fd, 0);
+ spin_all(fd, ctx, 0);
igt_subtest("spin-all-new")
- spin_all(fd, PARALLEL_SPIN_NEW_CTX);
+ spin_all(fd, ctx, PARALLEL_SPIN_NEW_CTX);
#define test_each_engine(test) \
igt_subtest_with_dynamic(test) \
- __for_each_physical_engine(fd, e2) \
+ for_each_ctx_engine(fd, ctx, e2) \
igt_dynamic_f("%s", e2->name)
test_each_engine("engines")
- spin(fd, e2->flags, 0, 3);
+ spin(fd, ctx, e2->flags, 0, 3);
test_each_engine("resubmit")
- spin_resubmit(fd, e2->flags, 0);
+ spin_resubmit(fd, ctx, e2->flags, 0);
test_each_engine("resubmit-new")
- spin_resubmit(fd, e2->flags, RESUBMIT_NEW_CTX);
+ spin_resubmit(fd, ctx, e2->flags,
+ RESUBMIT_NEW_CTX);
test_each_engine("resubmit-all")
- spin_resubmit(fd, e2->flags, RESUBMIT_ALL_ENGINES);
+ spin_resubmit(fd, ctx, e2->flags,
+ RESUBMIT_ALL_ENGINES);
test_each_engine("resubmit-new-all")
- spin_resubmit(fd, e2->flags,
+ spin_resubmit(fd, ctx, e2->flags,
RESUBMIT_NEW_CTX |
RESUBMIT_ALL_ENGINES);
#undef test_each_engine
igt_subtest("spin-each")
- spin_on_all_engines(fd, 0, 3);
+ spin_on_all_engines(fd, ctx, 0, 3);
igt_subtest("user-each") {
igt_require(has_userptr(fd));
- spin_on_all_engines(fd, IGT_SPIN_USERPTR, 3);
+ spin_on_all_engines(fd, ctx, IGT_SPIN_USERPTR, 3);
}
igt_fixture {
igt_stop_hang_detector();
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 14/81] tests/i915/gem_exec_store: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (12 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 13/81] tests/i915/gem_spin_batch: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 15/81] tests/amdgpu/amd_prime: Convert to intel_ctx_t Jason Ekstrand
` (69 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't null-init ctx
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_store.c | 36 ++++++++++++++++++++++--------------
1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/tests/i915/gem_exec_store.c b/tests/i915/gem_exec_store.c
index 2df0b27f6..0798f61d7 100644
--- a/tests/i915/gem_exec_store.c
+++ b/tests/i915/gem_exec_store.c
@@ -37,7 +37,8 @@
#define ENGINE_MASK (I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK)
-static void store_dword(int fd, const struct intel_execution_engine2 *e)
+static void store_dword(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_exec_object2 obj[2];
@@ -53,6 +54,7 @@ static void store_dword(int fd, const struct intel_execution_engine2 *e)
execbuf.flags = e->flags;
if (gen > 3 && gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
memset(obj, 0, sizeof(obj));
obj[0].handle = gem_create(fd, 4096);
@@ -94,7 +96,8 @@ static void store_dword(int fd, const struct intel_execution_engine2 *e)
}
#define PAGES 1
-static void store_cachelines(int fd, const struct intel_execution_engine2 *e,
+static void store_cachelines(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e,
unsigned int flags)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
@@ -114,6 +117,7 @@ static void store_cachelines(int fd, const struct intel_execution_engine2 *e,
execbuf.flags = e->flags;
if (gen > 3 && gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
obj = calloc(execbuf.buffer_count, sizeof(*obj));
igt_assert(obj);
@@ -171,7 +175,7 @@ static void store_cachelines(int fd, const struct intel_execution_engine2 *e,
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
-static void store_all(int fd)
+static void store_all(int fd, const intel_ctx_t *ctx)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_exec_object2 obj[2];
@@ -186,7 +190,7 @@ static void store_all(int fd)
int i, j;
nengine = 0;
- __for_each_physical_engine(fd, engine) {
+ for_each_ctx_engine(fd, ctx, engine) {
if (!gem_class_can_store_dword(fd, engine->class))
continue;
nengine++;
@@ -207,6 +211,7 @@ static void store_all(int fd)
execbuf.buffer_count = 2;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
memset(obj, 0, sizeof(obj));
obj[0].handle = gem_create(fd, nengine*sizeof(uint32_t));
@@ -232,7 +237,7 @@ static void store_all(int fd)
nengine = 0;
intel_detect_and_clear_missed_interrupts(fd);
- __for_each_physical_engine(fd, engine) {
+ for_each_ctx_engine(fd, ctx, engine) {
if (!gem_class_can_store_dword(fd, engine->class))
continue;
@@ -323,14 +328,15 @@ static int print_welcome(int fd)
return info->graphics_ver;
}
-#define test_each_engine(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_engine(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
for_each_if(gem_class_can_store_dword(i915, (e)->class)) \
igt_dynamic_f("%s", (e)->name)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int fd;
igt_fixture {
@@ -343,24 +349,26 @@ igt_main
igt_device_set_master(fd);
igt_require_gem(fd);
+ ctx = intel_ctx_create_all_physical(fd);
igt_fork_hang_detector(fd);
}
igt_subtest("basic")
- store_all(fd);
+ store_all(fd, ctx);
- test_each_engine("dword", fd, e)
- store_dword(fd, e);
+ test_each_engine("dword", fd, ctx, e)
+ store_dword(fd, ctx, e);
- test_each_engine("cachelines", fd, e)
- store_cachelines(fd, e, 0);
+ test_each_engine("cachelines", fd, ctx, e)
+ store_cachelines(fd, ctx, e, 0);
- test_each_engine("pages", fd, e)
- store_cachelines(fd, e, PAGES);
+ test_each_engine("pages", fd, ctx, e)
+ store_cachelines(fd, ctx, e, PAGES);
igt_fixture {
igt_stop_hang_detector();
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 15/81] tests/amdgpu/amd_prime: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (13 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 14/81] tests/i915/gem_exec_store: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 16/81] tests/i915/i915_hangman: Convert to intel_ctx_t (v2) Jason Ekstrand
` (68 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Written totally blind but I'm pretty sure it's right.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/amdgpu/amd_prime.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/tests/amdgpu/amd_prime.c b/tests/amdgpu/amd_prime.c
index 3e5cc1a72..248fbc354 100644
--- a/tests/amdgpu/amd_prime.c
+++ b/tests/amdgpu/amd_prime.c
@@ -173,6 +173,7 @@ static void unplug(struct cork *c)
static void i915_to_amd(int i915, int amd, amdgpu_device_handle device)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
+ intel_ctx_cfg_t cfg;
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_execbuffer2 execbuf;
const struct intel_execution_engine2 *e;
@@ -181,8 +182,10 @@ static void i915_to_amd(int i915, int amd, amdgpu_device_handle device)
unsigned long count;
struct cork c;
+ cfg = intel_ctx_cfg_all_physical(i915);
+
nengine = 0;
- __for_each_physical_engine(i915, e)
+ for_each_ctx_cfg_engine(i915, &cfg, e)
engines[nengine++] = e->flags;
igt_require(nengine);
@@ -199,14 +202,15 @@ static void i915_to_amd(int i915, int amd, amdgpu_device_handle device)
count = 0;
igt_until_timeout(5) {
- execbuf.rsvd1 = gem_context_clone_with_engines(i915, 0);
+ const intel_ctx_t *ctx = intel_ctx_create(i915, &cfg);
+ execbuf.rsvd1 = ctx->id;
for (unsigned n = 0; n < nengine; n++) {
execbuf.flags = engines[n];
gem_execbuf(i915, &execbuf);
}
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, ctx);
count++;
if (!gem_uses_full_ppgtt(i915))
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 16/81] tests/i915/i915_hangman: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (14 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 15/81] tests/amdgpu/amd_prime: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 17/81] tests/i915/gem_ringfill: Convert to intel_ctx_t Jason Ekstrand
` (67 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't NULL-init ctx
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/i915_hangman.c | 37 ++++++++++++++++++++++---------------
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/tests/i915/i915_hangman.c b/tests/i915/i915_hangman.c
index f6fac283e..a8e9891e0 100644
--- a/tests/i915/i915_hangman.c
+++ b/tests/i915/i915_hangman.c
@@ -205,7 +205,7 @@ static void check_error_state(const char *expected_ring_name,
igt_assert(found);
}
-static void test_error_state_capture(unsigned ring_id,
+static void test_error_state_capture(const intel_ctx_t *ctx, unsigned ring_id,
const char *ring_name)
{
uint32_t *batch;
@@ -214,7 +214,7 @@ static void test_error_state_capture(unsigned ring_id,
clear_error_state();
- hang = igt_hang_ctx(device, 0, ring_id, HANG_ALLOW_CAPTURE);
+ hang = igt_hang_ctx(device, ctx->id, ring_id, HANG_ALLOW_CAPTURE);
offset = hang.spin->obj[IGT_SPIN_BATCH].offset;
batch = gem_mmap__cpu(device, hang.spin->handle, 0, 4096, PROT_READ);
@@ -227,32 +227,34 @@ static void test_error_state_capture(unsigned ring_id,
}
static void
-test_engine_hang(const struct intel_execution_engine2 *e, unsigned int flags)
+test_engine_hang(const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e, unsigned int flags)
{
const struct intel_execution_engine2 *other;
+ const intel_ctx_t *tmp_ctx;
igt_spin_t *spin, *next;
IGT_LIST_HEAD(list);
- uint32_t ctx;
igt_skip_on(flags & IGT_SPIN_INVALID_CS &&
gem_has_cmdparser(device, e->flags));
/* Fill all the other engines with background load */
- __for_each_physical_engine(device, other) {
+ for_each_ctx_engine(device, ctx, other) {
if (other->flags == e->flags)
continue;
- ctx = gem_context_clone_with_engines(device, 0);
- spin = __igt_spin_new(device, ctx,
+ tmp_ctx = intel_ctx_create(device, &ctx->cfg);
+ spin = __igt_spin_new(device, .ctx = tmp_ctx,
.engine = other->flags,
.flags = IGT_SPIN_FENCE_OUT);
- gem_context_destroy(device, ctx);
+ intel_ctx_destroy(device, tmp_ctx);
igt_list_move(&spin->link, &list);
}
/* And on the target engine, we hang */
spin = igt_spin_new(device,
+ .ctx = ctx,
.engine = e->flags,
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_NO_PREEMPTION |
@@ -311,13 +313,16 @@ static void hangcheck_unterminated(void)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
igt_hang_t hang = {};
igt_fixture {
device = drm_open_driver(DRIVER_INTEL);
igt_require_gem(device);
- hang = igt_allow_hang(device, 0, HANG_ALLOW_CAPTURE);
+ ctx = intel_ctx_create_all_physical(device);
+
+ hang = igt_allow_hang(device, ctx->id, HANG_ALLOW_CAPTURE);
sysfs = igt_sysfs_open(device);
igt_assert(sysfs != -1);
@@ -329,9 +334,9 @@ igt_main
test_error_state_basic();
igt_subtest_with_dynamic("error-state-capture") {
- __for_each_physical_engine(device, e) {
+ for_each_ctx_engine(device, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_error_state_capture(e->flags, e->name);
+ test_error_state_capture(ctx, e->flags, e->name);
}
}
@@ -347,9 +352,9 @@ igt_main
ioctl(device, DRM_IOCTL_I915_GETPARAM, &gp);
igt_require(has_gpu_reset > 1);
- __for_each_physical_engine(device, e) {
+ for_each_ctx_engine(device, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_engine_hang(e, 0);
+ test_engine_hang(ctx, e, 0);
}
}
@@ -364,9 +369,9 @@ igt_main
ioctl(device, DRM_IOCTL_I915_GETPARAM, &gp);
igt_require(has_gpu_reset > 1);
- __for_each_physical_engine(device, e) {
+ for_each_ctx_engine(device, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_engine_hang(e, IGT_SPIN_INVALID_CS);
+ test_engine_hang(ctx, e, IGT_SPIN_INVALID_CS);
}
}
@@ -375,5 +380,7 @@ igt_main
igt_fixture {
igt_disallow_hang(device, hang);
+ intel_ctx_destroy(device, ctx);
+ close(device);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 17/81] tests/i915/gem_ringfill: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (15 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 16/81] tests/i915/i915_hangman: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 18/81] tests/prime_busy: " Jason Ekstrand
` (66 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_ringfill.c | 48 +++++++++++++++++++++++----------------
1 file changed, 28 insertions(+), 20 deletions(-)
diff --git a/tests/i915/gem_ringfill.c b/tests/i915/gem_ringfill.c
index 9ad97532f..5d2169d65 100644
--- a/tests/i915/gem_ringfill.c
+++ b/tests/i915/gem_ringfill.c
@@ -94,7 +94,7 @@ static void fill_ring(int fd,
}
}
-static void setup_execbuf(int fd,
+static void setup_execbuf(int fd, const intel_ctx_t *ctx,
struct drm_i915_gem_execbuffer2 *execbuf,
struct drm_i915_gem_exec_object2 *obj,
struct drm_i915_gem_relocation_entry *reloc,
@@ -115,6 +115,8 @@ static void setup_execbuf(int fd,
if (gen > 3 && gen < 6)
execbuf->flags |= I915_EXEC_SECURE;
+ execbuf->rsvd1 = ctx->id;
+
obj[0].handle = gem_create(fd, 4096);
gem_write(fd, obj[0].handle, 0, &bbe, sizeof(bbe));
execbuf->buffer_count = 1;
@@ -168,7 +170,8 @@ static void setup_execbuf(int fd,
check_bo(fd, obj[0].handle);
}
-static void run_test(int fd, unsigned ring, unsigned flags, unsigned timeout)
+static void run_test(int fd, const intel_ctx_t *ctx, unsigned ring,
+ unsigned flags, unsigned timeout)
{
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_relocation_entry reloc[1024];
@@ -176,15 +179,15 @@ static void run_test(int fd, unsigned ring, unsigned flags, unsigned timeout)
igt_hang_t hang;
if (flags & (SUSPEND | HIBERNATE)) {
- run_test(fd, ring, 0, 0);
+ run_test(fd, ctx, ring, 0, 0);
gem_quiescent_gpu(fd);
}
- setup_execbuf(fd, &execbuf, obj, reloc, ring);
+ setup_execbuf(fd, ctx, &execbuf, obj, reloc, ring);
memset(&hang, 0, sizeof(hang));
if (flags & HANG)
- hang = igt_hang_ring(fd, ring & ~(3<<13));
+ hang = igt_hang_ctx(fd, ctx->id, ring & ~(3<<13), 0);
if (flags & (CHILD | FORKED | BOMB)) {
int nchild;
@@ -198,16 +201,19 @@ static void run_test(int fd, unsigned ring, unsigned flags, unsigned timeout)
igt_debug("Forking %d children\n", nchild);
igt_fork(child, nchild) {
+ const intel_ctx_t *child_ctx = NULL;
if (flags & NEWFD) {
int this;
this = gem_reopen_driver(fd);
- gem_context_copy_engines(fd, 0, this, 0);
+ child_ctx = intel_ctx_create(fd, &ctx->cfg);
fd = this;
- setup_execbuf(fd, &execbuf, obj, reloc, ring);
+ setup_execbuf(fd, child_ctx, &execbuf, obj, reloc, ring);
}
fill_ring(fd, &execbuf, flags, timeout);
+ if (child_ctx)
+ intel_ctx_destroy(fd, child_ctx);
}
if (flags & SUSPEND)
@@ -235,7 +241,7 @@ static void run_test(int fd, unsigned ring, unsigned flags, unsigned timeout)
if (flags & (SUSPEND | HIBERNATE)) {
gem_quiescent_gpu(fd);
- run_test(fd, ring, 0, 0);
+ run_test(fd, ctx, ring, 0, 0);
}
}
@@ -286,6 +292,7 @@ igt_main
{ NULL }
}, *m;
bool master = false;
+ const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
@@ -305,22 +312,20 @@ igt_main
ring_size = gem_measure_ring_inflight(fd, ALL_ENGINES, 0);
igt_info("Ring size: %d batches\n", ring_size);
igt_require(ring_size);
+
+ ctx = intel_ctx_create_all_physical(fd);
}
/* Legacy path for selecting "rings". */
for (m = modes; m->suffix; m++) {
igt_subtest_with_dynamic_f("legacy-%s", m->suffix) {
- const struct intel_execution_ring *e;
-
igt_skip_on(m->flags & NEWFD && master);
- for (e = intel_execution_rings; e->name; e++) {
- if (!gem_has_ring(fd, eb_ring(e)))
- continue;
-
+ for_each_ring(e, fd) {
igt_dynamic_f("%s", e->name) {
igt_require(gem_can_store_dword(fd, eb_ring(e)));
- run_test(fd, eb_ring(e),
+ run_test(fd, intel_ctx_0(fd),
+ eb_ring(e),
m->flags,
m->timeout);
gem_quiescent_gpu(fd);
@@ -335,12 +340,13 @@ igt_main
const struct intel_execution_engine2 *e;
igt_skip_on(m->flags & NEWFD && master);
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (!gem_class_can_store_dword(fd, e->class))
continue;
igt_dynamic_f("%s", e->name) {
- run_test(fd, e->flags,
+ run_test(fd, ctx,
+ e->flags,
m->flags,
m->timeout);
gem_quiescent_gpu(fd);
@@ -352,17 +358,19 @@ igt_main
igt_subtest("basic-all") {
const struct intel_execution_engine2 *e;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (!gem_class_can_store_dword(fd, e->class))
continue;
igt_fork(child, 1)
- run_test(fd, e->flags, 0, 1);
+ run_test(fd, ctx, e->flags, 0, 1);
}
igt_waitchildren();
}
- igt_fixture
+ igt_fixture {
+ intel_ctx_destroy(fd, ctx);
close(fd);
+ }
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 18/81] tests/prime_busy: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (16 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 17/81] tests/i915/gem_ringfill: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 19/81] tests/prime_vgem: Convert to intel_ctx_t (v2) Jason Ekstrand
` (65 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/prime_busy.c | 19 ++++++++++++-------
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/tests/prime_busy.c b/tests/prime_busy.c
index 8bf50eafe..e26848375 100644
--- a/tests/prime_busy.c
+++ b/tests/prime_busy.c
@@ -40,7 +40,7 @@ static bool prime_busy(struct pollfd *pfd, bool excl)
#define HANG 0x4
#define POLL 0x8
-static void busy(int fd, unsigned ring, unsigned flags)
+static void busy(int fd, const intel_ctx_t *ctx, unsigned ring, unsigned flags)
{
const int gen = intel_gen(intel_get_drm_devid(fd));
const uint32_t _bbe = MI_BATCH_BUFFER_END;
@@ -63,6 +63,7 @@ static void busy(int fd, unsigned ring, unsigned flags)
execbuf.flags = ring;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
memset(obj, 0, sizeof(obj));
obj[SCRATCH].handle = gem_create(fd, 4096);
@@ -186,7 +187,7 @@ static void busy(int fd, unsigned ring, unsigned flags)
close(pfd[SCRATCH].fd);
}
-static void test_mode(int fd, unsigned int flags)
+static void test_mode(int fd, const intel_ctx_t *ctx, unsigned int flags)
{
const struct intel_execution_engine2 *e;
igt_hang_t hang = {};
@@ -196,7 +197,7 @@ static void test_mode(int fd, unsigned int flags)
else
hang = igt_allow_hang(fd, 0, 0);
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (!gem_class_can_store_dword(fd, e->class))
continue;
@@ -204,7 +205,7 @@ static void test_mode(int fd, unsigned int flags)
continue;
igt_dynamic_f("%s", e->name)
- busy(fd, e->flags, flags);
+ busy(fd, ctx, e->flags, flags);
}
if ((flags & HANG) == 0)
@@ -215,11 +216,13 @@ static void test_mode(int fd, unsigned int flags)
igt_main
{
+ const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
fd = drm_open_driver_master(DRIVER_INTEL);
igt_require_gem(fd);
+ ctx = intel_ctx_create_all_physical(fd);
}
igt_subtest_group {
@@ -238,13 +241,15 @@ igt_main
for (const struct mode *m = modes; m->name; m++) {
igt_subtest_with_dynamic(m->name)
- test_mode(fd, m->flags);
+ test_mode(fd, ctx, m->flags);
igt_subtest_with_dynamic_f("%s-wait", m->name)
- test_mode(fd, m->flags | POLL);
+ test_mode(fd, ctx, m->flags | POLL);
}
}
- igt_fixture
+ igt_fixture {
+ intel_ctx_destroy(fd, ctx);
close(fd);
+ }
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 19/81] tests/prime_vgem: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (17 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 18/81] tests/prime_busy: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 20/81] tests/gem_exec_whisper: " Jason Ekstrand
` (64 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Jason Ekstrand):
- Pass intel_ctx_0() to work() instead of 0
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/prime_vgem.c | 35 +++++++++++++++++++++--------------
1 file changed, 21 insertions(+), 14 deletions(-)
diff --git a/tests/prime_vgem.c b/tests/prime_vgem.c
index aeff282b5..a1c3ed38e 100644
--- a/tests/prime_vgem.c
+++ b/tests/prime_vgem.c
@@ -559,7 +559,7 @@ static bool prime_busy(int fd, bool excl)
return poll(&pfd, 1, 0) == 0;
}
-static void work(int i915, int dmabuf, unsigned ring)
+static void work(int i915, int dmabuf, const intel_ctx_t *ctx, unsigned ring)
{
const int SCRATCH = 0;
const int BATCH = 1;
@@ -578,6 +578,7 @@ static void work(int i915, int dmabuf, unsigned ring)
execbuf.flags = ring;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
memset(obj, 0, sizeof(obj));
obj[SCRATCH].handle = prime_fd_to_handle(i915, dmabuf);
@@ -654,7 +655,7 @@ static void work(int i915, int dmabuf, unsigned ring)
igt_assert(read_busy && write_busy);
}
-static void test_busy(int i915, int vgem, unsigned ring)
+static void test_busy(int i915, int vgem, const intel_ctx_t *ctx, unsigned ring)
{
struct vgem_bo scratch;
struct timespec tv;
@@ -668,7 +669,7 @@ static void test_busy(int i915, int vgem, unsigned ring)
vgem_create(vgem, &scratch);
dmabuf = prime_handle_to_fd(vgem, scratch.handle);
- work(i915, dmabuf, ring);
+ work(i915, dmabuf, ctx, ring);
/* Calling busy in a loop should be enough to flush the rendering */
memset(&tv, 0, sizeof(tv));
@@ -684,7 +685,7 @@ static void test_busy(int i915, int vgem, unsigned ring)
close(dmabuf);
}
-static void test_wait(int i915, int vgem, unsigned ring)
+static void test_wait(int i915, int vgem, const intel_ctx_t *ctx, unsigned ring)
{
struct vgem_bo scratch;
struct pollfd pfd;
@@ -697,7 +698,7 @@ static void test_wait(int i915, int vgem, unsigned ring)
vgem_create(vgem, &scratch);
pfd.fd = prime_handle_to_fd(vgem, scratch.handle);
- work(i915, pfd.fd, ring);
+ work(i915, pfd.fd, ctx, ring);
pfd.events = POLLIN;
igt_assert_eq(poll(&pfd, 1, 10000), 1);
@@ -711,7 +712,7 @@ static void test_wait(int i915, int vgem, unsigned ring)
close(pfd.fd);
}
-static void test_sync(int i915, int vgem, unsigned ring)
+static void test_sync(int i915, int vgem, const intel_ctx_t *ctx, unsigned ring)
{
struct vgem_bo scratch;
uint32_t *ptr;
@@ -728,7 +729,7 @@ static void test_sync(int i915, int vgem, unsigned ring)
igt_assert(ptr != MAP_FAILED);
gem_close(vgem, scratch.handle);
- work(i915, dmabuf, ring);
+ work(i915, dmabuf, ctx, ring);
prime_sync_start(dmabuf, false);
for (i = 0; i < 1024; i++)
@@ -739,7 +740,7 @@ static void test_sync(int i915, int vgem, unsigned ring)
munmap(ptr, scratch.size);
}
-static void test_fence_wait(int i915, int vgem, unsigned ring)
+static void test_fence_wait(int i915, int vgem, const intel_ctx_t *ctx, unsigned ring)
{
struct vgem_bo scratch;
uint32_t fence;
@@ -760,7 +761,7 @@ static void test_fence_wait(int i915, int vgem, unsigned ring)
igt_assert(ptr != MAP_FAILED);
igt_fork(child, 1)
- work(i915, dmabuf, ring);
+ work(i915, dmabuf, ctx, ring);
sleep(1);
@@ -800,7 +801,7 @@ static void test_fence_hang(int i915, int vgem, unsigned flags)
igt_assert(ptr != MAP_FAILED);
gem_close(vgem, scratch.handle);
- work(i915, dmabuf, 0);
+ work(i915, dmabuf, intel_ctx_0(i915), 0);
/* The work should have been cancelled */
@@ -1042,12 +1043,15 @@ static void test_flip(int i915, int vgem, unsigned hang)
}
static void test_each_engine(const char *name, int vgem, int i915,
- void (*fn)(int i915, int vgem, unsigned int flags))
+ void (*fn)(int i915, int vgem,
+ const intel_ctx_t *ctx,
+ unsigned int flags))
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx = intel_ctx_create_all_physical(i915);
igt_subtest_with_dynamic(name) {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (!gem_class_can_store_dword(i915, e->class))
continue;
@@ -1056,10 +1060,12 @@ static void test_each_engine(const char *name, int vgem, int i915,
igt_dynamic_f("%s", e->name) {
gem_quiescent_gpu(i915);
- fn(i915, vgem, e->flags);
+ fn(i915, vgem, ctx, e->flags);
}
}
}
+
+ intel_ctx_destroy(i915, ctx);
}
igt_main
@@ -1110,7 +1116,8 @@ igt_main
{
static const struct {
const char *name;
- void (*fn)(int i915, int vgem, unsigned int engine);
+ void (*fn)(int i915, int vgem, const intel_ctx_t *ctx,
+ unsigned int engine);
} tests[] = {
{ "sync", test_sync },
{ "busy", test_busy },
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 20/81] tests/gem_exec_whisper: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (18 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 19/81] tests/prime_vgem: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 21/81] tests/i915/gem_ctx_exec: Stop cloning contexts in close_race Jason Ekstrand
` (63 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Ashutosh Dixit):
- Restore CREATE_FLAGS_SINGLE_TIMELINE for queues
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
lib/i915/gem_context.c | 17 +++++++
lib/i915/gem_context.h | 1 +
tests/i915/gem_exec_whisper.c | 88 +++++++++++++++++++++++------------
3 files changed, 76 insertions(+), 30 deletions(-)
diff --git a/lib/i915/gem_context.c b/lib/i915/gem_context.c
index 1dda09243..87dcbc6e8 100644
--- a/lib/i915/gem_context.c
+++ b/lib/i915/gem_context.c
@@ -209,6 +209,23 @@ void gem_context_destroy(int fd, uint32_t ctx_id)
igt_assert_eq(__gem_context_destroy(fd, ctx_id), 0);
}
+static bool __gem_context_has_flag(int i915, unsigned int flags)
+{
+ uint32_t ctx = 0;
+
+ __gem_context_create_ext(i915, flags, 0, &ctx);
+ if (ctx)
+ gem_context_destroy(i915, ctx);
+
+ errno = 0;
+ return ctx;
+}
+
+bool gem_context_has_single_timeline(int i915)
+{
+ return __gem_context_has_flag(i915, I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+}
+
int __gem_context_get_param(int fd, struct drm_i915_gem_context_param *p)
{
int err = 0;
diff --git a/lib/i915/gem_context.h b/lib/i915/gem_context.h
index 91bb9e7f2..6e2226d27 100644
--- a/lib/i915/gem_context.h
+++ b/lib/i915/gem_context.h
@@ -59,6 +59,7 @@ bool gem_has_queues(int i915);
bool gem_has_contexts(int fd);
void gem_require_contexts(int fd);
+bool gem_context_has_single_timeline(int i915);
void gem_context_require_bannable(int fd);
void gem_context_require_param(int fd, uint64_t param);
diff --git a/tests/i915/gem_exec_whisper.c b/tests/i915/gem_exec_whisper.c
index a9d3fa05e..d16409203 100644
--- a/tests/i915/gem_exec_whisper.c
+++ b/tests/i915/gem_exec_whisper.c
@@ -29,12 +29,14 @@
#include "i915/gem.h"
#include "i915/gem_create.h"
+#include "i915/gem_vm.h"
#include "igt.h"
#include "igt_debugfs.h"
#include "igt_rapl.h"
#include "igt_gt.h"
#include "igt_rand.h"
#include "igt_sysfs.h"
+#include "intel_ctx.h"
#define ENGINE_MASK (I915_EXEC_RING_MASK | I915_EXEC_BSD_MASK)
@@ -82,13 +84,14 @@ static void verify_reloc(int fd, uint32_t handle,
#define BASIC 0x400
struct hang {
+ const intel_ctx_t *ctx;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_execbuffer2 execbuf;
int fd;
};
-static void init_hang(struct hang *h, int fd)
+static void init_hang(struct hang *h, int fd, const intel_ctx_cfg_t *cfg)
{
uint32_t *batch;
int i, gen;
@@ -98,6 +101,13 @@ static void init_hang(struct hang *h, int fd)
gen = intel_gen(intel_get_drm_devid(h->fd));
+ if (gem_has_contexts(fd)) {
+ h->ctx = intel_ctx_create(h->fd, cfg);
+ h->execbuf.rsvd1 = h->ctx->id;
+ } else {
+ h->ctx = NULL;
+ }
+
memset(&h->execbuf, 0, sizeof(h->execbuf));
h->execbuf.buffers_ptr = to_user_pointer(&h->obj);
h->execbuf.buffer_count = 1;
@@ -157,6 +167,7 @@ static void submit_hang(struct hang *h, unsigned *engines, int nengine, unsigned
static void fini_hang(struct hang *h)
{
+ intel_ctx_destroy(h->fd, h->ctx);
close(h->fd);
}
@@ -166,7 +177,8 @@ static void ctx_set_random_priority(int fd, uint32_t ctx)
gem_context_set_priority(fd, ctx, prio);
}
-static void whisper(int fd, unsigned engine, unsigned flags)
+static void whisper(int fd, const intel_ctx_t *ctx,
+ unsigned engine, unsigned flags)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
@@ -180,7 +192,8 @@ static void whisper(int fd, unsigned engine, unsigned flags)
const struct intel_execution_engine2 *e;
struct hang hang;
int fds[64];
- uint32_t contexts[64];
+ intel_ctx_cfg_t local_cfg;
+ const intel_ctx_t *contexts[64];
unsigned nengine;
uint32_t batch[16];
unsigned int relocations = 0;
@@ -204,7 +217,7 @@ static void whisper(int fd, unsigned engine, unsigned flags)
nengine = 0;
if (engine == ALL_ENGINES) {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (gem_class_can_store_dword(fd, e->class))
engines[nengine++] = e->flags;
}
@@ -220,11 +233,13 @@ static void whisper(int fd, unsigned engine, unsigned flags)
if (flags & CONTEXTS)
gem_require_contexts(fd);
- if (flags & QUEUES)
- igt_require(gem_has_queues(fd));
+ if (flags & QUEUES) {
+ igt_require(gem_has_vm(fd));
+ igt_require(gem_context_has_single_timeline(fd));
+ }
if (flags & HANG)
- init_hang(&hang, fd);
+ init_hang(&hang, fd, &ctx->cfg);
nchild = 1;
if (flags & FORKED)
@@ -273,6 +288,7 @@ static void whisper(int fd, unsigned engine, unsigned flags)
execbuf.flags |= I915_EXEC_NO_RELOC;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
igt_require(__gem_execbuf(fd, &execbuf) == 0);
scratch = tmp[0];
store = tmp[1];
@@ -294,18 +310,21 @@ static void whisper(int fd, unsigned engine, unsigned flags)
igt_assert(loc == sizeof(uint32_t) * i);
batch[++i] = MI_BATCH_BUFFER_END;
- if (flags & CONTEXTS) {
- for (n = 0; n < 64; n++)
- contexts[n] = gem_context_clone_with_engines(fd, 0);
- }
- if (flags & QUEUES) {
- for (n = 0; n < 64; n++)
- contexts[n] = gem_queue_create(fd);
- }
if (flags & FDS) {
for (n = 0; n < 64; n++) {
fds[n] = gem_reopen_driver(fd);
- gem_context_copy_engines(fd, 0, fds[n], 0);
+ }
+ }
+ if (flags & (CONTEXTS | QUEUES | FDS)) {
+ local_cfg = ctx->cfg;
+ if (flags & QUEUES) {
+ igt_assert(!(flags & FDS));
+ local_cfg.vm = gem_vm_create(fd);
+ local_cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
+ }
+ for (n = 0; n < 64; n++) {
+ int this_fd = (flags & FDS) ? fds[n] : fd;
+ contexts[n] = intel_ctx_create(this_fd, &local_cfg);
}
}
@@ -414,8 +433,8 @@ static void whisper(int fd, unsigned engine, unsigned flags)
execbuf.flags &= ~ENGINE_MASK;
execbuf.flags |= engines[rand() % nengine];
}
- if (flags & (CONTEXTS | QUEUES)) {
- execbuf.rsvd1 = contexts[rand() % 64];
+ if (flags & (CONTEXTS | QUEUES | FDS)) {
+ execbuf.rsvd1 = contexts[rand() % 64]->id;
if (flags & PRIORITY)
ctx_set_random_priority(this_fd, execbuf.rsvd1);
}
@@ -443,7 +462,7 @@ static void whisper(int fd, unsigned engine, unsigned flags)
}
}
execbuf.flags &= ~ENGINE_MASK;
- execbuf.rsvd1 = 0;
+ execbuf.rsvd1 = ctx->id;
execbuf.buffers_ptr = to_user_pointer(&tmp);
tmp[0] = tmp[1];
@@ -493,16 +512,22 @@ static void whisper(int fd, unsigned engine, unsigned flags)
gem_close(fd, scratch.handle);
gem_close(fd, store.handle);
+ if (flags & (CONTEXTS | QUEUES | FDS)) {
+ for (n = 0; n < 64; n++) {
+ int this_fd = (flags & FDS) ? fds[n] : fd;
+ intel_ctx_destroy(this_fd, contexts[n]);
+ }
+ if (local_cfg.vm) {
+ igt_assert(!(flags & FDS));
+ gem_vm_destroy(fd, local_cfg.vm);
+ }
+ }
+ for (n = 0; n < QLEN; n++)
+ gem_close(fd, batches[n].handle);
if (flags & FDS) {
for (n = 0; n < 64; n++)
close(fds[n]);
}
- if (flags & (CONTEXTS | QUEUES)) {
- for (n = 0; n < 64; n++)
- gem_context_destroy(fd, contexts[n]);
- }
- for (n = 0; n < QLEN; n++)
- gem_close(fd, batches[n].handle);
}
igt_waitchildren();
@@ -555,6 +580,7 @@ igt_main
{ NULL }
};
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
@@ -562,6 +588,7 @@ igt_main
igt_require_gem(fd);
igt_require(gem_can_store_dword(fd, 0));
gem_submission_print_method(fd);
+ ctx = intel_ctx_create_all_physical(fd);
igt_fork_hang_detector(fd);
}
@@ -569,10 +596,10 @@ igt_main
for (const struct mode *m = modes; m->name; m++) {
igt_subtest_f("%s%s",
m->flags & BASIC ? "basic-" : "", m->name)
- whisper(fd, ALL_ENGINES, m->flags);
+ whisper(fd, ctx, ALL_ENGINES, m->flags);
igt_subtest_f("%s%s-all",
m->flags & BASIC ? "basic-" : "", m->name)
- whisper(fd, ALL_ENGINES, m->flags | ALL);
+ whisper(fd, ctx, ALL_ENGINES, m->flags | ALL);
}
for (const struct mode *m = modes; m->name; m++) {
@@ -580,12 +607,12 @@ igt_main
continue;
igt_subtest_with_dynamic_f("%s", m->name) {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (!gem_class_can_store_dword(fd, e->class))
continue;
igt_dynamic_f("%s", e->name)
- whisper(fd, e->flags, m->flags);
+ whisper(fd, ctx, e->flags, m->flags);
}
}
}
@@ -599,11 +626,12 @@ igt_main
if (m->flags & INTERRUPTIBLE)
continue;
igt_subtest_f("hang-%s", m->name)
- whisper(fd, ALL_ENGINES, m->flags | HANG);
+ whisper(fd, ctx, ALL_ENGINES, m->flags | HANG);
}
}
igt_fixture {
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 21/81] tests/i915/gem_ctx_exec: Stop cloning contexts in close_race
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (19 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 20/81] tests/gem_exec_whisper: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 22/81] tests/i915/gem_ctx_exec: Convert to intel_ctx_t Jason Ekstrand
` (62 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Nothing in this subtest sets the set of engines on ctx0 so there's no
point in cloning them.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_ctx_exec.c | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/tests/i915/gem_ctx_exec.c b/tests/i915/gem_ctx_exec.c
index a8616f95c..6db2d5983 100644
--- a/tests/i915/gem_ctx_exec.c
+++ b/tests/i915/gem_ctx_exec.c
@@ -351,7 +351,7 @@ static void close_race(int i915)
igt_assert(contexts != MAP_FAILED);
for (int child = 0; child < ncpus; child++)
- contexts[child] = gem_context_clone_with_engines(i915, 0);
+ contexts[child] = gem_context_create(i915);
igt_fork(child, ncpus) {
spin = __igt_spin_new(i915, .flags = IGT_SPIN_POLL_RUN);
@@ -405,8 +405,7 @@ static void close_race(int i915)
*/
for (int child = 0; child < ncpus; child++) {
gem_context_destroy(i915, contexts[child]);
- contexts[child] =
- gem_context_clone_with_engines(i915, 0);
+ contexts[child] = gem_context_create(i915);
}
usleep(1000 + hars_petruska_f54_1_random_unsafe() % 2000);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 22/81] tests/i915/gem_ctx_exec: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (20 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 21/81] tests/i915/gem_ctx_exec: Stop cloning contexts in close_race Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 23/81] tests/i915/gem_exec_suspend: Convert to intel_ctx_t (v2) Jason Ekstrand
` (61 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Jason Ekstrand):
- Also use an intel_ctx_t for basic-norecovery
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_ctx_exec.c | 59 +++++++++++++++++++++++----------------
1 file changed, 35 insertions(+), 24 deletions(-)
diff --git a/tests/i915/gem_ctx_exec.c b/tests/i915/gem_ctx_exec.c
index 6db2d5983..4d3d1c12f 100644
--- a/tests/i915/gem_ctx_exec.c
+++ b/tests/i915/gem_ctx_exec.c
@@ -170,8 +170,9 @@ static void norecovery(int i915)
hang = igt_allow_hang(i915, 0, 0);
for (int pass = 1; pass >= 0; pass--) {
+ const intel_ctx_t *ctx = intel_ctx_create(i915, NULL);
struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_create(i915),
+ .ctx_id = ctx->id,
.param = I915_CONTEXT_PARAM_RECOVERABLE,
.value = pass,
};
@@ -184,8 +185,7 @@ static void norecovery(int i915)
gem_context_get_param(i915, &param);
igt_assert_eq(param.value, pass);
- spin = __igt_spin_new(i915,
- .ctx_id = param.ctx_id,
+ spin = __igt_spin_new(i915, .ctx = ctx,
.flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin);
@@ -195,7 +195,7 @@ static void norecovery(int i915)
igt_assert_eq(__gem_execbuf(i915, &spin->execbuf), expect);
igt_spin_free(i915, spin);
- gem_context_destroy(i915, param.ctx_id);
+ intel_ctx_destroy(i915, ctx);
}
igt_disallow_hang(i915, hang);
@@ -268,7 +268,7 @@ static void nohangcheck_hostile(int i915)
const struct intel_execution_engine2 *e;
igt_hang_t hang;
int fence = -1;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
int err = 0;
int dir;
@@ -282,12 +282,12 @@ static void nohangcheck_hostile(int i915)
dir = igt_params_open(i915);
igt_require(dir != -1);
- ctx = gem_context_create(i915);
- hang = igt_allow_hang(i915, ctx, 0);
+ ctx = intel_ctx_create_all_physical(i915);
+ hang = igt_allow_hang(i915, ctx->id, 0);
igt_require(__enable_hangcheck(dir, false));
- ____for_each_physical_engine(i915, ctx, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_spin_t *spin;
int new;
@@ -295,7 +295,7 @@ static void nohangcheck_hostile(int i915)
gem_engine_property_printf(i915, e->name,
"preempt_timeout_ms", "%d", 50);
- spin = __igt_spin_new(i915, ctx,
+ spin = __igt_spin_new(i915, .ctx = ctx,
.engine = e->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_FENCE_OUT));
@@ -316,7 +316,7 @@ static void nohangcheck_hostile(int i915)
fence = tmp;
}
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert(fence != -1);
if (sync_fence_wait(fence, MSEC_PER_SEC)) { /* 640ms preempt-timeout */
@@ -341,30 +341,38 @@ static void nohangcheck_hostile(int i915)
static void close_race(int i915)
{
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
- uint32_t *contexts;
+ const intel_ctx_t *base_ctx;
+ const intel_ctx_t **ctx;
+ uint32_t *ctx_id;
igt_spin_t *spin;
/* Check we can execute a polling spinner */
- igt_spin_free(i915, igt_spin_new(i915, .flags = IGT_SPIN_POLL_RUN));
+ base_ctx = intel_ctx_create(i915, NULL);
+ igt_spin_free(i915, igt_spin_new(i915, .ctx = base_ctx,
+ .flags = IGT_SPIN_POLL_RUN));
- contexts = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
- igt_assert(contexts != MAP_FAILED);
+ ctx = calloc(ncpus, sizeof(*ctx));
+ ctx_id = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
+ igt_assert(ctx_id != MAP_FAILED);
- for (int child = 0; child < ncpus; child++)
- contexts[child] = gem_context_create(i915);
+ for (int child = 0; child < ncpus; child++) {
+ ctx[child] = intel_ctx_create(i915, NULL);
+ ctx_id[child] = ctx[child]->id;
+ }
igt_fork(child, ncpus) {
- spin = __igt_spin_new(i915, .flags = IGT_SPIN_POLL_RUN);
+ spin = __igt_spin_new(i915, .ctx = base_ctx,
+ .flags = IGT_SPIN_POLL_RUN);
igt_spin_end(spin);
gem_sync(i915, spin->handle);
- while (!READ_ONCE(contexts[ncpus])) {
+ while (!READ_ONCE(ctx_id[ncpus])) {
int64_t timeout = 1;
igt_spin_reset(spin);
igt_assert(!igt_spin_has_started(spin));
- spin->execbuf.rsvd1 = READ_ONCE(contexts[child]);
+ spin->execbuf.rsvd1 = READ_ONCE(ctx_id[child]);
if (__gem_execbuf(i915, &spin->execbuf))
continue;
@@ -404,19 +412,22 @@ static void close_race(int i915)
* and the kernel's context/request handling.
*/
for (int child = 0; child < ncpus; child++) {
- gem_context_destroy(i915, contexts[child]);
- contexts[child] = gem_context_create(i915);
+ intel_ctx_destroy(i915, ctx[child]);
+ ctx[child] = intel_ctx_create(i915, NULL);
+ ctx_id[child] = ctx[child]->id;
}
usleep(1000 + hars_petruska_f54_1_random_unsafe() % 2000);
}
- contexts[ncpus] = 1;
+ ctx_id[ncpus] = 1;
igt_waitchildren();
+ intel_ctx_destroy(i915, base_ctx);
for (int child = 0; child < ncpus; child++)
- gem_context_destroy(i915, contexts[child]);
+ intel_ctx_destroy(i915, ctx[child]);
- munmap(contexts, 4096);
+ free(ctx);
+ munmap(ctx_id, 4096);
}
igt_main
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 23/81] tests/i915/gem_exec_suspend: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (21 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 22/81] tests/i915/gem_ctx_exec: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 24/81] tests/i915/gem_sync: " Jason Ekstrand
` (60 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- intel_ctx_destroy() the context
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_suspend.c | 53 ++++++++++++++++++++---------------
1 file changed, 30 insertions(+), 23 deletions(-)
diff --git a/tests/i915/gem_exec_suspend.c b/tests/i915/gem_exec_suspend.c
index d13c443e2..0ef26ce11 100644
--- a/tests/i915/gem_exec_suspend.c
+++ b/tests/i915/gem_exec_suspend.c
@@ -51,7 +51,8 @@
#define CACHED (1<<8)
#define HANG (2<<8)
-static void run_test(int fd, unsigned engine, unsigned flags);
+static void run_test(int fd, const intel_ctx_t *ctx,
+ unsigned engine, unsigned flags);
static void check_bo(int fd, uint32_t handle)
{
@@ -66,12 +67,13 @@ static void check_bo(int fd, uint32_t handle)
munmap(map, 4096);
}
-static void test_all(int fd, unsigned flags)
+static void test_all(int fd, const intel_ctx_t *ctx, unsigned flags)
{
- run_test(fd, ALL_ENGINES, flags & ~0xff);
+ run_test(fd, ctx, ALL_ENGINES, flags & ~0xff);
}
-static void run_test(int fd, unsigned engine, unsigned flags)
+static void run_test(int fd, const intel_ctx_t *ctx,
+ unsigned engine, unsigned flags)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -86,7 +88,7 @@ static void run_test(int fd, unsigned engine, unsigned flags)
if (engine == ALL_ENGINES) {
const struct intel_execution_engine2 *e;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (gem_class_can_store_dword(fd, e->class))
engines[nengine++] = e->flags;
}
@@ -97,7 +99,7 @@ static void run_test(int fd, unsigned engine, unsigned flags)
/* Before suspending, check normal operation */
if (mode(flags) != NOSLEEP)
- test_all(fd, flags);
+ test_all(fd, ctx, flags);
gem_quiescent_gpu(fd);
@@ -107,6 +109,7 @@ static void run_test(int fd, unsigned engine, unsigned flags)
execbuf.flags = 1 << 11;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
memset(obj, 0, sizeof(obj));
obj[0].handle = gem_create(fd, 4096);
@@ -203,7 +206,7 @@ static void run_test(int fd, unsigned engine, unsigned flags)
/* After resume, make sure it still works */
if (mode(flags) != NOSLEEP)
- test_all(fd, flags);
+ test_all(fd, ctx, flags);
}
struct battery_sample {
@@ -230,7 +233,8 @@ static double d_time(const struct battery_sample *after,
(after->tv.tv_nsec - before->tv.tv_nsec) * 1e-9); /* s */
}
-static void power_test(int i915, unsigned engine, unsigned flags)
+static void power_test(int i915, const intel_ctx_t *ctx,
+ unsigned engine, unsigned flags)
{
struct battery_sample before, after;
char *status;
@@ -250,7 +254,7 @@ static void power_test(int i915, unsigned engine, unsigned flags)
igt_set_autoresume_delay(5 * 60); /* 5 minutes; longer == more stable */
igt_assert(get_power(dir, &before));
- run_test(i915, engine, flags);
+ run_test(i915, ctx, engine, flags);
igt_assert(get_power(dir, &after));
igt_set_autoresume_delay(0);
@@ -274,45 +278,47 @@ igt_main
}, *m;
const struct intel_execution_engine2 *e;
igt_hang_t hang;
+ const intel_ctx_t *ctx;
int fd;
igt_fixture {
fd = drm_open_driver_master(DRIVER_INTEL);
igt_require_gem(fd);
igt_require(gem_can_store_dword(fd, 0));
+ ctx = intel_ctx_create_all_physical(fd);
igt_fork_hang_detector(fd);
}
igt_subtest("basic")
- run_test(fd, ALL_ENGINES, NOSLEEP);
+ run_test(fd, ctx, ALL_ENGINES, NOSLEEP);
igt_subtest("basic-S0")
- run_test(fd, ALL_ENGINES, IDLE);
+ run_test(fd, ctx, ALL_ENGINES, IDLE);
igt_subtest("basic-S3-devices")
- run_test(fd, ALL_ENGINES, SUSPEND_DEVICES);
+ run_test(fd, ctx, ALL_ENGINES, SUSPEND_DEVICES);
igt_subtest("basic-S3")
- run_test(fd, ALL_ENGINES, SUSPEND);
+ run_test(fd, ctx, ALL_ENGINES, SUSPEND);
igt_subtest("basic-S4-devices")
- run_test(fd, ALL_ENGINES, HIBERNATE_DEVICES);
+ run_test(fd, ctx, ALL_ENGINES, HIBERNATE_DEVICES);
igt_subtest("basic-S4")
- run_test(fd, ALL_ENGINES, HIBERNATE);
+ run_test(fd, ctx, ALL_ENGINES, HIBERNATE);
for (m = modes; m->suffix; m++) {
igt_subtest_with_dynamic_f("uncached%s", m->suffix) {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (!gem_class_can_store_dword(fd, e->class))
continue;
igt_dynamic_f("%s", e->name)
- run_test(fd, e->flags, m->mode | UNCACHED);
+ run_test(fd, ctx, e->flags, m->mode | UNCACHED);
}
}
igt_subtest_with_dynamic_f("cached%s", m->suffix) {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (!gem_class_can_store_dword(fd, e->class))
continue;
igt_dynamic_f("%s", e->name)
- run_test(fd, e->flags, m->mode | CACHED);
+ run_test(fd, ctx, e->flags, m->mode | CACHED);
}
}
}
@@ -323,17 +329,18 @@ igt_main
}
igt_subtest("hang-S3")
- run_test(fd, 0, SUSPEND | HANG);
+ run_test(fd, intel_ctx_0(fd), 0, SUSPEND | HANG);
igt_subtest("hang-S4")
- run_test(fd, 0, HIBERNATE | HANG);
+ run_test(fd, intel_ctx_0(fd), 0, HIBERNATE | HANG);
igt_subtest("power-S0")
- power_test(fd, 0, IDLE);
+ power_test(fd, intel_ctx_0(fd), 0, IDLE);
igt_subtest("power-S3")
- power_test(fd, 0, SUSPEND);
+ power_test(fd, intel_ctx_0(fd), 0, SUSPEND);
igt_fixture {
igt_disallow_hang(fd, hang);
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 24/81] tests/i915/gem_sync: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (22 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 23/81] tests/i915/gem_exec_suspend: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 25/81] tests/i915/gem_userptr_blits: Convert to intel_ctx_t Jason Ekstrand
` (59 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Drop some redundant code
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_sync.c | 158 ++++++++++++++++++++++++------------------
1 file changed, 89 insertions(+), 69 deletions(-)
diff --git a/tests/i915/gem_sync.c b/tests/i915/gem_sync.c
index ae41b6bb0..6cb00c406 100644
--- a/tests/i915/gem_sync.c
+++ b/tests/i915/gem_sync.c
@@ -97,38 +97,32 @@ filter_engines_can_store_dword(int fd, struct intel_engine_data *ied)
ied->nengines = count;
}
-static struct intel_engine_data list_store_engines(int fd, unsigned ring)
+static struct intel_engine_data
+list_engines(int fd, const intel_ctx_t *ctx, unsigned ring)
{
struct intel_engine_data ied = { };
if (ring == ALL_ENGINES) {
- ied = intel_init_engine_list(fd, 0);
- filter_engines_can_store_dword(fd, &ied);
+ ied = intel_engine_list_for_ctx_cfg(fd, &ctx->cfg);
} else {
- if (gem_has_ring(fd, ring) && gem_can_store_dword(fd, ring)) {
- ied.engines[ied.nengines].flags = ring;
- strcpy(ied.engines[ied.nengines].name, " ");
- ied.nengines++;
- }
+ if (ctx->cfg.num_engines)
+ igt_assert(ring < ctx->cfg.num_engines);
+ else
+ igt_assert(gem_has_ring(fd, ring));
+
+ ied.engines[ied.nengines].flags = ring;
+ strcpy(ied.engines[ied.nengines].name, " ");
+ ied.nengines++;
}
return ied;
}
-static struct intel_engine_data list_engines(int fd, unsigned ring)
+static struct intel_engine_data
+list_store_engines(int fd, const intel_ctx_t *ctx, unsigned ring)
{
- struct intel_engine_data ied = { };
-
- if (ring == ALL_ENGINES) {
- ied = intel_init_engine_list(fd, 0);
- } else {
- if (gem_has_ring(fd, ring)) {
- ied.engines[ied.nengines].flags = ring;
- strcpy(ied.engines[ied.nengines].name, " ");
- ied.nengines++;
- }
- }
-
+ struct intel_engine_data ied = list_engines(fd, ctx, ring);
+ filter_engines_can_store_dword(fd, &ied);
return ied;
}
@@ -150,11 +144,12 @@ static void xchg_engine(void *array, unsigned i, unsigned j)
}
static void
-sync_ring(int fd, unsigned ring, int num_children, int timeout)
+sync_ring(int fd, const intel_ctx_t *ctx,
+ unsigned ring, int num_children, int timeout)
{
struct intel_engine_data ied;
- ied = list_engines(fd, ring);
+ ied = list_engines(fd, ctx, ring);
igt_require(ied.nengines);
num_children *= ied.nengines;
@@ -174,6 +169,7 @@ sync_ring(int fd, unsigned ring, int num_children, int timeout)
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = ied_flags(&ied, child);
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
gem_sync(fd, object.handle);
@@ -196,7 +192,8 @@ sync_ring(int fd, unsigned ring, int num_children, int timeout)
}
static void
-idle_ring(int fd, unsigned int ring, int num_children, int timeout)
+idle_ring(int fd, const intel_ctx_t *ctx, unsigned int ring,
+ int num_children, int timeout)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 object;
@@ -214,6 +211,7 @@ idle_ring(int fd, unsigned int ring, int num_children, int timeout)
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = ring;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
gem_sync(fd, object.handle);
@@ -235,11 +233,12 @@ idle_ring(int fd, unsigned int ring, int num_children, int timeout)
}
static void
-wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
+wakeup_ring(int fd, const intel_ctx_t *ctx, unsigned ring,
+ int timeout, int wlen)
{
struct intel_engine_data ied;
- ied = list_store_engines(fd, ring);
+ ied = list_store_engines(fd, ctx, ring);
igt_require(ied.nengines);
intel_detect_and_clear_missed_interrupts(fd);
@@ -259,8 +258,10 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = ied_flags(&ied, child);
+ execbuf.rsvd1 = ctx->id;
spin = __igt_spin_new(fd,
+ .ctx = ctx,
.engine = execbuf.flags,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FAST));
@@ -327,12 +328,12 @@ wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
}
-static void active_ring(int fd, unsigned int ring,
+static void active_ring(int fd, const intel_ctx_t *ctx, unsigned int ring,
int num_children, int timeout)
{
struct intel_engine_data ied;
- ied = list_store_engines(fd, ring);
+ ied = list_store_engines(fd, ctx, ring);
igt_require(ied.nengines);
intel_detect_and_clear_missed_interrupts(fd);
@@ -342,10 +343,12 @@ static void active_ring(int fd, unsigned int ring,
igt_spin_t *spin[2];
spin[0] = __igt_spin_new(fd,
+ .ctx = ctx,
.engine = ied_flags(&ied, child),
.flags = IGT_SPIN_FAST);
spin[1] = __igt_spin_new(fd,
+ .ctx = ctx,
.engine = ied_flags(&ied, child),
.flags = IGT_SPIN_FAST);
@@ -377,11 +380,12 @@ static void active_ring(int fd, unsigned int ring,
}
static void
-active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
+active_wakeup_ring(int fd, const intel_ctx_t *ctx, unsigned ring,
+ int timeout, int wlen)
{
struct intel_engine_data ied;
- ied = list_store_engines(fd, ring);
+ ied = list_store_engines(fd, ctx, ring);
igt_require(ied.nengines);
intel_detect_and_clear_missed_interrupts(fd);
@@ -401,6 +405,7 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = ied_flags(&ied, child);
+ execbuf.rsvd1 = ctx->id;
spin[0] = __igt_spin_new(fd,
.engine = execbuf.flags,
@@ -491,12 +496,13 @@ active_wakeup_ring(int fd, unsigned ring, int timeout, int wlen)
}
static void
-store_ring(int fd, unsigned ring, int num_children, int timeout)
+store_ring(int fd, const intel_ctx_t *ctx, unsigned ring,
+ int num_children, int timeout)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct intel_engine_data ied;
- ied = list_store_engines(fd, ring);
+ ied = list_store_engines(fd, ctx, ring);
igt_require(ied.nengines);
num_children *= ied.nengines;
@@ -517,6 +523,7 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
execbuf.flags |= I915_EXEC_HANDLE_LUT;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
memset(object, 0, sizeof(object));
object[0].handle = gem_create(fd, 4096);
@@ -587,14 +594,15 @@ store_ring(int fd, unsigned ring, int num_children, int timeout)
}
static void
-switch_ring(int fd, unsigned ring, int num_children, int timeout)
+switch_ring(int fd, const intel_ctx_t *ctx, unsigned ring,
+ int num_children, int timeout)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct intel_engine_data ied;
gem_require_contexts(fd);
- ied = list_store_engines(fd, ring);
+ ied = list_store_engines(fd, ctx, ring);
igt_require(ied.nengines);
num_children *= ied.nengines;
@@ -604,6 +612,7 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
struct drm_i915_gem_exec_object2 object[2];
struct drm_i915_gem_relocation_entry reloc[1024];
struct drm_i915_gem_execbuffer2 execbuf;
+ const intel_ctx_t *ctx;
} contexts[2];
double elapsed, baseline;
unsigned long cycles;
@@ -621,7 +630,9 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
c->execbuf.flags |= I915_EXEC_HANDLE_LUT;
if (gen < 6)
c->execbuf.flags |= I915_EXEC_SECURE;
- c->execbuf.rsvd1 = gem_context_create(fd);
+
+ c->ctx = intel_ctx_create(fd, &ctx->cfg);
+ c->execbuf.rsvd1 = c->ctx->id;
memset(c->object, 0, sizeof(c->object));
c->object[0].handle = gem_create(fd, 4096);
@@ -717,7 +728,7 @@ switch_ring(int fd, unsigned ring, int num_children, int timeout)
for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
gem_close(fd, contexts[i].object[1].handle);
gem_close(fd, contexts[i].object[0].handle);
- gem_context_destroy(fd, contexts[i].execbuf.rsvd1);
+ intel_ctx_destroy(fd, contexts[i].ctx);
}
}
igt_waitchildren_timeout(timeout+10, NULL);
@@ -766,7 +777,8 @@ static void *waiter(void *arg)
}
static void
-__store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
+__store_many(int fd, const intel_ctx_t *ctx, unsigned ring,
+ int timeout, unsigned long *cycles)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -785,6 +797,7 @@ __store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
execbuf.flags |= I915_EXEC_HANDLE_LUT;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
memset(object, 0, sizeof(object));
object[0].handle = gem_create(fd, 4096);
@@ -894,7 +907,8 @@ __store_many(int fd, unsigned ring, int timeout, unsigned long *cycles)
}
static void
-store_many(int fd, unsigned int ring, int num_children, int timeout)
+store_many(int fd, const intel_ctx_t *ctx, unsigned int ring,
+ int num_children, int timeout)
{
struct intel_engine_data ied;
unsigned long *shared;
@@ -902,14 +916,14 @@ store_many(int fd, unsigned int ring, int num_children, int timeout)
shared = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
igt_assert(shared != MAP_FAILED);
- ied = list_store_engines(fd, ring);
+ ied = list_store_engines(fd, ctx, ring);
igt_require(ied.nengines);
intel_detect_and_clear_missed_interrupts(fd);
for (int n = 0; n < ied.nengines; n++) {
igt_fork(child, 1)
- __store_many(fd,
+ __store_many(fd, ctx,
ied_flags(&ied, n),
timeout,
&shared[n]);
@@ -925,11 +939,11 @@ store_many(int fd, unsigned int ring, int num_children, int timeout)
}
static void
-sync_all(int fd, int num_children, int timeout)
+sync_all(int fd, const intel_ctx_t *ctx, int num_children, int timeout)
{
struct intel_engine_data ied;
- ied = list_engines(fd, ALL_ENGINES);
+ ied = list_engines(fd, ctx, ALL_ENGINES);
igt_require(ied.nengines);
intel_detect_and_clear_missed_interrupts(fd);
@@ -947,6 +961,7 @@ sync_all(int fd, int num_children, int timeout)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
gem_sync(fd, object.handle);
@@ -971,12 +986,12 @@ sync_all(int fd, int num_children, int timeout)
}
static void
-store_all(int fd, int num_children, int timeout)
+store_all(int fd, const intel_ctx_t *ctx, int num_children, int timeout)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct intel_engine_data ied;
- ied = list_store_engines(fd, ALL_ENGINES);
+ ied = list_store_engines(fd, ctx, ALL_ENGINES);
igt_require(ied.nengines);
intel_detect_and_clear_missed_interrupts(fd);
@@ -995,6 +1010,7 @@ store_all(int fd, int num_children, int timeout)
execbuf.flags |= I915_EXEC_HANDLE_LUT;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
memset(object, 0, sizeof(object));
object[0].handle = gem_create(fd, 4096);
@@ -1070,20 +1086,21 @@ store_all(int fd, int num_children, int timeout)
}
static void
-preempt(int fd, unsigned ring, int num_children, int timeout)
+preempt(int fd, const intel_ctx_t *ctx, unsigned ring,
+ int num_children, int timeout)
{
struct intel_engine_data ied;
- uint32_t ctx[2];
+ const intel_ctx_t *tmp_ctx[2];
- ied = list_engines(fd, ALL_ENGINES);
+ ied = list_engines(fd, ctx, ALL_ENGINES);
igt_require(ied.nengines);
num_children *= ied.nengines;
- ctx[0] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[0], MIN_PRIO);
+ tmp_ctx[0] = intel_ctx_create(fd, &ctx->cfg);
+ gem_context_set_priority(fd, tmp_ctx[0]->id, MIN_PRIO);
- ctx[1] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[1], MAX_PRIO);
+ tmp_ctx[1] = intel_ctx_create(fd, &ctx->cfg);
+ gem_context_set_priority(fd, tmp_ctx[1]->id, MAX_PRIO);
intel_detect_and_clear_missed_interrupts(fd);
igt_fork(child, num_children) {
@@ -1101,7 +1118,7 @@ preempt(int fd, unsigned ring, int num_children, int timeout)
execbuf.buffers_ptr = to_user_pointer(&object);
execbuf.buffer_count = 1;
execbuf.flags = ied_flags(&ied, child);
- execbuf.rsvd1 = ctx[1];
+ execbuf.rsvd1 = tmp_ctx[1]->id;
gem_execbuf(fd, &execbuf);
gem_sync(fd, object.handle);
@@ -1110,7 +1127,7 @@ preempt(int fd, unsigned ring, int num_children, int timeout)
do {
igt_spin_t *spin =
__igt_spin_new(fd,
- .ctx_id = ctx[0],
+ .ctx = tmp_ctx[0],
.engine = execbuf.flags);
do {
@@ -1129,8 +1146,8 @@ preempt(int fd, unsigned ring, int num_children, int timeout)
igt_waitchildren_timeout(timeout+10, NULL);
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
- gem_context_destroy(fd, ctx[1]);
- gem_context_destroy(fd, ctx[0]);
+ intel_ctx_destroy(fd, tmp_ctx[1]);
+ intel_ctx_destroy(fd, tmp_ctx[0]);
}
igt_main
@@ -1138,7 +1155,7 @@ igt_main
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
const struct {
const char *name;
- void (*func)(int fd, unsigned int engine,
+ void (*func)(int fd, const intel_ctx_t *ctx, unsigned int engine,
int num_children, int timeout);
int num_children;
int timeout;
@@ -1173,6 +1190,7 @@ igt_main
#define for_each_test(t, T) for(typeof(*T) *t = T; t->name; t++)
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
@@ -1180,6 +1198,7 @@ igt_main
igt_require_gem(fd);
gem_submission_print_method(fd);
gem_scheduler_print_capability(fd);
+ ctx = intel_ctx_create_all_physical(fd);
igt_fork_hang_detector(fd);
}
@@ -1189,7 +1208,7 @@ igt_main
igt_subtest_with_dynamic_f("%s", t->name) {
for (const struct intel_execution_ring *l = intel_execution_rings; l->name; l++) {
igt_dynamic_f("%s", l->name) {
- t->func(fd, eb_ring(l),
+ t->func(fd, intel_ctx_0(fd), eb_ring(l),
t->num_children, t->timeout);
}
}
@@ -1197,30 +1216,30 @@ igt_main
}
igt_subtest("basic-all")
- sync_all(fd, 1, 2);
+ sync_all(fd, ctx, 1, 2);
igt_subtest("basic-store-all")
- store_all(fd, 1, 2);
+ store_all(fd, ctx, 1, 2);
igt_subtest("all")
- sync_all(fd, 1, 20);
+ sync_all(fd, ctx, 1, 20);
igt_subtest("store-all")
- store_all(fd, 1, 20);
+ store_all(fd, ctx, 1, 20);
igt_subtest("forked-all")
- sync_all(fd, ncpus, 20);
+ sync_all(fd, ctx, ncpus, 20);
igt_subtest("forked-store-all")
- store_all(fd, ncpus, 20);
+ store_all(fd, ctx, ncpus, 20);
for_each_test(t, all) {
igt_subtest_f("%s", t->name)
- t->func(fd, ALL_ENGINES, t->num_children, t->timeout);
+ t->func(fd, ctx, ALL_ENGINES, t->num_children, t->timeout);
}
/* New way of selecting engines. */
for_each_test(t, individual) {
igt_subtest_with_dynamic_f("%s", t->name) {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name) {
- t->func(fd, e->flags,
+ t->func(fd, ctx, e->flags,
t->num_children, t->timeout);
}
}
@@ -1235,17 +1254,18 @@ igt_main
}
igt_subtest("preempt-all")
- preempt(fd, ALL_ENGINES, 1, 20);
+ preempt(fd, ctx, ALL_ENGINES, 1, 20);
igt_subtest_with_dynamic("preempt") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name)
- preempt(fd, e->flags, ncpus, 20);
+ preempt(fd, ctx, e->flags, ncpus, 20);
}
}
}
igt_fixture {
igt_stop_hang_detector();
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 25/81] tests/i915/gem_userptr_blits: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (23 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 24/81] tests/i915/gem_sync: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 26/81] tests/i915/gem_wait: Convert to intel_ctx_t (v2) Jason Ekstrand
` (58 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_userptr_blits.c | 25 +++++++++++++++----------
1 file changed, 15 insertions(+), 10 deletions(-)
diff --git a/tests/i915/gem_userptr_blits.c b/tests/i915/gem_userptr_blits.c
index e0e202c54..0616a9378 100644
--- a/tests/i915/gem_userptr_blits.c
+++ b/tests/i915/gem_userptr_blits.c
@@ -584,7 +584,7 @@ static void test_nohangcheck_hostile(int i915)
{
const struct intel_execution_engine2 *e;
igt_hang_t hang;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
int fence = -1;
int err = 0;
int dir;
@@ -599,11 +599,11 @@ static void test_nohangcheck_hostile(int i915)
dir = igt_params_open(i915);
igt_require(dir != -1);
- ctx = gem_context_create(i915);
- hang = igt_allow_hang(i915, ctx, 0);
+ ctx = intel_ctx_create_all_physical(i915);
+ hang = igt_allow_hang(i915, ctx->id, 0);
igt_require(__enable_hangcheck(dir, false));
- ____for_each_physical_engine(i915, ctx, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_spin_t *spin;
int new;
@@ -611,7 +611,7 @@ static void test_nohangcheck_hostile(int i915)
gem_engine_property_printf(i915, e->name,
"preempt_timeout_ms", "%d", 50);
- spin = __igt_spin_new(i915, ctx,
+ spin = __igt_spin_new(i915, .ctx = ctx,
.engine = e->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_USERPTR |
@@ -633,7 +633,7 @@ static void test_nohangcheck_hostile(int i915)
fence = tmp;
}
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert(fence != -1);
if (sync_fence_wait(fence, MSEC_PER_SEC)) { /* 640ms preempt-timeout */
@@ -1210,7 +1210,8 @@ static int test_dmabuf(void)
return 0;
}
-static void store_dword_rand(int i915, unsigned int engine,
+static void store_dword_rand(int i915, const intel_ctx_t *ctx,
+ unsigned int engine,
uint32_t target, uint64_t sz,
int count)
{
@@ -1242,6 +1243,7 @@ static void store_dword_rand(int i915, unsigned int engine,
exec.flags = engine;
if (gen < 6)
exec.flags |= I915_EXEC_SECURE;
+ exec.rsvd1 = ctx->id;
i = 0;
for (int n = 0; n < count; n++) {
@@ -1359,17 +1361,19 @@ static void test_readonly(int i915)
igt_fork(child, 1) {
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
char *orig;
orig = g_compute_checksum_for_data(G_CHECKSUM_SHA1, pages, sz);
gem_userptr(i915, space, total, true, userptr_flags, &rhandle);
- __for_each_physical_engine(i915, e) {
+ ctx = intel_ctx_create_all_physical(i915);
+ for_each_ctx_engine(i915, ctx, e) {
char *ref, *result;
/* First tweak the backing store through the write */
- store_dword_rand(i915, e->flags, whandle, sz, 64);
+ store_dword_rand(i915, ctx, e->flags, whandle, sz, 64);
gem_sync(i915, whandle);
ref = g_compute_checksum_for_data(G_CHECKSUM_SHA1,
pages, sz);
@@ -1378,7 +1382,7 @@ static void test_readonly(int i915)
igt_assert(strcmp(ref, orig));
/* Now try the same through the read-only handle */
- store_dword_rand(i915, e->flags, rhandle, total, 64);
+ store_dword_rand(i915, ctx, e->flags, rhandle, total, 64);
gem_sync(i915, rhandle);
result = g_compute_checksum_for_data(G_CHECKSUM_SHA1,
pages, sz);
@@ -1394,6 +1398,7 @@ static void test_readonly(int i915)
g_free(orig);
orig = ref;
}
+ intel_ctx_destroy(i915, ctx);
gem_close(i915, rhandle);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 26/81] tests/i915/gem_wait: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (24 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 25/81] tests/i915/gem_userptr_blits: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 27/81] tests/i915/gem_request_retire: Convert to intel_ctx_t Jason Ekstrand
` (57 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't NULL-init ctx
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_wait.c | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/tests/i915/gem_wait.c b/tests/i915/gem_wait.c
index 81ac25b59..d56707eda 100644
--- a/tests/i915/gem_wait.c
+++ b/tests/i915/gem_wait.c
@@ -75,13 +75,15 @@ static void invalid_buf(int fd)
#define timespec_isset(x) ((x)->tv_sec | (x)->tv_nsec)
-static void basic(int fd, unsigned engine, unsigned flags)
+static void basic(int fd, const intel_ctx_t *ctx, unsigned engine,
+ unsigned flags)
{
IGT_CORK_HANDLE(cork);
uint32_t plug =
flags & (WRITE | AWAIT) ? igt_cork_plug(&cork, fd) : 0;
igt_spin_t *spin =
igt_spin_new(fd,
+ .ctx = ctx,
.engine = engine,
.dependency = plug,
.flags = (flags & HANG) ? IGT_SPIN_NO_PREEMPTION : 0);
@@ -147,21 +149,22 @@ static void basic(int fd, unsigned engine, unsigned flags)
igt_spin_free(fd, spin);
}
-static void test_all_engines(const char *name, int i915, unsigned int test)
+static void test_all_engines(const char *name, int i915, const intel_ctx_t *ctx,
+ unsigned int test)
{
const struct intel_execution_engine2 *e;
igt_subtest_with_dynamic(name) {
igt_dynamic("all") {
gem_quiescent_gpu(i915);
- basic(i915, ALL_ENGINES, test);
+ basic(i915, ctx, ALL_ENGINES, test);
gem_quiescent_gpu(i915);
}
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name) {
gem_quiescent_gpu(i915);
- basic(i915, e->flags, test);
+ basic(i915, ctx, e->flags, test);
gem_quiescent_gpu(i915);
}
}
@@ -170,11 +173,13 @@ static void test_all_engines(const char *name, int i915, unsigned int test)
igt_main
{
+ const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
fd = drm_open_driver_master(DRIVER_INTEL);
igt_require_gem(fd);
+ ctx = intel_ctx_create_all_physical(fd);
}
igt_subtest("invalid-flags")
@@ -202,7 +207,7 @@ igt_main
}
for (const typeof(*tests) *t = tests; t->name; t++)
- test_all_engines(t->name, fd, t->flags);
+ test_all_engines(t->name, fd, ctx, t->flags);
igt_fixture {
igt_stop_signal_helper();
@@ -229,7 +234,7 @@ igt_main
}
for (const typeof(*tests) *t = tests; t->name; t++)
- test_all_engines(t->name, fd, t->flags);
+ test_all_engines(t->name, fd, ctx, t->flags);
igt_fixture {
igt_stop_signal_helper();
@@ -238,6 +243,7 @@ igt_main
}
igt_fixture {
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 27/81] tests/i915/gem_request_retire: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (25 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 26/81] tests/i915/gem_wait: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 28/81] tests/i915/gem_ctx_shared: Convert to intel_ctx_t (v2) Jason Ekstrand
` (56 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_request_retire.c | 17 ++++++++++-------
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/tests/i915/gem_request_retire.c b/tests/i915/gem_request_retire.c
index c23ddfb7b..3df54f2a5 100644
--- a/tests/i915/gem_request_retire.c
+++ b/tests/i915/gem_request_retire.c
@@ -62,24 +62,26 @@ static void
test_retire_vma_not_inactive(int fd)
{
struct intel_execution_engine2 *e;
-
+ const intel_ctx_t *ctx;
igt_spin_t *bg = NULL;
- __for_each_physical_engine(fd, e) {
+ ctx = intel_ctx_create_all_physical(fd);
+
+ for_each_ctx_engine(fd, ctx, e) {
igt_spin_t *spin;
- uint32_t ctx;
+ const intel_ctx_t *spin_ctx;
if (!bg) {
- bg = igt_spin_new(fd, .engine = e->flags);
+ bg = igt_spin_new(fd, .ctx = ctx, .engine = e->flags);
continue;
}
- ctx = gem_context_clone_with_engines(fd, 0);
- spin = igt_spin_new(fd, ctx,
+ spin_ctx = intel_ctx_create(fd, &ctx->cfg);
+ spin = igt_spin_new(fd, .ctx = spin_ctx,
.engine = e->flags,
.dependency = bg->handle,
.flags = IGT_SPIN_SOFTDEP);
- gem_context_destroy(fd, ctx);
+ intel_ctx_destroy(fd, spin_ctx);
igt_spin_end(spin);
gem_sync(fd, spin->handle);
@@ -88,6 +90,7 @@ test_retire_vma_not_inactive(int fd)
igt_drop_caches_set(fd, DROP_RETIRE);
igt_spin_free(fd, bg);
+ intel_ctx_destroy(fd, ctx);
}
int fd;
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 28/81] tests/i915/gem_ctx_shared: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (26 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 27/81] tests/i915/gem_request_retire: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 29/81] tests/i915/gem_ctx_shared: Stop cloning contexts Jason Ekstrand
` (55 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Jason Ekstrand):
- Rework the Q- tests to share a VM and set SINGLE_TIMELINE
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_shared.c | 303 +++++++++++++++++++++---------------
1 file changed, 174 insertions(+), 129 deletions(-)
diff --git a/tests/i915/gem_ctx_shared.c b/tests/i915/gem_ctx_shared.c
index aedf389c6..97a8dd771 100644
--- a/tests/i915/gem_ctx_shared.c
+++ b/tests/i915/gem_ctx_shared.c
@@ -40,6 +40,7 @@
#include "i915/gem.h"
#include "i915/gem_create.h"
#include "i915/gem_engine_topology.h"
+#include "i915/gem_vm.h"
#include "igt.h"
#include "igt_rand.h"
#include "igt_vgem.h"
@@ -109,11 +110,13 @@ static void create_shared_gtt(int i915, unsigned int flags)
gem_close(i915, obj.handle);
}
-static void disjoint_timelines(int i915)
+static void disjoint_timelines(int i915, const intel_ctx_cfg_t *cfg)
{
IGT_CORK_HANDLE(cork);
+ intel_ctx_cfg_t vm_cfg;
+ const intel_ctx_t *ctx[2];
igt_spin_t *spin[2];
- uint32_t plug, child;
+ uint32_t plug;
igt_require(gem_has_execlists(i915));
@@ -122,11 +125,15 @@ static void disjoint_timelines(int i915)
* distinct timelines. A request queued to one context should be
* independent of any shared contexts.
*/
- child = gem_context_clone(i915, 0, I915_CONTEXT_CLONE_VM, 0);
+ vm_cfg = *cfg;
+ vm_cfg.vm = gem_vm_create(i915);
+ ctx[0] = intel_ctx_create(i915, &vm_cfg);
+ ctx[1] = intel_ctx_create(i915, &vm_cfg);
+
plug = igt_cork_plug(&cork, i915);
- spin[0] = __igt_spin_new(i915, .ctx_id = 0, .dependency = plug);
- spin[1] = __igt_spin_new(i915, .ctx_id = child);
+ spin[0] = __igt_spin_new(i915, .ctx = ctx[0], .dependency = plug);
+ spin[1] = __igt_spin_new(i915, .ctx = ctx[1]);
/* Wait for the second spinner, will hang if stuck behind the first */
igt_spin_end(spin[1]);
@@ -136,6 +143,10 @@ static void disjoint_timelines(int i915)
igt_spin_free(i915, spin[1]);
igt_spin_free(i915, spin[0]);
+
+ intel_ctx_destroy(i915, ctx[0]);
+ intel_ctx_destroy(i915, ctx[1]);
+ gem_vm_destroy(i915, vm_cfg.vm);
}
static void exhaust_shared_gtt(int i915, unsigned int flags)
@@ -185,7 +196,8 @@ static void exhaust_shared_gtt(int i915, unsigned int flags)
igt_waitchildren();
}
-static void exec_shared_gtt(int i915, unsigned int ring)
+static void exec_shared_gtt(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int ring)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -195,14 +207,18 @@ static void exec_shared_gtt(int i915, unsigned int ring)
.buffer_count = 1,
.flags = ring,
};
- uint32_t clone;
+ intel_ctx_cfg_t vm_cfg;
+ const intel_ctx_t *ctx[2];
uint32_t scratch, *s;
uint32_t batch, cs[16];
uint64_t offset;
int timeline;
int i;
- clone = gem_context_clone(i915, 0, I915_CONTEXT_CLONE_VM, 0);
+ vm_cfg = *cfg;
+ vm_cfg.vm = gem_vm_create(i915);
+ ctx[0] = intel_ctx_create(i915, &vm_cfg);
+ ctx[1] = intel_ctx_create(i915, &vm_cfg);
/* Find a hole big enough for both objects later */
scratch = gem_create(i915, 16384);
@@ -210,9 +226,9 @@ static void exec_shared_gtt(int i915, unsigned int ring)
obj.handle = scratch;
gem_execbuf(i915, &execbuf);
obj.flags |= EXEC_OBJECT_PINNED; /* reuse this address */
- execbuf.rsvd1 = clone; /* and bind the second context image */
+ execbuf.rsvd1 = ctx[1]->id; /* and bind the second context image */
gem_execbuf(i915, &execbuf);
- execbuf.rsvd1 = 0;
+ execbuf.rsvd1 = ctx[0]->id;
gem_close(i915, scratch);
timeline = sw_sync_timeline_create();
@@ -256,7 +272,7 @@ static void exec_shared_gtt(int i915, unsigned int ring)
obj.handle = batch;
obj.offset += 8192; /* make sure we don't cause an eviction! */
- execbuf.rsvd1 = clone;
+ execbuf.rsvd1 = ctx[1]->id;
if (gen > 3 && gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
gem_execbuf(i915, &execbuf);
@@ -288,10 +304,13 @@ static void exec_shared_gtt(int i915, unsigned int ring)
munmap(s, 4096);
gem_close(i915, scratch);
- gem_context_destroy(i915, clone);
+ intel_ctx_destroy(i915, ctx[0]);
+ intel_ctx_destroy(i915, ctx[1]);
+ gem_vm_destroy(i915, vm_cfg.vm);
}
-static int nop_sync(int i915, uint32_t ctx, unsigned int ring, int64_t timeout)
+static int nop_sync(int i915, const intel_ctx_t *ctx, unsigned int ring,
+ int64_t timeout)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj = {
@@ -301,7 +320,7 @@ static int nop_sync(int i915, uint32_t ctx, unsigned int ring, int64_t timeout)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = ring,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
int err;
@@ -313,30 +332,19 @@ static int nop_sync(int i915, uint32_t ctx, unsigned int ring, int64_t timeout)
return err;
}
-static bool has_single_timeline(int i915)
-{
- uint32_t ctx = 0;
-
- __gem_context_clone(i915, 0, 0,
- I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE,
- &ctx);
- if (ctx)
- gem_context_destroy(i915, ctx);
-
- return ctx != 0;
-}
-
-static void single_timeline(int i915)
+static void single_timeline(int i915, const intel_ctx_cfg_t *cfg)
{
const struct intel_execution_engine2 *e;
struct sync_fence_info rings[64];
struct sync_file_info sync_file_info = {
.num_fences = 1,
};
+ intel_ctx_cfg_t st_cfg;
+ const intel_ctx_t *ctx;
igt_spin_t *spin;
int n;
- igt_require(has_single_timeline(i915));
+ igt_require(gem_context_has_single_timeline(i915));
spin = igt_spin_new(i915);
@@ -347,11 +355,12 @@ static void single_timeline(int i915)
* to, it reports the same timeline name and fence context. However,
* the fence context is not reported through the sync_fence_info.
*/
- spin->execbuf.rsvd1 =
- gem_context_clone(i915, 0, 0,
- I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+ st_cfg = *cfg;
+ st_cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
+ ctx = intel_ctx_create(i915, &st_cfg);
+ spin->execbuf.rsvd1 = ctx->id;
n = 0;
- ____for_each_physical_engine(i915, spin->execbuf.rsvd1, e) {
+ for_each_ctx_engine(i915, ctx, e) {
spin->execbuf.flags = e->flags | I915_EXEC_FENCE_OUT;
gem_execbuf_wr(i915, &spin->execbuf);
@@ -370,32 +379,35 @@ static void single_timeline(int i915)
igt_assert(!strcmp(rings[0].driver_name, rings[i].driver_name));
igt_assert(!strcmp(rings[0].obj_name, rings[i].obj_name));
}
+ intel_ctx_destroy(i915, ctx);
}
-static void exec_single_timeline(int i915, unsigned int engine)
+static void exec_single_timeline(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
const struct intel_execution_engine2 *e;
igt_spin_t *spin;
- uint32_t ctx;
+ intel_ctx_cfg_t st_cfg;
+ const intel_ctx_t *ctx;
/*
* On an ordinary context, a blockage on one engine doesn't prevent
* execution on an other.
*/
- ctx = 0;
+ ctx = intel_ctx_create(i915, cfg);
spin = NULL;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, cfg, e) {
if (e->flags == engine)
continue;
if (spin == NULL) {
- spin = __igt_spin_new(i915, .ctx_id = ctx, .engine = e->flags);
+ spin = __igt_spin_new(i915, .ctx = ctx, .engine = e->flags);
} else {
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = spin->execbuf.buffers_ptr,
.buffer_count = spin->execbuf.buffer_count,
.flags = e->flags,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
gem_execbuf(i915, &execbuf);
}
@@ -403,27 +415,29 @@ static void exec_single_timeline(int i915, unsigned int engine)
igt_require(spin);
igt_assert_eq(nop_sync(i915, ctx, engine, NSEC_PER_SEC), 0);
igt_spin_free(i915, spin);
+ intel_ctx_destroy(i915, ctx);
/*
* But if we create a context with just a single shared timeline,
* then it will block waiting for the earlier requests on the
* other engines.
*/
- ctx = gem_context_clone(i915, 0, 0,
- I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+ st_cfg = *cfg;
+ st_cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
+ ctx = intel_ctx_create(i915, &st_cfg);
spin = NULL;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, &st_cfg, e) {
if (e->flags == engine)
continue;
if (spin == NULL) {
- spin = __igt_spin_new(i915, .ctx_id = ctx, .engine = e->flags);
+ spin = __igt_spin_new(i915, .ctx = ctx, .engine = e->flags);
} else {
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = spin->execbuf.buffers_ptr,
.buffer_count = spin->execbuf.buffer_count,
.flags = e->flags,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
gem_execbuf(i915, &execbuf);
}
@@ -431,9 +445,10 @@ static void exec_single_timeline(int i915, unsigned int engine)
igt_assert(spin);
igt_assert_eq(nop_sync(i915, ctx, engine, NSEC_PER_SEC), -ETIME);
igt_spin_free(i915, spin);
+ intel_ctx_destroy(i915, ctx);
}
-static void store_dword(int i915, uint32_t ctx, unsigned ring,
+static void store_dword(int i915, const intel_ctx_t *ctx, unsigned ring,
uint32_t target, uint32_t offset, uint32_t value,
uint32_t cork, unsigned write_domain)
{
@@ -450,7 +465,7 @@ static void store_dword(int i915, uint32_t ctx, unsigned ring,
execbuf.flags = ring;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
- execbuf.rsvd1 = ctx;
+ execbuf.rsvd1 = ctx->id;
memset(obj, 0, sizeof(obj));
obj[0].handle = cork;
@@ -491,31 +506,30 @@ static void store_dword(int i915, uint32_t ctx, unsigned ring,
gem_close(i915, obj[2].handle);
}
-static uint32_t create_highest_priority(int i915)
+static const intel_ctx_t *
+create_highest_priority(int i915, const intel_ctx_cfg_t *cfg)
{
- uint32_t ctx = gem_context_clone_with_engines(i915, 0);
+ const intel_ctx_t *ctx = intel_ctx_create(i915, cfg);
/*
* If there is no priority support, all contexts will have equal
* priority (and therefore the max user priority), so no context
* can overtake us, and we effectively can form a plug.
*/
- __gem_context_set_priority(i915, ctx, MAX_PRIO);
+ __gem_context_set_priority(i915, ctx->id, MAX_PRIO);
return ctx;
}
-static void unplug_show_queue(int i915, struct igt_cork *c, unsigned int engine)
+static void unplug_show_queue(int i915, struct igt_cork *c,
+ const intel_ctx_cfg_t *cfg, unsigned int engine)
{
igt_spin_t *spin[MAX_ELSP_QLEN];
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
- const struct igt_spin_factory opts = {
- .ctx_id = create_highest_priority(i915),
- .engine = engine,
- };
- spin[n] = __igt_spin_factory(i915, &opts);
- gem_context_destroy(i915, opts.ctx_id);
+ const intel_ctx_t *ctx = create_highest_priority(i915, cfg);
+ spin[n] = __igt_spin_new(i915, .ctx = ctx, .engine = engine);
+ intel_ctx_destroy(i915, ctx);
}
igt_cork_unplug(c); /* batches will now be queued on the engine */
@@ -526,7 +540,8 @@ static void unplug_show_queue(int i915, struct igt_cork *c, unsigned int engine)
}
static uint32_t store_timestamp(int i915,
- uint32_t ctx, unsigned ring,
+ const intel_ctx_t *ctx,
+ unsigned ring,
unsigned mmio_base,
int fence,
int offset)
@@ -549,7 +564,7 @@ static uint32_t store_timestamp(int i915,
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = ring | I915_EXEC_FENCE_IN,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
.rsvd2 = fence
};
uint32_t batch[] = {
@@ -577,13 +592,14 @@ static void kick_tasklets(void)
sched_yield();
}
-static void independent(int i915,
+static void independent(int i915, const intel_ctx_cfg_t *cfg,
const struct intel_execution_engine2 *e,
unsigned flags)
{
const int TIMESTAMP = 1023;
uint32_t handle[ARRAY_SIZE(priorities)];
igt_spin_t *spin[MAX_ELSP_QLEN];
+ intel_ctx_cfg_t q_cfg;
unsigned int mmio_base;
IGT_CORK_FENCE(cork);
int fence;
@@ -591,23 +607,24 @@ static void independent(int i915,
mmio_base = gem_engine_mmio_base(i915, e->name);
igt_require_f(mmio_base, "mmio base not known\n");
+ q_cfg = *cfg;
+ q_cfg.vm = gem_vm_create(i915);
+ q_cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
+
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
- const struct igt_spin_factory opts = {
- .ctx_id = create_highest_priority(i915),
- .engine = e->flags,
- };
- spin[n] = __igt_spin_factory(i915, &opts);
- gem_context_destroy(i915, opts.ctx_id);
+ const intel_ctx_t *ctx = create_highest_priority(i915, &q_cfg);
+ spin[n] = __igt_spin_new(i915, .ctx = ctx, .engine = e->flags);
+ intel_ctx_destroy(i915, ctx);
}
fence = igt_cork_plug(&cork, i915);
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
- uint32_t ctx = gem_queue_create(i915);
- gem_context_set_priority(i915, ctx, priorities[i]);
+ const intel_ctx_t *ctx = create_highest_priority(i915, &q_cfg);
+ gem_context_set_priority(i915, ctx->id, priorities[i]);
handle[i] = store_timestamp(i915, ctx,
e->flags, mmio_base,
fence, TIMESTAMP);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
close(fence);
kick_tasklets(); /* XXX try to hide cmdparser delays XXX */
@@ -634,22 +651,30 @@ static void independent(int i915,
}
igt_assert((int32_t)(handle[HI] - handle[LO]) < 0);
+
+ gem_vm_destroy(i915, q_cfg.vm);
}
-static void reorder(int i915, unsigned ring, unsigned flags)
+static void reorder(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned ring, unsigned flags)
#define EQUAL 1
{
IGT_CORK_HANDLE(cork);
uint32_t scratch;
uint32_t *ptr;
- uint32_t ctx[2];
+ intel_ctx_cfg_t q_cfg;
+ const intel_ctx_t *ctx[2];
uint32_t plug;
- ctx[LO] = gem_queue_create(i915);
- gem_context_set_priority(i915, ctx[LO], MIN_PRIO);
+ q_cfg = *cfg;
+ q_cfg.vm = gem_vm_create(i915);
+ q_cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
+
+ ctx[LO] = intel_ctx_create(i915, &q_cfg);
+ gem_context_set_priority(i915, ctx[LO]->id, MIN_PRIO);
- ctx[HI] = gem_queue_create(i915);
- gem_context_set_priority(i915, ctx[HI], flags & EQUAL ? MIN_PRIO : 0);
+ ctx[HI] = intel_ctx_create(i915, &q_cfg);
+ gem_context_set_priority(i915, ctx[HI]->id, flags & EQUAL ? MIN_PRIO : 0);
scratch = gem_create(i915, 4096);
plug = igt_cork_plug(&cork, i915);
@@ -657,43 +682,50 @@ static void reorder(int i915, unsigned ring, unsigned flags)
/* We expect the high priority context to be executed first, and
* so the final result will be value from the low priority context.
*/
- store_dword(i915, ctx[LO], ring, scratch, 0, ctx[LO], plug, 0);
- store_dword(i915, ctx[HI], ring, scratch, 0, ctx[HI], plug, 0);
+ store_dword(i915, ctx[LO], ring, scratch, 0, ctx[LO]->id, plug, 0);
+ store_dword(i915, ctx[HI], ring, scratch, 0, ctx[HI]->id, plug, 0);
- unplug_show_queue(i915, &cork, ring);
+ unplug_show_queue(i915, &cork, &q_cfg, ring);
gem_close(i915, plug);
- gem_context_destroy(i915, ctx[LO]);
- gem_context_destroy(i915, ctx[HI]);
-
ptr = gem_mmap__device_coherent(i915, scratch, 0, 4096, PROT_READ);
gem_set_domain(i915, scratch, /* no write hazard lies! */
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
gem_close(i915, scratch);
if (flags & EQUAL) /* equal priority, result will be fifo */
- igt_assert_eq_u32(ptr[0], ctx[HI]);
+ igt_assert_eq_u32(ptr[0], ctx[HI]->id);
else
- igt_assert_eq_u32(ptr[0], ctx[LO]);
+ igt_assert_eq_u32(ptr[0], ctx[LO]->id);
munmap(ptr, 4096);
+
+ intel_ctx_destroy(i915, ctx[LO]);
+ intel_ctx_destroy(i915, ctx[HI]);
+
+ gem_vm_destroy(i915, q_cfg.vm);
}
-static void promotion(int i915, unsigned ring)
+static void promotion(int i915, const intel_ctx_cfg_t *cfg, unsigned ring)
{
IGT_CORK_HANDLE(cork);
uint32_t result, dep;
uint32_t *ptr;
- uint32_t ctx[3];
+ intel_ctx_cfg_t q_cfg;
+ const intel_ctx_t *ctx[3];
uint32_t plug;
- ctx[LO] = gem_queue_create(i915);
- gem_context_set_priority(i915, ctx[LO], MIN_PRIO);
+ q_cfg = *cfg;
+ q_cfg.vm = gem_vm_create(i915);
+ q_cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
+
+ ctx[LO] = intel_ctx_create(i915, &q_cfg);
+ gem_context_set_priority(i915, ctx[LO]->id, MIN_PRIO);
- ctx[HI] = gem_queue_create(i915);
- gem_context_set_priority(i915, ctx[HI], 0);
+ ctx[HI] = intel_ctx_create(i915, &q_cfg);
+ gem_context_set_priority(i915, ctx[HI]->id, 0);
- ctx[NOISE] = gem_queue_create(i915);
- gem_context_set_priority(i915, ctx[NOISE], MIN_PRIO/2);
+ ctx[NOISE] = intel_ctx_create(i915, &q_cfg);
+ gem_context_set_priority(i915, ctx[NOISE]->id, MIN_PRIO/2);
result = gem_create(i915, 4096);
dep = gem_create(i915, 4096);
@@ -705,28 +737,24 @@ static void promotion(int i915, unsigned ring)
* fifo would be NOISE, LO, HI.
* strict priority would be HI, NOISE, LO
*/
- store_dword(i915, ctx[NOISE], ring, result, 0, ctx[NOISE], plug, 0);
- store_dword(i915, ctx[LO], ring, result, 0, ctx[LO], plug, 0);
+ store_dword(i915, ctx[NOISE], ring, result, 0, ctx[NOISE]->id, plug, 0);
+ store_dword(i915, ctx[LO], ring, result, 0, ctx[LO]->id, plug, 0);
/* link LO <-> HI via a dependency on another buffer */
- store_dword(i915, ctx[LO], ring, dep, 0, ctx[LO], 0, I915_GEM_DOMAIN_INSTRUCTION);
- store_dword(i915, ctx[HI], ring, dep, 0, ctx[HI], 0, 0);
+ store_dword(i915, ctx[LO], ring, dep, 0, ctx[LO]->id, 0, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(i915, ctx[HI], ring, dep, 0, ctx[HI]->id, 0, 0);
- store_dword(i915, ctx[HI], ring, result, 0, ctx[HI], 0, 0);
+ store_dword(i915, ctx[HI], ring, result, 0, ctx[HI]->id, 0, 0);
- unplug_show_queue(i915, &cork, ring);
+ unplug_show_queue(i915, &cork, &q_cfg, ring);
gem_close(i915, plug);
- gem_context_destroy(i915, ctx[NOISE]);
- gem_context_destroy(i915, ctx[LO]);
- gem_context_destroy(i915, ctx[HI]);
-
ptr = gem_mmap__device_coherent(i915, dep, 0, 4096, PROT_READ);
gem_set_domain(i915, dep, /* no write hazard lies! */
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
gem_close(i915, dep);
- igt_assert_eq_u32(ptr[0], ctx[HI]);
+ igt_assert_eq_u32(ptr[0], ctx[HI]->id);
munmap(ptr, 4096);
ptr = gem_mmap__device_coherent(i915, result, 0, 4096, PROT_READ);
@@ -734,24 +762,36 @@ static void promotion(int i915, unsigned ring)
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
gem_close(i915, result);
- igt_assert_eq_u32(ptr[0], ctx[NOISE]);
+ igt_assert_eq_u32(ptr[0], ctx[NOISE]->id);
munmap(ptr, 4096);
+
+ intel_ctx_destroy(i915, ctx[NOISE]);
+ intel_ctx_destroy(i915, ctx[LO]);
+ intel_ctx_destroy(i915, ctx[HI]);
+
+ gem_vm_destroy(i915, q_cfg.vm);
}
-static void smoketest(int i915, unsigned ring, unsigned timeout)
+static void smoketest(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned ring, unsigned timeout)
{
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+ intel_ctx_cfg_t q_cfg;
unsigned engines[I915_EXEC_RING_MASK + 1];
unsigned nengine;
unsigned engine;
uint32_t scratch;
uint32_t *ptr;
+ q_cfg = *cfg;
+ q_cfg.vm = gem_vm_create(i915);
+ q_cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
+
nengine = 0;
if (ring == -1) {
const struct intel_execution_engine2 *e;
- __for_each_physical_engine(i915, e)
+ for_each_ctx_cfg_engine(i915, &q_cfg, e)
engines[nengine++] = e->flags;
} else {
engines[nengine++] = ring;
@@ -761,16 +801,16 @@ static void smoketest(int i915, unsigned ring, unsigned timeout)
scratch = gem_create(i915, 4096);
igt_fork(child, ncpus) {
unsigned long count = 0;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
hars_petruska_f54_1_random_perturb(child);
- ctx = gem_queue_create(i915);
+ ctx = intel_ctx_create(i915, &q_cfg);
igt_until_timeout(timeout) {
int prio;
prio = hars_petruska_f54_1_random_unsafe_max(MAX_PRIO - MIN_PRIO) + MIN_PRIO;
- gem_context_set_priority(i915, ctx, prio);
+ gem_context_set_priority(i915, ctx->id, prio);
engine = engines[hars_petruska_f54_1_random_unsafe_max(nengine)];
store_dword(i915, ctx, engine, scratch,
@@ -781,7 +821,7 @@ static void smoketest(int i915, unsigned ring, unsigned timeout)
8*child + 4, count++,
0, 0);
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
igt_waitchildren();
@@ -801,21 +841,25 @@ static void smoketest(int i915, unsigned ring, unsigned timeout)
igt_info("Child[%d] completed %u cycles\n", n, ptr[2*n+1]);
}
munmap(ptr, 4096);
+
+ gem_vm_destroy(i915, q_cfg.vm);
}
-#define for_each_queue(e, i915) \
- __for_each_physical_engine(i915, e) \
+#define for_each_queue(e, i915, cfg) \
+ for_each_ctx_cfg_engine(i915, cfg, e) \
for_each_if(gem_class_can_store_dword(i915, (e)->class)) \
igt_dynamic_f("%s", e->name)
igt_main
{
const struct intel_execution_engine2 *e;
+ intel_ctx_cfg_t cfg;
int i915 = -1;
igt_fixture {
i915 = drm_open_driver(DRIVER_INTEL);
igt_require_gem(i915);
+ cfg = intel_ctx_cfg_all_physical(i915);
}
igt_subtest_group {
@@ -831,20 +875,20 @@ igt_main
create_shared_gtt(i915, DETACHED);
igt_subtest("disjoint-timelines")
- disjoint_timelines(i915);
+ disjoint_timelines(i915, &cfg);
igt_subtest("single-timeline")
- single_timeline(i915);
+ single_timeline(i915, &cfg);
igt_subtest_with_dynamic("exec-shared-gtt") {
- for_each_queue(e, i915)
- exec_shared_gtt(i915, e->flags);
+ for_each_queue(e, i915, &cfg)
+ exec_shared_gtt(i915, &cfg, e->flags);
}
igt_subtest_with_dynamic("exec-single-timeline") {
- igt_require(has_single_timeline(i915));
- for_each_queue(e, i915)
- exec_single_timeline(i915, e->flags);
+ igt_require(gem_context_has_single_timeline(i915));
+ for_each_queue(e, i915, &cfg)
+ exec_single_timeline(i915, &cfg, e->flags);
}
/*
@@ -856,38 +900,39 @@ igt_main
*/
igt_subtest_group {
igt_fixture {
- igt_require(gem_has_queues(i915));
igt_require(gem_scheduler_enabled(i915));
igt_require(gem_scheduler_has_ctx_priority(i915));
+ igt_require(gem_has_vm(i915));
+ igt_require(gem_context_has_single_timeline(i915));
}
igt_subtest_with_dynamic("Q-independent") {
- for_each_queue(e, i915)
- independent(i915, e, 0);
+ for_each_queue(e, i915, &cfg)
+ independent(i915, &cfg, e, 0);
}
igt_subtest_with_dynamic("Q-in-order") {
- for_each_queue(e, i915)
- reorder(i915, e->flags, EQUAL);
+ for_each_queue(e, i915, &cfg)
+ reorder(i915, &cfg, e->flags, EQUAL);
}
igt_subtest_with_dynamic("Q-out-order") {
- for_each_queue(e, i915)
- reorder(i915, e->flags, 0);
+ for_each_queue(e, i915, &cfg)
+ reorder(i915, &cfg, e->flags, 0);
}
igt_subtest_with_dynamic("Q-promotion") {
- for_each_queue(e, i915)
- promotion(i915, e->flags);
+ for_each_queue(e, i915, &cfg)
+ promotion(i915, &cfg, e->flags);
}
igt_subtest_with_dynamic("Q-smoketest") {
- for_each_queue(e, i915)
- smoketest(i915, e->flags, 5);
+ for_each_queue(e, i915, &cfg)
+ smoketest(i915, &cfg, e->flags, 5);
}
igt_subtest("Q-smoketest-all")
- smoketest(i915, -1, 30);
+ smoketest(i915, &cfg, -1, 30);
}
igt_subtest("exhaust-shared-gtt")
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 29/81] tests/i915/gem_ctx_shared: Stop cloning contexts
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (27 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 28/81] tests/i915/gem_ctx_shared: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 30/81] tests/i915/gem_create: Convert to intel_ctx_t Jason Ekstrand
` (54 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Instead use either GET/SET_CONTEXT_PARAM or a create ext.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_shared.c | 77 ++++++++++++++++++++++++++++++-------
1 file changed, 63 insertions(+), 14 deletions(-)
diff --git a/tests/i915/gem_ctx_shared.c b/tests/i915/gem_ctx_shared.c
index 97a8dd771..4441e6eb7 100644
--- a/tests/i915/gem_ctx_shared.c
+++ b/tests/i915/gem_ctx_shared.c
@@ -63,6 +63,48 @@ static int priorities[] = {
IGT_TEST_DESCRIPTION("Test shared contexts.");
+static int __get_vm(int i915, uint32_t ctx, uint32_t *vm)
+{
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_VM,
+ };
+ int err = __gem_context_get_param(i915, &p);
+ if (err)
+ return err;
+
+ igt_assert(p.value > 0 && p.value < UINT32_MAX);
+ *vm = p.value;
+
+ return 0;
+}
+
+static uint32_t get_vm(int i915, uint32_t ctx)
+{
+ uint32_t vm;
+ igt_assert_eq(__get_vm(i915, ctx, &vm), 0);
+ return vm;
+}
+
+static void set_vm(int i915, uint32_t ctx, uint32_t vm)
+{
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_VM,
+ .value = vm
+ };
+ gem_context_set_param(i915, &p);
+}
+
+static void copy_vm(int i915, uint32_t dst, uint32_t src)
+{
+ uint32_t vm = get_vm(i915, src);
+ set_vm(i915, dst, vm);
+
+ /* GETPARAM gets a reference to the VM which we have to drop */
+ gem_vm_destroy(i915, vm);
+}
+
static void create_shared_gtt(int i915, unsigned int flags)
#define DETACHED 0x1
{
@@ -83,9 +125,9 @@ static void create_shared_gtt(int i915, unsigned int flags)
child = flags & DETACHED ? gem_context_create(i915) : 0;
igt_until_timeout(2) {
parent = flags & DETACHED ? child : 0;
- child = gem_context_clone(i915,
- parent, I915_CONTEXT_CLONE_VM,
- 0);
+ child = gem_context_create(i915);
+ copy_vm(i915, child, parent);
+
execbuf.rsvd1 = child;
gem_execbuf(i915, &execbuf);
@@ -99,9 +141,7 @@ static void create_shared_gtt(int i915, unsigned int flags)
execbuf.rsvd1 = parent;
igt_assert_eq(__gem_execbuf(i915, &execbuf), -ENOENT);
- igt_assert_eq(__gem_context_clone(i915,
- parent, I915_CONTEXT_CLONE_VM,
- 0, &parent), -ENOENT);
+ igt_assert_eq(__get_vm(i915, parent, &parent), -ENOENT);
}
if (flags & DETACHED)
gem_context_destroy(i915, child);
@@ -152,8 +192,19 @@ static void disjoint_timelines(int i915, const intel_ctx_cfg_t *cfg)
static void exhaust_shared_gtt(int i915, unsigned int flags)
#define EXHAUST_LRC 0x1
{
+ struct drm_i915_gem_context_create_ext_setparam vm_create_ext = {
+ .base = {
+ .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ },
+ .param = {
+ .param = I915_CONTEXT_PARAM_VM,
+ },
+ };
+
i915 = gem_reopen_driver(i915);
+ vm_create_ext.param.value = gem_vm_create(i915);
+
igt_fork(pid, 1) {
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj = {
@@ -163,23 +214,21 @@ static void exhaust_shared_gtt(int i915, unsigned int flags)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
};
- uint32_t parent, child;
unsigned long count = 0;
int err;
gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
- child = 0;
for (;;) {
- parent = child;
- err = __gem_context_clone(i915,
- parent, I915_CONTEXT_CLONE_VM,
- 0, &child);
+ uint32_t ctx;
+ err = __gem_context_create_ext(i915, 0,
+ to_user_pointer(&vm_create_ext),
+ &ctx);
if (err)
break;
if (flags & EXHAUST_LRC) {
- execbuf.rsvd1 = child;
+ execbuf.rsvd1 = ctx;
err = __gem_execbuf(i915, &execbuf);
if (err)
break;
@@ -864,7 +913,7 @@ igt_main
igt_subtest_group {
igt_fixture {
- igt_require(gem_contexts_has_shared_gtt(i915));
+ igt_require(gem_has_vm(i915));
igt_fork_hang_detector(i915);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 30/81] tests/i915/gem_create: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (28 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 29/81] tests/i915/gem_ctx_shared: Stop cloning contexts Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 31/81] tests/i915/gem_ctx_switch: Convert to intel_ctx_t (v3) Jason Ekstrand
` (53 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_create.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/tests/i915/gem_create.c b/tests/i915/gem_create.c
index 167d7d28a..1acf8ee6a 100644
--- a/tests/i915/gem_create.c
+++ b/tests/i915/gem_create.c
@@ -246,20 +246,24 @@ static void always_clear(int i915, int timeout)
static void busy_create(int i915, int timeout)
{
struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
igt_spin_t *spin[I915_EXEC_RING_MASK + 1];
unsigned long count = 0;
+ ctx = intel_ctx_create_all_physical(i915);
+
igt_fork_hang_detector(i915);
- __for_each_physical_engine(i915, e)
- spin[e->flags] = igt_spin_new(i915, .engine = e->flags);
+ for_each_ctx_engine(i915, ctx, e)
+ spin[e->flags] = igt_spin_new(i915, .ctx = ctx,
+ .engine = e->flags);
igt_until_timeout(timeout) {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
uint32_t handle;
igt_spin_t *next;
handle = gem_create(i915, 4096);
- next = igt_spin_new(i915,
+ next = igt_spin_new(i915, .ctx = ctx,
.engine = e->flags,
.dependency = handle,
.flags = IGT_SPIN_SOFTDEP);
@@ -272,6 +276,8 @@ static void busy_create(int i915, int timeout)
}
}
+ intel_ctx_destroy(i915, ctx);
+
igt_info("Created %ld objects while busy\n", count);
gem_quiescent_gpu(i915);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 31/81] tests/i915/gem_ctx_switch: Convert to intel_ctx_t (v3)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (29 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 30/81] tests/i915/gem_create: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 32/81] tests/i915/gem_exec_parallel: Convert to intel_ctx_t (v2) Jason Ekstrand
` (52 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Ashutosh Dixit):
- Set SINGLE_TIMELINE for queue tests
- Use the same configuration for measure_qlen as the rest of the test.
v2 (Jason Ekstrand):
- Use ARRAY_SIZE(contexts) instead of 64 for a context create loop
bound
v3 (Ashutosh Dixit):
- Add a new has_queues helper to check for VM+SINGLE_TIMELINE
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_switch.c | 121 +++++++++++++++++++++---------------
1 file changed, 72 insertions(+), 49 deletions(-)
diff --git a/tests/i915/gem_ctx_switch.c b/tests/i915/gem_ctx_switch.c
index 44c659f0b..4e46b7634 100644
--- a/tests/i915/gem_ctx_switch.c
+++ b/tests/i915/gem_ctx_switch.c
@@ -43,6 +43,7 @@
#include "i915/gem.h"
#include "i915/gem_create.h"
#include "i915/gem_ring.h"
+#include "i915/gem_vm.h"
#include "igt.h"
#define INTERRUPTIBLE 0x1
@@ -54,18 +55,18 @@ static double elapsed(const struct timespec *start, const struct timespec *end)
(end->tv_nsec - start->tv_nsec)*1e-9);
}
-static int measure_qlen(int fd,
+static int measure_qlen(int fd, const intel_ctx_cfg_t *cfg,
struct drm_i915_gem_execbuffer2 *execbuf,
const struct intel_engine_data *engines,
int timeout)
{
const struct drm_i915_gem_exec_object2 * const obj =
(struct drm_i915_gem_exec_object2 *)(uintptr_t)execbuf->buffers_ptr;
- uint32_t ctx[64];
+ const intel_ctx_t *ctx[64];
int min = INT_MAX, max = 0;
for (int i = 0; i < ARRAY_SIZE(ctx); i++)
- ctx[i] = gem_context_clone_with_engines(fd, 0);
+ ctx[i] = intel_ctx_create(fd, cfg);
for (unsigned int n = 0; n < engines->nengines; n++) {
uint64_t saved = execbuf->flags;
@@ -75,14 +76,14 @@ static int measure_qlen(int fd,
execbuf->flags |= engines->engines[n].flags;
for (int i = 0; i < ARRAY_SIZE(ctx); i++) {
- execbuf->rsvd1 = ctx[i];
+ execbuf->rsvd1 = ctx[i]->id;
gem_execbuf(fd, execbuf);
}
gem_sync(fd, obj->handle);
igt_nsec_elapsed(&tv);
for (int i = 0; i < ARRAY_SIZE(ctx); i++) {
- execbuf->rsvd1 = ctx[i];
+ execbuf->rsvd1 = ctx[i]->id;
gem_execbuf(fd, execbuf);
}
gem_sync(fd, obj->handle);
@@ -102,13 +103,14 @@ static int measure_qlen(int fd,
}
for (int i = 0; i < ARRAY_SIZE(ctx); i++)
- gem_context_destroy(fd, ctx[i]);
+ intel_ctx_destroy(fd, ctx[i]);
igt_debug("Estimated qlen: {min:%d, max:%d}\n", min, max);
return min;
}
static void single(int fd, uint32_t handle,
+ const intel_ctx_cfg_t *base_cfg,
const struct intel_execution_engine2 *e2,
unsigned flags,
const int ncpus,
@@ -117,7 +119,8 @@ static void single(int fd, uint32_t handle,
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj;
struct drm_i915_gem_relocation_entry reloc;
- uint32_t contexts[64];
+ intel_ctx_cfg_t cfg;
+ const intel_ctx_t *contexts[64];
struct {
double elapsed;
unsigned long count;
@@ -127,13 +130,15 @@ static void single(int fd, uint32_t handle,
shared = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
igt_assert(shared != MAP_FAILED);
- for (n = 0; n < 64; n++) {
- if (flags & QUEUE)
- contexts[n] = gem_queue_clone_with_engines(fd, 0);
- else
- contexts[n] = gem_context_clone_with_engines(fd, 0);
+ cfg = *base_cfg;
+ if (flags & QUEUE) {
+ cfg.vm = gem_vm_create(fd);
+ cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
}
+ for (n = 0; n < 64; n++)
+ contexts[n] = intel_ctx_create(fd, &cfg);
+
memset(&obj, 0, sizeof(obj));
obj.handle = handle;
@@ -151,7 +156,7 @@ static void single(int fd, uint32_t handle,
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&obj);
execbuf.buffer_count = 1;
- execbuf.rsvd1 = contexts[0];
+ execbuf.rsvd1 = contexts[0]->id;
execbuf.flags = e2->flags;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
@@ -169,7 +174,7 @@ static void single(int fd, uint32_t handle,
/* Warmup to bind all objects into each ctx before we begin */
for (int i = 0; i < ARRAY_SIZE(contexts); i++) {
- execbuf.rsvd1 = contexts[i];
+ execbuf.rsvd1 = contexts[i]->id;
gem_execbuf(fd, &execbuf);
}
gem_sync(fd, handle);
@@ -178,7 +183,7 @@ static void single(int fd, uint32_t handle,
do {
igt_while_interruptible(flags & INTERRUPTIBLE) {
for (int loop = 0; loop < 64; loop++) {
- execbuf.rsvd1 = contexts[loop % 64];
+ execbuf.rsvd1 = contexts[loop % 64]->id;
reloc.presumed_offset = -1;
gem_execbuf(fd, &execbuf);
}
@@ -215,42 +220,46 @@ static void single(int fd, uint32_t handle,
}
for (n = 0; n < 64; n++)
- gem_context_destroy(fd, contexts[n]);
+ intel_ctx_destroy(fd, contexts[n]);
munmap(shared, 4096);
}
-static void all(int fd, uint32_t handle, unsigned flags, int timeout)
+static void all(int fd, uint32_t handle, const intel_ctx_cfg_t *base_cfg,
+ unsigned flags, int timeout)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[2];
struct intel_engine_data engines = { };
- uint32_t contexts[65];
+ intel_ctx_cfg_t cfg;
+ const intel_ctx_t *contexts[65];
int n, qlen;
- engines = intel_init_engine_list(fd, 0);
+ engines = intel_engine_list_for_ctx_cfg(fd, base_cfg);
igt_require(engines.nengines);
- for (n = 0; n < ARRAY_SIZE(contexts); n++) {
- if (flags & QUEUE)
- contexts[n] = gem_queue_clone_with_engines(fd, 0);
- else
- contexts[n] = gem_context_clone_with_engines(fd, 0);
+ cfg = *base_cfg;
+ if (flags & QUEUE) {
+ cfg.vm = gem_vm_create(fd);
+ cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
}
+ for (n = 0; n < ARRAY_SIZE(contexts); n++)
+ contexts[n] = intel_ctx_create(fd, &cfg);
+
memset(obj, 0, sizeof(obj));
obj[1].handle = handle;
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj + 1);
execbuf.buffer_count = 1;
- execbuf.rsvd1 = contexts[0];
+ execbuf.rsvd1 = contexts[0]->id;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
igt_require(__gem_execbuf(fd, &execbuf) == 0);
gem_sync(fd, handle);
- qlen = measure_qlen(fd, &execbuf, &engines, timeout);
+ qlen = measure_qlen(fd, &cfg, &execbuf, &engines, timeout);
igt_info("Using timing depth of %d batches\n", qlen);
execbuf.buffers_ptr = to_user_pointer(obj);
@@ -267,7 +276,7 @@ static void all(int fd, uint32_t handle, unsigned flags, int timeout)
for (int loop = 0;
loop < ARRAY_SIZE(contexts);
loop++) {
- execbuf.rsvd1 = contexts[loop];
+ execbuf.rsvd1 = contexts[loop]->id;
gem_execbuf(fd, &execbuf);
}
gem_sync(fd, obj[0].handle);
@@ -276,7 +285,7 @@ static void all(int fd, uint32_t handle, unsigned flags, int timeout)
do {
for (int loop = 0; loop < qlen; loop++) {
execbuf.rsvd1 =
- contexts[loop % nctx];
+ contexts[loop % nctx]->id;
gem_execbuf(fd, &execbuf);
}
count += qlen;
@@ -300,7 +309,13 @@ static void all(int fd, uint32_t handle, unsigned flags, int timeout)
}
for (n = 0; n < ARRAY_SIZE(contexts); n++)
- gem_context_destroy(fd, contexts[n]);
+ intel_ctx_destroy(fd, contexts[n]);
+}
+
+static bool
+has_queues(int fd)
+{
+ return gem_has_vm(fd) && gem_context_has_single_timeline(fd);
}
igt_main
@@ -308,6 +323,8 @@ igt_main
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
const struct intel_execution_engine2 *e2;
const struct intel_execution_ring *e;
+ const intel_ctx_cfg_t legacy_cfg = {};
+ intel_ctx_cfg_t engines_cfg;
static const struct {
const char *name;
unsigned int flags;
@@ -315,8 +332,8 @@ igt_main
} phases[] = {
{ "", 0, NULL },
{ "-interruptible", INTERRUPTIBLE, NULL },
- { "-queue", QUEUE, gem_has_queues },
- { "-queue-interruptible", QUEUE | INTERRUPTIBLE, gem_has_queues },
+ { "-queue", QUEUE, has_queues },
+ { "-queue-interruptible", QUEUE | INTERRUPTIBLE, has_queues },
{ }
};
uint32_t light = 0, heavy;
@@ -330,6 +347,8 @@ igt_main
gem_require_contexts(fd);
+ engines_cfg = intel_ctx_cfg_all_physical(fd);
+
light = gem_create(fd, 4096);
gem_write(fd, light, 0, &bbe, sizeof(bbe));
@@ -358,24 +377,26 @@ igt_main
}
igt_subtest_f("legacy-%s%s", e->name, p->name)
- single(fd, light, e2, p->flags, 1, 2);
+ single(fd, light, &legacy_cfg, e2,
+ p->flags, 1, 2);
igt_subtest_f("legacy-%s-heavy%s",
e->name, p->name)
- single(fd, heavy, e2, p->flags, 1, 2);
+ single(fd, heavy, &legacy_cfg, e2,
+ p->flags, 1, 2);
igt_subtest_f("legacy-%s-forked%s",
e->name, p->name)
- single(fd, light, e2, p->flags, ncpus,
- 20);
+ single(fd, light, &legacy_cfg, e2,
+ p->flags, ncpus, 20);
igt_subtest_f("legacy-%s-forked-heavy%s",
e->name, p->name)
- single(fd, heavy, e2, p->flags, ncpus,
- 20);
+ single(fd, heavy, &legacy_cfg, e2,
+ p->flags, ncpus, 20);
}
}
}
/* Must come after legacy subtests. */
- __for_each_physical_engine(fd, e2) {
+ for_each_ctx_cfg_engine(fd, &engines_cfg, e2) {
for (typeof(*phases) *p = phases; p->name; p++) {
igt_subtest_group {
igt_fixture {
@@ -384,33 +405,35 @@ igt_main
}
igt_subtest_f("%s%s", e2->name, p->name)
- single(fd, light, e2, p->flags, 1, 2);
+ single(fd, light, &engines_cfg, e2,
+ p->flags, 1, 2);
igt_subtest_f("%s-heavy%s", e2->name, p->name)
- single(fd, heavy, e2, p->flags, 1, 2);
+ single(fd, heavy, &engines_cfg, e2,
+ p->flags, 1, 2);
igt_subtest_f("%s-forked%s", e2->name, p->name)
- single(fd, light, e2, p->flags, ncpus,
- 20);
+ single(fd, light, &engines_cfg, e2,
+ p->flags, ncpus, 20);
igt_subtest_f("%s-forked-heavy%s",
e2->name, p->name)
- single(fd, heavy, e2, p->flags, ncpus,
- 20);
+ single(fd, heavy, &engines_cfg, e2,
+ p->flags, ncpus, 20);
}
}
}
igt_subtest("all-light")
- all(fd, light, 0, 2);
+ all(fd, light, &engines_cfg, 0, 2);
igt_subtest("all-heavy")
- all(fd, heavy, 0, 2);
+ all(fd, heavy, &engines_cfg, 0, 2);
igt_subtest_group {
igt_fixture {
- igt_require(gem_has_queues(fd));
+ gem_require_vm(fd);
}
igt_subtest("queue-light")
- all(fd, light, QUEUE, 2);
+ all(fd, light, &engines_cfg, QUEUE, 2);
igt_subtest("queue-heavy")
- all(fd, heavy, QUEUE, 2);
+ all(fd, heavy, &engines_cfg, QUEUE, 2);
}
igt_fixture {
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 32/81] tests/i915/gem_exec_parallel: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (30 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 31/81] tests/i915/gem_ctx_switch: Convert to intel_ctx_t (v3) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 33/81] tests/i915/gem_exec_latency: Convert to intel_ctx_t (v3) Jason Ekstrand
` (51 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't NULL-init ctx
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_parallel.c | 29 +++++++++++++++++++----------
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/tests/i915/gem_exec_parallel.c b/tests/i915/gem_exec_parallel.c
index 11cea5d7d..5920ac730 100644
--- a/tests/i915/gem_exec_parallel.c
+++ b/tests/i915/gem_exec_parallel.c
@@ -57,6 +57,7 @@ struct thread {
unsigned flags;
uint32_t *scratch;
unsigned id;
+ const intel_ctx_t *ctx;
unsigned engine;
uint32_t used;
int fd, gen, *go;
@@ -68,6 +69,7 @@ static void *thread(void *data)
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_execbuffer2 execbuf;
+ const intel_ctx_t *tmp_ctx = NULL;
uint32_t batch[16];
uint16_t used;
int fd, i;
@@ -79,7 +81,6 @@ static void *thread(void *data)
if (t->flags & FDS) {
fd = gem_reopen_driver(t->fd);
- gem_context_copy_engines(t->fd, 0, fd, 0);
} else {
fd = t->fd;
}
@@ -122,8 +123,11 @@ static void *thread(void *data)
execbuf.flags |= I915_EXEC_NO_RELOC;
if (t->gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
- if (t->flags & CONTEXTS) {
- execbuf.rsvd1 = gem_context_clone_with_engines(fd, 0);
+ if (t->flags & (CONTEXTS | FDS)) {
+ tmp_ctx = intel_ctx_create(fd, &t->ctx->cfg);
+ execbuf.rsvd1 = tmp_ctx->id;
+ } else {
+ execbuf.rsvd1 = t->ctx->id;
}
used = 0;
@@ -142,8 +146,8 @@ static void *thread(void *data)
gem_close(fd, obj[0].handle);
}
- if (t->flags & CONTEXTS)
- gem_context_destroy(fd, execbuf.rsvd1);
+ if (t->flags & (CONTEXTS | FDS))
+ intel_ctx_destroy(fd, tmp_ctx);
gem_close(fd, obj[1].handle);
if (t->flags & FDS)
close(fd);
@@ -197,7 +201,8 @@ static void handle_close(int fd, unsigned int flags, uint32_t handle, void *data
gem_close(fd, handle);
}
-static void all(int fd, struct intel_execution_engine2 *engine, unsigned flags)
+static void all(int fd, const intel_ctx_t *ctx,
+ struct intel_execution_engine2 *engine, unsigned flags)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
unsigned engines[I915_EXEC_RING_MASK + 1], nengine;
@@ -220,7 +225,7 @@ static void all(int fd, struct intel_execution_engine2 *engine, unsigned flags)
nengine = 0;
if (!engine) {
struct intel_execution_engine2 *e;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (gem_class_can_store_dword(fd, e->class))
engines[nengine++] = e->flags;
}
@@ -247,6 +252,7 @@ static void all(int fd, struct intel_execution_engine2 *engine, unsigned flags)
threads[i].id = i;
threads[i].fd = fd;
threads[i].gen = gen;
+ threads[i].ctx = ctx;
threads[i].engine = engines[i % nengine];
threads[i].flags = flags;
threads[i].scratch = scratch;
@@ -288,11 +294,13 @@ igt_main
{ "userptr", USERPTR },
{ NULL }
};
+ const intel_ctx_t *ctx;
int fd;
igt_fixture {
fd = drm_open_driver_master(DRIVER_INTEL);
igt_require_gem(fd);
+ ctx = intel_ctx_create_all_physical(fd);
igt_fork_hang_detector(fd);
}
@@ -301,21 +309,22 @@ igt_main
for (const struct mode *m = modes; m->name; m++)
igt_dynamic(m->name)
/* NULL value means all engines */
- all(fd, NULL, m->flags);
+ all(fd, ctx, NULL, m->flags);
}
for (const struct mode *m = modes; m->name; m++) {
igt_subtest_with_dynamic(m->name) {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (gem_class_can_store_dword(fd, e->class))
igt_dynamic(e->name)
- all(fd, e, m->flags);
+ all(fd, ctx, e, m->flags);
}
}
}
igt_fixture {
igt_stop_hang_detector();
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 33/81] tests/i915/gem_exec_latency: Convert to intel_ctx_t (v3)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (31 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 32/81] tests/i915/gem_exec_parallel: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 34/81] tests/i915/gem_watchdog: Convert to intel_ctx_t (v2) Jason Ekstrand
` (50 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't NULL-init ctx
v3 (Jason Ekstrand):
- Pass the context config to gem_submission_measure()
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_latency.c | 122 +++++++++++++++++++---------------
1 file changed, 67 insertions(+), 55 deletions(-)
diff --git a/tests/i915/gem_exec_latency.c b/tests/i915/gem_exec_latency.c
index 62bad6171..fcdf7787b 100644
--- a/tests/i915/gem_exec_latency.c
+++ b/tests/i915/gem_exec_latency.c
@@ -59,9 +59,11 @@ static unsigned int ring_size;
static double rcs_clock;
static struct intel_mmio_data mmio_data;
-static void poll_ring(int fd, const struct intel_execution_engine2 *e)
+static void poll_ring(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
const struct igt_spin_factory opts = {
+ .ctx = ctx,
.engine = e->flags,
.flags = IGT_SPIN_POLL_RUN | IGT_SPIN_FAST,
};
@@ -101,7 +103,7 @@ static void poll_ring(int fd, const struct intel_execution_engine2 *e)
}
#define TIMESTAMP (0x358)
-static void latency_on_ring(int fd,
+static void latency_on_ring(int fd, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
unsigned flags)
{
@@ -128,6 +130,7 @@ static void latency_on_ring(int fd,
execbuf.buffer_count = 2;
execbuf.flags = e->flags;
execbuf.flags |= I915_EXEC_NO_RELOC | I915_EXEC_HANDLE_LUT;
+ execbuf.rsvd1 = ctx->id;
memset(obj, 0, sizeof(obj));
obj[1].handle = gem_create(fd, 4096);
@@ -250,7 +253,7 @@ static void latency_on_ring(int fd,
gem_close(fd, obj[2].handle);
}
-static void latency_from_ring(int fd,
+static void latency_from_ring(int fd, const intel_ctx_t *base_ctx,
const struct intel_execution_engine2 *e,
unsigned flags)
{
@@ -263,17 +266,17 @@ static void latency_from_ring(int fd,
const unsigned int repeats = ring_size / 2;
const struct intel_execution_engine2 *other;
uint32_t *map, *results;
- uint32_t ctx[2] = {};
+ const intel_ctx_t *ctx[2] = {base_ctx, base_ctx};
int i, j;
igt_require(mmio_base);
if (flags & PREEMPT) {
- ctx[0] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[0], -1023);
+ ctx[0] = intel_ctx_create(fd, &base_ctx->cfg);
+ gem_context_set_priority(fd, ctx[0]->id, -1023);
- ctx[1] = gem_context_clone_with_engines(fd, 0);
- gem_context_set_priority(fd, ctx[1], 1023);
+ ctx[1] = intel_ctx_create(fd, &base_ctx->cfg);
+ gem_context_set_priority(fd, ctx[1]->id, 1023);
}
memset(&execbuf, 0, sizeof(execbuf));
@@ -281,7 +284,7 @@ static void latency_from_ring(int fd,
execbuf.buffer_count = 2;
execbuf.flags = e->flags;
execbuf.flags |= I915_EXEC_NO_RELOC | I915_EXEC_HANDLE_LUT;
- execbuf.rsvd1 = ctx[1];
+ execbuf.rsvd1 = ctx[1]->id;
memset(obj, 0, sizeof(obj));
obj[1].handle = gem_create(fd, 4096);
@@ -309,7 +312,7 @@ static void latency_from_ring(int fd,
reloc.presumed_offset = obj[1].offset;
reloc.target_handle = flags & CORK ? 1 : 0;
- __for_each_physical_engine(fd, other) {
+ for_each_ctx_engine(fd, base_ctx, other) {
igt_spin_t *spin = NULL;
IGT_CORK_HANDLE(c);
@@ -319,7 +322,7 @@ static void latency_from_ring(int fd,
if (flags & PREEMPT)
spin = __igt_spin_new(fd,
- .ctx_id = ctx[0],
+ .ctx = ctx[0],
.engine = e->flags);
if (flags & CORK) {
@@ -403,12 +406,13 @@ static void latency_from_ring(int fd,
gem_close(fd, obj[2].handle);
if (flags & PREEMPT) {
- gem_context_destroy(fd, ctx[1]);
- gem_context_destroy(fd, ctx[0]);
+ intel_ctx_destroy(fd, ctx[1]);
+ intel_ctx_destroy(fd, ctx[0]);
}
}
-static void execution_latency(int i915, const struct intel_execution_engine2 *e)
+static void execution_latency(int i915, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
struct drm_i915_gem_exec_object2 obj = {
.handle = gem_create(i915, 4095),
@@ -417,6 +421,7 @@ static void execution_latency(int i915, const struct intel_execution_engine2 *e)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = e->flags | I915_EXEC_NO_RELOC | I915_EXEC_HANDLE_LUT,
+ .rsvd1 = ctx->id,
};
const uint32_t mmio_base = gem_engine_mmio_base(i915, e->name);
const unsigned int cs_timestamp = mmio_base + 0x358;
@@ -489,7 +494,8 @@ static void execution_latency(int i915, const struct intel_execution_engine2 *e)
gem_close(i915, obj.handle);
}
-static void wakeup_latency(int i915, const struct intel_execution_engine2 *e)
+static void wakeup_latency(int i915, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
struct drm_i915_gem_exec_object2 obj = {
.handle = gem_create(i915, 4095),
@@ -498,6 +504,7 @@ static void wakeup_latency(int i915, const struct intel_execution_engine2 *e)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = e->flags | I915_EXEC_NO_RELOC | I915_EXEC_HANDLE_LUT,
+ .rsvd1 = ctx->id,
};
const uint32_t mmio_base = gem_engine_mmio_base(i915, e->name);
const unsigned int cs_timestamp = mmio_base + 0x358;
@@ -598,7 +605,8 @@ static bool __spin_wait(int fd, igt_spin_t *spin)
* Test whether RT thread which hogs the CPU a lot can submit work with
* reasonable latency.
*/
-static void rthog_latency_on_ring(int fd, const struct intel_execution_engine2 *e)
+static void rthog_latency_on_ring(int fd, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
const char *passname[] = {
"warmup",
@@ -614,6 +622,7 @@ static void rthog_latency_on_ring(int fd, const struct intel_execution_engine2 *
#define NPASS ARRAY_SIZE(passname)
#define MMAP_SZ (64 << 10)
const struct igt_spin_factory opts = {
+ .ctx = ctx,
.engine = e->flags,
.flags = IGT_SPIN_POLL_RUN | IGT_SPIN_FAST,
};
@@ -736,7 +745,7 @@ static void rthog_latency_on_ring(int fd, const struct intel_execution_engine2 *
munmap(results, MMAP_SZ);
}
-static void context_switch(int i915,
+static void context_switch(int i915, const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e,
unsigned int flags)
{
@@ -746,17 +755,17 @@ static void context_switch(int i915,
uint32_t *cs, *bbe, *results, v;
const uint32_t mmio_base = gem_engine_mmio_base(i915, e->name);
struct igt_mean mean;
- uint32_t ctx[2];
+ const intel_ctx_t *tmp_ctx[2];
igt_require(mmio_base);
igt_require(gem_class_has_mutable_submission(i915, e->class));
- for (int i = 0; i < ARRAY_SIZE(ctx); i++)
- ctx[i] = gem_context_clone_with_engines(i915, 0);
+ for (int i = 0; i < ARRAY_SIZE(tmp_ctx); i++)
+ tmp_ctx[i] = intel_ctx_create(i915, &ctx->cfg);
if (flags & PREEMPT) {
- gem_context_set_priority(i915, ctx[0], -1023);
- gem_context_set_priority(i915, ctx[1], +1023);
+ gem_context_set_priority(i915, tmp_ctx[0]->id, -1023);
+ gem_context_set_priority(i915, tmp_ctx[1]->id, +1023);
}
memset(obj, 0, sizeof(obj));
@@ -816,14 +825,14 @@ static void context_switch(int i915,
v = 0;
igt_mean_init(&mean);
igt_until_timeout(5) {
- eb.rsvd1 = ctx[0];
+ eb.rsvd1 = tmp_ctx[0]->id;
eb.batch_start_offset = 0;
gem_execbuf(i915, &eb);
while (results[0] == v)
igt_assert(gem_bo_busy(i915, obj[1].handle));
- eb.rsvd1 = ctx[1];
+ eb.rsvd1 = tmp_ctx[1]->id;
eb.batch_start_offset = 64 * sizeof(*cs);
gem_execbuf(i915, &eb);
@@ -844,8 +853,8 @@ static void context_switch(int i915,
for (int i = 0; i < ARRAY_SIZE(obj); i++)
gem_close(i915, obj[i].handle);
- for (int i = 0; i < ARRAY_SIZE(ctx); i++)
- gem_context_destroy(i915, ctx[i]);
+ for (int i = 0; i < ARRAY_SIZE(tmp_ctx); i++)
+ intel_ctx_destroy(i915, tmp_ctx[i]);
}
static double clockrate(int i915, int reg)
@@ -880,24 +889,26 @@ static double clockrate(int i915, int reg)
return (r_end - r_start) * 1e9 / elapsed;
}
-#define test_each_engine(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_engine(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
for_each_if(gem_class_can_store_dword(i915, (e)->class)) \
igt_dynamic_f("%s", (e)->name)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int device = -1;
igt_fixture {
device = drm_open_driver(DRIVER_INTEL);
igt_require_gem(device);
gem_require_mmap_wc(device);
+ ctx = intel_ctx_create_all_physical(device);
gem_submission_print_method(device);
- ring_size = gem_submission_measure(device, NULL, ALL_ENGINES);
+ ring_size = gem_submission_measure(device, &ctx->cfg, ALL_ENGINES);
igt_info("Ring size: %d batches\n", ring_size);
igt_require(ring_size > 8);
ring_size -= 8; /* leave some spare */
@@ -915,31 +926,31 @@ igt_main
igt_fixture
igt_require(intel_gen(intel_get_drm_devid(device)) >= 7);
- test_each_engine("rthog-submit", device, e)
- rthog_latency_on_ring(device, e);
+ test_each_engine("rthog-submit", device, ctx, e)
+ rthog_latency_on_ring(device, ctx, e);
- test_each_engine("dispatch", device, e)
- latency_on_ring(device, e, 0);
- test_each_engine("dispatch-queued", device, e)
- latency_on_ring(device, e, CORK);
+ test_each_engine("dispatch", device, ctx, e)
+ latency_on_ring(device, ctx, e, 0);
+ test_each_engine("dispatch-queued", device, ctx, e)
+ latency_on_ring(device, ctx, e, CORK);
- test_each_engine("live-dispatch", device, e)
- latency_on_ring(device, e, LIVE);
- test_each_engine("live-dispatch-queued", device, e)
- latency_on_ring(device, e, LIVE | CORK);
+ test_each_engine("live-dispatch", device, ctx, e)
+ latency_on_ring(device, ctx, e, LIVE);
+ test_each_engine("live-dispatch-queued", device, ctx, e)
+ latency_on_ring(device, ctx, e, LIVE | CORK);
- test_each_engine("poll", device, e)
- poll_ring(device, e);
+ test_each_engine("poll", device, ctx, e)
+ poll_ring(device, ctx, e);
- test_each_engine("synchronisation", device, e)
- latency_from_ring(device, e, 0);
- test_each_engine("synchronisation-queued", device, e)
- latency_from_ring(device, e, CORK);
+ test_each_engine("synchronisation", device, ctx, e)
+ latency_from_ring(device, ctx, e, 0);
+ test_each_engine("synchronisation-queued", device, ctx, e)
+ latency_from_ring(device, ctx, e, CORK);
- test_each_engine("execution-latency", device, e)
- execution_latency(device, e);
- test_each_engine("wakeup-latency", device, e)
- wakeup_latency(device, e);
+ test_each_engine("execution-latency", device, ctx, e)
+ execution_latency(device, ctx, e);
+ test_each_engine("wakeup-latency", device, ctx, e)
+ wakeup_latency(device, ctx, e);
igt_subtest_group {
igt_fixture {
@@ -947,17 +958,18 @@ igt_main
igt_require(gem_scheduler_has_preemption(device));
}
- test_each_engine("preemption", device, e)
- latency_from_ring(device, e, PREEMPT);
- test_each_engine("context-switch", device, e)
- context_switch(device, e, 0);
- test_each_engine("context-preempt", device, e)
- context_switch(device, e, PREEMPT);
+ test_each_engine("preemption", device, ctx, e)
+ latency_from_ring(device, ctx, e, PREEMPT);
+ test_each_engine("context-switch", device, ctx, e)
+ context_switch(device, ctx, e, 0);
+ test_each_engine("context-preempt", device, ctx, e)
+ context_switch(device, ctx, e, PREEMPT);
}
}
igt_fixture {
intel_register_access_fini(&mmio_data);
+ intel_ctx_destroy(device, ctx);
close(device);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 34/81] tests/i915/gem_watchdog: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (32 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 33/81] tests/i915/gem_exec_latency: Convert to intel_ctx_t (v3) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 35/81] tests/i915/gem_shrink: Convert to intel_ctx_t (v5) Jason Ekstrand
` (49 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Jason Ekstrand):
- Rebase on Tvrtko's changes
- Fix an issue in default-virtual with the set-once rule for engines
v2 (Ashutosh Dixit):
- Use SINGLE_TIMELINE in far_delay()
- Assert tmp_ctx->id != 0 in far_delay()
- Call intel_ctx_destroy()
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_watchdog.c | 174 +++++++++++---------------------------
1 file changed, 49 insertions(+), 125 deletions(-)
diff --git a/tests/i915/gem_watchdog.c b/tests/i915/gem_watchdog.c
index 92cb4e856..4d4aaee48 100644
--- a/tests/i915/gem_watchdog.c
+++ b/tests/i915/gem_watchdog.c
@@ -31,36 +31,13 @@
#include "i915/gem.h"
#include "i915/gem_create.h"
+#include "i915/gem_vm.h"
#include "igt.h"
#include "igt_params.h"
#include "sw_sync.h"
#define EWATCHDOG EINTR
-static struct drm_i915_query_engine_info *__engines__;
-
-static int __i915_query(int fd, struct drm_i915_query *q)
-{
- if (igt_ioctl(fd, DRM_IOCTL_I915_QUERY, q))
- return -errno;
- return 0;
-}
-
-static int
-__i915_query_items(int fd, struct drm_i915_query_item *items, uint32_t n_items)
-{
- struct drm_i915_query q = {
- .num_items = n_items,
- .items_ptr = to_user_pointer(items),
- };
- return __i915_query(fd, &q);
-}
-
-#define i915_query_items(fd, items, n_items) do { \
- igt_assert_eq(__i915_query_items(fd, items, n_items), 0); \
- errno = 0; \
- } while (0)
-
static unsigned int default_timeout_wait_s;
static const unsigned int watchdog_us = 500 * 1000;
@@ -128,56 +105,43 @@ static unsigned int spin_flags(void)
return IGT_SPIN_POLL_RUN | IGT_SPIN_FENCE_OUT;
}
-static void physical(int i915)
+static void physical(int i915, const intel_ctx_t *ctx)
{
const unsigned int wait_us = default_timeout_wait_s * USEC_PER_SEC;
- unsigned int num_engines = __engines__->num_engines, i, count;
+ unsigned int num_engines, i, count;
const struct intel_execution_engine2 *e;
- unsigned int expect = num_engines;
- igt_spin_t *spin[num_engines];
+ igt_spin_t *spin[GEM_MAX_ENGINES];
i = 0;
- __for_each_physical_engine(i915, e) {
- spin[i] = igt_spin_new(i915,
+ for_each_ctx_engine(i915, ctx, e) {
+ spin[i] = igt_spin_new(i915, .ctx = ctx,
.engine = e->flags,
.flags = spin_flags());
i++;
}
+ num_engines = i;
- count = wait_timeout(i915, spin, num_engines, wait_us, expect);
+ count = wait_timeout(i915, spin, num_engines, wait_us, num_engines);
for (i = 0; i < num_engines; i++)
igt_spin_free(i915, spin[i]);
- igt_assert_eq(count, expect);
+ igt_assert_eq(count, num_engines);
}
static struct i915_engine_class_instance *
-list_engines(unsigned int class, unsigned int *out)
+list_engines(const intel_ctx_cfg_t *cfg,
+ unsigned int class, unsigned int *out)
{
struct i915_engine_class_instance *ci;
- unsigned int count = 0, size = 64, i;
+ unsigned int count = 0, i;
- ci = malloc(size * sizeof(*ci));
+ ci = malloc(cfg->num_engines * sizeof(*ci));
igt_assert(ci);
- for (i = 0; i < __engines__->num_engines; i++) {
- struct drm_i915_engine_info *engine =
- (struct drm_i915_engine_info *)&__engines__->engines[i];
-
- if (class != engine->engine.engine_class)
- continue;
-
- if (count == size) {
- size *= 2;
- ci = realloc(ci, size * sizeof(*ci));
- igt_assert(ci);
- }
-
- ci[count++] = (struct i915_engine_class_instance){
- .engine_class = class,
- .engine_instance = engine->engine.engine_instance,
- };
+ for (i = 0; i < cfg->num_engines; i++) {
+ if (class == cfg->engines[i].engine_class)
+ ci[count++] = cfg->engines[i];
}
if (!count) {
@@ -244,49 +208,27 @@ static void set_load_balancer(int i915, uint32_t ctx,
igt_assert_eq(__set_load_balancer(i915, ctx, ci, count, ext), 0);
}
-static void ctx_set_vm(int i915, uint32_t ctx, uint32_t vm)
-{
- struct drm_i915_gem_context_param arg = {
- .param = I915_CONTEXT_PARAM_VM,
- .ctx_id = ctx,
- .value = vm,
- };
-
- gem_context_set_param(i915, &arg);
-}
-
-static uint32_t ctx_get_vm(int i915, uint32_t ctx)
-{
- struct drm_i915_gem_context_param arg;
-
- memset(&arg, 0, sizeof(arg));
- arg.param = I915_CONTEXT_PARAM_VM;
- arg.ctx_id = ctx;
- gem_context_get_param(i915, &arg);
- igt_assert(arg.value);
-
- return arg.value;
-}
-
-static void virtual(int i915)
+static void virtual(int i915, const intel_ctx_cfg_t *base_cfg)
{
const unsigned int wait_us = default_timeout_wait_s * USEC_PER_SEC;
- unsigned int num_engines = __engines__->num_engines, i, count;
+ unsigned int num_engines = base_cfg->num_engines, i, count;
igt_spin_t *spin[num_engines];
unsigned int expect = num_engines;
- uint32_t ctx[num_engines];
- uint32_t vm;
+ intel_ctx_cfg_t cfg = {};
+ const intel_ctx_t *ctx[num_engines];
igt_require(gem_has_execlists(i915));
igt_debug("%u virtual engines\n", num_engines);
igt_require(num_engines);
+ cfg.vm = gem_vm_create(i915);
+
i = 0;
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *ci;
- ci = list_engines(class, &count);
+ ci = list_engines(base_cfg, class, &count);
if (!ci)
continue;
@@ -296,17 +238,12 @@ static void virtual(int i915)
igt_assert(i < num_engines);
- ctx[i] = gem_context_create(i915);
-
- if (!i)
- vm = ctx_get_vm(i915, ctx[i]);
- else
- ctx_set_vm(i915, ctx[i], vm);
+ ctx[i] = intel_ctx_create(i915, &cfg);
- set_load_balancer(i915, ctx[i], ci, count, NULL);
+ set_load_balancer(i915, ctx[i]->id, ci, count, NULL);
spin[i] = igt_spin_new(i915,
- .ctx_id = ctx[i],
+ .ctx = ctx[i],
.flags = spin_flags());
i++;
}
@@ -317,8 +254,8 @@ static void virtual(int i915)
count = wait_timeout(i915, spin, num_engines, wait_us, expect);
for (i = 0; i < num_engines && spin[i]; i++) {
- gem_context_destroy(i915, ctx[i]);
igt_spin_free(i915, spin[i]);
+ intel_ctx_destroy(i915, ctx[i]);
}
igt_assert_eq(count, expect);
@@ -499,17 +436,6 @@ delay_create(int i915, uint32_t ctx,
return obj;
}
-static uint32_t vm_clone(int i915)
-{
- uint32_t ctx = 0;
- __gem_context_clone(i915, 0,
- I915_CONTEXT_CLONE_VM |
- I915_CONTEXT_CLONE_ENGINES,
- I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE,
- &ctx);
- return ctx;
-}
-
static int __execbuf(int i915, struct drm_i915_gem_execbuffer2 *execbuf)
{
int err;
@@ -526,6 +452,7 @@ static int __execbuf(int i915, struct drm_i915_gem_execbuffer2 *execbuf)
static uint32_t
far_delay(int i915, unsigned long delay, unsigned int target,
+ const intel_ctx_t *ctx,
const struct intel_execution_engine2 *e, int *fence)
{
struct drm_i915_gem_exec_object2 obj = delay_create(i915, 0, e, delay);
@@ -540,6 +467,7 @@ far_delay(int i915, unsigned long delay, unsigned int target,
.buffer_count = 2,
.flags = e->flags,
};
+ intel_ctx_cfg_t cfg = ctx->cfg;
uint32_t handle = gem_create(i915, 4096);
unsigned long count, submit;
@@ -552,23 +480,27 @@ far_delay(int i915, unsigned long delay, unsigned int target,
submit *= NSEC_PER_SEC;
submit /= 2 * delay;
+ if (gem_has_vm(i915))
+ cfg.vm = gem_vm_create(i915);
+ cfg.flags |= I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE;
+
/*
* Submit a few long chains of individually short pieces of work
* against a shared object.
*/
for (count = 0; count < submit;) {
- execbuf.rsvd1 = vm_clone(i915);
- if (!execbuf.rsvd1)
- break;
+ const intel_ctx_t *tmp_ctx = intel_ctx_create(i915, &cfg);
+ igt_assert(tmp_ctx->id);
+ execbuf.rsvd1 = tmp_ctx->id;
batch[1] = obj;
while (__execbuf(i915, &execbuf) == 0)
count++;
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, tmp_ctx);
}
execbuf.flags |= I915_EXEC_FENCE_OUT;
- execbuf.rsvd1 = 0;
+ execbuf.rsvd1 = ctx->id;
batch[1] = batch[0];
batch[1].flags &= ~EXEC_OBJECT_WRITE;
batch[0].handle = handle;
@@ -584,11 +516,12 @@ far_delay(int i915, unsigned long delay, unsigned int target,
}
static void
-far_fence(int i915, int timeout, const struct intel_execution_engine2 *e)
+far_fence(int i915, int timeout, const intel_ctx_t *ctx,
+ const struct intel_execution_engine2 *e)
{
int fence = -1;
uint32_t handle =
- far_delay(i915, NSEC_PER_SEC / 250, timeout, e, &fence);
+ far_delay(i915, NSEC_PER_SEC / 250, timeout, ctx, e, &fence);
gem_close(i915, handle);
@@ -627,10 +560,10 @@ far_fence(int i915, int timeout, const struct intel_execution_engine2 *e)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int i915 = -1;
igt_fixture {
- struct drm_i915_query_item item;
const unsigned int timeout = 1;
char *tmp;
@@ -650,36 +583,27 @@ igt_main
default_timeout_wait_s = timeout * 5;
i915 = gem_reopen_driver(i915); /* Apply modparam. */
-
- __engines__ = malloc(4096);
- igt_assert(__engines__);
- memset(__engines__, 0, 4096);
- memset(&item, 0, sizeof(item));
- item.query_id = DRM_I915_QUERY_ENGINE_INFO;
- item.data_ptr = to_user_pointer(__engines__);
- item.length = 4096;
- i915_query_items(i915, &item, 1);
- igt_assert(item.length >= 0);
- igt_assert(item.length <= 4096);
- igt_assert(__engines__->num_engines > 0);
+ ctx = intel_ctx_create_all_physical(i915);
}
igt_subtest_group {
igt_subtest("default-physical")
- physical(i915);
+ physical(i915, ctx);
igt_subtest("default-virtual")
- virtual(i915);
+ virtual(i915, &ctx->cfg);
}
igt_subtest_with_dynamic("far-fence") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- far_fence(i915, default_timeout_wait_s * 3, e);
+ far_fence(i915, default_timeout_wait_s * 3,
+ ctx, e);
}
}
igt_fixture {
+ intel_ctx_destroy(i915, ctx);
close(i915);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 35/81] tests/i915/gem_shrink: Convert to intel_ctx_t (v5)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (33 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 34/81] tests/i915/gem_watchdog: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 36/81] tests/i915/gem_exec_params: Convert to intel_ctx_t Jason Ekstrand
` (48 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Only one subtest actually needs to iterate over engines.
v2 (Zbigniew Kempczyński):
- Rework the execbufX loop to be more robust
v3 (Ashutosh Dixit):
- Don't explicitly create VMs if we don't have full PPGTT
v4 (Ashutosh Dixit):
- Fix a j -> i typo
v5 (Zbigniew Kempczyński):
- Drop the stray gem_exec_schedule.c changes introduced by a rebase failure
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_shrink.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/tests/i915/gem_shrink.c b/tests/i915/gem_shrink.c
index b6450a6fb..ae61d0759 100644
--- a/tests/i915/gem_shrink.c
+++ b/tests/i915/gem_shrink.c
@@ -38,8 +38,6 @@
#define MADV_FREE 8
#endif
-static unsigned int engines[I915_EXEC_RING_MASK + 1], nengine;
-
static void get_pages(int fd, uint64_t alloc)
{
uint32_t handle = gem_create(fd, alloc);
@@ -164,8 +162,10 @@ static void execbufN(int fd, uint64_t alloc)
static void execbufX(int fd, uint64_t alloc)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
+ struct intel_engine_data engines;
struct drm_i915_gem_exec_object2 *obj;
struct drm_i915_gem_execbuffer2 execbuf;
+ const intel_ctx_t *ctx;
int count = alloc >> 20;
uint64_t obj_size;
@@ -175,6 +175,9 @@ static void execbufX(int fd, uint64_t alloc)
obj[count].handle = gem_create(fd, 4096);
gem_write(fd, obj[count].handle, 0, &bbe, sizeof(bbe));
+ ctx = intel_ctx_create_all_physical(fd);
+ engines = intel_engine_list_for_ctx_cfg(fd, &ctx->cfg);
+
for (int i = 1; i <= count; i++) {
int j = count - i;
@@ -185,13 +188,16 @@ static void execbufX(int fd, uint64_t alloc)
execbuf.buffers_ptr = to_user_pointer(&obj[j]);
execbuf.buffer_count = i + 1;
- execbuf.flags = engines[j % nengine];
+ execbuf.flags = engines.engines[j % engines.nengines].flags;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
}
for (int i = 0; i <= count; i++)
gem_madvise(fd, obj[i].handle, I915_MADV_DONTNEED);
munmap(obj, obj_size);
+
+ intel_ctx_destroy(fd, ctx);
}
static void hang(int fd, uint64_t alloc)
@@ -429,7 +435,6 @@ igt_main
igt_fixture {
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
uint64_t mem_size = intel_get_total_ram_mb();
- const struct intel_execution_engine2 *e;
int fd;
fd = drm_open_driver(DRIVER_INTEL);
@@ -451,11 +456,6 @@ igt_main
intel_require_memory(num_processes, alloc_size,
CHECK_SWAP | CHECK_RAM);
- nengine = 0;
- __for_each_physical_engine(fd, e)
- engines[nengine++] = e->flags;
- igt_require(nengine);
-
close(fd);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 36/81] tests/i915/gem_exec_params: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (34 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 35/81] tests/i915/gem_shrink: Convert to intel_ctx_t (v5) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 37/81] tests/i915/gem_exec_gttfill: Convert to intel_ctx_t (v2) Jason Ekstrand
` (47 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_params.c | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/tests/i915/gem_exec_params.c b/tests/i915/gem_exec_params.c
index 6ab1ab110..729d38a43 100644
--- a/tests/i915/gem_exec_params.c
+++ b/tests/i915/gem_exec_params.c
@@ -299,8 +299,10 @@ static void test_larger_than_life_batch(int fd)
{
const struct intel_execution_engine2 *e;
uint64_t size = 1ULL << 32; /* batch_len is __u32 as per the ABI */
+ const intel_ctx_t *ctx = intel_ctx_create_all_physical(fd);
struct drm_i915_gem_exec_object2 exec = {
.handle = batch_create_size(fd, size),
+ .rsvd1 = ctx->id,
};
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&exec),
@@ -316,7 +318,7 @@ static void test_larger_than_life_batch(int fd)
igt_require(size < gem_aperture_size(fd));
intel_require_memory(2, size, CHECK_RAM); /* batch + shadow */
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
/* Keep the batch_len implicit [0] */
execbuf.flags = e->flags;
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 37/81] tests/i915/gem_exec_gttfill: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (35 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 36/81] tests/i915/gem_exec_params: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 38/81] tests/i915/gem_exec_capture: " Jason Ekstrand
` (46 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't NULL-init ctx
- Add intel_ctx_destroy()
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_gttfill.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/tests/i915/gem_exec_gttfill.c b/tests/i915/gem_exec_gttfill.c
index e711576f5..b8283eb8f 100644
--- a/tests/i915/gem_exec_gttfill.c
+++ b/tests/i915/gem_exec_gttfill.c
@@ -106,7 +106,7 @@ static void submit(int fd, int gen,
gem_sync(fd, obj.handle);
}
-static void fillgtt(int fd, unsigned ring, int timeout)
+static void fillgtt(int fd, const intel_ctx_t *ctx, unsigned ring, int timeout)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_execbuffer2 execbuf;
@@ -126,7 +126,7 @@ static void fillgtt(int fd, unsigned ring, int timeout)
if (ring == ALL_ENGINES) {
struct intel_execution_engine2 *e;
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
if (!gem_class_can_store_dword(fd, e->class))
continue;
@@ -155,6 +155,7 @@ static void fillgtt(int fd, unsigned ring, int timeout)
execbuf.buffer_count = 1;
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
batches = calloc(count, sizeof(*batches));
igt_assert(batches);
@@ -209,32 +210,35 @@ static void fillgtt(int fd, unsigned ring, int timeout)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int i915 = -1;
igt_fixture {
i915 = drm_open_driver(DRIVER_INTEL);
igt_require_gem(i915);
+ ctx = intel_ctx_create_all_physical(i915);
igt_fork_hang_detector(i915);
}
igt_subtest("basic") /* just enough to run a single pass */
- fillgtt(i915, ALL_ENGINES, 1);
+ fillgtt(i915, ctx, ALL_ENGINES, 1);
igt_subtest_with_dynamic("engines") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (!gem_class_can_store_dword(i915, e->class))
continue;
igt_dynamic_f("%s", e->name)
- fillgtt(i915, e->flags, 20);
+ fillgtt(i915, ctx, e->flags, 20);
}
}
igt_subtest("all")
- fillgtt(i915, ALL_ENGINES, 20);
+ fillgtt(i915, ctx, ALL_ENGINES, 20);
igt_fixture {
igt_stop_hang_detector();
+ intel_ctx_destroy(i915, ctx);
close(i915);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 38/81] tests/i915/gem_exec_capture: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (36 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 37/81] tests/i915/gem_exec_gttfill: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 39/81] tests/i915/gem_exec_create: Convert to intel_ctx_t Jason Ekstrand
` (45 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't NULL-init ctx
- Destroy ctx at the end
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_capture.c | 31 +++++++++++++++++++------------
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/tests/i915/gem_exec_capture.c b/tests/i915/gem_exec_capture.c
index a6b3d987f..f59cb09da 100644
--- a/tests/i915/gem_exec_capture.c
+++ b/tests/i915/gem_exec_capture.c
@@ -61,7 +61,8 @@ static void check_error_state(int dir, struct drm_i915_gem_exec_object2 *obj)
igt_assert(found);
}
-static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
+static void __capture1(int fd, int dir, const intel_ctx_t *ctx,
+ unsigned ring, uint32_t target)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(fd));
struct drm_i915_gem_exec_object2 obj[4];
@@ -148,6 +149,7 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
execbuf.flags = ring;
if (gen > 3 && gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
+ execbuf.rsvd1 = ctx->id;
igt_assert(!READ_ONCE(*seqno));
gem_execbuf(fd, &execbuf);
@@ -168,12 +170,12 @@ static void __capture1(int fd, int dir, unsigned ring, uint32_t target)
gem_close(fd, obj[SCRATCH].handle);
}
-static void capture(int fd, int dir, unsigned ring)
+static void capture(int fd, int dir, const intel_ctx_t *ctx, unsigned ring)
{
uint32_t handle;
handle = gem_create(fd, 4096);
- __capture1(fd, dir, ring, handle);
+ __capture1(fd, dir, ctx, ring, handle);
gem_close(fd, handle);
}
@@ -496,7 +498,8 @@ static void many(int fd, int dir, uint64_t size, unsigned int flags)
free(offsets);
}
-static void prioinv(int fd, int dir, unsigned ring, const char *name)
+static void prioinv(int fd, int dir, const intel_ctx_t *ctx,
+ unsigned ring, const char *name)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_exec_object2 obj = {
@@ -506,6 +509,7 @@ static void prioinv(int fd, int dir, unsigned ring, const char *name)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = ring,
+ .rsvd1 = ctx->id,
};
int64_t timeout = NSEC_PER_SEC; /* 1s, feeling generous, blame debug */
uint64_t ram, gtt, size = 4 << 20;
@@ -573,7 +577,7 @@ static void userptr(int fd, int dir)
igt_assert(posix_memalign(&ptr, 4096, 4096) == 0);
igt_require(__gem_userptr(fd, ptr, 4096, 0, 0, &handle) == 0);
- __capture1(fd, dir, 0, handle);
+ __capture1(fd, dir, intel_ctx_0(fd), 0, handle);
gem_close(fd, handle);
free(ptr);
@@ -596,14 +600,15 @@ static size_t safer_strlen(const char *s)
return s ? strlen(s) : 0;
}
-#define test_each_engine(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+#define test_each_engine(T, i915, ctx, e) \
+ igt_subtest_with_dynamic(T) for_each_ctx_engine(i915, ctx, e) \
for_each_if(gem_class_can_store_dword(i915, (e)->class)) \
igt_dynamic_f("%s", (e)->name)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
igt_hang_t hang;
int fd = -1;
int dir = -1;
@@ -620,15 +625,16 @@ igt_main
igt_require_gem(fd);
gem_require_mmap_wc(fd);
igt_require(has_capture(fd));
- igt_allow_hang(fd, 0, HANG_ALLOW_CAPTURE);
+ ctx = intel_ctx_create_all_physical(fd);
+ igt_allow_hang(fd, ctx->id, HANG_ALLOW_CAPTURE);
dir = igt_sysfs_open(fd);
igt_require(igt_sysfs_set(dir, "error", "Begone!"));
igt_require(safer_strlen(igt_sysfs_get(dir, "error")) > 0);
}
- test_each_engine("capture", fd, e)
- capture(fd, dir, e->flags);
+ test_each_engine("capture", fd, ctx, e)
+ capture(fd, dir, ctx, e->flags);
igt_subtest_f("many-4K-zero") {
igt_require(gem_can_store_dword(fd, 0));
@@ -662,12 +668,13 @@ igt_main
userptr(fd, dir);
}
- test_each_engine("pi", fd, e)
- prioinv(fd, dir, e->flags, e->name);
+ test_each_engine("pi", fd, ctx, e)
+ prioinv(fd, dir, ctx, e->flags, e->name);
igt_fixture {
close(dir);
igt_disallow_hang(fd, hang);
+ intel_ctx_destroy(fd, ctx);
close(fd);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 39/81] tests/i915/gem_exec_create: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (37 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 38/81] tests/i915/gem_exec_capture: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 40/81] tests/i915/gem_exec_await: Convert to intel_ctx_t (v2) Jason Ekstrand
` (44 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_create.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/tests/i915/gem_exec_create.c b/tests/i915/gem_exec_create.c
index 8556cb213..612eb032f 100644
--- a/tests/i915/gem_exec_create.c
+++ b/tests/i915/gem_exec_create.c
@@ -61,16 +61,21 @@ static void all(int fd, unsigned flags, int timeout, int ncpus)
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj;
unsigned engines[I915_EXEC_RING_MASK + 1], nengine;
+ const intel_ctx_t *ctx;
nengine = 0;
if (flags & ENGINES) { /* Modern API to iterate over *all* engines */
const struct intel_execution_engine2 *e;
- __for_each_physical_engine(fd, e)
+ ctx = intel_ctx_create_all_physical(fd);
+
+ for_each_ctx_engine(fd, ctx, e)
engines[nengine++] = e->flags;
/* Note: modifies engine map on context 0 */
} else {
+ ctx = intel_ctx_0(fd);
+
for_each_physical_ring(e, fd)
engines[nengine++] = eb_ring(e);
}
@@ -85,6 +90,7 @@ static void all(int fd, unsigned flags, int timeout, int ncpus)
execbuf.buffer_count = 1;
execbuf.flags |= I915_EXEC_HANDLE_LUT;
execbuf.flags |= I915_EXEC_NO_RELOC;
+ execbuf.rsvd1 = ctx->id;
if (__gem_execbuf(fd, &execbuf)) {
execbuf.flags = 0;
gem_execbuf(fd, &execbuf);
@@ -132,6 +138,7 @@ static void all(int fd, unsigned flags, int timeout, int ncpus)
}
igt_waitchildren();
igt_assert_eq(intel_detect_and_clear_missed_interrupts(fd), 0);
+ intel_ctx_destroy(fd, ctx);
}
igt_main
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 40/81] tests/i915/gem_exec_await: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (38 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 39/81] tests/i915/gem_exec_create: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 41/81] tests/i915/gem_ctx_persistence: Drop the clone subtest Jason Ekstrand
` (43 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Jason Ekstrand):
- Create the context in the fixture
- Pass the context config to gem_submission_measure()
- Drop Zbigniew's review since it's close to a rewrite
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_await.c | 29 +++++++++++++++++++----------
1 file changed, 19 insertions(+), 10 deletions(-)
diff --git a/tests/i915/gem_exec_await.c b/tests/i915/gem_exec_await.c
index ba8325ce3..bea57c61b 100644
--- a/tests/i915/gem_exec_await.c
+++ b/tests/i915/gem_exec_await.c
@@ -53,7 +53,8 @@ static void xchg_obj(void *array, unsigned i, unsigned j)
}
#define CONTEXTS 0x1
-static void wide(int fd, int ring_size, int timeout, unsigned int flags)
+static void wide(int fd, const intel_ctx_t *ctx, int ring_size,
+ int timeout, unsigned int flags)
{
const struct intel_execution_engine2 *engine;
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -63,6 +64,7 @@ static void wide(int fd, int ring_size, int timeout, unsigned int flags)
struct drm_i915_gem_exec_object2 exec[2];
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_execbuffer2 execbuf;
+ const intel_ctx_t *ctx;
uint32_t *cmd;
} *exec;
struct drm_i915_gem_exec_object2 *obj;
@@ -72,7 +74,7 @@ static void wide(int fd, int ring_size, int timeout, unsigned int flags)
double time;
nengine = 0;
- __for_each_physical_engine(fd, engine) {
+ for_each_ctx_engine(fd, ctx, engine) {
if (!gem_class_has_mutable_submission(fd, engine->class))
continue;
@@ -106,7 +108,10 @@ static void wide(int fd, int ring_size, int timeout, unsigned int flags)
I915_EXEC_HANDLE_LUT);
if (flags & CONTEXTS) {
- exec[e].execbuf.rsvd1 = gem_context_create(fd);
+ exec[e].ctx = intel_ctx_create(fd, &ctx->cfg);
+ exec[e].execbuf.rsvd1 = exec[e].ctx->id;
+ } else {
+ exec[e].execbuf.rsvd1 = ctx->id;
}
exec[e].exec[0].handle = gem_create(fd, 4096);
@@ -151,9 +156,9 @@ static void wide(int fd, int ring_size, int timeout, unsigned int flags)
int i;
if (flags & CONTEXTS) {
- gem_context_destroy(fd, exec[e].execbuf.rsvd1);
- exec[e].execbuf.rsvd1 =
- gem_context_clone_with_engines(fd, 0);
+ intel_ctx_destroy(fd, exec[e].ctx);
+ exec[e].ctx = intel_ctx_create(fd, &ctx->cfg);
+ exec[e].execbuf.rsvd1 = exec[e].ctx->id;
}
exec[e].reloc.presumed_offset = exec[e].exec[1].offset;
@@ -193,6 +198,7 @@ static void wide(int fd, int ring_size, int timeout, unsigned int flags)
execbuf.flags = (engines[e] |
I915_EXEC_NO_RELOC |
I915_EXEC_HANDLE_LUT);
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(fd, &execbuf);
}
clock_gettime(CLOCK_MONOTONIC, &now);
@@ -214,7 +220,7 @@ static void wide(int fd, int ring_size, int timeout, unsigned int flags)
for (unsigned e = 0; e < nengine; e++) {
if (flags & CONTEXTS)
- gem_context_destroy(fd, exec[e].execbuf.rsvd1);
+ intel_ctx_destroy(fd, exec[e].ctx);
for (unsigned n = 0; n < ring_size; n++)
gem_close(fd, exec[e].obj[n].handle);
@@ -230,14 +236,16 @@ igt_main
{
int ring_size = 0;
int device = -1;
+ const intel_ctx_t *ctx;
igt_fixture {
device = drm_open_driver(DRIVER_INTEL);
igt_require_gem(device);
gem_submission_print_method(device);
+ ctx = intel_ctx_create_all_physical(device);
- ring_size = gem_submission_measure(device, NULL, ALL_ENGINES);
+ ring_size = gem_submission_measure(device, &ctx->cfg, ALL_ENGINES);
igt_info("Ring size: %d batches\n", ring_size);
igt_require(ring_size > 0);
@@ -246,15 +254,16 @@ igt_main
}
igt_subtest("wide-all")
- wide(device, ring_size, 20, 0);
+ wide(device, ctx, ring_size, 20, 0);
igt_subtest("wide-contexts") {
gem_require_contexts(device);
- wide(device, ring_size, 20, CONTEXTS);
+ wide(device, ctx, ring_size, 20, CONTEXTS);
}
igt_fixture {
igt_stop_hang_detector();
+ intel_ctx_destroy(device, ctx);
close(device);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 41/81] tests/i915/gem_ctx_persistence: Drop the clone subtest
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (39 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 40/81] tests/i915/gem_exec_await: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 42/81] tests/i915/gem_ctx_persistence: Drop the engine replace subtests Jason Ekstrand
` (42 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
The entire CONTEXT_CLONE_* API is being removed from upstream i915.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_ctx_persistence.c | 30 ------------------------------
1 file changed, 30 deletions(-)
diff --git a/tests/i915/gem_ctx_persistence.c b/tests/i915/gem_ctx_persistence.c
index 10d057f1b..91c301769 100644
--- a/tests/i915/gem_ctx_persistence.c
+++ b/tests/i915/gem_ctx_persistence.c
@@ -147,33 +147,6 @@ static void test_idempotent(int i915)
igt_assert_eq(p.value, expected);
}
-static void test_clone(int i915)
-{
- struct drm_i915_gem_context_param p = {
- .param = I915_CONTEXT_PARAM_PERSISTENCE,
- };
- uint32_t ctx, clone;
-
- /*
- * Check that persistence is inherited across a clone.
- */
- igt_require( __gem_context_create(i915, &ctx) == 0);
-
- p.ctx_id = ctx;
- p.value = 0;
- gem_context_set_param(i915, &p);
-
- clone = gem_context_clone(i915, ctx, I915_CONTEXT_CLONE_FLAGS, 0);
- gem_context_destroy(i915, ctx);
-
- p.ctx_id = clone;
- p.value = -1;
- gem_context_get_param(i915, &p);
- igt_assert_eq(p.value, 0);
-
- gem_context_destroy(i915, clone);
-}
-
static void test_persistence(int i915, unsigned int engine)
{
igt_spin_t *spin;
@@ -1366,9 +1339,6 @@ igt_main
igt_subtest("idempotent")
test_idempotent(i915);
- igt_subtest("clone")
- test_clone(i915);
-
igt_subtest("file")
test_nonpersistent_file(i915);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 42/81] tests/i915/gem_ctx_persistence: Drop the engine replace subtests
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (40 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 41/81] tests/i915/gem_ctx_persistence: Drop the clone subtest Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 43/81] tests/i915/gem_ctx_persistence: Convert to intel_ctx_t Jason Ekstrand
` (41 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
We're going to start disallowing non-trivial uses of setparam for
engines precisely to make races like this impossible. It'll also make
these test cases invalid.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_ctx_persistence.c | 210 -------------------------------
1 file changed, 210 deletions(-)
diff --git a/tests/i915/gem_ctx_persistence.c b/tests/i915/gem_ctx_persistence.c
index 91c301769..59077cc01 100644
--- a/tests/i915/gem_ctx_persistence.c
+++ b/tests/i915/gem_ctx_persistence.c
@@ -1085,191 +1085,6 @@ static void many_contexts(int i915)
gem_quiescent_gpu(i915);
}
-static void replace_engines(int i915, const struct intel_execution_engine2 *e)
-{
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
- .engines = {{ e->class, e->instance }}
- };
- struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_create(i915),
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&engines),
- .size = sizeof(engines),
- };
- igt_spin_t *spin[2];
- int64_t timeout;
-
- /*
- * Suppose the user tries to hide a hanging batch by replacing
- * the set of engines on the context so that it's not visible
- * at the time of closure? Then we must act when they replace
- * the engines!
- */
-
- gem_context_set_persistence(i915, param.ctx_id, false);
-
- gem_context_set_param(i915, &param);
- spin[0] = igt_spin_new(i915, param.ctx_id);
-
- gem_context_set_param(i915, &param);
- spin[1] = igt_spin_new(i915, param.ctx_id);
-
- gem_context_destroy(i915, param.ctx_id);
-
- timeout = reset_timeout_ms * NSEC_PER_MSEC;
- igt_assert_eq(gem_wait(i915, spin[1]->handle, &timeout), 0);
-
- timeout = reset_timeout_ms * NSEC_PER_MSEC;
- igt_assert_eq(gem_wait(i915, spin[0]->handle, &timeout), 0);
-
- igt_spin_free(i915, spin[1]);
- igt_spin_free(i915, spin[0]);
- gem_quiescent_gpu(i915);
-}
-
-static void race_set_engines(int i915, int in, int out)
-{
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
- .engines = {}
- };
- struct drm_i915_gem_context_param param = {
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&engines),
- .size = sizeof(engines),
- };
- igt_spin_t *spin;
-
- spin = igt_spin_new(i915);
- igt_spin_end(spin);
-
- while (read(in, &param.ctx_id, sizeof(param.ctx_id)) > 0) {
- if (!param.ctx_id)
- break;
-
- __gem_context_set_param(i915, &param);
-
- spin->execbuf.rsvd1 = param.ctx_id;
- __gem_execbuf(i915, &spin->execbuf);
-
- write(out, &param.ctx_id, sizeof(param.ctx_id));
- }
-
- igt_spin_free(i915, spin);
-}
-
-static void close_replace_race(int i915)
-{
- const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
- int fence = -1;
- int out[2], in[2];
-
- cleanup(i915);
-
- /*
- * If we time the submission of a hanging batch to one set of engines
- * and then simultaneously replace the engines in one thread, and
- * close the context in another, it might be possible for the kernel
- * to lose track of the old engines believing that the non-persisten
- * context is already closed and the hanging requests cancelled.
- *
- * Our challenge is try and expose any such race condition.
- */
-
- igt_assert(pipe(out) == 0);
- igt_assert(pipe(in) == 0);
- igt_fork(child, ncpus) {
- close(out[1]);
- close(in[0]);
- race_set_engines(i915, out[0], in[1]);
- }
- for (int i = 0; i < ncpus; i++)
- close(out[0]);
-
- igt_until_timeout(5) {
- igt_spin_t *spin;
- uint32_t ctx;
-
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, false);
-
- spin = igt_spin_new(i915, ctx, .flags = IGT_SPIN_FENCE_OUT);
- for (int i = 0; i < ncpus; i++)
- write(out[1], &ctx, sizeof(ctx));
-
- gem_context_destroy(i915, ctx);
- for (int i = 0; i < ncpus; i++)
- read(in[0], &ctx, sizeof(ctx));
-
- if (fence < 0) {
- fence = spin->out_fence;
- } else {
- int tmp;
-
- tmp = sync_fence_merge(fence, spin->out_fence);
- close(fence);
- close(spin->out_fence);
-
- fence = tmp;
- }
- spin->out_fence = -1;
- }
- close(in[0]);
-
- for (int i = 0; i < ncpus; i++) {
- uint32_t end = 0;
-
- write(out[1], &end, sizeof(end));
- }
- close(out[1]);
-
- if (sync_fence_wait(fence, MSEC_PER_SEC / 2)) {
- igt_debugfs_dump(i915, "i915_engine_info");
- igt_assert(sync_fence_wait(fence, MSEC_PER_SEC / 2) == 0);
- }
- close(fence);
-
- igt_waitchildren();
- gem_quiescent_gpu(i915);
-}
-
-static void replace_engines_hostile(int i915,
- const struct intel_execution_engine2 *e)
-{
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
- .engines = {{ e->class, e->instance }}
- };
- struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_create(i915),
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&engines),
- .size = sizeof(engines),
- };
- int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
- igt_spin_t *spin;
-
- /*
- * Suppose the user tries to hide a hanging batch by replacing
- * the set of engines on the context so that it's not visible
- * at the time of closure? Then we must act when they replace
- * the engines!
- */
-
- gem_context_set_persistence(i915, param.ctx_id, false);
-
- gem_context_set_param(i915, &param);
- spin = igt_spin_new(i915, param.ctx_id,
- .flags = IGT_SPIN_NO_PREEMPTION);
-
- param.size = 8;
- gem_context_set_param(i915, &param);
- gem_context_destroy(i915, param.ctx_id);
-
- igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
-
- igt_spin_free(i915, spin);
- gem_quiescent_gpu(i915);
-}
-
static void do_test(void (*test)(int i915, unsigned int engine),
int i915, unsigned int engine,
const char *name)
@@ -1422,31 +1237,6 @@ igt_main
smoketest(i915);
}
- /* Check interactions with set-engines */
- igt_subtest_group {
- const struct intel_execution_engine2 *e;
-
- igt_fixture
- gem_require_contexts(i915);
-
- igt_subtest_with_dynamic("replace") {
- __for_each_physical_engine(i915, e) {
- igt_dynamic_f("%s", e->name)
- replace_engines(i915, e);
- }
- }
-
- igt_subtest_with_dynamic("replace-hostile") {
- __for_each_physical_engine(i915, e) {
- igt_dynamic_f("%s", e->name)
- replace_engines_hostile(i915, e);
- }
- }
-
- igt_subtest("close-replace-race")
- close_replace_race(i915);
- }
-
igt_fixture {
close(i915);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 43/81] tests/i915/gem_ctx_persistence: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (41 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 42/81] tests/i915/gem_ctx_persistence: Drop the engine replace subtests Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 44/81] tests/i915/module_load: " Jason Ekstrand
` (40 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
lib/intel_ctx.c | 13 ++
lib/intel_ctx.h | 2 +
tests/i915/gem_ctx_persistence.c | 223 ++++++++++++++++---------------
3 files changed, 133 insertions(+), 105 deletions(-)
diff --git a/lib/intel_ctx.c b/lib/intel_ctx.c
index 4ababda8a..5ca8b4534 100644
--- a/lib/intel_ctx.c
+++ b/lib/intel_ctx.c
@@ -84,6 +84,7 @@ __context_create_cfg(int fd, const intel_ctx_cfg_t *cfg, uint32_t *ctx_id)
uint64_t ext_root = 0;
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, GEM_MAX_ENGINES);
struct drm_i915_gem_context_create_ext_setparam engines_param, vm_param;
+ struct drm_i915_gem_context_create_ext_setparam persist_param;
uint32_t i;
if (cfg->vm) {
@@ -99,6 +100,18 @@ __context_create_cfg(int fd, const intel_ctx_cfg_t *cfg, uint32_t *ctx_id)
add_user_ext(&ext_root, &vm_param.base);
}
+ if (cfg->nopersist) {
+ persist_param = (struct drm_i915_gem_context_create_ext_setparam) {
+ .base = {
+ .name = I915_CONTEXT_CREATE_EXT_SETPARAM,
+ },
+ .param = {
+ .param = I915_CONTEXT_PARAM_PERSISTENCE,
+ },
+ };
+ add_user_ext(&ext_root, &persist_param.base);
+ }
+
if (cfg->num_engines) {
memset(&engines, 0, sizeof(engines));
for (i = 0; i < cfg->num_engines; i++)
diff --git a/lib/intel_ctx.h b/lib/intel_ctx.h
index 054fecc4a..e34cefc14 100644
--- a/lib/intel_ctx.h
+++ b/lib/intel_ctx.h
@@ -16,6 +16,7 @@
* intel_ctx_cfg_t:
* @flags: Context create flags
* @vm: VM to inherit or 0 for using a per-context VM
+ * @nopersist: set I915_CONTEXT_PARAM_PERSISTENCE to 0
* @num_engines: Number of client-specified engines or 0 for legacy mode
* @engines: Client-specified engines
*
@@ -42,6 +43,7 @@
typedef struct intel_ctx_cfg {
uint32_t flags;
uint32_t vm;
+ bool nopersist;
unsigned int num_engines;
struct i915_engine_class_instance engines[GEM_MAX_ENGINES];
} intel_ctx_cfg_t;
diff --git a/tests/i915/gem_ctx_persistence.c b/tests/i915/gem_ctx_persistence.c
index 59077cc01..142bac28e 100644
--- a/tests/i915/gem_ctx_persistence.c
+++ b/tests/i915/gem_ctx_persistence.c
@@ -147,24 +147,32 @@ static void test_idempotent(int i915)
igt_assert_eq(p.value, expected);
}
-static void test_persistence(int i915, unsigned int engine)
+static const intel_ctx_t *
+ctx_create_persistence(int i915, const intel_ctx_cfg_t *base_cfg, bool persist)
+{
+ intel_ctx_cfg_t cfg = *base_cfg;
+ cfg.nopersist = !persist;
+ return intel_ctx_create(i915, &cfg);
+}
+
+static void test_persistence(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
igt_spin_t *spin;
int64_t timeout;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/*
* Default behaviour are contexts remain alive until their last active
* request is retired -- no early termination.
*/
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, true);
+ ctx = ctx_create_persistence(i915, cfg, true);
- spin = igt_spin_new(i915, ctx,
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), -ETIME);
@@ -178,24 +186,24 @@ static void test_persistence(int i915, unsigned int engine)
igt_spin_free(i915, spin);
}
-static void test_nonpersistent_cleanup(int i915, unsigned int engine)
+static void test_nonpersistent_cleanup(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/*
* A nonpersistent context is terminated immediately upon closure,
* any inflight request is cancelled.
*/
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, false);
+ ctx = ctx_create_persistence(i915, cfg, false);
- spin = igt_spin_new(i915, ctx,
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
igt_assert_eq(sync_fence_status(spin->out_fence), -EIO);
@@ -203,7 +211,8 @@ static void test_nonpersistent_cleanup(int i915, unsigned int engine)
igt_spin_free(i915, spin);
}
-static void test_nonpersistent_mixed(int i915, unsigned int engine)
+static void test_nonpersistent_mixed(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
int fence[3];
@@ -215,15 +224,14 @@ static void test_nonpersistent_mixed(int i915, unsigned int engine)
for (int i = 0; i < ARRAY_SIZE(fence); i++) {
igt_spin_t *spin;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, i & 1);
+ ctx = ctx_create_persistence(i915, cfg, i & 1);
- spin = igt_spin_new(i915, ctx,
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
fence[i] = spin->out_fence;
}
@@ -236,11 +244,12 @@ static void test_nonpersistent_mixed(int i915, unsigned int engine)
igt_assert_eq(sync_fence_wait(fence[1], 0), -ETIME);
}
-static void test_nonpersistent_hostile(int i915, unsigned int engine)
+static void test_nonpersistent_hostile(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/*
* If we cannot cleanly cancel the non-persistent context on closure,
@@ -248,24 +257,24 @@ static void test_nonpersistent_hostile(int i915, unsigned int engine)
* the requests and cleanup the context.
*/
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, false);
+ ctx = ctx_create_persistence(i915, cfg, false);
- spin = igt_spin_new(i915, ctx,
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_NO_PREEMPTION);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
igt_spin_free(i915, spin);
}
-static void test_nonpersistent_hostile_preempt(int i915, unsigned int engine)
+static void test_nonpersistent_hostile_preempt(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin[2];
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/*
* Double plus ungood.
@@ -278,24 +287,22 @@ static void test_nonpersistent_hostile_preempt(int i915, unsigned int engine)
igt_require(gem_scheduler_has_preemption(i915));
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, true);
- gem_context_set_priority(i915, ctx, 0);
- spin[0] = igt_spin_new(i915, ctx,
+ ctx = ctx_create_persistence(i915, cfg, true);
+ gem_context_set_priority(i915, ctx->id, 0);
+ spin[0] = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN));
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_spin_busywait_until_started(spin[0]);
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, false);
- gem_context_set_priority(i915, ctx, 1); /* higher priority than 0 */
- spin[1] = igt_spin_new(i915, ctx,
+ ctx = ctx_create_persistence(i915, cfg, false);
+ gem_context_set_priority(i915, ctx->id, 1); /* higher priority than 0 */
+ spin[1] = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_NO_PREEMPTION);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin[1]->handle, &timeout), 0);
@@ -303,24 +310,24 @@ static void test_nonpersistent_hostile_preempt(int i915, unsigned int engine)
igt_spin_free(i915, spin[0]);
}
-static void test_nonpersistent_hang(int i915, unsigned int engine)
+static void test_nonpersistent_hang(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
int64_t timeout = reset_timeout_ms * NSEC_PER_MSEC;
igt_spin_t *spin;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/*
* The user made a simple mistake and submitted an invalid batch,
* but fortunately under a nonpersistent context. Do we detect it?
*/
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, false);
+ ctx = ctx_create_persistence(i915, cfg, false);
- spin = igt_spin_new(i915, ctx,
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_INVALID_CS);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert_eq(gem_wait(i915, spin->handle, &timeout), 0);
@@ -460,14 +467,16 @@ static void test_noheartbeat_many(int i915, int count, unsigned int flags)
igt_assert(set_heartbeat(i915, e->full_name, 500));
for (int n = 0; n < ARRAY_SIZE(spin); n++) {
- uint32_t ctx;
+ const intel_ctx_t *ctx;
+
+ ctx = intel_ctx_create(i915, NULL);
- ctx = gem_context_create(i915);
- spin[n] = igt_spin_new(i915, ctx, .engine = eb_ring(e),
+ spin[n] = igt_spin_new(i915, .ctx = ctx,
+ .engine = eb_ring(e),
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_POLL_RUN |
flags));
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
igt_spin_busywait_until_started(spin[0]);
@@ -504,7 +513,7 @@ static void test_noheartbeat_close(int i915, unsigned int flags)
for_each_physical_ring(e, i915) {
igt_spin_t *spin;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
int err;
if (!set_preempt_timeout(i915, e->full_name, 250))
@@ -513,15 +522,16 @@ static void test_noheartbeat_close(int i915, unsigned int flags)
if (!set_heartbeat(i915, e->full_name, 0))
continue;
- ctx = gem_context_create(i915);
- spin = igt_spin_new(i915, ctx, .engine = eb_ring(e),
+ ctx = intel_ctx_create(i915, NULL);
+ spin = igt_spin_new(i915, .ctx = ctx,
+ .engine = eb_ring(e),
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_POLL_RUN |
flags));
igt_spin_busywait_until_started(spin);
igt_debug("Testing %s\n", e->full_name);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
err = wait_for_status(spin->out_fence, reset_timeout_ms);
set_heartbeat(i915, e->full_name, 2500);
@@ -578,22 +588,22 @@ static void alarm_handler(int sig)
{
}
-static void test_nonpersistent_queued(int i915, unsigned int engine)
+static void test_nonpersistent_queued(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
struct sigaction old_sa, sa = { .sa_handler = alarm_handler };
struct itimerval itv;
igt_spin_t *spin;
int fence = -1;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/*
* Not only must the immediate batch be cancelled, but
* all pending batches in the context.
*/
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, false);
- spin = igt_spin_new(i915, ctx,
+ ctx = ctx_create_persistence(i915, cfg, false);
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
@@ -621,7 +631,7 @@ static void test_nonpersistent_queued(int i915, unsigned int engine)
setitimer(ITIMER_REAL, &itv, NULL);
sigaction(SIGALRM, &old_sa, NULL);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert_eq(wait_for_status(spin->out_fence, reset_timeout_ms), -EIO);
igt_assert_eq(wait_for_status(fence, reset_timeout_ms), -EIO);
@@ -752,7 +762,8 @@ static void test_userptr(int i915)
gem_quiescent_gpu(i915);
}
-static void test_process_mixed(int pfd, unsigned int engine)
+static void test_process_mixed(int pfd, const intel_ctx_cfg_t *cfg,
+ unsigned int engine)
{
int fence[2], sv[2];
@@ -772,13 +783,10 @@ static void test_process_mixed(int pfd, unsigned int engine)
for (int persists = 0; persists <= 1; persists++) {
igt_spin_t *spin;
- uint32_t ctx;
-
- ctx = gem_context_create(i915);
- gem_context_copy_engines(pfd, 0, i915, ctx);
- gem_context_set_persistence(i915, ctx, persists);
+ const intel_ctx_t *ctx;
- spin = igt_spin_new(i915, ctx,
+ ctx = ctx_create_persistence(i915, cfg, persists);
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = engine,
.flags = IGT_SPIN_FENCE_OUT);
@@ -810,11 +818,12 @@ static void test_process_mixed(int pfd, unsigned int engine)
}
static void
-test_saturated_hostile(int i915, const struct intel_execution_engine2 *engine)
+test_saturated_hostile(int i915, const intel_ctx_t *base_ctx,
+ const struct intel_execution_engine2 *engine)
{
const struct intel_execution_engine2 *other;
igt_spin_t *spin;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
int fence = -1;
cleanup(i915);
@@ -831,11 +840,11 @@ test_saturated_hostile(int i915, const struct intel_execution_engine2 *engine)
* reset other users whenever they chose.]
*/
- __for_each_physical_engine(i915, other) {
+ for_each_ctx_engine(i915, base_ctx, other) {
if (other->flags == engine->flags)
continue;
- spin = igt_spin_new(i915,
+ spin = igt_spin_new(i915, .ctx = base_ctx,
.engine = other->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_FENCE_OUT));
@@ -855,15 +864,14 @@ test_saturated_hostile(int i915, const struct intel_execution_engine2 *engine)
}
igt_require(fence != -1);
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, false);
- spin = igt_spin_new(i915, ctx,
+ ctx = ctx_create_persistence(i915, &base_ctx->cfg, false);
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = engine->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
/* Hostile request requires a GPU reset to terminate */
igt_assert_eq(wait_for_status(spin->out_fence, reset_timeout_ms), -EIO);
@@ -950,7 +958,7 @@ static void test_processes(int i915)
gem_quiescent_gpu(i915);
}
-static void __smoker(int i915,
+static void __smoker(int i915, const intel_ctx_cfg_t *cfg,
unsigned int engine,
unsigned int timeout,
int expected)
@@ -958,11 +966,12 @@ static void __smoker(int i915,
igt_spin_t *spin;
int fence = -1;
int fd, extra;
+ const intel_ctx_t *ctx;
fd = gem_reopen_driver(i915);
- gem_context_copy_engines(i915, 0, fd, 0);
- gem_context_set_persistence(fd, 0, expected > 0);
- spin = igt_spin_new(fd, .engine = engine, .flags = IGT_SPIN_FENCE_OUT);
+ ctx = ctx_create_persistence(fd, cfg, expected > 0);
+ spin = igt_spin_new(fd, .ctx = ctx, .engine = engine,
+ .flags = IGT_SPIN_FENCE_OUT);
extra = rand() % 8;
while (extra--) {
@@ -974,6 +983,8 @@ static void __smoker(int i915,
fence = spin->execbuf.rsvd2 >> 32;
}
+ intel_ctx_destroy(fd, ctx);
+
close(fd);
flush_delayed_fput(i915);
@@ -990,18 +1001,18 @@ static void __smoker(int i915,
igt_spin_free(fd, spin);
}
-static void smoker(int i915,
+static void smoker(int i915, const intel_ctx_cfg_t *cfg,
unsigned int engine,
unsigned int timeout,
unsigned int *ctl)
{
while (!READ_ONCE(*ctl)) {
- __smoker(i915, engine, timeout, -EIO);
- __smoker(i915, engine, timeout, 1);
+ __smoker(i915, cfg, engine, timeout, -EIO);
+ __smoker(i915, cfg, engine, timeout, 1);
}
}
-static void smoketest(int i915)
+static void smoketest(int i915, const intel_ctx_cfg_t *cfg)
{
const int SMOKE_LOAD_FACTOR = 4;
const struct intel_execution_engine2 *e;
@@ -1021,9 +1032,9 @@ static void smoketest(int i915)
*ctl = 0;
igt_debug("Applying load factor: %d\n", i);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_cfg_engine(i915, cfg, e) {
igt_fork(child, i)
- smoker(i915,
+ smoker(i915, cfg,
e->flags,
i * reset_timeout_ms,
ctl);
@@ -1038,7 +1049,7 @@ static void smoketest(int i915)
gem_quiescent_gpu(i915);
}
-static void many_contexts(int i915)
+static void many_contexts(int i915, const intel_ctx_cfg_t *cfg)
{
const struct intel_execution_engine2 *e;
int64_t timeout = NSEC_PER_SEC;
@@ -1059,17 +1070,16 @@ static void many_contexts(int i915)
igt_spin_reset(spin);
igt_until_timeout(30) {
- __for_each_physical_engine(i915, e) {
- uint32_t ctx;
+ for_each_ctx_cfg_engine(i915, cfg, e) {
+ const intel_ctx_t *ctx;
- ctx = gem_context_clone_with_engines(i915, 0);
- gem_context_set_persistence(i915, ctx, false);
+ ctx = ctx_create_persistence(i915, cfg, false);
- spin->execbuf.rsvd1 = ctx;
+ spin->execbuf.rsvd1 = ctx->id;
spin->execbuf.flags &= ~63;
spin->execbuf.flags |= e->flags;
gem_execbuf(i915, &spin->execbuf);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
}
igt_debugfs_dump(i915, "i915_engine_info");
@@ -1085,9 +1095,10 @@ static void many_contexts(int i915)
gem_quiescent_gpu(i915);
}
-static void do_test(void (*test)(int i915, unsigned int engine),
- int i915, unsigned int engine,
- const char *name)
+static void do_test(void (*test)(int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine),
+ int i915, const intel_ctx_cfg_t *cfg,
+ unsigned int engine, const char *name)
{
#define ATTR "preempt_timeout_ms"
int timeout = -1;
@@ -1101,7 +1112,7 @@ static void do_test(void (*test)(int i915, unsigned int engine),
reset_timeout_ms = 200;
}
- test(i915, engine);
+ test(i915, cfg, engine);
if (timeout != -1) {
gem_engine_property_printf(i915, name, ATTR, "%d", timeout);
@@ -1122,7 +1133,8 @@ igt_main
{
struct {
const char *name;
- void (*func)(int fd, unsigned int engine);
+ void (*func)(int fd, const intel_ctx_cfg_t *cfg,
+ unsigned int engine);
} *test, tests[] = {
{ "persistence", test_persistence },
{ "cleanup", test_nonpersistent_cleanup },
@@ -1134,6 +1146,7 @@ igt_main
{ "hang", test_nonpersistent_hang },
{ NULL, NULL },
};
+ const intel_ctx_t *ctx;
igt_fixture {
i915 = drm_open_driver(DRIVER_INTEL);
@@ -1145,6 +1158,8 @@ igt_main
enable_hangcheck(i915);
igt_install_exit_handler(exit_handler);
+ ctx = intel_ctx_create_all_physical(i915);
+
igt_require(has_persistence(i915));
igt_allow_hang(i915, 0, 0);
}
@@ -1189,18 +1204,16 @@ igt_main
for (test = tests; test->name; test++) {
igt_subtest_with_dynamic_f("legacy-engines-%s",
test->name) {
+ const intel_ctx_cfg_t empty_cfg = {};
for_each_physical_ring(e, i915) {
igt_dynamic_f("%s", e->name) {
- do_test(test->func,
- i915, eb_ring(e),
+ do_test(test->func, i915,
+ &empty_cfg, eb_ring(e),
e->full_name);
}
}
}
}
-
- /* Assert things are under control. */
- igt_assert(!gem_context_has_engine_map(i915, 0));
}
/* New way of selecting engines. */
@@ -1213,10 +1226,10 @@ igt_main
for (test = tests; test->name; test++) {
igt_subtest_with_dynamic_f("engines-%s", test->name) {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name) {
- do_test(test->func,
- i915, e->flags,
+ do_test(test->func, i915,
+ &ctx->cfg, e->flags,
e->name);
}
}
@@ -1224,17 +1237,17 @@ igt_main
}
igt_subtest_with_dynamic_f("saturated-hostile") {
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- test_saturated_hostile(i915, e);
+ test_saturated_hostile(i915, ctx, e);
}
}
igt_subtest("many-contexts")
- many_contexts(i915);
+ many_contexts(i915, &ctx->cfg);
igt_subtest("smoketest")
- smoketest(i915);
+ smoketest(i915, &ctx->cfg);
}
igt_fixture {
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 44/81] tests/i915/module_load: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (42 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 43/81] tests/i915/gem_ctx_persistence: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 45/81] tests/i915/pm_rc6_residency: " Jason Ekstrand
` (39 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/i915_module_load.c | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/tests/i915/i915_module_load.c b/tests/i915/i915_module_load.c
index 281454a5b..98ceb5d85 100644
--- a/tests/i915/i915_module_load.c
+++ b/tests/i915/i915_module_load.c
@@ -64,6 +64,7 @@ static void store_all(int i915)
.buffer_count = 2,
};
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int reloc_sz = sizeof(uint32_t);
unsigned int nengine, value;
void *cs;
@@ -88,7 +89,9 @@ static void store_all(int i915)
nengine = 0;
cs = gem_mmap__device_coherent(i915, obj[1].handle, 0, sz, PROT_WRITE);
- __for_each_physical_engine(i915, e) {
+
+ ctx = intel_ctx_create_all_physical(i915);
+ for_each_ctx_engine(i915, ctx, e) {
uint64_t addr;
igt_assert(reloc.presumed_offset != -1);
@@ -103,6 +106,7 @@ static void store_all(int i915)
if (gen < 6)
execbuf.flags |= I915_EXEC_SECURE;
execbuf.flags |= I915_EXEC_NO_RELOC | I915_EXEC_HANDLE_LUT;
+ execbuf.rsvd1 = ctx->id;
memcpy(cs + execbuf.batch_start_offset, batch, sizeof(batch));
memcpy(cs + reloc.offset, &addr, reloc_sz);
@@ -121,6 +125,7 @@ static void store_all(int i915)
memset(engines, 0xdeadbeef, sizeof(engines));
gem_read(i915, obj[0].handle, 0, engines, nengine * sizeof(engines[0]));
gem_close(i915, obj[0].handle);
+ intel_ctx_destroy(i915, ctx);
for (i = 0; i < nengine; i++)
igt_assert_eq_u32(engines[i], i);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 45/81] tests/i915/pm_rc6_residency: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (43 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 44/81] tests/i915/module_load: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 46/81] tests/i915/gem_cs_tlb: " Jason Ekstrand
` (38 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/i915_pm_rc6_residency.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/tests/i915/i915_pm_rc6_residency.c b/tests/i915/i915_pm_rc6_residency.c
index bfbe4ab01..d1cce474e 100644
--- a/tests/i915/i915_pm_rc6_residency.c
+++ b/tests/i915/i915_pm_rc6_residency.c
@@ -455,6 +455,7 @@ static void rc6_fence(int i915)
const int tolerance = 20; /* Some RC6 is better than none! */
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
struct power_sample sample[2];
unsigned long slept;
uint64_t rc6, ts[2];
@@ -484,14 +485,15 @@ static void rc6_fence(int i915)
assert_within_epsilon(rc6, ts[1] - ts[0], 5);
/* Submit but delay execution, we should be idle and conserving power */
- __for_each_physical_engine(i915, e) {
+ ctx = intel_ctx_create_all_physical(i915);
+ for_each_ctx_engine(i915, ctx, e) {
igt_spin_t *spin;
int timeline;
int fence;
timeline = sw_sync_timeline_create();
fence = sw_sync_timeline_create_fence(timeline, 1);
- spin = igt_spin_new(i915,
+ spin = igt_spin_new(i915, .ctx = ctx,
.engine = e->flags,
.fence = fence,
.flags = IGT_SPIN_FENCE_IN);
@@ -519,6 +521,7 @@ static void rc6_fence(int i915)
assert_within_epsilon(rc6, ts[1] - ts[0], tolerance);
gem_quiescent_gpu(i915);
}
+ intel_ctx_destroy(i915, ctx);
rapl_close(&rapl);
close(fd);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 46/81] tests/i915/gem_cs_tlb: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (44 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 45/81] tests/i915/pm_rc6_residency: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 47/81] tests/core_hotplug: " Jason Ekstrand
` (37 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
v2 (Zbigniew Kempczyński):
- Don't NULL-init ctx
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_cs_tlb.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/tests/i915/gem_cs_tlb.c b/tests/i915/gem_cs_tlb.c
index dec9c107e..7a8886650 100644
--- a/tests/i915/gem_cs_tlb.c
+++ b/tests/i915/gem_cs_tlb.c
@@ -91,7 +91,8 @@ mmap_coherent(int fd, uint32_t handle, int size)
return ptr;
}
-static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
+static void run_on_ring(int fd, const intel_ctx_t *ctx,
+ unsigned ring_id, const char *ring_name)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 execobj;
@@ -117,6 +118,7 @@ static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(&execobj);
execbuf.buffer_count = 1;
+ execbuf.rsvd1 = ctx->id;
execbuf.flags = ring_id;
/* Execute once to allocate a gtt-offset */
@@ -144,17 +146,19 @@ static void run_on_ring(int fd, unsigned ring_id, const char *ring_name)
igt_main
{
const struct intel_execution_engine2 *e;
+ const intel_ctx_t *ctx;
int fd = -1;
igt_fixture {
fd = drm_open_driver(DRIVER_INTEL);
igt_require_gem(fd);
+ ctx = intel_ctx_create_all_physical(fd);
}
igt_subtest_with_dynamic("engines") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_engine(fd, ctx, e) {
igt_dynamic_f("%s", e->name)
- run_on_ring(fd, e->flags, e->name);
+ run_on_ring(fd, ctx, e->flags, e->name);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 47/81] tests/core_hotplug: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (45 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 46/81] tests/i915/gem_cs_tlb: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 48/81] tests/i915/gem_exec_balancer: Stop cloning engines Jason Ekstrand
` (36 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/core_hotunplug.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/tests/core_hotunplug.c b/tests/core_hotunplug.c
index 878efcc7b..2d73e27f2 100644
--- a/tests/core_hotunplug.c
+++ b/tests/core_hotunplug.c
@@ -292,6 +292,7 @@ static int local_i915_healthcheck(int i915, const char *prefix)
.buffer_count = 1,
};
const struct intel_execution_engine2 *engine;
+ const intel_ctx_t *ctx;
int fence = -1, err = 0, status = 1;
local_debug("%s%s\n", prefix, "running i915 GPU healthcheck");
@@ -303,7 +304,9 @@ static int local_i915_healthcheck(int i915, const char *prefix)
gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
/* As soon as a fence is open, don't fail before closing it */
- __for_each_physical_engine(i915, engine) {
+ ctx = intel_ctx_create_all_physical(i915);
+ for_each_ctx_engine(i915, ctx, engine) {
+ execbuf.rsvd1 = ctx->id;
execbuf.flags = engine->flags | I915_EXEC_FENCE_OUT;
err = __gem_execbuf_wr(i915, &execbuf);
if (igt_warn_on_f(err < 0, "__gem_execbuf_wr() returned %d\n",
@@ -317,6 +320,7 @@ static int local_i915_healthcheck(int i915, const char *prefix)
break;
}
}
+ intel_ctx_destroy(i915, ctx);
if (fence >= 0) {
status = sync_fence_wait(fence, -1);
if (igt_warn_on_f(status < 0, "sync_fence_wait() returned %d\n",
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 48/81] tests/i915/gem_exec_balancer: Stop cloning engines
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (46 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 47/81] tests/core_hotplug: " Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 49/81] tests/i915/gem_exec_balancer: Don't reset engines on a context (v2) Jason Ekstrand
` (35 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
We have the list of engines and bonding info in each case. It's easy
enough to create a context instead of clone.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_balancer.c | 21 +++++++++++----------
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 286c11d50..2452fe93e 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -827,8 +827,7 @@ static void bonded_slice(int i915)
igt_fork(child, count + 1) { /* C: arbitrary background load */
igt_list_del(&spin->link);
- ctx = gem_context_clone(i915, ctx,
- I915_CONTEXT_CLONE_ENGINES, 0);
+ ctx = load_balancer_create(i915, siblings, count);
while (!READ_ONCE(*stop)) {
spin = igt_spin_new(i915,
@@ -2280,7 +2279,8 @@ static int __execbuf(int i915, struct drm_i915_gem_execbuffer2 *execbuf)
return err;
}
-static uint32_t *sema(int i915, uint32_t ctx)
+static uint32_t *sema(int i915, struct i915_engine_class_instance *ci,
+ unsigned int count)
{
uint32_t *ctl;
struct drm_i915_gem_exec_object2 batch = {
@@ -2291,7 +2291,7 @@ static uint32_t *sema(int i915, uint32_t ctx)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = gem_context_clone_with_engines(i915, ctx),
+ .rsvd1 = load_balancer_create(i915, ci, count),
};
for (int n = 1; n <= 32; n++) {
@@ -2313,12 +2313,14 @@ static uint32_t *sema(int i915, uint32_t ctx)
return ctl;
}
-static void __waits(int i915, int timeout, uint32_t ctx, unsigned int count)
+static void __waits(int i915, int timeout,
+ struct i915_engine_class_instance *ci,
+ unsigned int count)
{
uint32_t *semaphores[count + 1];
for (int i = 0; i <= count; i++)
- semaphores[i] = sema(i915, ctx);
+ semaphores[i] = sema(i915, ci, count);
igt_until_timeout(timeout) {
int i = rand() % (count + 1);
@@ -2330,7 +2332,7 @@ static void __waits(int i915, int timeout, uint32_t ctx, unsigned int count)
if ((*semaphores[i] += rand() % 32) >= 32) {
*semaphores[i] = 0xffffffff;
munmap(semaphores[i], 4096);
- semaphores[i] = sema(i915, ctx);
+ semaphores[i] = sema(i915, ci, count);
}
}
@@ -2359,7 +2361,7 @@ static void waits(int i915, int timeout)
if (count > 1) {
uint32_t ctx = load_balancer_create(i915, ci, count);
- __waits(i915, timeout, ctx, count);
+ __waits(i915, timeout, ci, count);
gem_context_destroy(i915, ctx);
}
@@ -2443,8 +2445,7 @@ static void nop(int i915)
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
.flags = child + 1,
- .rsvd1 = gem_context_clone(i915, ctx,
- I915_CONTEXT_CLONE_ENGINES, 0),
+ .rsvd1 = load_balancer_create(i915, ci, count),
};
struct timespec tv = {};
unsigned long nops;
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 49/81] tests/i915/gem_exec_balancer: Don't reset engines on a context (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (47 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 48/81] tests/i915/gem_exec_balancer: Stop cloning engines Jason Ekstrand
@ 2021-07-07 14:43 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 50/81] tests/i915/gem_exec_balancer: Stop munging ctx0 engines Jason Ekstrand
` (34 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:43 UTC (permalink / raw)
To: igt-dev
Instead of resetting the set of engines to break implicit dependencies,
just use a new context. Everything interesting gets swapped out when we
set CONTEXT_PARAM_ENGINES so this is equivalent as long as
SINGLE_TIMELINE isn't used. If SINGLE_TIMELINE is used then the
timeline is associated with the context and that gets lost when we set
CONTEXT_PARAM_ENGINES. The only test where this flag is used is the
bonded tests but those wait for the GPU to idle between iterations so
replacing the timeline is a no-op.
v2 (Jason Ekstrand):
- Improve comments and commit message
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_balancer.c | 172 ++++++++++++++++++---------------
1 file changed, 94 insertions(+), 78 deletions(-)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 2452fe93e..13b1495de 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -28,6 +28,7 @@
#include "i915/gem.h"
#include "i915/gem_create.h"
+#include "i915/gem_vm.h"
#include "igt.h"
#include "igt_gt.h"
#include "igt_perf.h"
@@ -123,6 +124,21 @@ static bool has_perf_engines(int i915)
return i915_perf_type_id(i915);
}
+static int __set_vm(int i915, uint32_t ctx, uint32_t vm)
+{
+ struct drm_i915_gem_context_param p = {
+ .ctx_id = ctx,
+ .param = I915_CONTEXT_PARAM_VM,
+ .value = vm
+ };
+ return __gem_context_set_param(i915, &p);
+}
+
+static void set_vm(int i915, uint32_t ctx, uint32_t vm)
+{
+ igt_assert_eq(__set_vm(i915, ctx, vm), 0);
+}
+
static int __set_engines(int i915, uint32_t ctx,
const struct i915_engine_class_instance *ci,
unsigned int count)
@@ -544,8 +560,6 @@ static void check_individual_engine(int i915,
static void individual(int i915)
{
- uint32_t ctx;
-
/*
* I915_CONTEXT_PARAM_ENGINE allows us to index into the user
* supplied array from gem_execbuf(). Our check is to build the
@@ -554,8 +568,6 @@ static void individual(int i915)
* was busy.
*/
- ctx = gem_context_create(i915);
-
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *ci;
unsigned int count;
@@ -565,17 +577,20 @@ static void individual(int i915)
continue;
for (int pass = 0; pass < count; pass++) { /* approx. count! */
+ uint32_t ctx;
+
igt_assert(sizeof(*ci) == sizeof(int));
igt_permute_array(ci, count, igt_exchange_int);
+ ctx = gem_context_create(i915);
set_load_balancer(i915, ctx, ci, count, NULL);
for (unsigned int n = 0; n < count; n++)
check_individual_engine(i915, ctx, ci, n);
+ gem_context_destroy(i915, ctx);
}
free(ci);
}
- gem_context_destroy(i915, ctx);
gem_quiescent_gpu(i915);
}
@@ -584,7 +599,7 @@ static void bonded(int i915, unsigned int flags)
{
I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[16], 1);
struct i915_engine_class_instance *master_engines;
- uint32_t master;
+ uint32_t vm;
/*
* I915_CONTEXT_PARAM_ENGINE provides an extension that allows us
@@ -592,7 +607,7 @@ static void bonded(int i915, unsigned int flags)
* request submitted to another engine.
*/
- master = gem_queue_create(i915);
+ vm = gem_vm_create(i915);
memset(bonds, 0, sizeof(bonds));
for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
@@ -605,7 +620,7 @@ static void bonded(int i915, unsigned int flags)
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *siblings;
unsigned int count, limit, *order;
- uint32_t ctx;
+ uint32_t master, ctx;
int n;
siblings = list_engines(i915, 1u << class, &count);
@@ -618,6 +633,8 @@ static void bonded(int i915, unsigned int flags)
}
master_engines = list_engines(i915, ~(1u << class), &limit);
+ master = gem_context_create_ext(i915, I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE, 0);
+ set_vm(i915, master, vm);
set_engines(i915, master, master_engines, limit);
limit = min(count, limit);
@@ -627,9 +644,9 @@ static void bonded(int i915, unsigned int flags)
bonds[n].engines[0] = siblings[n];
}
- ctx = gem_context_clone(i915,
- master, I915_CONTEXT_CLONE_VM,
- I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
+ ctx = gem_context_create_ext(i915, I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE, 0);
+ set_vm(i915, ctx, vm);
+ set_engines(i915, ctx, master_engines, limit);
set_load_balancer(i915, ctx, siblings, count, &bonds[limit - 1]);
order = malloc(sizeof(*order) * 8 * limit);
@@ -711,12 +728,11 @@ static void bonded(int i915, unsigned int flags)
}
free(order);
+ gem_context_destroy(i915, master);
gem_context_destroy(i915, ctx);
free(master_engines);
free(siblings);
}
-
- gem_context_destroy(i915, master);
}
#define VIRTUAL_ENGINE (1u << 0)
@@ -761,7 +777,6 @@ static uint32_t create_semaphore_to_spinner(int i915, igt_spin_t *spin)
static void bonded_slice(int i915)
{
- uint32_t ctx;
int *stop;
/*
@@ -774,13 +789,12 @@ static void bonded_slice(int i915)
stop = mmap(0, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
igt_assert(stop != MAP_FAILED);
- ctx = gem_context_create(i915); /* NB timeline per engine */
-
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *siblings;
struct drm_i915_gem_exec_object2 obj[3] = {};
struct drm_i915_gem_execbuffer2 eb = {};
unsigned int count;
+ uint32_t ctx;
igt_spin_t *spin;
siblings = list_engines(i915, 1u << class, &count);
@@ -804,6 +818,7 @@ static void bonded_slice(int i915)
* XXX add explicit bonding options for A->B
*/
+ ctx = gem_context_create(i915); /* NB timeline per engine */
set_load_balancer(i915, ctx, siblings, count, NULL);
spin = __igt_spin_new(i915,
@@ -865,13 +880,13 @@ static void bonded_slice(int i915)
gem_close(i915, obj[2].handle);
igt_spin_free(i915, spin);
+ gem_context_destroy(i915, ctx);
}
- gem_context_destroy(i915, ctx);
munmap(stop, 4096);
}
-static void __bonded_chain(int i915, uint32_t ctx,
+static void __bonded_chain(int i915,
const struct i915_engine_class_instance *siblings,
unsigned int count)
{
@@ -882,12 +897,14 @@ static void __bonded_chain(int i915, uint32_t ctx,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = ctx,
};
igt_spin_t *spin;
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
+ uint32_t ctx;
/* A: spin forever on engine 1 */
+
+ ctx = gem_context_create(i915);
set_load_balancer(i915, ctx, siblings, count, NULL);
if (priorities[i] < 0)
gem_context_set_priority(i915, ctx, priorities[i]);
@@ -897,17 +914,20 @@ static void __bonded_chain(int i915, uint32_t ctx,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin);
- gem_context_set_priority(i915, ctx, 0);
/*
- * Note we replace the timelines between each execbuf, so
- * that any pair of requests on the same engine could be
- * re-ordered by the scheduler -- if the dependency tracking
- * is subpar.
+ * Note we replace the contexts and their timelines between
+ * each execbuf, so that any pair of requests on the same
+ * engine could be re-ordered by the scheduler -- if the
+ * dependency tracking is subpar.
*/
/* B: waits for A on engine 2 */
+ gem_context_destroy(i915, ctx);
+ ctx = gem_context_create(i915);
+ gem_context_set_priority(i915, ctx, 0);
set_load_balancer(i915, ctx, siblings, count, NULL);
+ execbuf.rsvd1 = ctx;
execbuf.rsvd2 = spin->out_fence;
execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
execbuf.flags |= 2; /* opposite engine to spinner */
@@ -916,7 +936,6 @@ static void __bonded_chain(int i915, uint32_t ctx,
/* B': run in parallel with B on engine 1, i.e. not before A! */
if (priorities[i] > 0)
gem_context_set_priority(i915, ctx, priorities[i]);
- set_load_balancer(i915, ctx, siblings, count, NULL);
execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.flags |= 1; /* same engine as spinner */
execbuf.rsvd2 >>= 32;
@@ -938,6 +957,7 @@ static void __bonded_chain(int i915, uint32_t ctx,
igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 0);
igt_spin_free(i915, spin);
+ gem_context_destroy(i915, ctx);
gem_sync(i915, batch.handle);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
@@ -950,7 +970,7 @@ static void __bonded_chain(int i915, uint32_t ctx,
gem_close(i915, batch.handle);
}
-static void __bonded_chain_inv(int i915, uint32_t ctx,
+static void __bonded_chain_inv(int i915,
const struct i915_engine_class_instance *siblings,
unsigned int count)
{
@@ -961,12 +981,14 @@ static void __bonded_chain_inv(int i915, uint32_t ctx,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = ctx,
};
igt_spin_t *spin;
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
+ uint32_t ctx;
+
/* A: spin forever on engine 1 */
+ ctx = gem_context_create(i915);
set_load_balancer(i915, ctx, siblings, count, NULL);
if (priorities[i] < 0)
gem_context_set_priority(i915, ctx, priorities[i]);
@@ -976,17 +998,13 @@ static void __bonded_chain_inv(int i915, uint32_t ctx,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin);
- gem_context_set_priority(i915, ctx, 0);
-
- /*
- * Note we replace the timelines between each execbuf, so
- * that any pair of requests on the same engine could be
- * re-ordered by the scheduler -- if the dependency tracking
- * is subpar.
- */
/* B: waits for A on engine 1 */
+ gem_context_destroy(i915, ctx);
+ ctx = gem_context_create(i915);
+ gem_context_set_priority(i915, ctx, 0);
set_load_balancer(i915, ctx, siblings, count, NULL);
+ execbuf.rsvd1 = ctx;
execbuf.rsvd2 = spin->out_fence;
execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
execbuf.flags |= 1; /* same engine as spinner */
@@ -995,7 +1013,6 @@ static void __bonded_chain_inv(int i915, uint32_t ctx,
/* B': run in parallel with B on engine 2, i.e. not before A! */
if (priorities[i] > 0)
gem_context_set_priority(i915, ctx, priorities[i]);
- set_load_balancer(i915, ctx, siblings, count, NULL);
execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.flags |= 2; /* opposite engine to spinner */
execbuf.rsvd2 >>= 32;
@@ -1018,6 +1035,7 @@ static void __bonded_chain_inv(int i915, uint32_t ctx,
igt_spin_free(i915, spin);
gem_sync(i915, batch.handle);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 1);
@@ -1031,32 +1049,26 @@ static void __bonded_chain_inv(int i915, uint32_t ctx,
static void bonded_chain(int i915)
{
- uint32_t ctx;
-
/*
* Given batches A, B and B', where B and B' are a bonded pair, with
* B' depending on B with a submit fence and B depending on A as
* an ordinary fence; prove B' cannot complete before A.
*/
- ctx = gem_context_create(i915);
-
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *siblings;
unsigned int count;
siblings = list_engines(i915, 1u << class, &count);
if (count > 1) {
- __bonded_chain(i915, ctx, siblings, count);
- __bonded_chain_inv(i915, ctx, siblings, count);
+ __bonded_chain(i915, siblings, count);
+ __bonded_chain_inv(i915, siblings, count);
}
free(siblings);
}
-
- gem_context_destroy(i915, ctx);
}
-static void __bonded_sema(int i915, uint32_t ctx,
+static void __bonded_sema(int i915,
const struct i915_engine_class_instance *siblings,
unsigned int count)
{
@@ -1067,11 +1079,12 @@ static void __bonded_sema(int i915, uint32_t ctx,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = ctx,
};
igt_spin_t *spin;
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
+ uint32_t ctx;
+
/* A: spin forever on separate render engine */
spin = igt_spin_new(i915,
.flags = (IGT_SPIN_POLL_RUN |
@@ -1079,23 +1092,28 @@ static void __bonded_sema(int i915, uint32_t ctx,
igt_spin_busywait_until_started(spin);
/*
- * Note we replace the timelines between each execbuf, so
- * that any pair of requests on the same engine could be
- * re-ordered by the scheduler -- if the dependency tracking
- * is subpar.
+ * Note we replace the contexts and their timelines between
+ * each execbuf, so that any pair of requests on the same
+ * engine could be re-ordered by the scheduler -- if the
+ * dependency tracking is subpar.
*/
/* B: waits for A (using a semaphore) on engine 1 */
+ ctx = gem_context_create(i915);
set_load_balancer(i915, ctx, siblings, count, NULL);
+ execbuf.rsvd1 = ctx;
execbuf.rsvd2 = spin->out_fence;
execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
execbuf.flags |= 1;
gem_execbuf_wr(i915, &execbuf);
/* B': run in parallel with B on engine 2 */
+ gem_context_destroy(i915, ctx);
+ ctx = gem_context_create(i915);
if (priorities[i] > 0)
gem_context_set_priority(i915, ctx, priorities[i]);
set_load_balancer(i915, ctx, siblings, count, NULL);
+ execbuf.rsvd1 = ctx;
execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.flags |= 2;
execbuf.rsvd2 >>= 32;
@@ -1118,6 +1136,7 @@ static void __bonded_sema(int i915, uint32_t ctx,
igt_spin_free(i915, spin);
gem_sync(i915, batch.handle);
+ gem_context_destroy(i915, ctx);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 1);
@@ -1131,8 +1150,6 @@ static void __bonded_sema(int i915, uint32_t ctx,
static void bonded_semaphore(int i915)
{
- uint32_t ctx;
-
/*
* Given batches A, B and B', where B and B' are a bonded pair, with
* B' depending on B with a submit fence and B depending on A as
@@ -1142,19 +1159,15 @@ static void bonded_semaphore(int i915)
*/
igt_require(gem_scheduler_has_semaphores(i915));
- ctx = gem_context_create(i915);
-
for (int class = 1; class < 32; class++) {
struct i915_engine_class_instance *siblings;
unsigned int count;
siblings = list_engines(i915, 1u << class, &count);
if (count > 1)
- __bonded_sema(i915, ctx, siblings, count);
+ __bonded_sema(i915, siblings, count);
free(siblings);
}
-
- gem_context_destroy(i915, ctx);
}
static void __bonded_pair(int i915,
@@ -1805,7 +1818,7 @@ static void indices(int i915)
gem_quiescent_gpu(i915);
}
-static void __bonded_early(int i915, uint32_t ctx,
+static void __bonded_early(int i915,
const struct i915_engine_class_instance *siblings,
unsigned int count,
unsigned int flags)
@@ -1818,8 +1831,8 @@ static void __bonded_early(int i915, uint32_t ctx,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = ctx,
};
+ uint32_t vm, ctx;
igt_spin_t *spin;
memset(bonds, 0, sizeof(bonds));
@@ -1833,6 +1846,11 @@ static void __bonded_early(int i915, uint32_t ctx,
bonds[n].engines[0] = siblings[(n + 1) % count];
}
+ /* We share a VM so that the spin cancel will work without a reloc */
+ vm = gem_vm_create(i915);
+
+ ctx = gem_context_create(i915);
+ set_vm(i915, ctx, vm);
set_load_balancer(i915, ctx, siblings, count,
flags & VIRTUAL_ENGINE ? &bonds : NULL);
@@ -1843,6 +1861,7 @@ static void __bonded_early(int i915, uint32_t ctx,
.flags = IGT_SPIN_NO_PREEMPTION);
/* B: runs after A on engine 1 */
+ execbuf.rsvd1 = ctx;
execbuf.flags = I915_EXEC_FENCE_OUT;
execbuf.flags |= spin->execbuf.flags & 63;
gem_execbuf_wr(i915, &execbuf);
@@ -1860,9 +1879,14 @@ static void __bonded_early(int i915, uint32_t ctx,
igt_debugfs_dump(i915, "i915_engine_info");
- /* D: cancel the spinner from engine 2 (new timeline) */
- set_load_balancer(i915, ctx, siblings, count, NULL);
+ /* D: cancel the spinner from engine 2 (new context) */
+ gem_context_destroy(i915, ctx);
+ ctx = gem_context_create(i915);
+ set_vm(i915, ctx, vm);
+ set_load_balancer(i915, ctx, siblings, count,
+ flags & VIRTUAL_ENGINE ? &bonds : NULL);
batch.handle = create_semaphore_to_spinner(i915, spin);
+ execbuf.rsvd1 = ctx;
execbuf.flags = 0;
if(!(flags & VIRTUAL_ENGINE))
execbuf.flags |= 2;
@@ -1879,14 +1903,13 @@ static void __bonded_early(int i915, uint32_t ctx,
close(execbuf.rsvd2);
close(execbuf.rsvd2 >> 32);
+ gem_context_destroy(i915, ctx);
gem_close(i915, handle);
igt_spin_free(i915, spin);
}
static void bonded_early(int i915)
{
- uint32_t ctx;
-
/*
* Our goal is to start the bonded payloads at roughly the same time.
* We do not want to start the secondary batch too early as it will
@@ -1902,21 +1925,17 @@ static void bonded_early(int i915)
* hang.
*/
- ctx = gem_context_create(i915);
-
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *siblings;
unsigned int count;
siblings = list_engines(i915, 1u << class, &count);
if (count > 1) {
- __bonded_early(i915, ctx, siblings, count, 0);
- __bonded_early(i915, ctx, siblings, count, VIRTUAL_ENGINE);
+ __bonded_early(i915, siblings, count, 0);
+ __bonded_early(i915, siblings, count, VIRTUAL_ENGINE);
}
free(siblings);
}
-
- gem_context_destroy(i915, ctx);
}
static void busy(int i915)
@@ -2571,7 +2590,7 @@ static void ping(int i915, uint32_t ctx, unsigned int engine)
static void semaphore(int i915)
{
- uint32_t block[2], scratch;
+ uint32_t scratch;
igt_spin_t *spin[3];
/*
@@ -2581,15 +2600,12 @@ static void semaphore(int i915)
*/
igt_require(gem_scheduler_has_preemption(i915));
- block[0] = gem_context_create(i915);
- block[1] = gem_context_create(i915);
-
scratch = gem_create(i915, 4096);
spin[2] = igt_spin_new(i915, .dependency = scratch);
for (int class = 1; class < 32; class++) {
struct i915_engine_class_instance *ci;
unsigned int count;
- uint32_t vip;
+ uint32_t block[2], vip;
ci = list_engines(i915, 1u << class, &count);
if (!ci)
@@ -2602,6 +2618,7 @@ static void semaphore(int i915)
count = ARRAY_SIZE(block);
for (int i = 0; i < count; i++) {
+ block[i] = gem_context_create(i915);
set_load_balancer(i915, block[i], ci, count, NULL);
spin[i] = __igt_spin_new(i915,
.ctx_id = block[i],
@@ -2617,17 +2634,16 @@ static void semaphore(int i915)
ping(i915, vip, 0);
gem_context_destroy(i915, vip);
- for (int i = 0; i < count; i++)
+ for (int i = 0; i < count; i++) {
igt_spin_free(i915, spin[i]);
+ gem_context_destroy(i915, block[i]);
+ }
free(ci);
}
igt_spin_free(i915, spin[2]);
gem_close(i915, scratch);
- gem_context_destroy(i915, block[1]);
- gem_context_destroy(i915, block[0]);
-
gem_quiescent_gpu(i915);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 50/81] tests/i915/gem_exec_balancer: Stop munging ctx0 engines
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (48 preceding siblings ...)
2021-07-07 14:43 ` [igt-dev] [PATCH i-g-t 49/81] tests/i915/gem_exec_balancer: Don't reset engines on a context (v2) Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 51/81] tests/i915/gem_exec_balancer: Drop bonded tests Jason Ekstrand
` (33 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_balancer.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 13b1495de..6ef8433aa 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -3088,7 +3088,7 @@ static void noheartbeat(int i915)
* heartbeat has already been disabled.
*/
- __for_each_physical_engine(i915, e)
+ for_each_physical_engine(i915, e)
set_heartbeat(i915, e->name, 0);
for (int class = 0; class < 32; class++) {
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 51/81] tests/i915/gem_exec_balancer: Drop bonded tests
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (49 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 50/81] tests/i915/gem_exec_balancer: Stop munging ctx0 engines Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 52/81] lib/intel_ctx: Add load balancing support (v2) Jason Ekstrand
` (32 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Support for combining bonding and balancing is being removed from i915
because it's not used by media.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_balancer.c | 330 ---------------------------------
1 file changed, 330 deletions(-)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 6ef8433aa..e6f742310 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -124,21 +124,6 @@ static bool has_perf_engines(int i915)
return i915_perf_type_id(i915);
}
-static int __set_vm(int i915, uint32_t ctx, uint32_t vm)
-{
- struct drm_i915_gem_context_param p = {
- .ctx_id = ctx,
- .param = I915_CONTEXT_PARAM_VM,
- .value = vm
- };
- return __gem_context_set_param(i915, &p);
-}
-
-static void set_vm(int i915, uint32_t ctx, uint32_t vm)
-{
- igt_assert_eq(__set_vm(i915, ctx, vm), 0);
-}
-
static int __set_engines(int i915, uint32_t ctx,
const struct i915_engine_class_instance *ci,
unsigned int count)
@@ -485,31 +470,6 @@ static double measure_min_load(int pmu, unsigned int num, int period_us)
return min / (double)d_t;
}
-static void measure_all_load(int pmu, double *v, unsigned int num, int period_us)
-{
- uint64_t data[2 + num];
- uint64_t d_t, d_v[num];
-
- kick_kthreads();
-
- igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
- for (unsigned int n = 0; n < num; n++)
- d_v[n] = -data[2 + n];
- d_t = -data[1];
-
- usleep(period_us);
-
- igt_assert_eq(read(pmu, data, sizeof(data)), sizeof(data));
-
- d_t += data[1];
- for (unsigned int n = 0; n < num; n++) {
- d_v[n] += data[2 + n];
- igt_debug("engine[%d]: %.1f%%\n",
- n, d_v[n] / (double)d_t * 100);
- v[n] = d_v[n] / (double)d_t;
- }
-}
-
static int
add_pmu(int i915, int pmu, const struct i915_engine_class_instance *ci)
{
@@ -594,147 +554,6 @@ static void individual(int i915)
gem_quiescent_gpu(i915);
}
-static void bonded(int i915, unsigned int flags)
-#define CORK 0x1
-{
- I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[16], 1);
- struct i915_engine_class_instance *master_engines;
- uint32_t vm;
-
- /*
- * I915_CONTEXT_PARAM_ENGINE provides an extension that allows us
- * to specify which engine(s) to pair with a parallel (EXEC_SUBMIT)
- * request submitted to another engine.
- */
-
- vm = gem_vm_create(i915);
-
- memset(bonds, 0, sizeof(bonds));
- for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
- bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
- bonds[n].base.next_extension =
- n ? to_user_pointer(&bonds[n - 1]) : 0;
- bonds[n].num_bonds = 1;
- }
-
- for (int class = 0; class < 32; class++) {
- struct i915_engine_class_instance *siblings;
- unsigned int count, limit, *order;
- uint32_t master, ctx;
- int n;
-
- siblings = list_engines(i915, 1u << class, &count);
- if (!siblings)
- continue;
-
- if (count < 2) {
- free(siblings);
- continue;
- }
-
- master_engines = list_engines(i915, ~(1u << class), &limit);
- master = gem_context_create_ext(i915, I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE, 0);
- set_vm(i915, master, vm);
- set_engines(i915, master, master_engines, limit);
-
- limit = min(count, limit);
- igt_assert(limit <= ARRAY_SIZE(bonds));
- for (n = 0; n < limit; n++) {
- bonds[n].master = master_engines[n];
- bonds[n].engines[0] = siblings[n];
- }
-
- ctx = gem_context_create_ext(i915, I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE, 0);
- set_vm(i915, ctx, vm);
- set_engines(i915, ctx, master_engines, limit);
- set_load_balancer(i915, ctx, siblings, count, &bonds[limit - 1]);
-
- order = malloc(sizeof(*order) * 8 * limit);
- igt_assert(order);
- for (n = 0; n < limit; n++)
- order[2 * limit - n - 1] = order[n] = n % limit;
- memcpy(order + 2 * limit, order, 2 * limit * sizeof(*order));
- memcpy(order + 4 * limit, order, 4 * limit * sizeof(*order));
- igt_permute_array(order + 2 * limit, 6 * limit, igt_exchange_int);
-
- for (n = 0; n < 8 * limit; n++) {
- struct drm_i915_gem_execbuffer2 eb;
- igt_spin_t *spin, *plug;
- IGT_CORK_HANDLE(cork);
- double v[limit];
- int pmu[limit + 1];
- int bond = order[n];
-
- pmu[0] = -1;
- for (int i = 0; i < limit; i++)
- pmu[i] = add_pmu(i915, pmu[0], &siblings[i]);
- pmu[limit] = add_pmu(i915,
- pmu[0], &master_engines[bond]);
-
- igt_assert(siblings[bond].engine_class !=
- master_engines[bond].engine_class);
-
- plug = NULL;
- if (flags & CORK) {
- plug = __igt_spin_new(i915,
- .ctx_id = master,
- .engine = bond,
- .dependency = igt_cork_plug(&cork, i915));
- }
-
- spin = __igt_spin_new(i915,
- .ctx_id = master,
- .engine = bond,
- .flags = IGT_SPIN_FENCE_OUT);
-
- eb = spin->execbuf;
- eb.rsvd1 = ctx;
- eb.rsvd2 = spin->out_fence;
- eb.flags = I915_EXEC_FENCE_SUBMIT;
- gem_execbuf(i915, &eb);
-
- if (plug) {
- igt_cork_unplug(&cork);
- igt_spin_free(i915, plug);
- }
-
- measure_all_load(pmu[0], v, limit + 1, 10000);
- igt_spin_free(i915, spin);
-
- igt_assert_f(v[bond] > 0.90,
- "engine %d (class:instance %s:%d) was found to be only %.1f%% busy\n",
- bond,
- class_to_str(siblings[bond].engine_class),
- siblings[bond].engine_instance,
- 100 * v[bond]);
- for (int other = 0; other < limit; other++) {
- if (other == bond)
- continue;
-
- igt_assert_f(v[other] == 0,
- "engine %d (class:instance %s:%d) was not idle, and actually %.1f%% busy\n",
- other,
- class_to_str(siblings[other].engine_class),
- siblings[other].engine_instance,
- 100 * v[other]);
- }
- igt_assert_f(v[limit] > 0.90,
- "master (class:instance %s:%d) was found to be only %.1f%% busy\n",
- class_to_str(master_engines[bond].engine_class),
- master_engines[bond].engine_instance,
- 100 * v[limit]);
-
- close(pmu[0]);
- }
-
- free(order);
- gem_context_destroy(i915, master);
- gem_context_destroy(i915, ctx);
- free(master_engines);
- free(siblings);
- }
-}
-
#define VIRTUAL_ENGINE (1u << 0)
static unsigned int offset_in_page(void *addr)
@@ -1818,126 +1637,6 @@ static void indices(int i915)
gem_quiescent_gpu(i915);
}
-static void __bonded_early(int i915,
- const struct i915_engine_class_instance *siblings,
- unsigned int count,
- unsigned int flags)
-{
- I915_DEFINE_CONTEXT_ENGINES_BOND(bonds[count], 1);
- uint32_t handle = batch_create(i915);
- struct drm_i915_gem_exec_object2 batch = {
- .handle = handle,
- };
- struct drm_i915_gem_execbuffer2 execbuf = {
- .buffers_ptr = to_user_pointer(&batch),
- .buffer_count = 1,
- };
- uint32_t vm, ctx;
- igt_spin_t *spin;
-
- memset(bonds, 0, sizeof(bonds));
- for (int n = 0; n < ARRAY_SIZE(bonds); n++) {
- bonds[n].base.name = I915_CONTEXT_ENGINES_EXT_BOND;
- bonds[n].base.next_extension =
- n ? to_user_pointer(&bonds[n - 1]) : 0;
-
- bonds[n].master = siblings[n];
- bonds[n].num_bonds = 1;
- bonds[n].engines[0] = siblings[(n + 1) % count];
- }
-
- /* We share a VM so that the spin cancel will work without a reloc */
- vm = gem_vm_create(i915);
-
- ctx = gem_context_create(i915);
- set_vm(i915, ctx, vm);
- set_load_balancer(i915, ctx, siblings, count,
- flags & VIRTUAL_ENGINE ? &bonds : NULL);
-
- /* A: spin forever on engine 1 */
- spin = igt_spin_new(i915,
- .ctx_id = ctx,
- .engine = (flags & VIRTUAL_ENGINE) ? 0 : 1,
- .flags = IGT_SPIN_NO_PREEMPTION);
-
- /* B: runs after A on engine 1 */
- execbuf.rsvd1 = ctx;
- execbuf.flags = I915_EXEC_FENCE_OUT;
- execbuf.flags |= spin->execbuf.flags & 63;
- gem_execbuf_wr(i915, &execbuf);
-
- /* B': run in parallel with B on engine 2, i.e. not before A! */
- execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
- if(!(flags & VIRTUAL_ENGINE))
- execbuf.flags |= 2;
- execbuf.rsvd2 >>= 32;
- gem_execbuf_wr(i915, &execbuf);
-
- /* C: prevent anything running on engine 2 after B' */
- spin->execbuf.flags = execbuf.flags & 63;
- gem_execbuf(i915, &spin->execbuf);
-
- igt_debugfs_dump(i915, "i915_engine_info");
-
- /* D: cancel the spinner from engine 2 (new context) */
- gem_context_destroy(i915, ctx);
- ctx = gem_context_create(i915);
- set_vm(i915, ctx, vm);
- set_load_balancer(i915, ctx, siblings, count,
- flags & VIRTUAL_ENGINE ? &bonds : NULL);
- batch.handle = create_semaphore_to_spinner(i915, spin);
- execbuf.rsvd1 = ctx;
- execbuf.flags = 0;
- if(!(flags & VIRTUAL_ENGINE))
- execbuf.flags |= 2;
- gem_execbuf(i915, &execbuf);
- gem_close(i915, batch.handle);
-
- /* If C runs before D, we never cancel the spinner and so hang */
- gem_sync(i915, handle);
-
- /* Check the bonded pair completed successfully */
- igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
- igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 1);
-
- close(execbuf.rsvd2);
- close(execbuf.rsvd2 >> 32);
-
- gem_context_destroy(i915, ctx);
- gem_close(i915, handle);
- igt_spin_free(i915, spin);
-}
-
-static void bonded_early(int i915)
-{
- /*
- * Our goal is to start the bonded payloads at roughly the same time.
- * We do not want to start the secondary batch too early as it will
- * do nothing but hog the GPU until the first has a chance to execute.
- * So if we were to arbitrary delay the first by running it after a
- * spinner...
- *
- * By using a pair of spinners, we can create a bonded hog that when
- * set in motion will fully utilize both engines [if the scheduling is
- * incorrect]. We then use a third party submitted after the bonded
- * pair to cancel the spinner from the GPU -- if it is unable to run,
- * the spinner is never cancelled, and the bonded pair will cause a GPU
- * hang.
- */
-
- for (int class = 0; class < 32; class++) {
- struct i915_engine_class_instance *siblings;
- unsigned int count;
-
- siblings = list_engines(i915, 1u << class, &count);
- if (count > 1) {
- __bonded_early(i915, siblings, count, 0);
- __bonded_early(i915, siblings, count, VIRTUAL_ENGINE);
- }
- free(siblings);
- }
-}
-
static void busy(int i915)
{
uint32_t scratch = gem_create(i915, 4096);
@@ -3178,22 +2877,6 @@ static bool has_load_balancer(int i915)
return err == 0;
}
-static bool has_bonding(int i915)
-{
- I915_DEFINE_CONTEXT_ENGINES_BOND(bonds, 0) = {
- .base.name = I915_CONTEXT_ENGINES_EXT_BOND,
- };
- struct i915_engine_class_instance ci = {};
- uint32_t ctx;
- int err;
-
- ctx = gem_context_create(i915);
- err = __set_load_balancer(i915, ctx, &ci, 1, &bonds);
- gem_context_destroy(i915, ctx);
-
- return err == 0;
-}
-
igt_main
{
int i915 = -1;
@@ -3265,19 +2948,6 @@ igt_main
igt_subtest("smoke")
smoketest(i915, 20);
- igt_subtest_group {
- igt_fixture igt_require(has_bonding(i915));
-
- igt_subtest("bonded-imm")
- bonded(i915, 0);
-
- igt_subtest("bonded-cork")
- bonded(i915, CORK);
-
- igt_subtest("bonded-early")
- bonded_early(i915);
- }
-
igt_subtest("bonded-slice")
bonded_slice(i915);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 52/81] lib/intel_ctx: Add load balancing support (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (50 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 51/81] tests/i915/gem_exec_balancer: Drop bonded tests Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 53/81] tests/i915/gem_exec_balancer: Convert to intel_ctx_t Jason Ekstrand
` (31 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
We use the same convention as the load balancing tests and, when
balancing is requested, we make a context with N+1 engines where the
first one is the balanced engine and the others are physical engines.
v2 (Zbigniew Kempczyński):
- Don't initialize num_logical_engines
- Use igt_assert() instead of assert()
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
lib/i915/gem_engine_topology.c | 27 ++++++++++++++----
lib/intel_ctx.c | 50 +++++++++++++++++++++++++++++++---
lib/intel_ctx.h | 2 ++
3 files changed, 69 insertions(+), 10 deletions(-)
diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
index 50f4bde71..4bb7d21f6 100644
--- a/lib/i915/gem_engine_topology.c
+++ b/lib/i915/gem_engine_topology.c
@@ -312,12 +312,27 @@ intel_engine_list_for_ctx_cfg(int fd, const intel_ctx_cfg_t *cfg)
struct intel_engine_data engine_data = { };
int i;
- engine_data.nengines = cfg->num_engines;
- for (i = 0; i < cfg->num_engines; i++)
- init_engine(&engine_data.engines[i],
- cfg->engines[i].engine_class,
- cfg->engines[i].engine_instance,
- i);
+ if (cfg->load_balance) {
+ engine_data.nengines = cfg->num_engines + 1;
+
+ init_engine(&engine_data.engines[0],
+ I915_ENGINE_CLASS_INVALID,
+ I915_ENGINE_CLASS_INVALID_NONE,
+ 0);
+
+ for (i = 0; i < cfg->num_engines; i++)
+ init_engine(&engine_data.engines[i + 1],
+ cfg->engines[i].engine_class,
+ cfg->engines[i].engine_instance,
+ i + 1);
+ } else {
+ engine_data.nengines = cfg->num_engines;
+ for (i = 0; i < cfg->num_engines; i++)
+ init_engine(&engine_data.engines[i],
+ cfg->engines[i].engine_class,
+ cfg->engines[i].engine_instance,
+ i);
+ }
return engine_data;
} else {
diff --git a/lib/intel_ctx.c b/lib/intel_ctx.c
index 5ca8b4534..5495fa764 100644
--- a/lib/intel_ctx.c
+++ b/lib/intel_ctx.c
@@ -82,6 +82,7 @@ static int
__context_create_cfg(int fd, const intel_ctx_cfg_t *cfg, uint32_t *ctx_id)
{
uint64_t ext_root = 0;
+ I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balance, GEM_MAX_ENGINES);
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, GEM_MAX_ENGINES);
struct drm_i915_gem_context_create_ext_setparam engines_param, vm_param;
struct drm_i915_gem_context_create_ext_setparam persist_param;
@@ -113,9 +114,39 @@ __context_create_cfg(int fd, const intel_ctx_cfg_t *cfg, uint32_t *ctx_id)
}
if (cfg->num_engines) {
+ unsigned num_logical_engines;
memset(&engines, 0, sizeof(engines));
- for (i = 0; i < cfg->num_engines; i++)
- engines.engines[i] = cfg->engines[i];
+
+ if (cfg->load_balance) {
+ memset(&balance, 0, sizeof(balance));
+
+ /* In this case, the first engine is the virtual
+ * balanced engine and the subsequent engines are
+ * the actual requested engines.
+ */
+ igt_assert(cfg->num_engines + 1 <= GEM_MAX_ENGINES);
+ num_logical_engines = cfg->num_engines + 1;
+
+ engines.engines[0].engine_class =
+ I915_ENGINE_CLASS_INVALID;
+ engines.engines[0].engine_instance =
+ I915_ENGINE_CLASS_INVALID_NONE;
+
+ balance.num_siblings = cfg->num_engines;
+ for (i = 0; i < cfg->num_engines; i++) {
+ igt_assert_eq(cfg->engines[0].engine_class,
+ cfg->engines[i].engine_class);
+ balance.engines[i] = cfg->engines[i];
+ engines.engines[i + 1] = cfg->engines[i];
+ }
+
+ engines.extensions = to_user_pointer(&balance);
+ } else {
+ igt_assert(cfg->num_engines <= GEM_MAX_ENGINES);
+ num_logical_engines = cfg->num_engines;
+ for (i = 0; i < cfg->num_engines; i++)
+ engines.engines[i] = cfg->engines[i];
+ }
engines_param = (struct drm_i915_gem_context_create_ext_setparam) {
.base = {
@@ -123,11 +154,13 @@ __context_create_cfg(int fd, const intel_ctx_cfg_t *cfg, uint32_t *ctx_id)
},
.param = {
.param = I915_CONTEXT_PARAM_ENGINES,
- .size = sizeof_param_engines(cfg->num_engines),
+ .size = sizeof_param_engines(num_logical_engines),
.value = to_user_pointer(&engines),
},
};
add_user_ext(&ext_root, &engines_param.base);
+ } else {
+ igt_assert(!cfg->load_balance);
}
return __gem_context_create_ext(fd, cfg->flags, ext_root, ctx_id);
@@ -259,7 +292,16 @@ void intel_ctx_destroy(int fd, const intel_ctx_t *ctx)
*/
unsigned int intel_ctx_engine_class(const intel_ctx_t *ctx, unsigned int engine)
{
- if (ctx->cfg.num_engines) {
+ if (ctx->cfg.load_balance) {
+ if (engine == 0) {
+ /* This is our virtual engine */
+ return ctx->cfg.engines[0].engine_class;
+ } else {
+ /* This is a physical engine */
+ igt_assert(engine - 1 < ctx->cfg.num_engines);
+ return ctx->cfg.engines[engine - 1].engine_class;
+ }
+ } else if (ctx->cfg.num_engines) {
igt_assert(engine < ctx->cfg.num_engines);
return ctx->cfg.engines[engine].engine_class;
} else {
diff --git a/lib/intel_ctx.h b/lib/intel_ctx.h
index e34cefc14..d4cb435a7 100644
--- a/lib/intel_ctx.h
+++ b/lib/intel_ctx.h
@@ -17,6 +17,7 @@
* @flags: Context create flags
* @vm: VM to inherit or 0 for using a per-context VM
* @nopersist: set I915_CONTEXT_PARAM_PERSISTENCE to 0
+ * @load_balance: True if the first engine should be a load balancing engine
* @num_engines: Number of client-specified engines or 0 for legacy mode
* @engines: Client-specified engines
*
@@ -44,6 +45,7 @@ typedef struct intel_ctx_cfg {
uint32_t flags;
uint32_t vm;
bool nopersist;
+ bool load_balance;
unsigned int num_engines;
struct i915_engine_class_instance engines[GEM_MAX_ENGINES];
} intel_ctx_cfg_t;
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 53/81] tests/i915/gem_exec_balancer: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (51 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 52/81] lib/intel_ctx: Add load balancing support (v2) Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 54/81] tests/i915/gem_exec_endless: Stop munging ctx0 engines Jason Ekstrand
` (30 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_balancer.c | 451 ++++++++++++++-------------------
1 file changed, 192 insertions(+), 259 deletions(-)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index e6f742310..952428298 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -54,12 +54,6 @@ static size_t sizeof_load_balance(int count)
engines[count]);
}
-static size_t sizeof_param_engines(int count)
-{
- return offsetof(struct i915_context_param_engines,
- engines[count]);
-}
-
#define alloca0(sz) ({ size_t sz__ = (sz); memset(alloca(sz__), 0, sz__); })
static bool has_class_instance(int i915, uint16_t class, uint16_t instance)
@@ -124,83 +118,35 @@ static bool has_perf_engines(int i915)
return i915_perf_type_id(i915);
}
-static int __set_engines(int i915, uint32_t ctx,
- const struct i915_engine_class_instance *ci,
- unsigned int count)
-{
- struct i915_context_param_engines *engines =
- alloca0(sizeof_param_engines(count));
- struct drm_i915_gem_context_param p = {
- .ctx_id = ctx,
- .param = I915_CONTEXT_PARAM_ENGINES,
- .size = sizeof_param_engines(count),
- .value = to_user_pointer(engines)
- };
-
- engines->extensions = 0;
- memcpy(engines->engines, ci, count * sizeof(*ci));
-
- return __gem_context_set_param(i915, &p);
-}
-
-static void set_engines(int i915, uint32_t ctx,
- const struct i915_engine_class_instance *ci,
- unsigned int count)
-{
- igt_assert_eq(__set_engines(i915, ctx, ci, count), 0);
-}
-
-static int __set_load_balancer(int i915, uint32_t ctx,
- const struct i915_engine_class_instance *ci,
- unsigned int count,
- void *ext)
+static intel_ctx_cfg_t
+ctx_cfg_for_engines(const struct i915_engine_class_instance *ci,
+ unsigned int count)
{
- struct i915_context_engines_load_balance *balancer =
- alloca0(sizeof_load_balance(count));
- struct i915_context_param_engines *engines =
- alloca0(sizeof_param_engines(count + 1));
- struct drm_i915_gem_context_param p = {
- .ctx_id = ctx,
- .param = I915_CONTEXT_PARAM_ENGINES,
- .size = sizeof_param_engines(count + 1),
- .value = to_user_pointer(engines)
- };
-
- balancer->base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
- balancer->base.next_extension = to_user_pointer(ext);
-
- igt_assert(count);
- balancer->num_siblings = count;
- memcpy(balancer->engines, ci, count * sizeof(*ci));
+ intel_ctx_cfg_t cfg = { };
+ unsigned int i;
- engines->extensions = to_user_pointer(balancer);
- engines->engines[0].engine_class =
- I915_ENGINE_CLASS_INVALID;
- engines->engines[0].engine_instance =
- I915_ENGINE_CLASS_INVALID_NONE;
- memcpy(engines->engines + 1, ci, count * sizeof(*ci));
+ for (i = 0; i < count; i++)
+ cfg.engines[i] = ci[i];
+ cfg.num_engines = count;
- return __gem_context_set_param(i915, &p);
+ return cfg;
}
-static void set_load_balancer(int i915, uint32_t ctx,
- const struct i915_engine_class_instance *ci,
- unsigned int count,
- void *ext)
+static const intel_ctx_t *
+ctx_create_engines(int i915, const struct i915_engine_class_instance *ci,
+ unsigned int count)
{
- igt_assert_eq(__set_load_balancer(i915, ctx, ci, count, ext), 0);
+ intel_ctx_cfg_t cfg = ctx_cfg_for_engines(ci, count);
+ return intel_ctx_create(i915, &cfg);
}
-static uint32_t load_balancer_create(int i915,
- const struct i915_engine_class_instance *ci,
- unsigned int count)
+static const intel_ctx_t *
+ctx_create_balanced(int i915, const struct i915_engine_class_instance *ci,
+ unsigned int count)
{
- uint32_t ctx;
-
- ctx = gem_context_create(i915);
- set_load_balancer(i915, ctx, ci, count, NULL);
-
- return ctx;
+ intel_ctx_cfg_t cfg = ctx_cfg_for_engines(ci, count);
+ cfg.load_balance = true;
+ return intel_ctx_create(i915, &cfg);
}
static uint32_t __batch_create(int i915, uint32_t offset)
@@ -495,7 +441,7 @@ static const char *class_to_str(int class)
}
static void check_individual_engine(int i915,
- uint32_t ctx,
+ const intel_ctx_t *ctx,
const struct i915_engine_class_instance *ci,
int idx)
{
@@ -507,7 +453,7 @@ static void check_individual_engine(int i915,
I915_PMU_ENGINE_BUSY(ci[idx].engine_class,
ci[idx].engine_instance));
- spin = igt_spin_new(i915, .ctx_id = ctx, .engine = idx + 1);
+ spin = igt_spin_new(i915, .ctx = ctx, .engine = idx + 1);
load = measure_load(pmu, 10000);
igt_spin_free(i915, spin);
@@ -537,15 +483,14 @@ static void individual(int i915)
continue;
for (int pass = 0; pass < count; pass++) { /* approx. count! */
- uint32_t ctx;
+ const intel_ctx_t *ctx;
igt_assert(sizeof(*ci) == sizeof(int));
igt_permute_array(ci, count, igt_exchange_int);
- ctx = gem_context_create(i915);
- set_load_balancer(i915, ctx, ci, count, NULL);
+ ctx = ctx_create_balanced(i915, ci, count);
for (unsigned int n = 0; n < count; n++)
check_individual_engine(i915, ctx, ci, n);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
free(ci);
@@ -613,7 +558,7 @@ static void bonded_slice(int i915)
struct drm_i915_gem_exec_object2 obj[3] = {};
struct drm_i915_gem_execbuffer2 eb = {};
unsigned int count;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
igt_spin_t *spin;
siblings = list_engines(i915, 1u << class, &count);
@@ -637,11 +582,10 @@ static void bonded_slice(int i915)
* XXX add explicit bonding options for A->B
*/
- ctx = gem_context_create(i915); /* NB timeline per engine */
- set_load_balancer(i915, ctx, siblings, count, NULL);
+ ctx = ctx_create_balanced(i915, siblings, count);
spin = __igt_spin_new(i915,
- .ctx_id = ctx,
+ .ctx = ctx,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN));
igt_spin_end(spin); /* we just want its address for later */
@@ -655,17 +599,17 @@ static void bonded_slice(int i915)
obj[2].handle = create_semaphore_to_spinner(i915, spin);
eb.buffers_ptr = to_user_pointer(obj);
- eb.rsvd1 = ctx;
+ eb.rsvd1 = ctx->id;
*stop = 0;
igt_fork(child, count + 1) { /* C: arbitrary background load */
igt_list_del(&spin->link);
- ctx = load_balancer_create(i915, siblings, count);
+ ctx = ctx_create_balanced(i915, siblings, count);
while (!READ_ONCE(*stop)) {
spin = igt_spin_new(i915,
- .ctx_id = ctx,
+ .ctx = ctx,
.engine = (1 + rand() % count),
.flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin);
@@ -673,7 +617,7 @@ static void bonded_slice(int i915)
igt_spin_free(i915, spin);
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
igt_until_timeout(5) {
@@ -699,7 +643,7 @@ static void bonded_slice(int i915)
gem_close(i915, obj[2].handle);
igt_spin_free(i915, spin);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
munmap(stop, 4096);
@@ -720,15 +664,14 @@ static void __bonded_chain(int i915,
igt_spin_t *spin;
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/* A: spin forever on engine 1 */
- ctx = gem_context_create(i915);
- set_load_balancer(i915, ctx, siblings, count, NULL);
+ ctx = ctx_create_balanced(i915, siblings, count);
if (priorities[i] < 0)
- gem_context_set_priority(i915, ctx, priorities[i]);
+ gem_context_set_priority(i915, ctx->id, priorities[i]);
spin = igt_spin_new(i915,
- .ctx_id = ctx,
+ .ctx = ctx,
.engine = 1,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
@@ -742,11 +685,10 @@ static void __bonded_chain(int i915,
*/
/* B: waits for A on engine 2 */
- gem_context_destroy(i915, ctx);
- ctx = gem_context_create(i915);
- gem_context_set_priority(i915, ctx, 0);
- set_load_balancer(i915, ctx, siblings, count, NULL);
- execbuf.rsvd1 = ctx;
+ intel_ctx_destroy(i915, ctx);
+ ctx = ctx_create_balanced(i915, siblings, count);
+ gem_context_set_priority(i915, ctx->id, 0);
+ execbuf.rsvd1 = ctx->id;
execbuf.rsvd2 = spin->out_fence;
execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
execbuf.flags |= 2; /* opposite engine to spinner */
@@ -754,12 +696,12 @@ static void __bonded_chain(int i915,
/* B': run in parallel with B on engine 1, i.e. not before A! */
if (priorities[i] > 0)
- gem_context_set_priority(i915, ctx, priorities[i]);
+ gem_context_set_priority(i915, ctx->id, priorities[i]);
execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.flags |= 1; /* same engine as spinner */
execbuf.rsvd2 >>= 32;
gem_execbuf_wr(i915, &execbuf);
- gem_context_set_priority(i915, ctx, 0);
+ gem_context_set_priority(i915, ctx->id, 0);
/* Wait for any magic timeslicing or preemptions... */
igt_assert_eq(sync_fence_wait(execbuf.rsvd2 >> 32, 1000),
@@ -776,7 +718,7 @@ static void __bonded_chain(int i915,
igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 0);
igt_spin_free(i915, spin);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
gem_sync(i915, batch.handle);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
@@ -804,26 +746,24 @@ static void __bonded_chain_inv(int i915,
igt_spin_t *spin;
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/* A: spin forever on engine 1 */
- ctx = gem_context_create(i915);
- set_load_balancer(i915, ctx, siblings, count, NULL);
+ ctx = ctx_create_balanced(i915, siblings, count);
if (priorities[i] < 0)
- gem_context_set_priority(i915, ctx, priorities[i]);
+ gem_context_set_priority(i915, ctx->id, priorities[i]);
spin = igt_spin_new(i915,
- .ctx_id = ctx,
+ .ctx = ctx,
.engine = 1,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin);
/* B: waits for A on engine 1 */
- gem_context_destroy(i915, ctx);
- ctx = gem_context_create(i915);
- gem_context_set_priority(i915, ctx, 0);
- set_load_balancer(i915, ctx, siblings, count, NULL);
- execbuf.rsvd1 = ctx;
+ intel_ctx_destroy(i915, ctx);
+ ctx = ctx_create_balanced(i915, siblings, count);
+ gem_context_set_priority(i915, ctx->id, 0);
+ execbuf.rsvd1 = ctx->id;
execbuf.rsvd2 = spin->out_fence;
execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
execbuf.flags |= 1; /* same engine as spinner */
@@ -831,12 +771,12 @@ static void __bonded_chain_inv(int i915,
/* B': run in parallel with B on engine 2, i.e. not before A! */
if (priorities[i] > 0)
- gem_context_set_priority(i915, ctx, priorities[i]);
+ gem_context_set_priority(i915, ctx->id, priorities[i]);
execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.flags |= 2; /* opposite engine to spinner */
execbuf.rsvd2 >>= 32;
gem_execbuf_wr(i915, &execbuf);
- gem_context_set_priority(i915, ctx, 0);
+ gem_context_set_priority(i915, ctx->id, 0);
/* Wait for any magic timeslicing or preemptions... */
igt_assert_eq(sync_fence_wait(execbuf.rsvd2 >> 32, 1000),
@@ -854,7 +794,7 @@ static void __bonded_chain_inv(int i915,
igt_spin_free(i915, spin);
gem_sync(i915, batch.handle);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 1);
@@ -902,10 +842,10 @@ static void __bonded_sema(int i915,
igt_spin_t *spin;
for (int i = 0; i < ARRAY_SIZE(priorities); i++) {
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/* A: spin forever on seperate render engine */
- spin = igt_spin_new(i915,
+ spin = igt_spin_new(i915, .ctx = intel_ctx_0(i915),
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin);
@@ -918,26 +858,24 @@ static void __bonded_sema(int i915,
*/
/* B: waits for A (using a semaphore) on engine 1 */
- ctx = gem_context_create(i915);
- set_load_balancer(i915, ctx, siblings, count, NULL);
- execbuf.rsvd1 = ctx;
+ ctx = ctx_create_balanced(i915, siblings, count);
+ execbuf.rsvd1 = ctx->id;
execbuf.rsvd2 = spin->out_fence;
execbuf.flags = I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
execbuf.flags |= 1;
gem_execbuf_wr(i915, &execbuf);
/* B': run in parallel with B on engine 2 */
- gem_context_destroy(i915, ctx);
- ctx = gem_context_create(i915);
+ intel_ctx_destroy(i915, ctx);
+ ctx = ctx_create_balanced(i915, siblings, count);
if (priorities[i] > 0)
- gem_context_set_priority(i915, ctx, priorities[i]);
- set_load_balancer(i915, ctx, siblings, count, NULL);
- execbuf.rsvd1 = ctx;
+ gem_context_set_priority(i915, ctx->id, priorities[i]);
+ execbuf.rsvd1 = ctx->id;
execbuf.flags = I915_EXEC_FENCE_SUBMIT | I915_EXEC_FENCE_OUT;
execbuf.flags |= 2;
execbuf.rsvd2 >>= 32;
gem_execbuf_wr(i915, &execbuf);
- gem_context_set_priority(i915, ctx, 0);
+ gem_context_set_priority(i915, ctx->id, 0);
/* Wait for any magic timeslicing or preemptions... */
igt_assert_eq(sync_fence_wait(execbuf.rsvd2 >> 32, 1000),
@@ -955,7 +893,7 @@ static void __bonded_sema(int i915,
igt_spin_free(i915, spin);
gem_sync(i915, batch.handle);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 & 0xffffffff), 1);
igt_assert_eq(sync_fence_status(execbuf.rsvd2 >> 32), 1);
@@ -1008,7 +946,7 @@ static void __bonded_pair(int i915,
unsigned int spinner;
igt_spin_t *a;
int timeline;
- uint32_t A;
+ const intel_ctx_t *A;
srandom(getpid());
@@ -1016,9 +954,8 @@ static void __bonded_pair(int i915,
if (flags & B_HOSTILE)
spinner |= IGT_SPIN_NO_PREEMPTION;
- A = gem_context_create(i915);
- set_load_balancer(i915, A, siblings, count, NULL);
- a = igt_spin_new(i915, A, .flags = spinner);
+ A = ctx_create_balanced(i915, siblings, count);
+ a = igt_spin_new(i915, .ctx = A, .flags = spinner);
igt_spin_end(a);
gem_sync(i915, a->handle);
@@ -1071,7 +1008,7 @@ static void __bonded_pair(int i915,
close(timeline);
igt_spin_free(i915, a);
- gem_context_destroy(i915, A);
+ intel_ctx_destroy(i915, A);
*out = cycles;
}
@@ -1091,7 +1028,7 @@ static void __bonded_dual(int i915,
unsigned int spinner;
igt_spin_t *a, *b;
int timeline;
- uint32_t A, B;
+ const intel_ctx_t *A, *B;
srandom(getpid());
@@ -1099,15 +1036,13 @@ static void __bonded_dual(int i915,
if (flags & B_HOSTILE)
spinner |= IGT_SPIN_NO_PREEMPTION;
- A = gem_context_create(i915);
- set_load_balancer(i915, A, siblings, count, NULL);
- a = igt_spin_new(i915, A, .flags = spinner);
+ A = ctx_create_balanced(i915, siblings, count);
+ a = igt_spin_new(i915, .ctx = A, .flags = spinner);
igt_spin_end(a);
gem_sync(i915, a->handle);
- B = gem_context_create(i915);
- set_load_balancer(i915, B, siblings, count, NULL);
- b = igt_spin_new(i915, B, .flags = spinner);
+ B = ctx_create_balanced(i915, siblings, count);
+ b = igt_spin_new(i915, .ctx = B, .flags = spinner);
igt_spin_end(b);
gem_sync(i915, b->handle);
@@ -1186,8 +1121,8 @@ static void __bonded_dual(int i915,
igt_spin_free(i915, a);
igt_spin_free(i915, b);
- gem_context_destroy(i915, A);
- gem_context_destroy(i915, B);
+ intel_ctx_destroy(i915, A);
+ intel_ctx_destroy(i915, B);
*out = cycles;
}
@@ -1293,6 +1228,7 @@ static void __bonded_sync(int i915,
unsigned long *out)
{
const uint64_t A = 0 << 12, B = 1 << 12;
+ const intel_ctx_t *ctx = ctx_create_balanced(i915, siblings, count);
struct drm_i915_gem_exec_object2 obj[2] = { {
.handle = sync_to(i915, A, B),
.offset = A,
@@ -1305,7 +1241,7 @@ static void __bonded_sync(int i915,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(obj),
.buffer_count = 2,
- .rsvd1 = gem_context_create(i915),
+ .rsvd1 = ctx->id,
};
unsigned long cycles = 0;
@@ -1314,7 +1250,6 @@ static void __bonded_sync(int i915,
if (!(flags & B_HOSTILE)) /* always non-preemptible */
goto out;
- set_load_balancer(i915, execbuf.rsvd1, siblings, count, NULL);
disable_preparser(i915, execbuf.rsvd1);
srandom(getpid());
@@ -1371,7 +1306,7 @@ out:
close(timeline);
gem_close(i915, obj[0].handle);
gem_close(i915, obj[1].handle);
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, ctx);
*out = cycles;
}
@@ -1459,7 +1394,7 @@ bonded_runner(int i915,
munmap(cycles, 4096);
}
-static void __bonded_nohang(int i915, uint32_t ctx,
+static void __bonded_nohang(int i915, const intel_ctx_t *ctx,
const struct i915_engine_class_instance *siblings,
unsigned int count,
unsigned int flags)
@@ -1471,16 +1406,15 @@ static void __bonded_nohang(int i915, uint32_t ctx,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
igt_spin_t *time, *spin;
- uint32_t load;
+ const intel_ctx_t *load;
- load = gem_context_create(i915);
- gem_context_set_priority(i915, load, 1023);
- set_load_balancer(i915, load, siblings, count, NULL);
+ load = ctx_create_balanced(i915, siblings, count);
+ gem_context_set_priority(i915, load->id, 1023);
- spin = igt_spin_new(i915, load, .engine = 1);
+ spin = igt_spin_new(i915, .ctx = load, .engine = 1);
/* Master on engine 1, stuck behind a spinner */
execbuf.flags = 1 | I915_EXEC_FENCE_OUT;
@@ -1500,7 +1434,7 @@ static void __bonded_nohang(int i915, uint32_t ctx,
if (flags & NOHANG) {
/* Keep replacing spin, so that it doesn't hang */
- next = igt_spin_new(i915, load, .engine = 1);
+ next = igt_spin_new(i915, .ctx = load, .engine = 1);
igt_spin_free(i915, spin);
spin = next;
}
@@ -1518,13 +1452,13 @@ static void __bonded_nohang(int i915, uint32_t ctx,
close(execbuf.rsvd2);
close(execbuf.rsvd2 >> 32);
- gem_context_destroy(i915, load);
+ intel_ctx_destroy(i915, load);
gem_close(i915, batch.handle);
}
static void bonded_nohang(int i915, unsigned int flags)
{
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/*
* We try and trick ourselves into declaring a bonded request as
@@ -1533,7 +1467,7 @@ static void bonded_nohang(int i915, unsigned int flags)
igt_require(gem_scheduler_has_semaphores(i915));
- ctx = gem_context_create(i915);
+ ctx = intel_ctx_create(i915, NULL);
for (int class = 1; class < 32; class++) {
struct i915_engine_class_instance *siblings;
@@ -1545,7 +1479,7 @@ static void bonded_nohang(int i915, unsigned int flags)
free(siblings);
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
static void indices(int i915)
@@ -1661,20 +1595,20 @@ static void busy(int i915)
struct i915_engine_class_instance *ci;
unsigned int count;
igt_spin_t *spin[2];
- uint32_t ctx;
+ const intel_ctx_t *ctx;
ci = list_engines(i915, 1u << class, &count);
if (!ci)
continue;
- ctx = load_balancer_create(i915, ci, count);
+ ctx = ctx_create_balanced(i915, ci, count);
free(ci);
spin[0] = __igt_spin_new(i915,
- .ctx_id = ctx,
+ .ctx = ctx,
.flags = IGT_SPIN_POLL_RUN);
spin[1] = __igt_spin_new(i915,
- .ctx_id = ctx,
+ .ctx = ctx,
.dependency = scratch);
igt_spin_busywait_until_started(spin[0]);
@@ -1698,7 +1632,7 @@ static void busy(int i915)
igt_spin_free(i915, spin[1]);
igt_spin_free(i915, spin[0]);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
gem_close(i915, scratch);
@@ -1751,7 +1685,7 @@ static void full(int i915, unsigned int flags)
pmu[0] = -1;
for (unsigned int n = 0; n < count; n++) {
- uint32_t ctx;
+ const intel_ctx_t *ctx;
pmu[n] = add_pmu(i915, pmu[0], &ci[n]);
@@ -1770,22 +1704,22 @@ static void full(int i915, unsigned int flags)
* otherwise they will just sit in the single queue
* and not run concurrently.
*/
- ctx = load_balancer_create(i915, ci, count);
+ ctx = ctx_create_balanced(i915, ci, count);
if (spin == NULL) {
- spin = __igt_spin_new(i915, .ctx_id = ctx);
+ spin = __igt_spin_new(i915, .ctx = ctx);
} else {
struct drm_i915_gem_execbuffer2 eb = {
.buffers_ptr = spin->execbuf.buffers_ptr,
.buffer_count = spin->execbuf.buffer_count,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
.rsvd2 = fence,
.flags = flags & LATE ? I915_EXEC_FENCE_IN : 0,
};
gem_execbuf(i915, &eb);
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
if (flags & LATE) {
@@ -1812,17 +1746,17 @@ static void full(int i915, unsigned int flags)
}
static void __sliced(int i915,
- uint32_t ctx, unsigned int count,
+ const intel_ctx_t *ctx, unsigned int count,
unsigned int flags)
{
igt_spin_t *load[count];
igt_spin_t *virtual;
- virtual = igt_spin_new(i915, ctx, .engine = 0,
+ virtual = igt_spin_new(i915, .ctx = ctx, .engine = 0,
.flags = (IGT_SPIN_FENCE_OUT |
IGT_SPIN_POLL_RUN));
for (int i = 0; i < count; i++)
- load[i] = __igt_spin_new(i915, ctx,
+ load[i] = __igt_spin_new(i915, .ctx = ctx,
.engine = i + 1,
.fence = virtual->out_fence,
.flags = flags);
@@ -1873,7 +1807,9 @@ static void sliced(int i915)
}
igt_fork(child, count) {
- uint32_t ctx = load_balancer_create(i915, ci, count);
+ const intel_ctx_t *ctx;
+
+ ctx = ctx_create_balanced(i915, ci, count);
/* Independent load */
__sliced(i915, ctx, count, 0);
@@ -1881,7 +1817,7 @@ static void sliced(int i915)
/* Dependent load */
__sliced(i915, ctx, count, IGT_SPIN_FENCE_IN);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
igt_waitchildren();
@@ -1891,23 +1827,23 @@ static void sliced(int i915)
gem_quiescent_gpu(i915);
}
-static void __hog(int i915, uint32_t ctx, unsigned int count)
+static void __hog(int i915, const intel_ctx_t *ctx, unsigned int count)
{
int64_t timeout = 50 * 1000 * 1000; /* 50ms */
igt_spin_t *virtual;
igt_spin_t *hog;
- virtual = igt_spin_new(i915, ctx, .engine = 0);
+ virtual = igt_spin_new(i915, .ctx = ctx, .engine = 0);
for (int i = 0; i < count; i++)
gem_execbuf(i915, &virtual->execbuf);
usleep(50 * 1000); /* 50ms, long enough to spread across all engines */
- gem_context_set_priority(i915, ctx, 1023);
- hog = __igt_spin_new(i915, ctx,
+ gem_context_set_priority(i915, ctx->id, 1023);
+ hog = __igt_spin_new(i915, .ctx = ctx,
.engine = 1 + (random() % count),
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_NO_PREEMPTION));
- gem_context_set_priority(i915, ctx, 0);
+ gem_context_set_priority(i915, ctx->id, 0);
/* No matter which engine we choose, we'll have interrupted someone */
igt_spin_busywait_until_started(hog);
@@ -1937,7 +1873,7 @@ static void hog(int i915)
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *ci;
unsigned int count;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
ci = list_engines(i915, 1u << class, &count);
if (!ci)
@@ -1948,11 +1884,11 @@ static void hog(int i915)
continue;
}
- ctx = load_balancer_create(i915, ci, count);
+ ctx = ctx_create_balanced(i915, ci, count);
__hog(i915, ctx, count);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_waitchildren();
free(ci);
@@ -2000,6 +1936,7 @@ static int __execbuf(int i915, struct drm_i915_gem_execbuffer2 *execbuf)
static uint32_t *sema(int i915, struct i915_engine_class_instance *ci,
unsigned int count)
{
+ const intel_ctx_t *ctx = ctx_create_balanced(i915, ci, count);
uint32_t *ctl;
struct drm_i915_gem_exec_object2 batch = {
.handle = sema_create(i915, 64 << 20, &ctl),
@@ -2009,7 +1946,7 @@ static uint32_t *sema(int i915, struct i915_engine_class_instance *ci,
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = load_balancer_create(i915, ci, count),
+ .rsvd1 = ctx->id,
};
for (int n = 1; n <= 32; n++) {
@@ -2023,7 +1960,7 @@ static uint32_t *sema(int i915, struct i915_engine_class_instance *ci,
gem_wait(i915, batch.handle, &poll);
}
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, ctx);
igt_assert(gem_bo_busy(i915, batch.handle));
gem_close(i915, batch.handle);
@@ -2076,14 +2013,9 @@ static void waits(int i915, int timeout)
if (!ci)
continue;
- if (count > 1) {
- uint32_t ctx = load_balancer_create(i915, ci, count);
-
+ if (count > 1)
__waits(i915, timeout, ci, count);
- gem_context_destroy(i915, ctx);
- }
-
free(ci);
}
@@ -2102,20 +2034,20 @@ static void nop(int i915)
for (int class = 0; class < 32; class++) {
struct i915_engine_class_instance *ci;
unsigned int count;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
ci = list_engines(i915, 1u << class, &count);
if (!ci)
continue;
- ctx = load_balancer_create(i915, ci, count);
+ ctx = ctx_create_balanced(i915, ci, count);
for (int n = 0; n < count; n++) {
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
.flags = n + 1,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
struct timespec tv = {};
unsigned long nops;
@@ -2138,7 +2070,7 @@ static void nop(int i915)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
struct timespec tv = {};
unsigned long nops;
@@ -2159,11 +2091,13 @@ static void nop(int i915)
igt_fork(child, count) {
+ const intel_ctx_t *child_ctx =
+ ctx_create_balanced(i915, ci, count);
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
.flags = child + 1,
- .rsvd1 = load_balancer_create(i915, ci, count),
+ .rsvd1 = child_ctx->id,
};
struct timespec tv = {};
unsigned long nops;
@@ -2198,12 +2132,12 @@ static void nop(int i915)
igt_info("[%d] %s:* %.3fus\n",
child, class_to_str(class), t);
- gem_context_destroy(i915, execbuf.rsvd1);
+ intel_ctx_destroy(i915, child_ctx);
}
igt_waitchildren();
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
free(ci);
}
@@ -2228,7 +2162,7 @@ static void sequential(int i915)
unsigned int count;
unsigned long nops;
double t;
- uint32_t *ctx;
+ const intel_ctx_t **ctx;
ci = list_engines(i915, 1u << class, &count);
if (!ci || count < 2)
@@ -2236,7 +2170,7 @@ static void sequential(int i915)
ctx = malloc(sizeof(*ctx) * count);
for (int n = 0; n < count; n++)
- ctx[n] = load_balancer_create(i915, ci, count);
+ ctx[n] = ctx_create_balanced(i915, ci, count);
gem_execbuf_wr(i915, &execbuf);
execbuf.rsvd2 >>= 32;
@@ -2247,7 +2181,7 @@ static void sequential(int i915)
igt_nsec_elapsed(&tv);
do {
for (int n = 0; n < count; n++) {
- execbuf.rsvd1 = ctx[n];
+ execbuf.rsvd1 = ctx[n]->id;
gem_execbuf_wr(i915, &execbuf);
close(execbuf.rsvd2);
execbuf.rsvd2 >>= 32;
@@ -2261,7 +2195,7 @@ static void sequential(int i915)
close(execbuf.rsvd2);
for (int n = 0; n < count; n++)
- gem_context_destroy(i915, ctx[n]);
+ intel_ctx_destroy(i915, ctx[n]);
free(ctx);
next:
free(ci);
@@ -2271,7 +2205,7 @@ next:
gem_quiescent_gpu(i915);
}
-static void ping(int i915, uint32_t ctx, unsigned int engine)
+static void ping(int i915, const intel_ctx_t *ctx, unsigned int engine)
{
struct drm_i915_gem_exec_object2 obj = {
.handle = batch_create(i915),
@@ -2280,7 +2214,7 @@ static void ping(int i915, uint32_t ctx, unsigned int engine)
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
.flags = engine,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
gem_execbuf(i915, &execbuf);
gem_sync(i915, obj.handle);
@@ -2304,7 +2238,7 @@ static void semaphore(int i915)
for (int class = 1; class < 32; class++) {
struct i915_engine_class_instance *ci;
unsigned int count;
- uint32_t block[2], vip;
+ const intel_ctx_t *block[2], *vip;
ci = list_engines(i915, 1u << class, &count);
if (!ci)
@@ -2317,10 +2251,9 @@ static void semaphore(int i915)
count = ARRAY_SIZE(block);
for (int i = 0; i < count; i++) {
- block[i] = gem_context_create(i915);
- set_load_balancer(i915, block[i], ci, count, NULL);
+ block[i] = ctx_create_balanced(i915, ci, count);
spin[i] = __igt_spin_new(i915,
- .ctx_id = block[i],
+ .ctx = block[i],
.dependency = scratch);
}
@@ -2328,14 +2261,13 @@ static void semaphore(int i915)
* Either we haven't blocked both engines with semaphores,
* or we let the vip through. If not, we hang.
*/
- vip = gem_context_create(i915);
- set_load_balancer(i915, vip, ci, count, NULL);
+ vip = ctx_create_balanced(i915, ci, count);
ping(i915, vip, 0);
- gem_context_destroy(i915, vip);
+ intel_ctx_destroy(i915, vip);
for (int i = 0; i < count; i++) {
igt_spin_free(i915, spin[i]);
- gem_context_destroy(i915, block[i]);
+ intel_ctx_destroy(i915, block[i]);
}
free(ci);
@@ -2374,7 +2306,7 @@ static void hangme(int i915)
igt_spin_t *spin[2];
} *client;
unsigned int count;
- uint32_t bg;
+ const intel_ctx_t *bg;
int fence;
ci = list_engines(i915, 1u << class, &count);
@@ -2391,12 +2323,12 @@ static void hangme(int i915)
fence = igt_cork_plug(&cork, i915);
for (int i = 0; i < count; i++) {
- uint32_t ctx = gem_context_create(i915);
+ const intel_ctx_t *ctx;
struct client *c = &client[i];
unsigned int flags;
- set_unbannable(i915, ctx);
- set_load_balancer(i915, ctx, ci, count, NULL);
+ ctx = ctx_create_balanced(i915, ci, count);
+ set_unbannable(i915, ctx->id);
flags = IGT_SPIN_FENCE_IN |
IGT_SPIN_FENCE_OUT |
@@ -2404,31 +2336,30 @@ static void hangme(int i915)
if (!gem_has_cmdparser(i915, ALL_ENGINES))
flags |= IGT_SPIN_INVALID_CS;
for (int j = 0; j < ARRAY_SIZE(c->spin); j++) {
- c->spin[j] = __igt_spin_new(i915, ctx,
+ c->spin[j] = __igt_spin_new(i915, .ctx = ctx,
.fence = fence,
.flags = flags);
flags = IGT_SPIN_FENCE_OUT;
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
}
close(fence);
igt_cork_unplug(&cork); /* queue all hangs en masse */
/* Apply some background context to speed up hang detection */
- bg = gem_context_create(i915);
- set_engines(i915, bg, ci, count);
- gem_context_set_priority(i915, bg, 1023);
+ bg = ctx_create_engines(i915, ci, count);
+ gem_context_set_priority(i915, bg->id, 1023);
for (int i = 0; i < count; i++) {
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
.flags = i,
- .rsvd1 = bg,
+ .rsvd1 = bg->id,
};
gem_execbuf(i915, &execbuf);
}
- gem_context_destroy(i915, bg);
+ intel_ctx_destroy(i915, bg);
for (int i = 0; i < count; i++) {
struct client *c = &client[i];
@@ -2464,8 +2395,8 @@ static void smoketest(int i915, int timeout)
struct drm_i915_gem_exec_object2 batch[2] = {
{ .handle = __batch_create(i915, 16380) }
};
- unsigned int ncontext = 0;
- uint32_t *contexts = NULL;
+ unsigned int nctx = 0;
+ const intel_ctx_t **ctx = NULL;
uint32_t *handles = NULL;
igt_require_sw_sync();
@@ -2480,35 +2411,35 @@ static void smoketest(int i915, int timeout)
continue;
}
- ncontext += 128;
- contexts = realloc(contexts, sizeof(*contexts) * ncontext);
- igt_assert(contexts);
+ nctx += 128;
+ ctx = realloc(ctx, sizeof(*ctx) * nctx);
+ igt_assert(ctx);
- for (unsigned int n = ncontext - 128; n < ncontext; n++) {
- contexts[n] = load_balancer_create(i915, ci, count);
- igt_assert(contexts[n]);
+ for (unsigned int n = nctx - 128; n < nctx; n++) {
+ ctx[n] = ctx_create_balanced(i915, ci, count);
+ igt_assert(ctx[n]);
}
free(ci);
}
- if (!ncontext) /* suppress the fluctuating status of shard-icl */
+ if (!nctx) /* suppress the fluctuating status of shard-icl */
return;
- igt_debug("Created %d virtual engines (one per context)\n", ncontext);
- contexts = realloc(contexts, sizeof(*contexts) * ncontext * 4);
- igt_assert(contexts);
- memcpy(contexts + ncontext, contexts, ncontext * sizeof(*contexts));
- ncontext *= 2;
- memcpy(contexts + ncontext, contexts, ncontext * sizeof(*contexts));
- ncontext *= 2;
+ igt_debug("Created %d virtual engines (one per context)\n", nctx);
+ ctx = realloc(ctx, sizeof(*ctx) * nctx * 4);
+ igt_assert(ctx);
+ memcpy(ctx + nctx, ctx, nctx * sizeof(*ctx));
+ nctx *= 2;
+ memcpy(ctx + nctx, ctx, nctx * sizeof(*ctx));
+ nctx *= 2;
- handles = malloc(sizeof(*handles) * ncontext);
+ handles = malloc(sizeof(*handles) * nctx);
igt_assert(handles);
- for (unsigned int n = 0; n < ncontext; n++)
+ for (unsigned int n = 0; n < nctx; n++)
handles[n] = gem_create(i915, 4096);
igt_until_timeout(timeout) {
- unsigned int count = 1 + (rand() % (ncontext - 1));
+ unsigned int count = 1 + (rand() % (nctx - 1));
IGT_CORK_FENCE(cork);
int fence = igt_cork_plug(&cork, i915);
@@ -2516,7 +2447,7 @@ static void smoketest(int i915, int timeout)
struct drm_i915_gem_execbuffer2 eb = {
.buffers_ptr = to_user_pointer(batch),
.buffer_count = ARRAY_SIZE(batch),
- .rsvd1 = contexts[n],
+ .rsvd1 = ctx[n]->id,
.rsvd2 = fence,
.flags = I915_EXEC_BATCH_FIRST | I915_EXEC_FENCE_IN,
};
@@ -2532,16 +2463,16 @@ static void smoketest(int i915, int timeout)
close(fence);
}
- for (unsigned int n = 0; n < ncontext; n++) {
+ for (unsigned int n = 0; n < nctx / 4; n++) {
gem_close(i915, handles[n]);
- __gem_context_destroy(i915, contexts[n]);
+ intel_ctx_destroy(i915, ctx[n]);
}
free(handles);
- free(contexts);
+ free(ctx);
gem_close(i915, batch[0].handle);
}
-static uint32_t read_ctx_timestamp(int i915, uint32_t ctx)
+static uint32_t read_ctx_timestamp(int i915, const intel_ctx_t *ctx)
{
struct drm_i915_gem_relocation_entry reloc;
struct drm_i915_gem_exec_object2 obj = {
@@ -2553,7 +2484,7 @@ static uint32_t read_ctx_timestamp(int i915, uint32_t ctx)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .rsvd1 = ctx,
+ .rsvd1 = ctx->id,
};
uint32_t *map, *cs;
uint32_t ts;
@@ -2625,7 +2556,7 @@ static void __fairslice(int i915,
{
const double timeslice_duration_ns = 1e6;
igt_spin_t *spin = NULL;
- uint32_t ctx[count + 1];
+ const intel_ctx_t *ctx[count + 1];
uint32_t ts[count + 1];
double threshold;
@@ -2634,14 +2565,14 @@ static void __fairslice(int i915,
igt_assert(ARRAY_SIZE(ctx) >= 3);
for (int i = 0; i < ARRAY_SIZE(ctx); i++) {
- ctx[i] = load_balancer_create(i915, ci, count);
+ ctx[i] = ctx_create_balanced(i915, ci, count);
if (spin == NULL) {
- spin = __igt_spin_new(i915, .ctx_id = ctx[i]);
+ spin = __igt_spin_new(i915, .ctx = ctx[i]);
} else {
struct drm_i915_gem_execbuffer2 eb = {
.buffer_count = 1,
.buffers_ptr = to_user_pointer(&spin->obj[IGT_SPIN_BATCH]),
- .rsvd1 = ctx[i],
+ .rsvd1 = ctx[i]->id,
};
gem_execbuf(i915, &eb);
}
@@ -2657,7 +2588,7 @@ static void __fairslice(int i915,
ts[i] = read_ctx_timestamp(i915, ctx[i]);
for (int i = 0; i < ARRAY_SIZE(ctx); i++)
- gem_context_destroy(i915, ctx[i]);
+ intel_ctx_destroy(i915, ctx[i]);
igt_spin_free(i915, spin);
/*
@@ -2722,21 +2653,21 @@ static void __persistence(int i915,
bool persistent)
{
igt_spin_t *spin;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/*
* A nonpersistent context is terminated immediately upon closure,
* any inflight request is cancelled.
*/
- ctx = load_balancer_create(i915, ci, count);
+ ctx = ctx_create_balanced(i915, ci, count);
if (!persistent)
- gem_context_set_persistence(i915, ctx, persistent);
+ gem_context_set_persistence(i915, ctx->id, persistent);
- spin = igt_spin_new(i915, ctx,
+ spin = igt_spin_new(i915, .ctx = ctx,
.flags = IGT_SPIN_FENCE_OUT | IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin);
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_assert_eq(wait_for_status(spin->out_fence, 500), -EIO);
igt_spin_free(i915, spin);
@@ -2866,13 +2797,15 @@ static bool has_context_engines(int i915)
static bool has_load_balancer(int i915)
{
- struct i915_engine_class_instance ci = {};
- uint32_t ctx;
+ const intel_ctx_cfg_t cfg = {
+ .load_balance = true,
+ .num_engines = 1,
+ };
+ const intel_ctx_t *ctx = NULL;
int err;
- ctx = gem_context_create(i915);
- err = __set_load_balancer(i915, ctx, &ci, 1, NULL);
- gem_context_destroy(i915, ctx);
+ err = __intel_ctx_create(i915, &cfg, &ctx);
+ intel_ctx_destroy(i915, ctx);
return err == 0;
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 54/81] tests/i915/gem_exec_endless: Stop munging ctx0 engines
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (52 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 53/81] tests/i915/gem_exec_balancer: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 55/81] lib/i915/submission: Rework gem_test_all_engines to use intel_ctx_t (v2) Jason Ekstrand
` (29 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_exec_endless.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/i915/gem_exec_endless.c b/tests/i915/gem_exec_endless.c
index c3c806543..b83d5a2c1 100644
--- a/tests/i915/gem_exec_endless.c
+++ b/tests/i915/gem_exec_endless.c
@@ -309,7 +309,7 @@ static void endless_dispatch(int i915, const struct intel_execution_engine2 *e)
}
#define test_each_engine(T, i915, e) \
- igt_subtest_with_dynamic(T) __for_each_physical_engine(i915, e) \
+ igt_subtest_with_dynamic(T) for_each_physical_engine(i915, e) \
for_each_if(gem_class_can_store_dword(i915, (e)->class)) \
igt_dynamic_f("%s", (e)->name)
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 55/81] lib/i915/submission: Rework gem_test_all_engines to use intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (53 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 54/81] tests/i915/gem_exec_endless: Stop munging ctx0 engines Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 56/81] lib/i915: Require a context config in gem_submission_measure Jason Ekstrand
` (28 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
This function used to be called gem_test_engine but it's only ever used
to test all the engines in the GPU so we rename it for the one thing
it's used for. We also convert to intel_ctx_t. It now creates a new
context with all physical engines and iterates over all the engines in
it.
v2 (Ashutosh Dixit):
- Delete the now non-existent @engine param from the docs
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
lib/i915/gem_submission.c | 20 ++++++++------------
lib/i915/gem_submission.h | 2 +-
tests/i915/gem_eio.c | 2 +-
tests/i915/i915_pm_rpm.c | 2 +-
4 files changed, 11 insertions(+), 15 deletions(-)
diff --git a/lib/i915/gem_submission.c b/lib/i915/gem_submission.c
index 7c305d6d6..9c6cc8146 100644
--- a/lib/i915/gem_submission.c
+++ b/lib/i915/gem_submission.c
@@ -177,20 +177,22 @@ static bool is_wedged(int i915)
}
/**
- * gem_test_engine:
+ * gem_test_all_engines:
* @i915: open i915 drm file descriptor
- * @engine: the engine (I915_EXEC_RING id) to exercise
*
* Execute a nop batch on the engine specified, or ALL_ENGINES for all,
* and check it executes.
*/
-void gem_test_engine(int i915, unsigned int engine)
+void gem_test_all_engines(int i915)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
+ const intel_ctx_t *ctx = intel_ctx_create_all_physical(i915);
+ const struct intel_execution_engine2 *e2;
struct drm_i915_gem_exec_object2 obj = { };
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
+ .rsvd1 = ctx->id,
};
i915 = gem_reopen_driver(i915);
@@ -199,21 +201,15 @@ void gem_test_engine(int i915, unsigned int engine)
obj.handle = gem_create(i915, 4096);
gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
- if (engine == ALL_ENGINES) {
- const struct intel_execution_engine2 *e2;
-
- __for_each_physical_engine(i915, e2) {
- execbuf.flags = e2->flags;
- gem_execbuf(i915, &execbuf);
- }
- } else {
- execbuf.flags = engine;
+ for_each_ctx_engine(i915, ctx, e2) {
+ execbuf.flags = e2->flags;
gem_execbuf(i915, &execbuf);
}
gem_sync(i915, obj.handle);
gem_close(i915, obj.handle);
igt_assert(!is_wedged(i915));
+ intel_ctx_destroy(i915, ctx);
close(i915);
}
diff --git a/lib/i915/gem_submission.h b/lib/i915/gem_submission.h
index a5497a5e2..44e6e3118 100644
--- a/lib/i915/gem_submission.h
+++ b/lib/i915/gem_submission.h
@@ -51,7 +51,7 @@ void gem_require_blitter(int i915);
unsigned int gem_submission_measure(int i915, const intel_ctx_cfg_t *cfg,
unsigned int engine);
-void gem_test_engine(int fd, unsigned int engine);
+void gem_test_all_engines(int fd);
bool gem_has_relocations(int fd);
#endif /* GEM_SUBMISSION_H */
diff --git a/tests/i915/gem_eio.c b/tests/i915/gem_eio.c
index 5cb242a32..50d250f38 100644
--- a/tests/i915/gem_eio.c
+++ b/tests/i915/gem_eio.c
@@ -77,7 +77,7 @@ static void trigger_reset(int fd)
/* And just check the gpu is indeed running again */
igt_kmsg(KMSG_DEBUG "Checking that the GPU recovered\n");
- gem_test_engine(fd, ALL_ENGINES);
+ gem_test_all_engines(fd);
igt_debugfs_dump(fd, "i915_engine_info");
igt_drop_caches_set(fd, DROP_ACTIVE);
diff --git a/tests/i915/i915_pm_rpm.c b/tests/i915/i915_pm_rpm.c
index da498ad68..fb7a70aa0 100644
--- a/tests/i915/i915_pm_rpm.c
+++ b/tests/i915/i915_pm_rpm.c
@@ -1408,7 +1408,7 @@ static void gem_idle_subtest(void)
sleep(5);
- gem_test_engine(drm_fd, -1);
+ gem_test_all_engines(drm_fd);
}
static void gem_evict_pwrite_subtest(void)
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 56/81] lib/i915: Require a context config in gem_submission_measure
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (54 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 55/81] lib/i915/submission: Rework gem_test_all_engines to use intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 57/81] tests/i915/gem_ctx_engines: Rework execute-one* Jason Ekstrand
` (27 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Now all tests are converted to provide a context config to
gem_submission_measure so we can require it. This gets rid of its use
of __for_each_physical_engine().
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
lib/i915/gem_submission.c | 38 ++++++++++++--------------------------
1 file changed, 12 insertions(+), 26 deletions(-)
diff --git a/lib/i915/gem_submission.c b/lib/i915/gem_submission.c
index 9c6cc8146..69cf1f16a 100644
--- a/lib/i915/gem_submission.c
+++ b/lib/i915/gem_submission.c
@@ -372,7 +372,7 @@ __measure_ringsize(int i915, uint32_t ctx_id, unsigned int engine)
unsigned int gem_submission_measure(int i915, const intel_ctx_cfg_t *cfg,
unsigned int engine)
{
- const intel_ctx_t *ctx = NULL;
+ const intel_ctx_t *ctx;
unsigned int size;
bool nonblock;
@@ -380,40 +380,26 @@ unsigned int gem_submission_measure(int i915, const intel_ctx_cfg_t *cfg,
if (!nonblock)
fcntl(i915, F_SETFL, fcntl(i915, F_GETFL) | O_NONBLOCK);
- if (cfg) {
- if (gem_has_contexts(i915))
- ctx = intel_ctx_create(i915, cfg);
- else
- ctx = intel_ctx_0(i915);
- }
+ igt_assert(cfg);
+ if (gem_has_contexts(i915))
+ ctx = intel_ctx_create(i915, cfg);
+ else
+ ctx = intel_ctx_0(i915);
if (engine == ALL_ENGINES) {
struct intel_execution_engine2 *e;
size = -1;
- if (ctx) {
- for_each_ctx_engine(i915, ctx, e) {
- unsigned int this = __measure_ringsize(i915, ctx->id, e->flags);
- if (this < size)
- size = this;
- }
- } else {
- __for_each_physical_engine(i915, e) {
- unsigned int this = __measure_ringsize(i915, 0, e->flags);
- if (this < size)
- size = this;
- }
+ for_each_ctx_engine(i915, ctx, e) {
+ unsigned int this = __measure_ringsize(i915, ctx->id, e->flags);
+ if (this < size)
+ size = this;
}
} else {
- if (ctx)
- size = __measure_ringsize(i915, ctx->id, engine);
- else
- size = __measure_ringsize(i915, 0, engine);
+ size = __measure_ringsize(i915, ctx->id, engine);
}
- if (ctx)
- intel_ctx_destroy(i915, ctx);
-
+ intel_ctx_destroy(i915, ctx);
if (!nonblock)
fcntl(i915, F_SETFL, fcntl(i915, F_GETFL) & ~O_NONBLOCK);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 57/81] tests/i915/gem_ctx_engines: Rework execute-one*
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (55 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 56/81] lib/i915: Require a context config in gem_submission_measure Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 58/81] tests/i915/gem_ctx_engines: Use better engine iteration Jason Ekstrand
` (26 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Instead of switching out the set of engines constantly, create a new
context for every set of engines. This means, among other things, that
there's no point in testing whether or not the context is in a "default"
state because there's no more resetting it.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_engines.c | 79 +++++++++++++-----------------------
1 file changed, 28 insertions(+), 51 deletions(-)
diff --git a/tests/i915/gem_ctx_engines.c b/tests/i915/gem_ctx_engines.c
index f03e31532..ac03206ce 100644
--- a/tests/i915/gem_ctx_engines.c
+++ b/tests/i915/gem_ctx_engines.c
@@ -317,41 +317,19 @@ static void none(int i915)
static void execute_one(int i915)
{
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
- struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_create(i915),
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&engines),
- /* .size to be filled in later */
- };
struct drm_i915_gem_exec_object2 obj = {
.handle = gem_create(i915, 4096),
};
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
- .rsvd1 = param.ctx_id,
};
const uint32_t bbe = MI_BATCH_BUFFER_END;
const struct intel_execution_engine2 *e;
- igt_spin_t *spin;
-
- /* Prewarm the spinner */
- spin = igt_spin_new(i915, .ctx_id = param.ctx_id,
- .flags = (IGT_SPIN_NO_PREEMPTION |
- IGT_SPIN_POLL_RUN));
gem_write(i915, obj.handle, 0, &bbe, sizeof(bbe));
- /* Unadulterated I915_EXEC_DEFAULT should work */
- execbuf.flags = 0;
- gem_execbuf(i915, &execbuf);
- obj.flags |= EXEC_OBJECT_PINNED;
-
- igt_spin_end(spin);
- gem_sync(i915, obj.handle);
-
- __for_each_physical_engine(i915, e) {
+ for_each_physical_engine(i915, e) {
struct drm_i915_gem_busy busy = { .handle = obj.handle };
if (!gem_class_can_store_dword(i915, e->class))
@@ -360,32 +338,38 @@ static void execute_one(int i915)
igt_debug("Testing [%s...]\n", e->name);
for (int i = -1; i <= I915_EXEC_RING_MASK; i++) {
- memset(&engines, 0, sizeof(engines));
- engine_class(&engines, 0) = e->class;
- engine_instance(&engines, 0) = e->instance;
- param.size = offsetof(typeof(engines), engines[1]);
- gem_context_set_param(i915, &param);
-
- gem_sync(i915, spin->handle);
- igt_spin_reset(spin);
- gem_execbuf(i915, &spin->execbuf);
+ intel_ctx_cfg_t cfg = {};
+ const intel_ctx_t *ctx;
+ igt_spin_t *spin;
+
+ cfg.num_engines = 1;
+ cfg.engines[0].engine_class = e->class;
+ cfg.engines[0].engine_instance = e->instance;
+ ctx = intel_ctx_create(i915, &cfg);
+
+ spin = igt_spin_new(i915, .ctx = ctx,
+ .flags = (IGT_SPIN_NO_PREEMPTION |
+ IGT_SPIN_POLL_RUN));
do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
igt_assert_eq(busy.busy, 0);
+ intel_ctx_destroy(i915, ctx);
+ /* Create a new context with a lot of engines */
igt_debug("Testing with map of %d engines\n", i + 1);
- memset(&engines.engines, -1, sizeof(engines.engines));
+ memset(cfg.engines, -1, sizeof(cfg.engines));
if (i != -1) {
- engine_class(&engines, i) = e->class;
- engine_instance(&engines, i) = e->instance;
+ cfg.engines[i].engine_class = e->class;
+ cfg.engines[i].engine_instance = e->instance;
}
- param.size = sizeof(uint64_t) + (i + 1) * sizeof(uint32_t);
- gem_context_set_param(i915, &param);
+ cfg.num_engines = GEM_MAX_ENGINES;
+ ctx = intel_ctx_create(i915, &cfg);
igt_spin_busywait_until_started(spin);
for (int j = 0; j <= I915_EXEC_RING_MASK; j++) {
int expected = j == i ? 0 : -EINVAL;
+ execbuf.rsvd1 = ctx->id;
execbuf.flags = j;
igt_assert_f(__gem_execbuf(i915, &execbuf) == expected,
"Failed to report the %s engine for slot %d (valid at %d)\n",
@@ -396,38 +380,31 @@ static void execute_one(int i915)
igt_assert_eq(batch_busy(busy.busy),
i != -1 ? 1 << e->class : 0);
- igt_spin_end(spin);
+ igt_spin_free(i915, spin);
gem_sync(i915, obj.handle);
+ intel_ctx_destroy(i915, ctx);
do_ioctl(i915, DRM_IOCTL_I915_GEM_BUSY, &busy);
igt_assert_eq(busy.busy, 0);
}
}
- /* Restore the defaults and check I915_EXEC_DEFAULT works again. */
- param.size = 0;
- gem_context_set_param(i915, &param);
- execbuf.flags = 0;
- gem_execbuf(i915, &execbuf);
-
- igt_spin_free(i915, spin);
-
gem_close(i915, obj.handle);
- gem_context_destroy(i915, param.ctx_id);
}
static void execute_oneforall(int i915)
{
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_create(i915),
.param = I915_CONTEXT_PARAM_ENGINES,
.value = to_user_pointer(&engines),
.size = sizeof(engines),
};
const struct intel_execution_engine2 *e;
- __for_each_physical_engine(i915, e) {
+ for_each_physical_engine(i915, e) {
+ param.ctx_id = gem_context_create(i915);
+
memset(&engines, 0, sizeof(engines));
for (int i = 0; i <= I915_EXEC_RING_MASK; i++) {
engine_class(&engines, i) = e->class;
@@ -449,9 +426,9 @@ static void execute_oneforall(int i915)
igt_spin_free(i915, spin);
}
- }
- gem_context_destroy(i915, param.ctx_id);
+ gem_context_destroy(i915, param.ctx_id);
+ }
}
static void execute_allforone(int i915)
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 58/81] tests/i915/gem_ctx_engines: Use better engine iteration
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (56 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 57/81] tests/i915/gem_ctx_engines: Rework execute-one* Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 59/81] tests/i915/gem_ctx_engines: Drop the idempotent subtest Jason Ekstrand
` (25 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_engines.c | 32 +++++++++++++++++++-------------
1 file changed, 19 insertions(+), 13 deletions(-)
diff --git a/tests/i915/gem_ctx_engines.c b/tests/i915/gem_ctx_engines.c
index ac03206ce..531495c39 100644
--- a/tests/i915/gem_ctx_engines.c
+++ b/tests/i915/gem_ctx_engines.c
@@ -444,7 +444,7 @@ static void execute_allforone(int i915)
i = 0;
memset(&engines, 0, sizeof(engines));
- __for_each_physical_engine(i915, e) {
+ for_each_physical_engine(i915, e) {
engine_class(&engines, i) = e->class;
engine_instance(&engines, i) = e->instance;
i++;
@@ -453,7 +453,7 @@ static void execute_allforone(int i915)
gem_context_set_param(i915, &param);
i = 0;
- __for_each_physical_engine(i915, e) {
+ for_each_physical_engine(i915, e) {
struct drm_i915_gem_busy busy = {};
igt_spin_t *spin;
@@ -490,7 +490,8 @@ static bool has_cs_timestamp(const struct intel_execution_engine2 *e, int gen)
return e->class == I915_ENGINE_CLASS_RENDER;
}
-static void independent(int i915, const struct intel_execution_engine2 *e)
+static void independent(int i915, const intel_ctx_t *base_ctx,
+ const struct intel_execution_engine2 *e)
{
#define RCS_TIMESTAMP (mmio_base + 0x358)
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
@@ -498,7 +499,6 @@ static void independent(int i915, const struct intel_execution_engine2 *e)
const int has_64bit_reloc = gen >= 8;
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_clone_with_engines(i915, 0),
.param = I915_CONTEXT_PARAM_ENGINES,
.value = to_user_pointer(&engines),
.size = sizeof(engines),
@@ -518,7 +518,7 @@ static void independent(int i915, const struct intel_execution_engine2 *e)
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&results),
.buffer_count = 1,
- .rsvd1 = param.ctx_id,
+ .rsvd1 = base_ctx->id,
.flags = e->flags,
};
gem_write(i915, results.handle, 0, &bbe, sizeof(bbe));
@@ -531,6 +531,7 @@ static void independent(int i915, const struct intel_execution_engine2 *e)
engine_class(&engines, i) = e->class;
engine_instance(&engines, i) = e->instance;
}
+ param.ctx_id = gem_context_create(i915);
gem_context_set_param(i915, &param);
gem_set_caching(i915, results.handle, I915_CACHING_CACHED);
@@ -590,19 +591,20 @@ static void independent(int i915, const struct intel_execution_engine2 *e)
gem_context_destroy(i915, param.ctx_id);
}
-static void independent_all(int i915)
+static void independent_all(int i915, const intel_ctx_t *ctx)
{
const unsigned int gen = intel_gen(intel_get_drm_devid(i915));
const struct intel_execution_engine2 *e;
igt_spin_t *spin = NULL;
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (spin) {
spin->execbuf.flags &= ~63;
spin->execbuf.flags |= e->flags;
gem_execbuf(i915, &spin->execbuf);
} else {
- spin = igt_spin_new(i915, .engine = e->flags,
+ spin = igt_spin_new(i915, .ctx = ctx,
+ .engine = e->flags,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN));
}
@@ -610,7 +612,7 @@ static void independent_all(int i915)
igt_require(spin);
igt_spin_busywait_until_started(spin);
- __for_each_physical_engine(i915, e) {
+ for_each_ctx_engine(i915, ctx, e) {
if (!gem_engine_mmio_base(i915, e->name))
continue;
@@ -618,7 +620,7 @@ static void independent_all(int i915)
continue;
igt_fork(child, 1)
- independent(i915, e);
+ independent(i915, ctx, e);
}
sched_yield();
igt_spin_free(i915, spin);
@@ -727,14 +729,18 @@ igt_main
execute_allforone(i915);
igt_subtest_with_dynamic("independent") {
+ const intel_ctx_t *ctx;
+
igt_require(gem_scheduler_enabled(i915));
igt_require(intel_gen(intel_get_drm_devid(i915) >= 6));
- __for_each_physical_engine(i915, e) {
+
+ ctx = intel_ctx_create_all_physical(i915);
+ for_each_ctx_engine(i915, ctx, e) {
igt_dynamic_f("%s", e->name)
- independent(i915, e);
+ independent(i915, ctx, e);
}
igt_dynamic("all")
- independent_all(i915);
+ independent_all(i915, ctx);
}
igt_subtest("libapi")
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 59/81] tests/i915/gem_ctx_engines: Drop the idempotent subtest
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (57 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 58/81] tests/i915/gem_ctx_engines: Use better engine iteration Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 60/81] tests/i915/gem_ctx_create: Don't re-open the device in maximum() Jason Ekstrand
` (24 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
This just tests the engines CONTEXT_GETPARAM which we're going to be
dropping from i915.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_engines.c | 79 ------------------------------------
1 file changed, 79 deletions(-)
diff --git a/tests/i915/gem_ctx_engines.c b/tests/i915/gem_ctx_engines.c
index 531495c39..ffed96592 100644
--- a/tests/i915/gem_ctx_engines.c
+++ b/tests/i915/gem_ctx_engines.c
@@ -174,82 +174,6 @@ out:
gem_context_destroy(i915, param.ctx_id);
}
-static void idempotent(int i915)
-{
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
- I915_DEFINE_CONTEXT_PARAM_ENGINES(expected, I915_EXEC_RING_MASK + 1);
- struct drm_i915_gem_context_param p = {
- .ctx_id = gem_context_create(i915),
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&engines),
- .size = sizeof(engines),
- };
- const size_t base = sizeof(struct i915_context_param_engines);
- const struct intel_execution_engine2 *e;
- int idx;
-
- /* What goes in, must come out. And what comes out, must go in */
-
- gem_context_get_param(i915, &p);
- igt_assert_eq(p.size, 0); /* atm default is to use legacy ring mask */
-
- idx = 0;
- memset(&engines, 0, sizeof(engines));
- __for_each_physical_engine(i915, e) {
- engines.engines[idx].engine_class = e->class;
- engines.engines[idx].engine_instance = e->instance;
- idx++;
- }
- idx *= sizeof(*engines.engines);
- p.size = base + idx;
- gem_context_set_param(i915, &p);
-
- memcpy(&expected, &engines, sizeof(expected));
-
- gem_context_get_param(i915, &p);
- igt_assert_eq(p.size, base + idx);
- igt_assert(!memcmp(&expected, &engines, idx));
-
- p.size = base;
- gem_context_set_param(i915, &p);
- gem_context_get_param(i915, &p);
- igt_assert_eq(p.size, base);
-
- /* and it should not have overwritten the previous contents */
- igt_assert(!memcmp(&expected, &engines, idx));
-
- memset(&engines, 0, sizeof(engines));
- engines.engines[0].engine_class = I915_ENGINE_CLASS_INVALID;
- engines.engines[0].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
- idx = sizeof(*engines.engines);
- p.size = base + idx;
- gem_context_set_param(i915, &p);
-
- memcpy(&expected, &engines, sizeof(expected));
-
- gem_context_get_param(i915, &p);
- igt_assert_eq(p.size, base + idx);
- igt_assert(!memcmp(&expected, &engines, idx));
-
- p.size = sizeof(engines);
- __for_each_physical_engine(i915, e) {
- memset(&engines, 0, sizeof(engines));
- for (int n = 0; n < I915_EXEC_RING_MASK + 1; n++) {
- engine_class(&engines, n) = e->class;
- engine_instance(&engines, n) = e->instance;
- }
- gem_context_set_param(i915, &p);
-
- memcpy(&expected, &engines, sizeof(expected));
-
- gem_context_get_param(i915, &p);
- igt_assert_eq(p.size, sizeof(engines));
- igt_assert(!memcmp(&expected, &engines, p.size));
- }
-
- gem_context_destroy(i915, p.ctx_id);
-}
-
static uint32_t batch_create(int i915)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -713,9 +637,6 @@ igt_main
igt_subtest("invalid-engines")
invalid_engines(i915);
- igt_subtest("idempotent")
- idempotent(i915);
-
igt_subtest("none")
none(i915);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 60/81] tests/i915/gem_ctx_create: Don't re-open the device in maximum()
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (58 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 59/81] tests/i915/gem_ctx_engines: Drop the idempotent subtest Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-08 6:35 ` Zbigniew Kempczyński
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 61/81] tests/i915/gem_ctx_create: Convert benchmarks to intel_ctx_t Jason Ekstrand
` (23 subsequent siblings)
83 siblings, 1 reply; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
In 00a64098aaae ("tests/i915/gem_ctx_create: Use
__for_each_physical_engine to utilize all available engines"), the
maximum tests were modified to re-open the device. However, this is
both unnecessary and broken because the execution loop re-uses the
contexts created above it which do not exist on the newly re-opened DRM
file.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Fixes: 00a64098aaae ("tests/i915/gem_ctx_create: Use __for_each_physical_engine to utilize all available engines")
Cc: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/gem_ctx_create.c | 17 ++++-------------
1 file changed, 4 insertions(+), 13 deletions(-)
diff --git a/tests/i915/gem_ctx_create.c b/tests/i915/gem_ctx_create.c
index 5b14f7afd..37061ebc5 100644
--- a/tests/i915/gem_ctx_create.c
+++ b/tests/i915/gem_ctx_create.c
@@ -323,18 +323,9 @@ static void maximum(int fd, int ncpus, unsigned mode)
igt_fork(child, ncpus) {
struct timespec start, end;
- int i915;
-
- i915 = gem_reopen_driver(fd);
- /*
- * Ensure the gpu is idle by launching
- * a nop execbuf and stalling for it.
- */
- gem_quiescent_gpu(i915);
- gem_context_copy_engines(fd, 0, i915, 0);
hars_petruska_f54_1_random_perturb(child);
- obj[0].handle = gem_create(i915, 4096);
+ obj[0].handle = gem_create(fd, 4096);
clock_gettime(CLOCK_MONOTONIC, &start);
for (int repeat = 0; repeat < 3; repeat++) {
@@ -345,13 +336,13 @@ static void maximum(int fd, int ncpus, unsigned mode)
execbuf.rsvd1 = contexts[i];
for (unsigned long j = 0; j < all_nengine; j++) {
execbuf.flags = all_engines[j];
- gem_execbuf(i915, &execbuf);
+ gem_execbuf(fd, &execbuf);
}
}
}
- gem_sync(i915, obj[0].handle);
+ gem_sync(fd, obj[0].handle);
clock_gettime(CLOCK_MONOTONIC, &end);
- gem_close(i915, obj[0].handle);
+ gem_close(fd, obj[0].handle);
igt_info("[%d] Context execution: %.3f us\n", child,
elapsed(&start, &end) / (3 * count * all_nengine) * 1e6);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* Re: [igt-dev] [PATCH i-g-t 60/81] tests/i915/gem_ctx_create: Don't re-open the device in maximum()
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 60/81] tests/i915/gem_ctx_create: Don't re-open the device in maximum() Jason Ekstrand
@ 2021-07-08 6:35 ` Zbigniew Kempczyński
0 siblings, 0 replies; 87+ messages in thread
From: Zbigniew Kempczyński @ 2021-07-08 6:35 UTC (permalink / raw)
To: Jason Ekstrand; +Cc: igt-dev, Sarvela, Tomi P
On Wed, Jul 07, 2021 at 09:46:22AM -0500, Jason Ekstrand wrote:
> In 00a64098aaae ("tests/i915/gem_ctx_create: Use
> __for_each_physical_engine to utilize all available engines"), the
> maximum tests were modified to re-open the device. However, this is
> both unnecessary and broken because the execution loop re-uses the
> contexts created above it which do not exist on the newly re-opened DRM
> file.
>
> Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
> Fixes: 00a64098aaae ("tests/i915/gem_ctx_create: Use __for_each_physical_engine to utilize all available engines")
> Cc: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
> ---
> tests/i915/gem_ctx_create.c | 17 ++++-------------
> 1 file changed, 4 insertions(+), 13 deletions(-)
>
> diff --git a/tests/i915/gem_ctx_create.c b/tests/i915/gem_ctx_create.c
> index 5b14f7afd..37061ebc5 100644
> --- a/tests/i915/gem_ctx_create.c
> +++ b/tests/i915/gem_ctx_create.c
> @@ -323,18 +323,9 @@ static void maximum(int fd, int ncpus, unsigned mode)
>
> igt_fork(child, ncpus) {
> struct timespec start, end;
> - int i915;
> -
> - i915 = gem_reopen_driver(fd);
> - /*
> - * Ensure the gpu is idle by launching
> - * a nop execbuf and stalling for it.
> - */
> - gem_quiescent_gpu(i915);
> - gem_context_copy_engines(fd, 0, i915, 0);
>
> hars_petruska_f54_1_random_perturb(child);
> - obj[0].handle = gem_create(i915, 4096);
> + obj[0].handle = gem_create(fd, 4096);
>
> clock_gettime(CLOCK_MONOTONIC, &start);
> for (int repeat = 0; repeat < 3; repeat++) {
> @@ -345,13 +336,13 @@ static void maximum(int fd, int ncpus, unsigned mode)
> execbuf.rsvd1 = contexts[i];
> for (unsigned long j = 0; j < all_nengine; j++) {
> execbuf.flags = all_engines[j];
> - gem_execbuf(i915, &execbuf);
> + gem_execbuf(fd, &execbuf);
> }
> }
> }
> - gem_sync(i915, obj[0].handle);
> + gem_sync(fd, obj[0].handle);
> clock_gettime(CLOCK_MONOTONIC, &end);
> - gem_close(i915, obj[0].handle);
> + gem_close(fd, obj[0].handle);
>
> igt_info("[%d] Context execution: %.3f us\n", child,
> elapsed(&start, &end) / (3 * count * all_nengine) * 1e6);
> --
> 2.31.1
>
+Tomi
Looks ok. The only thing worrying me is that your fix will start the test running
on pre-merge, so it would increase CI time (especially on machines with a lot
of mem (+swap)). But let's see if this will be really noticeable.
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
--
Zbigniew
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 61/81] tests/i915/gem_ctx_create: Convert benchmarks to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (59 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 60/81] tests/i915/gem_ctx_create: Don't re-open the device in maximum() Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-08 7:08 ` Zbigniew Kempczyński
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 62/81] tests/i915/gem_vm_create: Delete destroy racing tests Jason Ekstrand
` (22 subsequent siblings)
83 siblings, 1 reply; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
---
tests/i915/gem_ctx_create.c | 93 +++++++++++++++++++++----------------
1 file changed, 52 insertions(+), 41 deletions(-)
diff --git a/tests/i915/gem_ctx_create.c b/tests/i915/gem_ctx_create.c
index 37061ebc5..448466523 100644
--- a/tests/i915/gem_ctx_create.c
+++ b/tests/i915/gem_ctx_create.c
@@ -79,7 +79,8 @@ static double elapsed(const struct timespec *start,
return (end->tv_sec - start->tv_sec) + 1e-9*(end->tv_nsec - start->tv_nsec);
}
-static void files(int core, int timeout, const int ncpus)
+static void files(int core, const intel_ctx_cfg_t *cfg,
+ int timeout, const int ncpus)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
@@ -98,18 +99,22 @@ static void files(int core, int timeout, const int ncpus)
igt_fork(child, ncpus) {
struct timespec start, end;
unsigned count = 0;
+ const intel_ctx_t *ctx;
int fd;
clock_gettime(CLOCK_MONOTONIC, &start);
do {
fd = gem_reopen_driver(core);
- gem_context_copy_engines(core, 0, fd, 0);
+
+ ctx = intel_ctx_create(fd, cfg);
+ execbuf.rsvd1 = ctx->id;
obj.handle = gem_open(fd, name);
execbuf.flags &= ~ENGINE_FLAGS;
execbuf.flags |= ppgtt_engines[count % ppgtt_nengine];
gem_execbuf(fd, &execbuf);
+ intel_ctx_destroy(fd, ctx);
close(fd);
count++;
@@ -126,7 +131,8 @@ static void files(int core, int timeout, const int ncpus)
gem_close(core, batch);
}
-static void active(int fd, const struct intel_execution_engine2 *e,
+static void active(int fd, const intel_ctx_cfg_t *cfg,
+ const struct intel_execution_engine2 *e,
int timeout, int ncpus)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -158,19 +164,19 @@ static void active(int fd, const struct intel_execution_engine2 *e,
if (ncpus < 0) {
igt_fork(child, ppgtt_nengine) {
unsigned long count = 0;
- int i915;
+ const intel_ctx_t *ctx;
- i915 = gem_reopen_driver(fd);
/*
* Ensure the gpu is idle by launching
* a nop execbuf and stalling for it
*/
- gem_quiescent_gpu(i915);
- gem_context_copy_engines(fd, 0, i915, 0);
+ gem_quiescent_gpu(fd);
if (ppgtt_engines[child] == e->flags)
continue;
+ ctx = intel_ctx_create(fd, cfg);
+ execbuf.rsvd1 = ctx->id;
execbuf.flags = ppgtt_engines[child];
while (!READ_ONCE(*shared)) {
@@ -183,6 +189,7 @@ static void active(int fd, const struct intel_execution_engine2 *e,
}
igt_debug("hog[%d]: cycles=%lu\n", child, count);
+ intel_ctx_destroy(fd, ctx);
}
ncpus = -ncpus;
}
@@ -190,33 +197,27 @@ static void active(int fd, const struct intel_execution_engine2 *e,
igt_fork(child, ncpus) {
struct timespec start, end;
unsigned count = 0;
- int i915;
- uint32_t ctx;
- i915 = gem_reopen_driver(fd);
/*
* Ensure the gpu is idle by launching
* a nop execbuf and stalling for it.
*/
- gem_quiescent_gpu(i915);
- ctx = gem_context_create(i915);
- gem_context_copy_engines(fd, 0, i915, ctx);
+ gem_quiescent_gpu(fd);
clock_gettime(CLOCK_MONOTONIC, &start);
do {
- execbuf.rsvd1 = gem_context_clone_with_engines(fd, ctx);
+ const intel_ctx_t *ctx = intel_ctx_create(fd, cfg);
+ execbuf.rsvd1 = ctx->id;
for (unsigned n = 0; n < nengine; n++) {
execbuf.flags = engines[n];
gem_execbuf(fd, &execbuf);
}
- gem_context_destroy(fd, execbuf.rsvd1);
+ intel_ctx_destroy(fd, ctx);
count++;
clock_gettime(CLOCK_MONOTONIC, &end);
} while (elapsed(&start, &end) < timeout);
- gem_context_destroy(fd, ctx);
-
gem_sync(fd, obj.handle);
clock_gettime(CLOCK_MONOTONIC, &end);
igt_info("[%d] Context creation + execution: %.3f us\n",
@@ -239,6 +240,15 @@ static void xchg_u32(void *array, unsigned i, unsigned j)
a[j] = tmp;
}
+static void xchg_ptr(void *array, unsigned i, unsigned j)
+{
+ void **a = array, *tmp;
+
+ tmp = a[i];
+ a[i] = a[j];
+ a[j] = tmp;
+}
+
static unsigned __context_size(int fd)
{
switch (intel_gen(intel_get_drm_devid(fd))) {
@@ -277,16 +287,17 @@ static uint64_t total_avail_mem(unsigned mode)
return total << 20;
}
-static void maximum(int fd, int ncpus, unsigned mode)
+static void maximum(int fd, const intel_ctx_cfg_t *cfg,
+ int ncpus, unsigned mode)
{
const uint32_t bbe = MI_BATCH_BUFFER_END;
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[2];
uint64_t avail_mem = total_avail_mem(mode);
unsigned ctx_size = context_size(fd);
- uint32_t *contexts = NULL;
+ const intel_ctx_t **contexts = NULL;
unsigned long count = 0;
- uint32_t ctx_id;
+ const intel_ctx_t *ctx;
do {
int err;
@@ -300,16 +311,14 @@ static void maximum(int fd, int ncpus, unsigned mode)
err = -ENOMEM;
if (avail_mem > (count + 1) * ctx_size)
- err = __gem_context_clone(fd, 0,
- I915_CONTEXT_CLONE_ENGINES,
- 0, &ctx_id);
+ err = __intel_ctx_create(fd, cfg, &ctx);
if (err) {
igt_info("Created %lu contexts, before failing with '%s' [%d]\n",
count, strerror(-err), -err);
break;
}
- contexts[count++] = ctx_id;
+ contexts[count++] = ctx;
} while (1);
igt_require(count);
@@ -329,11 +338,11 @@ static void maximum(int fd, int ncpus, unsigned mode)
clock_gettime(CLOCK_MONOTONIC, &start);
for (int repeat = 0; repeat < 3; repeat++) {
- igt_permute_array(contexts, count, xchg_u32);
+ igt_permute_array(contexts, count, xchg_ptr);
igt_permute_array(all_engines, all_nengine, xchg_u32);
for (unsigned long i = 0; i < count; i++) {
- execbuf.rsvd1 = contexts[i];
+ execbuf.rsvd1 = contexts[i]->id;
for (unsigned long j = 0; j < all_nengine; j++) {
execbuf.flags = all_engines[j];
gem_execbuf(fd, &execbuf);
@@ -352,7 +361,7 @@ static void maximum(int fd, int ncpus, unsigned mode)
gem_close(fd, obj[1].handle);
for (unsigned long i = 0; i < count; i++)
- gem_context_destroy(fd, contexts[i]);
+ intel_ctx_destroy(fd, contexts[i]);
free(contexts);
}
@@ -552,6 +561,7 @@ igt_main
const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
struct drm_i915_gem_context_create create;
const struct intel_execution_engine2 *e;
+ intel_ctx_cfg_t cfg;
int fd = -1;
igt_fixture {
@@ -559,7 +569,8 @@ igt_main
igt_require_gem(fd);
gem_require_contexts(fd);
- __for_each_physical_engine(fd, e)
+ cfg = intel_ctx_cfg_all_physical(fd);
+ for_each_ctx_cfg_engine(fd, &cfg, e)
all_engines[all_nengine++] = e->flags;
igt_require(all_nengine);
@@ -589,39 +600,39 @@ igt_main
iris_pipeline(fd);
igt_subtest("maximum-mem")
- maximum(fd, ncpus, CHECK_RAM);
+ maximum(fd, &cfg, ncpus, CHECK_RAM);
igt_subtest("maximum-swap")
- maximum(fd, ncpus, CHECK_RAM | CHECK_SWAP);
+ maximum(fd, &cfg, ncpus, CHECK_RAM | CHECK_SWAP);
igt_subtest("basic-files")
- files(fd, 2, 1);
+ files(fd, &cfg, 2, 1);
igt_subtest("files")
- files(fd, 20, 1);
+ files(fd, &cfg, 20, 1);
igt_subtest("forked-files")
- files(fd, 20, ncpus);
+ files(fd, &cfg, 20, ncpus);
/* NULL value means all engines */
igt_subtest("active-all")
- active(fd, NULL, 20, 1);
+ active(fd, &cfg, NULL, 20, 1);
igt_subtest("forked-active-all")
- active(fd, NULL, 20, ncpus);
+ active(fd, &cfg, NULL, 20, ncpus);
igt_subtest_with_dynamic("active") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_cfg_engine(fd, &cfg, e) {
igt_dynamic_f("%s", e->name)
- active(fd, e, 20, 1);
+ active(fd, &cfg, e, 20, 1);
}
}
igt_subtest_with_dynamic("forked-active") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_cfg_engine(fd, &cfg, e) {
igt_dynamic_f("%s", e->name)
- active(fd, e, 20, ncpus);
+ active(fd, &cfg, e, 20, ncpus);
}
}
igt_subtest_with_dynamic("hog") {
- __for_each_physical_engine(fd, e) {
+ for_each_ctx_cfg_engine(fd, &cfg, e) {
igt_dynamic_f("%s", e->name)
- active(fd, e, 20, -1);
+ active(fd, &cfg, e, 20, -1);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* Re: [igt-dev] [PATCH i-g-t 61/81] tests/i915/gem_ctx_create: Convert benchmarks to intel_ctx_t
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 61/81] tests/i915/gem_ctx_create: Convert benchmarks to intel_ctx_t Jason Ekstrand
@ 2021-07-08 7:08 ` Zbigniew Kempczyński
0 siblings, 0 replies; 87+ messages in thread
From: Zbigniew Kempczyński @ 2021-07-08 7:08 UTC (permalink / raw)
To: Jason Ekstrand; +Cc: igt-dev
On Wed, Jul 07, 2021 at 09:46:23AM -0500, Jason Ekstrand wrote:
> Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
> ---
> tests/i915/gem_ctx_create.c | 93 +++++++++++++++++++++----------------
> 1 file changed, 52 insertions(+), 41 deletions(-)
>
> diff --git a/tests/i915/gem_ctx_create.c b/tests/i915/gem_ctx_create.c
> index 37061ebc5..448466523 100644
> --- a/tests/i915/gem_ctx_create.c
> +++ b/tests/i915/gem_ctx_create.c
> @@ -79,7 +79,8 @@ static double elapsed(const struct timespec *start,
> return (end->tv_sec - start->tv_sec) + 1e-9*(end->tv_nsec - start->tv_nsec);
> }
>
> -static void files(int core, int timeout, const int ncpus)
> +static void files(int core, const intel_ctx_cfg_t *cfg,
> + int timeout, const int ncpus)
> {
> const uint32_t bbe = MI_BATCH_BUFFER_END;
> struct drm_i915_gem_execbuffer2 execbuf;
> @@ -98,18 +99,22 @@ static void files(int core, int timeout, const int ncpus)
> igt_fork(child, ncpus) {
> struct timespec start, end;
> unsigned count = 0;
> + const intel_ctx_t *ctx;
> int fd;
>
> clock_gettime(CLOCK_MONOTONIC, &start);
> do {
> fd = gem_reopen_driver(core);
> - gem_context_copy_engines(core, 0, fd, 0);
> +
> + ctx = intel_ctx_create(fd, cfg);
> + execbuf.rsvd1 = ctx->id;
>
> obj.handle = gem_open(fd, name);
> execbuf.flags &= ~ENGINE_FLAGS;
> execbuf.flags |= ppgtt_engines[count % ppgtt_nengine];
This is not part of this commit but it is related to it. Mixing the use of
all_engines / ppgtt_engines constructed upon cfg as globals is not a pure solution,
and the reader has to be aware that cfg and these variables are tightly coupled.
I understand we can initialize them once instead of doing this in each test
separately, but I am always afraid some test will start altering this array.
But this is a minor nit and we can live with that, so:
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
--
Zbigniew
> gem_execbuf(fd, &execbuf);
>
> + intel_ctx_destroy(fd, ctx);
> close(fd);
> count++;
>
> @@ -126,7 +131,8 @@ static void files(int core, int timeout, const int ncpus)
> gem_close(core, batch);
> }
>
> -static void active(int fd, const struct intel_execution_engine2 *e,
> +static void active(int fd, const intel_ctx_cfg_t *cfg,
> + const struct intel_execution_engine2 *e,
> int timeout, int ncpus)
> {
> const uint32_t bbe = MI_BATCH_BUFFER_END;
> @@ -158,19 +164,19 @@ static void active(int fd, const struct intel_execution_engine2 *e,
> if (ncpus < 0) {
> igt_fork(child, ppgtt_nengine) {
> unsigned long count = 0;
> - int i915;
> + const intel_ctx_t *ctx;
>
> - i915 = gem_reopen_driver(fd);
> /*
> * Ensure the gpu is idle by launching
> * a nop execbuf and stalling for it
> */
> - gem_quiescent_gpu(i915);
> - gem_context_copy_engines(fd, 0, i915, 0);
> + gem_quiescent_gpu(fd);
>
> if (ppgtt_engines[child] == e->flags)
> continue;
>
> + ctx = intel_ctx_create(fd, cfg);
> + execbuf.rsvd1 = ctx->id;
> execbuf.flags = ppgtt_engines[child];
>
> while (!READ_ONCE(*shared)) {
> @@ -183,6 +189,7 @@ static void active(int fd, const struct intel_execution_engine2 *e,
> }
>
> igt_debug("hog[%d]: cycles=%lu\n", child, count);
> + intel_ctx_destroy(fd, ctx);
> }
> ncpus = -ncpus;
> }
> @@ -190,33 +197,27 @@ static void active(int fd, const struct intel_execution_engine2 *e,
> igt_fork(child, ncpus) {
> struct timespec start, end;
> unsigned count = 0;
> - int i915;
> - uint32_t ctx;
>
> - i915 = gem_reopen_driver(fd);
> /*
> * Ensure the gpu is idle by launching
> * a nop execbuf and stalling for it.
> */
> - gem_quiescent_gpu(i915);
> - ctx = gem_context_create(i915);
> - gem_context_copy_engines(fd, 0, i915, ctx);
> + gem_quiescent_gpu(fd);
>
> clock_gettime(CLOCK_MONOTONIC, &start);
> do {
> - execbuf.rsvd1 = gem_context_clone_with_engines(fd, ctx);
> + const intel_ctx_t *ctx = intel_ctx_create(fd, cfg);
> + execbuf.rsvd1 = ctx->id;
> for (unsigned n = 0; n < nengine; n++) {
> execbuf.flags = engines[n];
> gem_execbuf(fd, &execbuf);
> }
> - gem_context_destroy(fd, execbuf.rsvd1);
> + intel_ctx_destroy(fd, ctx);
> count++;
>
> clock_gettime(CLOCK_MONOTONIC, &end);
> } while (elapsed(&start, &end) < timeout);
>
> - gem_context_destroy(fd, ctx);
> -
> gem_sync(fd, obj.handle);
> clock_gettime(CLOCK_MONOTONIC, &end);
> igt_info("[%d] Context creation + execution: %.3f us\n",
> @@ -239,6 +240,15 @@ static void xchg_u32(void *array, unsigned i, unsigned j)
> a[j] = tmp;
> }
>
> +static void xchg_ptr(void *array, unsigned i, unsigned j)
> +{
> + void **a = array, *tmp;
> +
> + tmp = a[i];
> + a[i] = a[j];
> + a[j] = tmp;
> +}
> +
> static unsigned __context_size(int fd)
> {
> switch (intel_gen(intel_get_drm_devid(fd))) {
> @@ -277,16 +287,17 @@ static uint64_t total_avail_mem(unsigned mode)
> return total << 20;
> }
>
> -static void maximum(int fd, int ncpus, unsigned mode)
> +static void maximum(int fd, const intel_ctx_cfg_t *cfg,
> + int ncpus, unsigned mode)
> {
> const uint32_t bbe = MI_BATCH_BUFFER_END;
> struct drm_i915_gem_execbuffer2 execbuf;
> struct drm_i915_gem_exec_object2 obj[2];
> uint64_t avail_mem = total_avail_mem(mode);
> unsigned ctx_size = context_size(fd);
> - uint32_t *contexts = NULL;
> + const intel_ctx_t **contexts = NULL;
> unsigned long count = 0;
> - uint32_t ctx_id;
> + const intel_ctx_t *ctx;
>
> do {
> int err;
> @@ -300,16 +311,14 @@ static void maximum(int fd, int ncpus, unsigned mode)
>
> err = -ENOMEM;
> if (avail_mem > (count + 1) * ctx_size)
> - err = __gem_context_clone(fd, 0,
> - I915_CONTEXT_CLONE_ENGINES,
> - 0, &ctx_id);
> + err = __intel_ctx_create(fd, cfg, &ctx);
> if (err) {
> igt_info("Created %lu contexts, before failing with '%s' [%d]\n",
> count, strerror(-err), -err);
> break;
> }
>
> - contexts[count++] = ctx_id;
> + contexts[count++] = ctx;
> } while (1);
> igt_require(count);
>
> @@ -329,11 +338,11 @@ static void maximum(int fd, int ncpus, unsigned mode)
>
> clock_gettime(CLOCK_MONOTONIC, &start);
> for (int repeat = 0; repeat < 3; repeat++) {
> - igt_permute_array(contexts, count, xchg_u32);
> + igt_permute_array(contexts, count, xchg_ptr);
> igt_permute_array(all_engines, all_nengine, xchg_u32);
>
> for (unsigned long i = 0; i < count; i++) {
> - execbuf.rsvd1 = contexts[i];
> + execbuf.rsvd1 = contexts[i]->id;
> for (unsigned long j = 0; j < all_nengine; j++) {
> execbuf.flags = all_engines[j];
> gem_execbuf(fd, &execbuf);
> @@ -352,7 +361,7 @@ static void maximum(int fd, int ncpus, unsigned mode)
> gem_close(fd, obj[1].handle);
>
> for (unsigned long i = 0; i < count; i++)
> - gem_context_destroy(fd, contexts[i]);
> + intel_ctx_destroy(fd, contexts[i]);
> free(contexts);
> }
>
> @@ -552,6 +561,7 @@ igt_main
> const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
> struct drm_i915_gem_context_create create;
> const struct intel_execution_engine2 *e;
> + intel_ctx_cfg_t cfg;
> int fd = -1;
>
> igt_fixture {
> @@ -559,7 +569,8 @@ igt_main
> igt_require_gem(fd);
> gem_require_contexts(fd);
>
> - __for_each_physical_engine(fd, e)
> + cfg = intel_ctx_cfg_all_physical(fd);
> + for_each_ctx_cfg_engine(fd, &cfg, e)
> all_engines[all_nengine++] = e->flags;
> igt_require(all_nengine);
>
> @@ -589,39 +600,39 @@ igt_main
> iris_pipeline(fd);
>
> igt_subtest("maximum-mem")
> - maximum(fd, ncpus, CHECK_RAM);
> + maximum(fd, &cfg, ncpus, CHECK_RAM);
> igt_subtest("maximum-swap")
> - maximum(fd, ncpus, CHECK_RAM | CHECK_SWAP);
> + maximum(fd, &cfg, ncpus, CHECK_RAM | CHECK_SWAP);
>
> igt_subtest("basic-files")
> - files(fd, 2, 1);
> + files(fd, &cfg, 2, 1);
> igt_subtest("files")
> - files(fd, 20, 1);
> + files(fd, &cfg, 20, 1);
> igt_subtest("forked-files")
> - files(fd, 20, ncpus);
> + files(fd, &cfg, 20, ncpus);
>
> /* NULL value means all engines */
> igt_subtest("active-all")
> - active(fd, NULL, 20, 1);
> + active(fd, &cfg, NULL, 20, 1);
> igt_subtest("forked-active-all")
> - active(fd, NULL, 20, ncpus);
> + active(fd, &cfg, NULL, 20, ncpus);
>
> igt_subtest_with_dynamic("active") {
> - __for_each_physical_engine(fd, e) {
> + for_each_ctx_cfg_engine(fd, &cfg, e) {
> igt_dynamic_f("%s", e->name)
> - active(fd, e, 20, 1);
> + active(fd, &cfg, e, 20, 1);
> }
> }
> igt_subtest_with_dynamic("forked-active") {
> - __for_each_physical_engine(fd, e) {
> + for_each_ctx_cfg_engine(fd, &cfg, e) {
> igt_dynamic_f("%s", e->name)
> - active(fd, e, 20, ncpus);
> + active(fd, &cfg, e, 20, ncpus);
> }
> }
> igt_subtest_with_dynamic("hog") {
> - __for_each_physical_engine(fd, e) {
> + for_each_ctx_cfg_engine(fd, &cfg, e) {
> igt_dynamic_f("%s", e->name)
> - active(fd, e, 20, -1);
> + active(fd, &cfg, e, 20, -1);
> }
> }
>
> --
> 2.31.1
>
> _______________________________________________
> igt-dev mailing list
> igt-dev@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/igt-dev
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 62/81] tests/i915/gem_vm_create: Delete destroy racing tests
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (60 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 61/81] tests/i915/gem_ctx_create: Convert benchmarks to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 63/81] tests/i915/gem_vm_create: Use intel_ctx_t in the execbuf test Jason Ekstrand
` (21 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
The races these tests attempt to exercise won't exist once we rework
context creation to make the VM part of the context's immutable state.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_vm_create.c | 104 -------------------------------------
1 file changed, 104 deletions(-)
diff --git a/tests/i915/gem_vm_create.c b/tests/i915/gem_vm_create.c
index 70b43dc6d..4b77920bc 100644
--- a/tests/i915/gem_vm_create.c
+++ b/tests/i915/gem_vm_create.c
@@ -353,104 +353,6 @@ static void isolation(int i915)
gem_vm_destroy(i915, vm[0]);
}
-static void async_destroy(int i915)
-{
- struct drm_i915_gem_context_param arg = {
- .ctx_id = gem_context_create(i915),
- .value = gem_vm_create(i915),
- .param = I915_CONTEXT_PARAM_VM,
- };
- igt_spin_t *spin[2];
- int err;
-
- spin[0] = igt_spin_new(i915,
- .ctx_id = arg.ctx_id,
- .flags = IGT_SPIN_POLL_RUN);
- igt_spin_busywait_until_started(spin[0]);
-
- err = __gem_context_set_param(i915, &arg);
- if (err == -EBUSY) /* update while busy may be verboten, let it ride. */
- err = 0;
- igt_assert_eq(err, 0);
-
- spin[1] = __igt_spin_new(i915, .ctx_id = arg.ctx_id);
-
- igt_spin_end(spin[0]);
- gem_sync(i915, spin[0]->handle);
-
- gem_vm_destroy(i915, arg.value);
- gem_context_destroy(i915, arg.ctx_id);
-
- igt_spin_end(spin[1]);
- gem_sync(i915, spin[1]->handle);
-
- for (int i = 0; i < ARRAY_SIZE(spin); i++)
- igt_spin_free(i915, spin[i]);
-}
-
-static void destroy_race(int i915)
-{
- const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
- uint32_t *vm;
-
- /* Check we can execute a polling spinner */
- igt_spin_free(i915, igt_spin_new(i915, .flags = IGT_SPIN_POLL_RUN));
-
- vm = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, -1, 0);
- igt_assert(vm != MAP_FAILED);
-
- for (int child = 0; child < ncpus; child++)
- vm[child] = gem_vm_create(i915);
-
- igt_fork(child, ncpus) {
- uint32_t ctx = gem_context_create(i915);
- igt_spin_t *spin;
-
- spin = __igt_spin_new(i915, ctx, .flags = IGT_SPIN_POLL_RUN);
- while (!READ_ONCE(vm[ncpus])) {
- struct drm_i915_gem_context_param arg = {
- .ctx_id = ctx,
- .param = I915_CONTEXT_PARAM_VM,
- .value = READ_ONCE(vm[child]),
- };
- igt_spin_t *nxt;
-
- if (__gem_context_set_param(i915, &arg))
- continue;
-
- nxt = __igt_spin_new(i915, ctx,
- .flags = IGT_SPIN_POLL_RUN);
-
- igt_spin_end(spin);
- gem_sync(i915, spin->handle);
- igt_spin_free(i915, spin);
-
- usleep(1000 + hars_petruska_f54_1_random_unsafe() % 2000);
-
- spin = nxt;
- }
-
- igt_spin_free(i915, spin);
- gem_context_destroy(i915, ctx);
- }
-
- igt_until_timeout(5) {
- for (int child = 0; child < ncpus; child++) {
- gem_vm_destroy(i915, vm[child]);
- vm[child] = gem_vm_create(i915);
- }
- usleep(1000 + hars_petruska_f54_1_random_unsafe() % 2000);
- }
-
- vm[ncpus] = 1;
- igt_waitchildren();
-
- for (int child = 0; child < ncpus; child++)
- gem_vm_destroy(i915, vm[child]);
-
- munmap(vm, 4096);
-}
-
igt_main
{
int i915 = -1;
@@ -480,12 +382,6 @@ igt_main
igt_subtest("create-ext")
create_ext(i915);
-
- igt_subtest("async-destroy")
- async_destroy(i915);
-
- igt_subtest("destroy-race")
- destroy_race(i915);
}
igt_fixture {
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 63/81] tests/i915/gem_vm_create: Use intel_ctx_t in the execbuf test
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (61 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 62/81] tests/i915/gem_vm_create: Delete destroy racing tests Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 64/81] tests/i915/sysfs: Convert to intel_ctx_t Jason Ekstrand
` (20 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_vm_create.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
diff --git a/tests/i915/gem_vm_create.c b/tests/i915/gem_vm_create.c
index 4b77920bc..3005d347c 100644
--- a/tests/i915/gem_vm_create.c
+++ b/tests/i915/gem_vm_create.c
@@ -220,9 +220,8 @@ static void execbuf(int i915)
.buffers_ptr = to_user_pointer(&batch),
.buffer_count = 1,
};
- struct drm_i915_gem_context_param arg = {
- .param = I915_CONTEXT_PARAM_VM,
- };
+ intel_ctx_cfg_t cfg = {};
+ const intel_ctx_t *ctx;
/* First verify that we try to use "softpinning" by default */
batch.offset = 48 << 20;
@@ -230,20 +229,24 @@ static void execbuf(int i915)
igt_assert_eq_u64(batch.offset, 48 << 20);
gem_sync(i915, batch.handle);
- arg.value = gem_vm_create(i915);
- gem_context_set_param(i915, &arg);
+ cfg.vm = gem_vm_create(i915);
+ ctx = intel_ctx_create(i915, &cfg);
+ eb.rsvd1 = ctx->id;
gem_execbuf(i915, &eb);
igt_assert_eq_u64(batch.offset, 48 << 20);
- gem_vm_destroy(i915, arg.value);
+ gem_vm_destroy(i915, cfg.vm);
+ intel_ctx_destroy(i915, ctx);
gem_sync(i915, batch.handle); /* be idle! */
- arg.value = gem_vm_create(i915);
- gem_context_set_param(i915, &arg);
+ cfg.vm = gem_vm_create(i915);
+ ctx = intel_ctx_create(i915, &cfg);
batch.offset = 0;
+ eb.rsvd1 = ctx->id;
gem_execbuf(i915, &eb);
igt_assert_eq_u64(batch.offset, 0);
- gem_vm_destroy(i915, arg.value);
+ gem_vm_destroy(i915, cfg.vm);
+ intel_ctx_destroy(i915, ctx);
gem_sync(i915, batch.handle);
gem_close(i915, batch.handle);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 64/81] tests/i915/sysfs: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (62 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 63/81] tests/i915/gem_vm_create: Use intel_ctx_t in the execbuf test Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 65/81] tests/i915/gem_workarounds: " Jason Ekstrand
` (19 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/sysfs_heartbeat_interval.c | 40 ++++++++++-----------
tests/i915/sysfs_preempt_timeout.c | 39 ++++++++++----------
tests/i915/sysfs_timeslice_duration.c | 51 +++++++++++++--------------
3 files changed, 64 insertions(+), 66 deletions(-)
diff --git a/tests/i915/sysfs_heartbeat_interval.c b/tests/i915/sysfs_heartbeat_interval.c
index b8aba2416..b70b653b1 100644
--- a/tests/i915/sysfs_heartbeat_interval.c
+++ b/tests/i915/sysfs_heartbeat_interval.c
@@ -132,13 +132,12 @@ static void set_unbannable(int i915, uint32_t ctx)
gem_context_set_param(i915, &p);
}
-static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
+static const intel_ctx_t *
+create_ctx(int i915, unsigned int class, unsigned int inst, int prio)
{
- uint32_t ctx;
-
- ctx = gem_context_create_for_engine(i915, class, inst);
- set_unbannable(i915, ctx);
- gem_context_set_priority(i915, ctx, prio);
+ const intel_ctx_t *ctx = intel_ctx_create_for_engine(i915, class, inst);
+ set_unbannable(i915, ctx->id);
+ gem_context_set_priority(i915, ctx->id, prio);
return ctx;
}
@@ -149,23 +148,23 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
struct timespec ts = {};
igt_spin_t *spin[2];
uint64_t elapsed;
- uint32_t ctx[2];
+ const intel_ctx_t *ctx[2];
igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
set_heartbeat(engine, timeout);
- ctx[0] = create_context(i915, class, inst, 1023);
- spin[0] = igt_spin_new(i915, ctx[0],
+ ctx[0] = create_ctx(i915, class, inst, 1023);
+ spin[0] = igt_spin_new(i915, .ctx = ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_context(i915, class, inst, -1023);
+ ctx[1] = create_ctx(i915, class, inst, -1023);
igt_nsec_elapsed(&ts);
- spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin[1]);
elapsed = igt_nsec_elapsed(&ts);
@@ -176,8 +175,8 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
igt_spin_free(i915, spin[0]);
- gem_context_destroy(i915, ctx[1]);
- gem_context_destroy(i915, ctx[0]);
+ intel_ctx_destroy(i915, ctx[1]);
+ intel_ctx_destroy(i915, ctx[0]);
gem_quiescent_gpu(i915);
return elapsed;
@@ -292,18 +291,18 @@ static void client(int i915, int engine, int *ctl, int duration, int expect)
{
unsigned int class, inst;
unsigned long count = 0;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
- ctx = create_context(i915, class, inst, 0);
+ ctx = create_ctx(i915, class, inst, 0);
while (!READ_ONCE(*ctl)) {
unsigned int elapsed;
igt_spin_t *spin;
- spin = igt_spin_new(i915, ctx,
+ spin = igt_spin_new(i915, .ctx = ctx,
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
@@ -331,7 +330,7 @@ static void client(int i915, int engine, int *ctl, int duration, int expect)
count++;
}
- gem_context_destroy(i915, ctx);
+ intel_ctx_destroy(i915, ctx);
igt_info("%s client completed %lu spins\n",
expect < 0 ? "Bad" : "Good", count);
}
@@ -414,7 +413,7 @@ static void test_off(int i915, int engine)
unsigned int class, inst;
unsigned int saved;
igt_spin_t *spin;
- uint32_t ctx;
+ const intel_ctx_t *ctx;
/*
* Some other clients request that there is never any interruption
@@ -433,9 +432,9 @@ static void test_off(int i915, int engine)
set_heartbeat(engine, 0);
- ctx = create_context(i915, class, inst, 0);
+ ctx = create_ctx(i915, class, inst, 0);
- spin = igt_spin_new(i915, ctx,
+ spin = igt_spin_new(i915, .ctx = ctx,
.flags = (IGT_SPIN_POLL_RUN |
IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_FENCE_OUT));
@@ -455,6 +454,7 @@ static void test_off(int i915, int engine)
gem_quiescent_gpu(i915);
set_heartbeat(engine, saved);
+ intel_ctx_destroy(i915, ctx);
}
igt_main
diff --git a/tests/i915/sysfs_preempt_timeout.c b/tests/i915/sysfs_preempt_timeout.c
index 83a60436c..9f00093ea 100644
--- a/tests/i915/sysfs_preempt_timeout.c
+++ b/tests/i915/sysfs_preempt_timeout.c
@@ -126,13 +126,12 @@ static void set_unbannable(int i915, uint32_t ctx)
gem_context_set_param(i915, &p);
}
-static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
+static const intel_ctx_t *
+create_ctx(int i915, unsigned int class, unsigned int inst, int prio)
{
- uint32_t ctx;
-
- ctx = gem_context_create_for_engine(i915, class, inst);
- set_unbannable(i915, ctx);
- gem_context_set_priority(i915, ctx, prio);
+ const intel_ctx_t *ctx = intel_ctx_create_for_engine(i915, class, inst);
+ set_unbannable(i915, ctx->id);
+ gem_context_set_priority(i915, ctx->id, prio);
return ctx;
}
@@ -143,23 +142,23 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
struct timespec ts = {};
igt_spin_t *spin[2];
uint64_t elapsed;
- uint32_t ctx[2];
+ const intel_ctx_t *ctx[2];
igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
set_preempt_timeout(engine, timeout);
- ctx[0] = create_context(i915, class, inst, -1023);
- spin[0] = igt_spin_new(i915, ctx[0],
+ ctx[0] = create_ctx(i915, class, inst, -1023);
+ spin[0] = igt_spin_new(i915, .ctx = ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_context(i915, class, inst, 1023);
+ ctx[1] = create_ctx(i915, class, inst, 1023);
igt_nsec_elapsed(&ts);
- spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin[1]);
elapsed = igt_nsec_elapsed(&ts);
@@ -170,8 +169,8 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
igt_spin_free(i915, spin[0]);
- gem_context_destroy(i915, ctx[1]);
- gem_context_destroy(i915, ctx[0]);
+ intel_ctx_destroy(i915, ctx[1]);
+ intel_ctx_destroy(i915, ctx[0]);
gem_quiescent_gpu(i915);
return elapsed;
@@ -231,7 +230,7 @@ static void test_off(int i915, int engine)
unsigned int class, inst;
igt_spin_t *spin[2];
unsigned int saved;
- uint32_t ctx[2];
+ const intel_ctx_t *ctx[2];
/*
* We support setting the timeout to 0 to disable the reset on
@@ -252,15 +251,15 @@ static void test_off(int i915, int engine)
set_preempt_timeout(engine, 0);
- ctx[0] = create_context(i915, class, inst, -1023);
- spin[0] = igt_spin_new(i915, ctx[0],
+ ctx[0] = create_ctx(i915, class, inst, -1023);
+ spin[0] = igt_spin_new(i915, .ctx = ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_context(i915, class, inst, 1023);
- spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ ctx[1] = create_ctx(i915, class, inst, 1023);
+ spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
for (int i = 0; i < 150; i++) {
igt_assert_eq(sync_fence_status(spin[0]->out_fence), 0);
@@ -277,8 +276,8 @@ static void test_off(int i915, int engine)
igt_spin_free(i915, spin[0]);
- gem_context_destroy(i915, ctx[1]);
- gem_context_destroy(i915, ctx[0]);
+ intel_ctx_destroy(i915, ctx[1]);
+ intel_ctx_destroy(i915, ctx[0]);
igt_assert(enable_hangcheck(i915, true));
gem_quiescent_gpu(i915);
diff --git a/tests/i915/sysfs_timeslice_duration.c b/tests/i915/sysfs_timeslice_duration.c
index 05ab79667..b73ee3889 100644
--- a/tests/i915/sysfs_timeslice_duration.c
+++ b/tests/i915/sysfs_timeslice_duration.c
@@ -138,13 +138,12 @@ static void set_unbannable(int i915, uint32_t ctx)
gem_context_set_param(i915, &p);
}
-static uint32_t create_context(int i915, unsigned int class, unsigned int inst, int prio)
+static const intel_ctx_t *
+create_ctx(int i915, unsigned int class, unsigned int inst, int prio)
{
- uint32_t ctx;
-
- ctx = gem_context_create_for_engine(i915, class, inst);
- set_unbannable(i915, ctx);
- gem_context_set_priority(i915, ctx, prio);
+ const intel_ctx_t *ctx = intel_ctx_create_for_engine(i915, class, inst);
+ set_unbannable(i915, ctx->id);
+ gem_context_set_priority(i915, ctx->id, prio);
return ctx;
}
@@ -191,7 +190,7 @@ static uint64_t __test_duration(int i915, int engine, unsigned int timeout)
double duration = clockrate(i915);
unsigned int class, inst, mmio;
uint32_t *cs, *map;
- uint32_t ctx[2];
+ const intel_ctx_t *ctx[2];
int start;
int i;
@@ -204,8 +203,8 @@ static uint64_t __test_duration(int i915, int engine, unsigned int timeout)
set_timeslice(engine, timeout);
- ctx[0] = create_context(i915, class, inst, 0);
- ctx[1] = create_context(i915, class, inst, 0);
+ ctx[0] = create_ctx(i915, class, inst, 0);
+ ctx[1] = create_ctx(i915, class, inst, 0);
map = gem_mmap__device_coherent(i915, obj[2].handle,
0, 4096, PROT_WRITE);
@@ -260,10 +259,10 @@ static uint64_t __test_duration(int i915, int engine, unsigned int timeout)
igt_assert(cs - map < 4096 / sizeof(*cs));
munmap(map, 4096);
- eb.rsvd1 = ctx[0];
+ eb.rsvd1 = ctx[0]->id;
gem_execbuf(i915, &eb);
- eb.rsvd1 = ctx[1];
+ eb.rsvd1 = ctx[1]->id;
eb.batch_start_offset = start;
gem_execbuf(i915, &eb);
@@ -280,7 +279,7 @@ static uint64_t __test_duration(int i915, int engine, unsigned int timeout)
munmap(map, 4096);
for (i = 0; i < ARRAY_SIZE(ctx); i++)
- gem_context_destroy(i915, ctx[i]);
+ intel_ctx_destroy(i915, ctx[i]);
for (i = 0; i < ARRAY_SIZE(obj); i++)
gem_close(i915, obj[i].handle);
@@ -371,23 +370,23 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
struct timespec ts = {};
igt_spin_t *spin[2];
uint64_t elapsed;
- uint32_t ctx[2];
+ const intel_ctx_t *ctx[2];
igt_assert(igt_sysfs_scanf(engine, "class", "%u", &class) == 1);
igt_assert(igt_sysfs_scanf(engine, "instance", "%u", &inst) == 1);
set_timeslice(engine, timeout);
- ctx[0] = create_context(i915, class, inst, 0);
- spin[0] = igt_spin_new(i915, ctx[0],
+ ctx[0] = create_ctx(i915, class, inst, 0);
+ spin[0] = igt_spin_new(i915, .ctx = ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_context(i915, class, inst, 0);
+ ctx[1] = create_ctx(i915, class, inst, 0);
igt_nsec_elapsed(&ts);
- spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin[1]);
elapsed = igt_nsec_elapsed(&ts);
@@ -398,8 +397,8 @@ static uint64_t __test_timeout(int i915, int engine, unsigned int timeout)
igt_spin_free(i915, spin[0]);
- gem_context_destroy(i915, ctx[1]);
- gem_context_destroy(i915, ctx[0]);
+ intel_ctx_destroy(i915, ctx[1]);
+ intel_ctx_destroy(i915, ctx[0]);
gem_quiescent_gpu(i915);
return elapsed;
@@ -460,7 +459,7 @@ static void test_off(int i915, int engine)
unsigned int class, inst;
unsigned int saved;
igt_spin_t *spin[2];
- uint32_t ctx[2];
+ const intel_ctx_t *ctx[2];
/*
* As always, there are some who must run uninterrupted and simply do
@@ -482,15 +481,15 @@ static void test_off(int i915, int engine)
set_timeslice(engine, 0);
- ctx[0] = create_context(i915, class, inst, 0);
- spin[0] = igt_spin_new(i915, ctx[0],
+ ctx[0] = create_ctx(i915, class, inst, 0);
+ spin[0] = igt_spin_new(i915, .ctx = ctx[0],
.flags = (IGT_SPIN_NO_PREEMPTION |
IGT_SPIN_POLL_RUN |
IGT_SPIN_FENCE_OUT));
igt_spin_busywait_until_started(spin[0]);
- ctx[1] = create_context(i915, class, inst, 0);
- spin[1] = igt_spin_new(i915, ctx[1], .flags = IGT_SPIN_POLL_RUN);
+ ctx[1] = create_ctx(i915, class, inst, 0);
+ spin[1] = igt_spin_new(i915, .ctx = ctx[1], .flags = IGT_SPIN_POLL_RUN);
for (int i = 0; i < 150; i++) {
igt_assert_eq(sync_fence_status(spin[0]->out_fence), 0);
@@ -507,8 +506,8 @@ static void test_off(int i915, int engine)
igt_spin_free(i915, spin[0]);
- gem_context_destroy(i915, ctx[1]);
- gem_context_destroy(i915, ctx[0]);
+ intel_ctx_destroy(i915, ctx[1]);
+ intel_ctx_destroy(i915, ctx[0]);
igt_assert(enable_hangcheck(i915, true));
gem_quiescent_gpu(i915);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 65/81] tests/i915/gem_workarounds: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (63 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 64/81] tests/i915/sysfs: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 66/81] lib/i915/gem_context: Delete all the context clone/copy stuff Jason Ekstrand
` (18 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_workarounds.c | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/tests/i915/gem_workarounds.c b/tests/i915/gem_workarounds.c
index 9cdc24374..e240901c4 100644
--- a/tests/i915/gem_workarounds.c
+++ b/tests/i915/gem_workarounds.c
@@ -85,7 +85,7 @@ static bool write_only(const uint32_t addr)
#define MI_STORE_REGISTER_MEM (0x24 << 23)
-static int workaround_fail_count(int i915, uint32_t ctx)
+static int workaround_fail_count(int i915, const intel_ctx_t *ctx)
{
struct drm_i915_gem_exec_object2 obj[2];
struct drm_i915_gem_relocation_entry *reloc;
@@ -131,12 +131,12 @@ static int workaround_fail_count(int i915, uint32_t ctx)
memset(&execbuf, 0, sizeof(execbuf));
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
- execbuf.rsvd1 = ctx;
+ execbuf.rsvd1 = ctx->id;
gem_execbuf(i915, &execbuf);
gem_set_domain(i915, obj[0].handle, I915_GEM_DOMAIN_CPU, 0);
- spin = igt_spin_new(i915, .ctx_id = ctx, .flags = IGT_SPIN_POLL_RUN);
+ spin = igt_spin_new(i915, .ctx = ctx, .flags = IGT_SPIN_POLL_RUN);
igt_spin_busywait_until_started(spin);
fw = igt_open_forcewake_handle(i915);
@@ -184,14 +184,15 @@ static int workaround_fail_count(int i915, uint32_t ctx)
#define FD 0x2
static void check_workarounds(int fd, enum operation op, unsigned int flags)
{
- uint32_t ctx = 0;
+ const intel_ctx_t *ctx;
if (flags & FD)
fd = gem_reopen_driver(fd);
+ ctx = intel_ctx_0(fd);
if (flags & CONTEXT) {
gem_require_contexts(fd);
- ctx = gem_context_create(fd);
+ ctx = intel_ctx_create(fd, NULL);
}
igt_assert_eq(workaround_fail_count(fd, ctx), 0);
@@ -221,7 +222,7 @@ static void check_workarounds(int fd, enum operation op, unsigned int flags)
igt_assert_eq(workaround_fail_count(fd, ctx), 0);
if (flags & CONTEXT)
- gem_context_destroy(fd, ctx);
+ intel_ctx_destroy(fd, ctx);
if (flags & FD)
close(fd);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 66/81] lib/i915/gem_context: Delete all the context clone/copy stuff
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (64 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 65/81] tests/i915/gem_workarounds: " Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 67/81] tests/i915/gem_ctx_engines: Delete the libapi subtest Jason Ekstrand
` (17 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
lib/i915/gem_context.c | 149 -----------------------------------------
lib/i915/gem_context.h | 16 -----
2 files changed, 165 deletions(-)
diff --git a/lib/i915/gem_context.c b/lib/i915/gem_context.c
index 87dcbc6e8..3428cf7a3 100644
--- a/lib/i915/gem_context.c
+++ b/lib/i915/gem_context.c
@@ -407,125 +407,6 @@ bool gem_context_has_persistence(int i915)
return __gem_context_get_param(i915, &param) == 0;
}
-int
-__gem_context_clone(int i915,
- uint32_t src, unsigned int share,
- unsigned int flags,
- uint32_t *out)
-{
- struct drm_i915_gem_context_create_ext_clone clone = {
- { .name = I915_CONTEXT_CREATE_EXT_CLONE },
- .clone_id = src,
- .flags = share,
- };
- struct drm_i915_gem_context_create_ext arg = {
- .flags = flags | I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
- .extensions = to_user_pointer(&clone),
- };
- int err;
-
- err = create_ext_ioctl(i915, &arg);
- if (err)
- return err;
-
- *out = arg.ctx_id;
- return 0;
-}
-
-static bool __gem_context_has(int i915, uint32_t share, unsigned int flags)
-{
- uint32_t ctx = 0;
-
- __gem_context_clone(i915, 0, share, flags, &ctx);
- if (ctx)
- gem_context_destroy(i915, ctx);
-
- errno = 0;
- return ctx;
-}
-
-bool gem_contexts_has_shared_gtt(int i915)
-{
- return __gem_context_has(i915, I915_CONTEXT_CLONE_VM, 0);
-}
-
-bool gem_has_queues(int i915)
-{
- return __gem_context_has(i915,
- I915_CONTEXT_CLONE_VM,
- I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
-}
-
-uint32_t gem_context_clone(int i915,
- uint32_t src, unsigned int share,
- unsigned int flags)
-{
- uint32_t ctx;
-
- igt_assert_eq(__gem_context_clone(i915, src, share, flags, &ctx), 0);
-
- return ctx;
-}
-
-bool gem_has_context_clone(int i915)
-{
- struct drm_i915_gem_context_create_ext_clone ext = {
- { .name = I915_CONTEXT_CREATE_EXT_CLONE },
- .clone_id = -1,
- };
- struct drm_i915_gem_context_create_ext create = {
- .flags = I915_CONTEXT_CREATE_FLAGS_USE_EXTENSIONS,
- .extensions = to_user_pointer(&ext),
- };
-
- return create_ext_ioctl(i915, &create) == -ENOENT;
-}
-
-/**
- * gem_context_clone_with_engines:
- * @i915: open i915 drm file descriptor
- * @src: i915 context id
- *
- * Special purpose wrapper to create a new context by cloning engines from @src.
- *
- * In can be called regardless of whether the kernel supports context cloning.
- *
- * Intended purpose is to use for creating contexts against which work will be
- * submitted and the engine index came from external source, derived from a
- * default context potentially configured with an engine map.
- */
-uint32_t gem_context_clone_with_engines(int i915, uint32_t src)
-{
- if (!gem_has_context_clone(i915))
- return gem_context_create(i915);
- else
- return gem_context_clone(i915, src, I915_CONTEXT_CLONE_ENGINES,
- 0);
-}
-
-uint32_t gem_queue_create(int i915)
-{
- return gem_context_clone(i915, 0,
- I915_CONTEXT_CLONE_VM |
- I915_CONTEXT_CLONE_ENGINES,
- I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
-}
-
-/**
- * gem_queue_clone_with_engines:
- * @i915: open i915 drm file descriptor
- * @src: i915 context id
- *
- * See gem_context_clone_with_engines.
- */
-uint32_t gem_queue_clone_with_engines(int i915, uint32_t src)
-{
- return gem_context_clone(i915, src,
- I915_CONTEXT_CLONE_ENGINES |
- I915_CONTEXT_CLONE_VM,
- I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE);
-}
-
bool gem_context_has_engine(int fd, uint32_t ctx, uint64_t engine)
{
struct drm_i915_gem_exec_object2 exec = {};
@@ -551,36 +432,6 @@ bool gem_context_has_engine(int fd, uint32_t ctx, uint64_t engine)
return __gem_execbuf(fd, &execbuf) == -ENOENT;
}
-/**
- * gem_context_copy_engines:
- * @src_fd: open i915 drm file descriptor where @src context belongs to
- * @src: source engine map context id
- * @dst_fd: open i915 drm file descriptor where @dst context belongs to
- * @dst: destination engine map context id
- *
- * Special purpose helper for copying engine map from one context to another.
- *
- * In can be called regardless of whether the kernel supports context engine
- * maps and is a no-op if not supported.
- */
-void
-gem_context_copy_engines(int src_fd, uint32_t src, int dst_fd, uint32_t dst)
-{
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, I915_EXEC_RING_MASK + 1);
- struct drm_i915_gem_context_param param = {
- .param = I915_CONTEXT_PARAM_ENGINES,
- .ctx_id = src,
- .size = sizeof(engines),
- .value = to_user_pointer(&engines),
- };
-
- if (__gem_context_get_param(src_fd, &param))
- return;
-
- param.ctx_id = dst;
- gem_context_set_param(dst_fd, &param);
-}
-
uint32_t gem_context_create_for_engine(int i915, unsigned int class, unsigned int inst)
{
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 1) = {
diff --git a/lib/i915/gem_context.h b/lib/i915/gem_context.h
index 6e2226d27..505d55724 100644
--- a/lib/i915/gem_context.h
+++ b/lib/i915/gem_context.h
@@ -40,20 +40,6 @@ int __gem_context_destroy(int fd, uint32_t ctx_id);
uint32_t gem_context_create_for_engine(int fd, unsigned int class, unsigned int inst);
uint32_t gem_context_create_for_class(int i915, unsigned int class, unsigned int *count);
-int __gem_context_clone(int i915,
- uint32_t src, unsigned int share,
- unsigned int flags,
- uint32_t *out);
-uint32_t gem_context_clone(int i915,
- uint32_t src, unsigned int share,
- unsigned int flags);
-uint32_t gem_context_clone_with_engines(int i915, uint32_t src);
-void gem_context_copy_engines(int src_fd, uint32_t src,
- int dst_fd, uint32_t dst);
-
-uint32_t gem_queue_create(int i915);
-uint32_t gem_queue_clone_with_engines(int i915, uint32_t src);
-
bool gem_contexts_has_shared_gtt(int i915);
bool gem_has_queues(int i915);
@@ -63,8 +49,6 @@ bool gem_context_has_single_timeline(int i915);
void gem_context_require_bannable(int fd);
void gem_context_require_param(int fd, uint64_t param);
-bool gem_has_context_clone(int i915);
-
void gem_context_get_param(int fd, struct drm_i915_gem_context_param *p);
void gem_context_set_param(int fd, struct drm_i915_gem_context_param *p);
int __gem_context_set_param(int fd, struct drm_i915_gem_context_param *p);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 67/81] tests/i915/gem_ctx_engines: Delete the libapi subtest
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (65 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 66/81] lib/i915/gem_context: Delete all the context clone/copy stuff Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 68/81] lib/igt_dummyload: Stop supporting ALL_ENGINES without an intel_ctx_t Jason Ekstrand
` (16 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
It just tests enumeration APIs that we're about to delete.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_engines.c | 71 ------------------------------------
1 file changed, 71 deletions(-)
diff --git a/tests/i915/gem_ctx_engines.c b/tests/i915/gem_ctx_engines.c
index ffed96592..caa97774c 100644
--- a/tests/i915/gem_ctx_engines.c
+++ b/tests/i915/gem_ctx_engines.c
@@ -551,74 +551,6 @@ static void independent_all(int i915, const intel_ctx_t *ctx)
igt_waitchildren();
}
-static void libapi(int i915)
-{
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 64) = {};
- struct drm_i915_gem_context_param p = {
- .ctx_id = gem_context_create(i915),
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&engines),
- };
- const struct intel_execution_engine2 *e;
- unsigned int count, idx;
-
- p.size = sizeof(struct i915_context_param_engines);
- gem_context_set_param(i915, &p);
-
- /* An empty context should be a short loop */
- count = 0;
- for_each_context_engine(i915, p.ctx_id, e)
- count++;
- igt_assert_eq(count, 0);
-
- p.size += sizeof(struct i915_engine_class_instance);
- engine_class(&engines, 0) = -1;
- engine_instance(&engines, 0) = -1;
- gem_context_set_param(i915, &p);
-
- /* We report all engines from the context, even if invalid/unusable */
- count = 0;
- for_each_context_engine(i915, p.ctx_id, e) {
- igt_assert_eq(e->class, engine_class(&engines, 0));
- igt_assert_eq(e->instance, engine_instance(&engines, 0));
- count++;
- }
- igt_assert_eq(count, 1);
-
- /* Check that every known engine can be found from the context map */
- idx = 0;
- p.size = sizeof(struct i915_context_param_engines);
- p.size += sizeof(struct i915_engine_class_instance);
- for (engine_class(&engines, idx) = 0;
- engine_class(&engines, idx) < 16;
- engine_class(&engines, idx)++) {
- for (engine_instance(&engines, idx) = 0;
- engine_instance(&engines, idx) < 16;
- engine_instance(&engines, idx)++) {
- if (__gem_context_set_param(i915, &p))
- break;
-
- count = 0;
- for_each_context_engine(i915, p.ctx_id, e) {
- igt_assert_eq(e->class,
- engine_class(&engines, count));
- igt_assert_eq(e->instance,
- engine_instance(&engines, count));
- count++;
- }
- igt_assert_eq(count, idx + 1);
-
- engines.engines[(idx + 1) % 64] = engines.engines[idx];
- idx = (idx + 1) % 64;
-
- p.size = sizeof(struct i915_context_param_engines);
- p.size += (idx + 1) * sizeof(struct i915_engine_class_instance);
- }
- }
-
- gem_context_destroy(i915, p.ctx_id);
-}
-
igt_main
{
const struct intel_execution_engine2 *e;
@@ -664,9 +596,6 @@ igt_main
independent_all(i915, ctx);
}
- igt_subtest("libapi")
- libapi(i915);
-
igt_fixture
igt_stop_hang_detector();
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 68/81] lib/igt_dummyload: Stop supporting ALL_ENGINES without an intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (66 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 67/81] tests/i915/gem_ctx_engines: Delete the libapi subtest Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 69/81] lib/i915/gem_engine_topology: Delete the old physical engine iterators Jason Ekstrand
` (15 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
lib/igt_dummyload.c | 21 ++++++---------------
1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 75be2f7c7..7cc242872 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -131,22 +131,13 @@ emit_recursive_batch(igt_spin_t *spin,
if (opts->engine == ALL_ENGINES) {
struct intel_execution_engine2 *engine;
- if (opts->ctx) {
- for_each_ctx_engine(fd, opts->ctx, engine) {
- if (opts->flags & IGT_SPIN_POLL_RUN &&
- !gem_class_can_store_dword(fd, engine->class))
- continue;
+ igt_assert(opts->ctx);
+ for_each_ctx_engine(fd, opts->ctx, engine) {
+ if (opts->flags & IGT_SPIN_POLL_RUN &&
+ !gem_class_can_store_dword(fd, engine->class))
+ continue;
- flags[nengine++] = engine->flags;
- }
- } else {
- for_each_context_engine(fd, opts->ctx_id, engine) {
- if (opts->flags & IGT_SPIN_POLL_RUN &&
- !gem_class_can_store_dword(fd, engine->class))
- continue;
-
- flags[nengine++] = engine->flags;
- }
+ flags[nengine++] = engine->flags;
}
} else {
flags[nengine++] = opts->engine;
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 69/81] lib/i915/gem_engine_topology: Delete the old physical engine iterators
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (67 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 68/81] lib/igt_dummyload: Stop supporting ALL_ENGINES without an intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 70/81] tests/i915/gem_mmap_gtt: Convert to intel_ctx_t (v2) Jason Ekstrand
` (14 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
lib/i915/gem_engine_topology.c | 58 ----------------------------------
lib/i915/gem_engine_topology.h | 15 ---------
2 files changed, 73 deletions(-)
diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
index 4bb7d21f6..f9c881457 100644
--- a/lib/i915/gem_engine_topology.c
+++ b/lib/i915/gem_engine_topology.c
@@ -136,25 +136,6 @@ int __gem_query_engines(int fd,
return __gem_query(fd, &query);
}
-static void ctx_map_engines(int fd, struct intel_engine_data *ed,
- struct drm_i915_gem_context_param *param)
-{
- struct i915_context_param_engines *engines =
- from_user_pointer(param->value);
- int i = 0;
-
- for (typeof(engines->engines[0]) *p = &engines->engines[0];
- i < ed->nengines; i++, p++) {
- p->engine_class = ed->engines[i].class;
- p->engine_instance = ed->engines[i].instance;
- }
-
- param->size = offsetof(typeof(*engines), engines[i]);
- engines->extensions = 0;
-
- gem_context_set_param(fd, param);
-}
-
static const char *class_names[] = {
[I915_ENGINE_CLASS_RENDER] = "rcs",
[I915_ENGINE_CLASS_COPY] = "bcs",
@@ -215,11 +196,6 @@ static int __query_engine_list(int fd, struct intel_engine_data *ed)
return 0;
}
-static void query_engine_list(int fd, struct intel_engine_data *ed)
-{
- igt_assert_eq(__query_engine_list(fd, ed), 0);
-}
-
struct intel_execution_engine2 *
intel_get_current_engine(struct intel_engine_data *ed)
{
@@ -353,40 +329,6 @@ static int gem_topology_get_param(int fd,
return 0;
}
-struct intel_engine_data intel_init_engine_list(int fd, uint32_t ctx_id)
-{
- DEFINE_CONTEXT_ENGINES_PARAM(engines, param, ctx_id, GEM_MAX_ENGINES);
- struct intel_engine_data engine_data = { };
- int i;
-
- if (gem_topology_get_param(fd, &param)) {
- /* if kernel does not support engine/context mapping */
- return intel_engine_list_for_static(fd);
- }
-
- if (!param.size) {
- query_engine_list(fd, &engine_data);
- ctx_map_engines(fd, &engine_data, ¶m);
- } else {
- /* engine count can be inferred from size */
- param.size -= sizeof(struct i915_context_param_engines);
- param.size /= sizeof(struct i915_engine_class_instance);
-
- igt_assert_f(param.size <= GEM_MAX_ENGINES,
- "unsupported engine count\n");
-
- for (i = 0; i < param.size; i++)
- init_engine(&engine_data.engines[i],
- engines.engines[i].engine_class,
- engines.engines[i].engine_instance,
- i);
-
- engine_data.nengines = i;
- }
-
- return engine_data;
-}
-
int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
struct intel_execution_engine2 *e)
{
diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
index 92d9a4792..8f5987f6a 100644
--- a/lib/i915/gem_engine_topology.h
+++ b/lib/i915/gem_engine_topology.h
@@ -51,7 +51,6 @@ struct intel_engine_data {
bool gem_has_engine_topology(int fd);
struct intel_engine_data intel_engine_list_of_physical(int fd);
struct intel_engine_data intel_engine_list_for_ctx_cfg(int fd, const intel_ctx_cfg_t *cfg);
-struct intel_engine_data intel_init_engine_list(int fd, uint32_t ctx_id);
/* iteration functions */
struct intel_execution_engine2 *
@@ -106,11 +105,6 @@ struct intel_execution_engine2 gem_eb_flags_to_engine(unsigned int flags);
#define for_each_ctx_engine(fd__, ctx__, e__) \
for_each_ctx_cfg_engine(fd__, &(ctx__)->cfg, e__)
-#define for_each_context_engine(fd__, ctx__, e__) \
- for (struct intel_engine_data i__ = intel_init_engine_list(fd__, ctx__); \
- ((e__) = intel_get_current_engine(&i__)); \
- intel_next_engine(&i__))
-
/**
* for_each_physical_engine
* @fd__: open i915 drm file descriptor
@@ -126,15 +120,6 @@ struct intel_execution_engine2 gem_eb_flags_to_engine(unsigned int flags);
((e__) = intel_get_current_physical_engine(&i__##e__)); \
intel_next_engine(&i__##e__))
-/* needs to replace "for_each_physical_engine" when conflicts are fixed */
-#define ____for_each_physical_engine(fd__, ctx__, e__) \
- for (struct intel_engine_data i__##e__ = intel_init_engine_list(fd__, ctx__); \
- ((e__) = intel_get_current_physical_engine(&i__##e__)); \
- intel_next_engine(&i__##e__))
-
-#define __for_each_physical_engine(fd__, e__) \
- ____for_each_physical_engine(fd__, 0, e__)
-
__attribute__((format(scanf, 4, 5)))
int gem_engine_property_scanf(int i915, const char *engine, const char *attr,
const char *fmt, ...);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 70/81] tests/i915/gem_mmap_gtt: Convert to intel_ctx_t (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (68 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 69/81] lib/i915/gem_engine_topology: Delete the old physical engine iterators Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 71/81] tests/i915/i915_query: Convert to intel_ctx_t Jason Ekstrand
` (13 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
v2 (Ashutosh Dixit):
- Split the i915_query changes into their own patch
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_mmap_gtt.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/tests/i915/gem_mmap_gtt.c b/tests/i915/gem_mmap_gtt.c
index cbfa222a6..60282699e 100644
--- a/tests/i915/gem_mmap_gtt.c
+++ b/tests/i915/gem_mmap_gtt.c
@@ -737,14 +737,18 @@ static void
test_hang_busy(int i915)
{
uint32_t *ptr, *tile, *x;
+ const intel_ctx_t *ctx = intel_ctx_create(i915, NULL);
igt_spin_t *spin;
igt_hang_t hang;
uint32_t handle;
- hang = igt_allow_hang(i915, 0, 0);
+ hang = igt_allow_hang(i915, ctx->id, 0);
igt_require(igt_params_set(i915, "reset", "1")); /* global */
- spin = igt_spin_new(i915, .flags = IGT_SPIN_POLL_RUN | IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION);
+ spin = igt_spin_new(i915, .ctx = ctx,
+ .flags = IGT_SPIN_POLL_RUN |
+ IGT_SPIN_FENCE_OUT |
+ IGT_SPIN_NO_PREEMPTION);
igt_spin_busywait_until_started(spin);
igt_assert(spin->execbuf.buffer_count == 2);
@@ -785,20 +789,25 @@ test_hang_busy(int i915)
igt_spin_free(i915, spin);
igt_disallow_hang(i915, hang);
+ intel_ctx_destroy(i915, ctx);
}
static void
test_hang_user(int i915)
{
+ const intel_ctx_t *ctx = intel_ctx_create(i915, NULL);
uint32_t *ptr, *mem, *x;
igt_spin_t *spin;
igt_hang_t hang;
uint32_t handle;
- hang = igt_allow_hang(i915, 0, 0);
+ hang = igt_allow_hang(i915, ctx->id, 0);
igt_require(igt_params_set(i915, "reset", "1")); /* global */
- spin = igt_spin_new(i915, .flags = IGT_SPIN_POLL_RUN | IGT_SPIN_FENCE_OUT | IGT_SPIN_NO_PREEMPTION);
+ spin = igt_spin_new(i915, .ctx = ctx,
+ .flags = IGT_SPIN_POLL_RUN |
+ IGT_SPIN_FENCE_OUT |
+ IGT_SPIN_NO_PREEMPTION);
igt_spin_busywait_until_started(spin);
igt_assert(spin->execbuf.buffer_count == 2);
@@ -835,6 +844,7 @@ test_hang_user(int i915)
igt_spin_free(i915, spin);
igt_disallow_hang(i915, hang);
+ intel_ctx_destroy(i915, ctx);
}
static int min_tile_width(uint32_t devid, int tiling)
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 71/81] tests/i915/i915_query: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (69 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 70/81] tests/i915/gem_mmap_gtt: Convert to intel_ctx_t (v2) Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 72/81] igt/dummyload: Require an intel_ctx_t for POLL_RUN and !ALL_ENGINES Jason Ekstrand
` (12 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/i915_query.c | 17 +++++------------
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/tests/i915/i915_query.c b/tests/i915/i915_query.c
index 29b938e9c..0add3401b 100644
--- a/tests/i915/i915_query.c
+++ b/tests/i915/i915_query.c
@@ -682,19 +682,14 @@ static void engines(int fd)
for (i = 0; i < engines->num_engines; i++) {
struct drm_i915_engine_info *engine =
(struct drm_i915_engine_info *)&engines->engines[i];
- I915_DEFINE_CONTEXT_PARAM_ENGINES(p_engines, 1) = {
- .engines = { engine->engine }
- };
- struct drm_i915_gem_context_param param = {
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&p_engines),
- .size = sizeof(p_engines),
- };
-
+ const intel_ctx_t *ctx =
+ intel_ctx_create_for_engine(fd, engine->engine.engine_class,
+ engine->engine.engine_instance);
struct drm_i915_gem_exec_object2 obj = {};
struct drm_i915_gem_execbuffer2 execbuf = {
.buffers_ptr = to_user_pointer(&obj),
.buffer_count = 1,
+ .rsvd1 = ctx->id,
};
igt_debug("%u: class=%u instance=%u flags=%llx capabilities=%llx\n",
@@ -703,11 +698,9 @@ static void engines(int fd)
engine->engine.engine_instance,
engine->flags,
engine->capabilities);
- gem_context_set_param(fd, &param);
igt_assert_eq(__gem_execbuf(fd, &execbuf), -ENOENT);
- param.size = 0; /* reset context engine map to defaults */
+ gem_context_set_param(fd, &param);
+ intel_ctx_destroy(fd, ctx);
}
/* Check results match the legacy GET_PARAM (where we can). */
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 72/81] igt/dummyload: Require an intel_ctx_t for POLL_RUN and !ALL_ENGINES
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (70 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 71/81] tests/i915/i915_query: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 73/81] lib/i915: Rework engine API availability checks (v4) Jason Ekstrand
` (11 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
This lets us drop gem_context_lookup_engines
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
lib/i915/gem_engine_topology.c | 29 -----------------------------
lib/i915/gem_engine_topology.h | 3 ---
lib/igt_dummyload.c | 20 +++++---------------
3 files changed, 5 insertions(+), 47 deletions(-)
diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
index f9c881457..7c60daf07 100644
--- a/lib/i915/gem_engine_topology.c
+++ b/lib/i915/gem_engine_topology.c
@@ -317,35 +317,6 @@ intel_engine_list_for_ctx_cfg(int fd, const intel_ctx_cfg_t *cfg)
}
}
-static int gem_topology_get_param(int fd,
- struct drm_i915_gem_context_param *p)
-{
- if (igt_only_list_subtests())
- return -ENODEV;
-
- if (__gem_context_get_param(fd, p))
- return -1; /* using default engine map */
-
- return 0;
-}
-
-int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
- struct intel_execution_engine2 *e)
-{
- DEFINE_CONTEXT_ENGINES_PARAM(engines, param, ctx_id, GEM_MAX_ENGINES);
-
- /* a bit paranoic */
- igt_assert(e);
-
- if (gem_topology_get_param(fd, &param) || !param.size)
- return -EINVAL;
-
- e->class = engines.engines[engine].engine_class;
- e->instance = engines.engines[engine].engine_instance;
-
- return 0;
-}
-
/**
* gem_has_engine_topology:
* @fd: open i915 drm file descriptor
diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
index 8f5987f6a..991d0ff85 100644
--- a/lib/i915/gem_engine_topology.h
+++ b/lib/i915/gem_engine_topology.h
@@ -61,9 +61,6 @@ intel_get_current_physical_engine(struct intel_engine_data *ed);
void intel_next_engine(struct intel_engine_data *ed);
-int gem_context_lookup_engine(int fd, uint64_t engine, uint32_t ctx_id,
- struct intel_execution_engine2 *e);
-
bool gem_context_has_engine_map(int fd, uint32_t ctx);
bool gem_engine_is_equal(const struct intel_execution_engine2 *e1,
diff --git a/lib/igt_dummyload.c b/lib/igt_dummyload.c
index 7cc242872..5354b9c2b 100644
--- a/lib/igt_dummyload.c
+++ b/lib/igt_dummyload.c
@@ -424,22 +424,12 @@ igt_spin_factory(int fd, const struct igt_spin_factory *opts)
{
igt_spin_t *spin;
- if (opts->engine != ALL_ENGINES) {
- struct intel_execution_engine2 e;
- int class;
-
- if (opts->ctx) {
- class = opts->ctx->cfg.engines[opts->engine].engine_class;
- } else if (!gem_context_lookup_engine(fd, opts->engine,
- opts->ctx_id, &e)) {
- class = e.class;
- } else {
- gem_require_ring(fd, opts->engine);
- class = gem_execbuf_flags_to_engine_class(opts->engine);
- }
+ if ((opts->flags & IGT_SPIN_POLL_RUN) && opts->engine != ALL_ENGINES) {
+ unsigned int class;
- if (opts->flags & IGT_SPIN_POLL_RUN)
- igt_require(gem_class_can_store_dword(fd, class));
+ igt_assert(opts->ctx);
+ class = intel_ctx_engine_class(opts->ctx, opts->engine);
+ igt_require(gem_class_can_store_dword(fd, class));
}
if (opts->flags & IGT_SPIN_INVALID_CS)
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 73/81] lib/i915: Rework engine API availability checks (v4)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (71 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 72/81] igt/dummyload: Require an intel_ctx_t for POLL_RUN and !ALL_ENGINES Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 74/81] lib/intel_bb: Remove intel_bb_assign_vm and tests (v2) Jason Ekstrand
` (10 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Instead of relying on the context param, check for the device query or
attempt to set engines as a create param.
v2 (Jason Ekstrand):
- Add a common gem_has_context_engines helper
v3 (Jason Ekstrand):
- Drop gem_has_context_engines and always check I915_QUERY_ENGINE_INFO
v4 (Ashutosh Dixit):
- Add a comment to the gem_has_engine_topology docs saying that it can
be used to query either feature.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
lib/i915/gem_context.c | 7 +++---
lib/i915/gem_engine_topology.c | 40 +++++-----------------------------
lib/i915/gem_engine_topology.h | 2 --
tests/i915/gem_ctx_engines.c | 11 +---------
tests/i915/gem_exec_balancer.c | 11 +---------
tests/i915/gem_exec_schedule.c | 11 +---------
6 files changed, 13 insertions(+), 69 deletions(-)
diff --git a/lib/i915/gem_context.c b/lib/i915/gem_context.c
index 3428cf7a3..fe989a8d1 100644
--- a/lib/i915/gem_context.c
+++ b/lib/i915/gem_context.c
@@ -70,12 +70,13 @@ static int create_ext_ioctl(int i915,
bool gem_has_contexts(int fd)
{
uint32_t ctx_id = 0;
+ int err;
- __gem_context_create(fd, &ctx_id);
- if (ctx_id)
+ err = __gem_context_create(fd, &ctx_id);
+ if (!err)
gem_context_destroy(fd, ctx_id);
- return ctx_id;
+ return !err;
}
/**
diff --git a/lib/i915/gem_engine_topology.c b/lib/i915/gem_engine_topology.c
index 7c60daf07..4e497a5cb 100644
--- a/lib/i915/gem_engine_topology.c
+++ b/lib/i915/gem_engine_topology.c
@@ -86,20 +86,9 @@
* Limit what we support for simplicity due limitation in how much we
* can address via execbuf2.
*/
-#define SIZEOF_CTX_PARAM offsetof(struct i915_context_param_engines, \
- engines[GEM_MAX_ENGINES])
#define SIZEOF_QUERY offsetof(struct drm_i915_query_engine_info, \
engines[GEM_MAX_ENGINES])
-#define DEFINE_CONTEXT_ENGINES_PARAM(e__, p__, c__, N__) \
- I915_DEFINE_CONTEXT_PARAM_ENGINES(e__, N__); \
- struct drm_i915_gem_context_param p__ = { \
- .param = I915_CONTEXT_PARAM_ENGINES, \
- .ctx_id = c__, \
- .size = SIZEOF_CTX_PARAM, \
- .value = to_user_pointer(memset(&e__, 0, sizeof(e__))), \
- }
-
static int __gem_query(int fd, struct drm_i915_query *q)
{
int err = 0;
@@ -321,17 +310,17 @@ intel_engine_list_for_ctx_cfg(int fd, const intel_ctx_cfg_t *cfg)
* gem_has_engine_topology:
* @fd: open i915 drm file descriptor
*
- * Queries whether the engine topology API is supported or not.
+ * Queries whether the engine topology API is supported or not. Every
+ * kernel that has the global engines query should have the
+ * CONTEXT_PARAM_ENGINES and vice versa so this one check can be used for
+ * either.
*
* Returns: Engine topology API availability.
*/
bool gem_has_engine_topology(int fd)
{
- struct drm_i915_gem_context_param param = {
- .param = I915_CONTEXT_PARAM_ENGINES,
- };
-
- return !__gem_context_get_param(fd, &param);
+ struct intel_engine_data ed;
+ return !__query_engine_list(fd, &ed);
}
struct intel_execution_engine2 gem_eb_flags_to_engine(unsigned int flags)
@@ -360,23 +349,6 @@ struct intel_execution_engine2 gem_eb_flags_to_engine(unsigned int flags)
return e2__;
}
-bool gem_context_has_engine_map(int fd, uint32_t ctx)
-{
- struct drm_i915_gem_context_param param = {
- .param = I915_CONTEXT_PARAM_ENGINES,
- .ctx_id = ctx
- };
-
- /*
- * If the kernel is too old to support PARAM_ENGINES,
- * then naturally the context has no engine map.
- */
- if (__gem_context_get_param(fd, &param))
- return false;
-
- return param.size;
-}
-
bool gem_engine_is_equal(const struct intel_execution_engine2 *e1,
const struct intel_execution_engine2 *e2)
{
diff --git a/lib/i915/gem_engine_topology.h b/lib/i915/gem_engine_topology.h
index 991d0ff85..4cfab560b 100644
--- a/lib/i915/gem_engine_topology.h
+++ b/lib/i915/gem_engine_topology.h
@@ -61,8 +61,6 @@ intel_get_current_physical_engine(struct intel_engine_data *ed);
void intel_next_engine(struct intel_engine_data *ed);
-bool gem_context_has_engine_map(int fd, uint32_t ctx);
-
bool gem_engine_is_equal(const struct intel_execution_engine2 *e1,
const struct intel_execution_engine2 *e2);
diff --git a/tests/i915/gem_ctx_engines.c b/tests/i915/gem_ctx_engines.c
index caa97774c..bd622b0e5 100644
--- a/tests/i915/gem_ctx_engines.c
+++ b/tests/i915/gem_ctx_engines.c
@@ -46,15 +46,6 @@
#define engine_class(e, n) ((e)->engines[(n)].engine_class)
#define engine_instance(e, n) ((e)->engines[(n)].engine_instance)
-static bool has_context_engines(int i915)
-{
- struct drm_i915_gem_context_param param = {
- .ctx_id = 0,
- .param = I915_CONTEXT_PARAM_ENGINES,
- };
- return __gem_context_set_param(i915, &param) == 0;
-}
-
static void invalid_engines(int i915)
{
struct i915_context_param_engines stack = {}, *engines;
@@ -561,7 +552,7 @@ igt_main
igt_require_gem(i915);
gem_require_contexts(i915);
- igt_require(has_context_engines(i915));
+ igt_require(gem_has_engine_topology(i915));
igt_fork_hang_detector(i915);
}
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 952428298..85f94d795 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -2786,15 +2786,6 @@ static bool has_persistence(int i915)
return __gem_context_set_param(i915, &p) == 0;
}
-static bool has_context_engines(int i915)
-{
- struct drm_i915_gem_context_param p = {
- .param = I915_CONTEXT_PARAM_ENGINES,
- };
-
- return __gem_context_set_param(i915, &p) == 0;
-}
-
static bool has_load_balancer(int i915)
{
const intel_ctx_cfg_t cfg = {
@@ -2819,7 +2810,7 @@ igt_main
igt_require_gem(i915);
gem_require_contexts(i915);
- igt_require(has_context_engines(i915));
+ igt_require(gem_has_engine_topology(i915));
igt_require(has_load_balancer(i915));
igt_require(has_perf_engines(i915));
diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index 3a51b51da..a75faeb68 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -1665,15 +1665,6 @@ static void preempt_queue(int fd, const intel_ctx_cfg_t *cfg,
}
}
-static bool has_context_engines(int i915)
-{
- struct drm_i915_gem_context_param param = {
- .ctx_id = 0,
- .param = I915_CONTEXT_PARAM_ENGINES,
- };
- return __gem_context_set_param(i915, &param) == 0;
-}
-
static void preempt_engines(int i915,
const struct intel_execution_engine2 *e,
unsigned int flags)
@@ -1694,7 +1685,7 @@ static void preempt_engines(int i915,
* timeline that we can reprioritise and shuffle amongst themselves.
*/
- igt_require(has_context_engines(i915));
+ igt_require(gem_has_engine_topology(i915));
for (int n = 0; n < GEM_MAX_ENGINES; n++) {
cfg.engines[n].engine_class = e->class;
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 74/81] lib/intel_bb: Remove intel_bb_assign_vm and tests (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (72 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 73/81] lib/i915: Rework engine API availability checks (v4) Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 75/81] tests/i915/gem_ctx_param: Stop setting VMs on old contexts Jason Ekstrand
` (9 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
It's not used by anything other than the tests for that functionality
and it relies on setting the VM via SET_CONTEXT_PARAM which is
deprecated. Delete it for now. We can add it back in later if it's
actually useful and do it properly then.
v2 (Zbigniew Kempczyński):
- Also remove intel_bb_detach_intel_buf()
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
lib/intel_batchbuffer.c | 65 ------------------------
lib/intel_batchbuffer.h | 4 --
tests/i915/api_intel_bb.c | 104 --------------------------------------
3 files changed, 173 deletions(-)
diff --git a/lib/intel_batchbuffer.c b/lib/intel_batchbuffer.c
index cc976a624..2b8b903e2 100644
--- a/lib/intel_batchbuffer.c
+++ b/lib/intel_batchbuffer.c
@@ -1513,14 +1513,6 @@ static void __intel_bb_destroy_cache(struct intel_bb *ibb)
ibb->root = NULL;
}
-static void __intel_bb_detach_intel_bufs(struct intel_bb *ibb)
-{
- struct intel_buf *entry, *tmp;
-
- igt_list_for_each_entry_safe(entry, tmp, &ibb->intel_bufs, link)
- intel_bb_detach_intel_buf(ibb, entry);
-}
-
static void __intel_bb_remove_intel_bufs(struct intel_bb *ibb)
{
struct intel_buf *entry, *tmp;
@@ -1649,50 +1641,6 @@ int intel_bb_sync(struct intel_bb *ibb)
return ret;
}
-uint64_t intel_bb_assign_vm(struct intel_bb *ibb, uint64_t allocator,
- uint32_t vm_id)
-{
- struct drm_i915_gem_context_param arg = {
- .param = I915_CONTEXT_PARAM_VM,
- };
- uint64_t prev_allocator = ibb->allocator_handle;
- bool closed = false;
-
- if (ibb->vm_id == vm_id) {
- igt_debug("Skipping to assign same vm_id: %u\n", vm_id);
- return 0;
- }
-
- /* Cannot switch if someone keeps bb refcount */
- igt_assert(ibb->refcount == 1);
-
- /* Detach intel_bufs and remove bb handle */
- __intel_bb_detach_intel_bufs(ibb);
- intel_bb_remove_object(ibb, ibb->handle, ibb->batch_offset, ibb->size);
-
- /* Cache + objects are not valid after change anymore */
- __intel_bb_destroy_objects(ibb);
- __intel_bb_destroy_cache(ibb);
-
- /* Attach new allocator */
- ibb->allocator_handle = allocator;
-
- /* Setparam */
- ibb->vm_id = vm_id;
-
- /* Skip set param, we likely return to default vm */
- if (vm_id) {
- arg.ctx_id = ibb->ctx;
- arg.value = vm_id;
- gem_context_set_param(ibb->i915, &arg);
- }
-
- /* Recreate bb */
- intel_bb_reset(ibb, false);
-
- return closed ? 0 : prev_allocator;
-}
-
/*
* intel_bb_print:
* @ibb: pointer to intel_bb
@@ -2040,19 +1988,6 @@ intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *bu
return __intel_bb_add_intel_buf(ibb, buf, alignment, write);
}
-void intel_bb_detach_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
-{
- igt_assert(ibb);
- igt_assert(buf);
- igt_assert(!buf->ibb || buf->ibb == ibb);
-
- if (!igt_list_empty(&buf->link)) {
- buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
- buf->ibb = NULL;
- igt_list_del_init(&buf->link);
- }
-}
-
bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf)
{
bool removed;
diff --git a/lib/intel_batchbuffer.h b/lib/intel_batchbuffer.h
index 6f148713b..bd417e998 100644
--- a/lib/intel_batchbuffer.h
+++ b/lib/intel_batchbuffer.h
@@ -523,9 +523,6 @@ static inline void intel_bb_unref(struct intel_bb *ibb)
void intel_bb_reset(struct intel_bb *ibb, bool purge_objects_cache);
int intel_bb_sync(struct intel_bb *ibb);
-uint64_t intel_bb_assign_vm(struct intel_bb *ibb, uint64_t allocator,
- uint32_t vm_id);
-
void intel_bb_print(struct intel_bb *ibb);
void intel_bb_dump(struct intel_bb *ibb, const char *filename);
void intel_bb_set_debug(struct intel_bb *ibb, bool debug);
@@ -588,7 +585,6 @@ intel_bb_add_intel_buf(struct intel_bb *ibb, struct intel_buf *buf, bool write);
struct drm_i915_gem_exec_object2 *
intel_bb_add_intel_buf_with_alignment(struct intel_bb *ibb, struct intel_buf *buf,
uint64_t alignment, bool write);
-void intel_bb_detach_intel_buf(struct intel_bb *ibb, struct intel_buf *buf);
bool intel_bb_remove_intel_buf(struct intel_bb *ibb, struct intel_buf *buf);
void intel_bb_print_intel_bufs(struct intel_bb *ibb);
struct drm_i915_gem_exec_object2 *
diff --git a/tests/i915/api_intel_bb.c b/tests/i915/api_intel_bb.c
index 7ffe64fc5..74cb18417 100644
--- a/tests/i915/api_intel_bb.c
+++ b/tests/i915/api_intel_bb.c
@@ -241,107 +241,6 @@ static void bb_with_allocator(struct buf_ops *bops)
intel_bb_destroy(ibb);
}
-static void bb_with_vm(struct buf_ops *bops)
-{
- int i915 = buf_ops_get_fd(bops);
- struct drm_i915_gem_context_param arg = {
- .param = I915_CONTEXT_PARAM_VM,
- };
- struct intel_bb *ibb;
- struct intel_buf *src, *dst, *gap;
- uint32_t ctx = 0, vm_id1, vm_id2;
- uint64_t prev_vm, vm;
- uint64_t src_addr[5], dst_addr[5];
-
- igt_require(gem_uses_full_ppgtt(i915));
-
- ibb = intel_bb_create_with_allocator(i915, ctx, PAGE_SIZE,
- INTEL_ALLOCATOR_SIMPLE);
- if (debug_bb)
- intel_bb_set_debug(ibb, true);
-
- src = intel_buf_create(bops, 4096/32, 32, 8, 0, I915_TILING_NONE,
- I915_COMPRESSION_NONE);
- dst = intel_buf_create(bops, 4096/32, 32, 8, 0, I915_TILING_NONE,
- I915_COMPRESSION_NONE);
- gap = intel_buf_create(bops, 4096, 128, 8, 0, I915_TILING_NONE,
- I915_COMPRESSION_NONE);
-
- /* vm for second blit */
- vm_id1 = gem_vm_create(i915);
-
- /* Get vm_id for default vm */
- arg.ctx_id = ctx;
- gem_context_get_param(i915, &arg);
- vm_id2 = arg.value;
-
- igt_debug("Vm_id1: %u\n", vm_id1);
- igt_debug("Vm_id2: %u\n", vm_id2);
-
- /* First blit without set calling setparam */
- intel_bb_copy_intel_buf(ibb, dst, src, 4096);
- src_addr[0] = src->addr.offset;
- dst_addr[0] = dst->addr.offset;
- igt_debug("step1: src: 0x%llx, dst: 0x%llx\n",
- (long long) src_addr[0], (long long) dst_addr[0]);
-
- /* Open new allocator with vm_id */
- vm = intel_allocator_open_vm(i915, vm_id1, INTEL_ALLOCATOR_SIMPLE);
- prev_vm = intel_bb_assign_vm(ibb, vm, vm_id1);
-
- intel_bb_add_intel_buf(ibb, gap, false);
- intel_bb_copy_intel_buf(ibb, dst, src, 4096);
- src_addr[1] = src->addr.offset;
- dst_addr[1] = dst->addr.offset;
- igt_debug("step2: src: 0x%llx, dst: 0x%llx\n",
- (long long) src_addr[1], (long long) dst_addr[1]);
-
- /* Back with default vm */
- intel_bb_assign_vm(ibb, prev_vm, vm_id2);
- intel_bb_add_intel_buf(ibb, gap, false);
- intel_bb_copy_intel_buf(ibb, dst, src, 4096);
- src_addr[2] = src->addr.offset;
- dst_addr[2] = dst->addr.offset;
- igt_debug("step3: src: 0x%llx, dst: 0x%llx\n",
- (long long) src_addr[2], (long long) dst_addr[2]);
-
- /* And exchange one more time */
- intel_bb_assign_vm(ibb, vm, vm_id1);
- intel_bb_copy_intel_buf(ibb, dst, src, 4096);
- src_addr[3] = src->addr.offset;
- dst_addr[3] = dst->addr.offset;
- igt_debug("step4: src: 0x%llx, dst: 0x%llx\n",
- (long long) src_addr[3], (long long) dst_addr[3]);
-
- /* Back with default vm */
- gem_vm_destroy(i915, vm_id1);
- gem_vm_destroy(i915, vm_id2);
- intel_bb_assign_vm(ibb, prev_vm, 0);
-
- /* We can close it after assign previous vm to ibb */
- intel_allocator_close(vm);
-
- /* Try default vm still works */
- intel_bb_copy_intel_buf(ibb, dst, src, 4096);
- src_addr[4] = src->addr.offset;
- dst_addr[4] = dst->addr.offset;
- igt_debug("step5: src: 0x%llx, dst: 0x%llx\n",
- (long long) src_addr[4], (long long) dst_addr[4]);
-
- /* Addresses should match for vm and prev_vm blits */
- igt_assert_eq(src_addr[0], src_addr[2]);
- igt_assert_eq(dst_addr[0], dst_addr[2]);
- igt_assert_eq(src_addr[1], src_addr[3]);
- igt_assert_eq(dst_addr[1], dst_addr[3]);
- igt_assert_eq(src_addr[2], src_addr[4]);
- igt_assert_eq(dst_addr[2], dst_addr[4]);
-
- intel_buf_destroy(src);
- intel_buf_destroy(dst);
- intel_buf_destroy(gap);
- intel_bb_destroy(ibb);
-}
-
/*
* Make sure we lead to realloc in the intel_bb.
*/
@@ -1558,9 +1457,6 @@ igt_main_args("dpib", NULL, help_str, opt_handler, NULL)
igt_subtest("bb-with-allocator")
bb_with_allocator(bops);
- igt_subtest("bb-with-vm")
- bb_with_vm(bops);
-
igt_subtest("lot-of-buffers")
lot_of_buffers(bops);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 75/81] tests/i915/gem_ctx_param: Stop setting VMs on old contexts
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (73 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 74/81] lib/intel_bb: Remove intel_bb_assign_vm and tests (v2) Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 76/81] tests/i915/gen9_exec_parse: Convert to intel_ctx_t Jason Ekstrand
` (8 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
We can still test all the same basic functionality, we just have to
re-create the context more often.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_param.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/tests/i915/gem_ctx_param.c b/tests/i915/gem_ctx_param.c
index c1b46a16c..54bf0cc96 100644
--- a/tests/i915/gem_ctx_param.c
+++ b/tests/i915/gem_ctx_param.c
@@ -162,6 +162,7 @@ static void test_vm(int i915)
struct drm_i915_gem_context_param arg = {
.param = I915_CONTEXT_PARAM_VM,
};
+ int err;
uint32_t parent, child;
igt_spin_t *spin;
@@ -174,8 +175,11 @@ static void test_vm(int i915)
* in the next context that shared the VM.
*/
+ arg.ctx_id = gem_context_create(i915);
arg.value = -1ull;
- igt_require(__gem_context_set_param(i915, &arg) == -ENOENT);
+ err = __gem_context_set_param(i915, &arg);
+ gem_context_destroy(i915, arg.ctx_id);
+ igt_require(err == -ENOENT);
parent = gem_context_create(i915);
child = gem_context_create(i915);
@@ -199,6 +203,7 @@ static void test_vm(int i915)
batch.offset = 0;
gem_execbuf(i915, &eb);
igt_assert_eq_u64(batch.offset, 0);
+ gem_context_destroy(i915, child);
eb.rsvd1 = parent;
gem_execbuf(i915, &eb);
@@ -206,14 +211,9 @@ static void test_vm(int i915)
arg.ctx_id = parent;
gem_context_get_param(i915, &arg);
- gem_context_set_param(i915, &arg);
-
- /* Still the same VM, so expect the old VMA again */
- batch.offset = 0;
- gem_execbuf(i915, &eb);
- igt_assert_eq_u64(batch.offset, nonzero_offset);
/* Note: changing an active ctx->vm may be verboten */
+ child = gem_context_create(i915);
arg.ctx_id = child;
if (__gem_context_set_param(i915, &arg) != -EBUSY) {
eb.rsvd1 = child;
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 76/81] tests/i915/gen9_exec_parse: Convert to intel_ctx_t
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (74 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 75/81] tests/i915/gem_ctx_param: Stop setting VMs on old contexts Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 77/81] tests/i915/gem_ctx_param: Add tests for recently removed params Jason Ekstrand
` (7 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gen9_exec_parse.c | 106 +++++++++++++++++------------------
1 file changed, 53 insertions(+), 53 deletions(-)
diff --git a/tests/i915/gen9_exec_parse.c b/tests/i915/gen9_exec_parse.c
index e10c6ce9f..6e6ee3af7 100644
--- a/tests/i915/gen9_exec_parse.c
+++ b/tests/i915/gen9_exec_parse.c
@@ -81,7 +81,7 @@ __checked_execbuf(int i915, struct drm_i915_gem_execbuffer2 *eb)
}
static int
-__exec_batch_patched(int i915, int engine,
+__exec_batch_patched(int i915, const intel_ctx_t *ctx, int engine,
uint32_t cmd_bo, const uint32_t *cmds, int size,
uint32_t target_bo, uint64_t target_offset, uint64_t target_delta)
{
@@ -110,12 +110,13 @@ __exec_batch_patched(int i915, int engine,
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 2;
execbuf.batch_len = size;
+ execbuf.rsvd1 = ctx->id;
execbuf.flags = engine;
return __checked_execbuf(i915, &execbuf);
}
-static void exec_batch_patched(int i915, int engine,
+static void exec_batch_patched(int i915, const intel_ctx_t *ctx, int engine,
uint32_t cmd_bo, const uint32_t *cmds,
int size, int patch_offset,
long int expected_value)
@@ -124,7 +125,8 @@ static void exec_batch_patched(int i915, int engine,
uint64_t actual_value = 0;
long int ret;
- ret = __exec_batch_patched(i915, engine, cmd_bo, cmds, size, target_bo, patch_offset, 0);
+ ret = __exec_batch_patched(i915, ctx, engine, cmd_bo, cmds, size,
+ target_bo, patch_offset, 0);
if (ret) {
igt_assert_lt(ret, 0);
gem_close(i915, target_bo);
@@ -139,8 +141,8 @@ static void exec_batch_patched(int i915, int engine,
igt_assert_eq(actual_value, expected_value);
}
-static int __exec_batch(int i915, int engine, uint32_t cmd_bo,
- const uint32_t *cmds, int size)
+static int __exec_batch(int i915, const intel_ctx_t *ctx, int engine,
+ uint32_t cmd_bo, const uint32_t *cmds, int size)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[1];
@@ -154,6 +156,7 @@ static int __exec_batch(int i915, int engine, uint32_t cmd_bo,
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 1;
execbuf.batch_len = size;
+ execbuf.rsvd1 = ctx->id;
execbuf.flags = engine;
return __checked_execbuf(i915, &execbuf);
@@ -173,12 +176,12 @@ static void print_batch(const uint32_t *cmds, const uint32_t sz)
#define print_batch(cmds, size)
#endif
-#define exec_batch(i915, engine, bo, cmds, sz, expected) \
+#define exec_batch(i915, ctx, engine, bo, cmds, sz, expected) \
print_batch(cmds, sz); \
- igt_assert_eq(__exec_batch(i915, engine, bo, cmds, sz), expected)
+ igt_assert_eq(__exec_batch(i915, ctx, engine, bo, cmds, sz), expected)
-static void exec_split_batch(int i915, int engine, const uint32_t *cmds,
- int size, int expected_ret)
+static void exec_split_batch(int i915, const intel_ctx_t *ctx, int engine,
+ const uint32_t *cmds, int size, int expected_ret)
{
struct drm_i915_gem_execbuffer2 execbuf;
struct drm_i915_gem_exec_object2 obj[1];
@@ -213,6 +216,7 @@ static void exec_split_batch(int i915, int engine, const uint32_t *cmds,
execbuf.batch_len =
ALIGN(size + actual_start_offset - execbuf.batch_start_offset,
0x8);
+ execbuf.rsvd1 = ctx->id;
execbuf.flags = engine;
igt_assert_eq(__checked_execbuf(i915, &execbuf), expected_ret);
@@ -220,7 +224,7 @@ static void exec_split_batch(int i915, int engine, const uint32_t *cmds,
gem_close(i915, cmd_bo);
}
-static void exec_batch_chained(int i915, int engine,
+static void exec_batch_chained(int i915, const intel_ctx_t *ctx, int engine,
uint32_t cmd_bo, const uint32_t *cmds,
int size, int patch_offset,
uint64_t expected_value,
@@ -276,6 +280,7 @@ static void exec_batch_chained(int i915, int engine,
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = 3;
execbuf.batch_len = sizeof(first_level_cmds);
+ execbuf.rsvd1 = ctx->id;
execbuf.flags = engine;
ret = __checked_execbuf(i915, &execbuf);
@@ -371,7 +376,8 @@ static void test_allowed_all(const int i915, const uint32_t handle)
b = inject_cmd(b, MI_BATCH_BUFFER_END, 1);
- exec_batch(i915, I915_EXEC_BLT, handle, batch, batch_bytes(batch, b), 0);
+ exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT,
+ handle, batch, batch_bytes(batch, b), 0);
}
static void test_allowed_single(const int i915, const uint32_t handle)
@@ -386,7 +392,8 @@ static void test_allowed_single(const int i915, const uint32_t handle)
b = inject_cmd(b, MI_BATCH_BUFFER_END, 1);
- igt_assert_eq(__exec_batch(i915, I915_EXEC_BLT, handle,
+ igt_assert_eq(__exec_batch(i915, intel_ctx_0(i915),
+ I915_EXEC_BLT, handle,
batch, batch_bytes(batch, b)),
0);
};
@@ -658,14 +665,14 @@ static void test_bb_chained(const int i915, const uint32_t handle)
MI_BATCH_BUFFER_END,
};
- exec_batch_chained(i915, I915_EXEC_RENDER,
+ exec_batch_chained(i915, intel_ctx_0(i915), I915_EXEC_RENDER,
handle,
batch, sizeof(batch),
4,
0xbaadf00d,
0);
- exec_batch_chained(i915, I915_EXEC_BLT,
+ exec_batch_chained(i915, intel_ctx_0(i915), I915_EXEC_BLT,
handle,
batch, sizeof(batch),
4,
@@ -690,11 +697,11 @@ static void test_cmd_crossing_page(const int i915, const uint32_t handle)
MI_BATCH_BUFFER_END,
};
- exec_split_batch(i915, I915_EXEC_BLT,
+ exec_split_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT,
lri_ok, sizeof(lri_ok),
0);
- exec_batch_patched(i915, I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
0xbaadf00d);
@@ -729,25 +736,25 @@ static void test_invalid_length(const int i915, const uint32_t handle)
MI_BATCH_BUFFER_END,
};
- exec_batch(i915, I915_EXEC_BLT, handle,
+ exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
lri_ok, sizeof(lri_ok),
0);
- exec_batch_patched(i915, I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
ok_val);
- exec_batch(i915, I915_EXEC_BLT, handle,
+ exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
lri_bad, 0,
0);
- exec_batch_patched(i915, I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
ok_val);
- exec_batch(i915, I915_EXEC_BLT, handle,
+ exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
lri_ok, 4096,
0);
@@ -842,20 +849,20 @@ static void test_register(const int i915, const uint32_t handle,
MI_BATCH_BUFFER_END,
};
- exec_batch(i915, I915_EXEC_BLT, handle,
+ exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
lri_mask, sizeof(lri_mask),
r->privileged ? -EACCES : 0);
- exec_batch_patched(i915, I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
r->privileged ? -EACCES : r->mask);
- exec_batch(i915, I915_EXEC_BLT, handle,
+ exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
lri_zero, sizeof(lri_zero),
r->privileged ? -EACCES : 0);
- exec_batch_patched(i915, I915_EXEC_BLT, handle,
+ exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
store_reg, sizeof(store_reg),
2 * sizeof(uint32_t), /* reloc */
r->privileged ? -EACCES : 0);
@@ -886,8 +893,8 @@ static long int read_reg(const int i915, const uint32_t handle,
target_bo = gem_create(i915, HANDLE_SIZE);
- ret = __exec_batch_patched(i915, I915_EXEC_BLT, handle,
- store_reg, sizeof(store_reg),
+ ret = __exec_batch_patched(i915, intel_ctx_0(i915), I915_EXEC_BLT,
+ handle, store_reg, sizeof(store_reg),
target_bo, 2 * sizeof(uint32_t), 0);
if (ret) {
@@ -913,7 +920,7 @@ static int write_reg(const int i915, const uint32_t handle,
MI_BATCH_BUFFER_END,
};
- return __exec_batch(i915, I915_EXEC_BLT, handle,
+ return __exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
lri, sizeof(lri));
}
@@ -999,7 +1006,8 @@ static void test_unaligned_jump(const int i915, const uint32_t handle)
}
static void
-test_reject_on_engine(int i915, uint32_t handle, unsigned int engine)
+test_reject_on_engine(int i915, const intel_ctx_t *ctx, unsigned int engine,
+ uint32_t handle)
{
const uint32_t invalid_cmd[] = {
INSTR_INVALID_CLIENT << INSTR_CLIENT_SHIFT,
@@ -1010,45 +1018,37 @@ test_reject_on_engine(int i915, uint32_t handle, unsigned int engine)
MI_BATCH_BUFFER_END,
};
- exec_batch(i915, engine, handle,
+ exec_batch(i915, ctx, engine, handle,
invalid_cmd, sizeof(invalid_cmd),
-EINVAL);
- exec_batch(i915, engine, handle,
+ exec_batch(i915, ctx, engine, handle,
invalid_set_context, sizeof(invalid_set_context),
-EINVAL);
}
static void test_rejected(int i915, uint32_t handle, bool ctx_param)
{
-#define engine_class(e, n) ((e)->engines[(n)].engine_class)
-#define engine_instance(e, n) ((e)->engines[(n)].engine_instance)
-
if (ctx_param) {
+ intel_ctx_cfg_t cfg = {};
+ const intel_ctx_t *ctx;
int i;
- I915_DEFINE_CONTEXT_PARAM_ENGINES(engines , I915_EXEC_RING_MASK + 1);
- struct drm_i915_gem_context_param param = {
- .ctx_id = 0,
- .param = I915_CONTEXT_PARAM_ENGINES,
- .value = to_user_pointer(&engines),
- .size = sizeof(engines),
- };
-
- memset(&engines, 0, sizeof(engines));
- for (i = 0; i <= I915_EXEC_RING_MASK; i++) {
- engine_class(&engines, i) = I915_ENGINE_CLASS_COPY;
- engine_instance(&engines, i) = 0;
+ for (i = 0; i < GEM_MAX_ENGINES; i++) {
+ cfg.engines[i].engine_class = I915_ENGINE_CLASS_COPY;
+ cfg.engines[i].engine_instance = 0;
}
- gem_context_set_param(i915, &param);
+ cfg.num_engines = GEM_MAX_ENGINES;
+
+ ctx = intel_ctx_create(i915, &cfg);
for (i = 0; i <= I915_EXEC_RING_MASK; i++)
- test_reject_on_engine(i915, handle, i);
+ test_reject_on_engine(i915, ctx, i, handle);
- param.size = 0;
- gem_context_set_param(i915, &param);
+ intel_ctx_destroy(i915, ctx);
} else {
- test_reject_on_engine(i915, handle, I915_EXEC_BLT);
+ test_reject_on_engine(i915, intel_ctx_0(i915),
+ I915_EXEC_BLT, handle);
}
}
@@ -1223,7 +1223,7 @@ igt_main
igt_subtest("batch-without-end") {
const uint32_t noop[1024] = { 0 };
- exec_batch(i915, I915_EXEC_BLT, handle,
+ exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
noop, sizeof(noop),
-EINVAL);
}
@@ -1231,7 +1231,7 @@ igt_main
igt_subtest("batch-zero-length") {
const uint32_t noop[] = { 0, MI_BATCH_BUFFER_END };
- exec_batch(i915, I915_EXEC_BLT, handle,
+ exec_batch(i915, intel_ctx_0(i915), I915_EXEC_BLT, handle,
noop, 0,
-EINVAL);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 77/81] tests/i915/gem_ctx_param: Add tests for recently removed params
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (75 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 76/81] tests/i915/gen9_exec_parse: Convert to intel_ctx_t Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 78/81] tests/i915/gem_ctx_param: Add a couple invalid PARAM_VM cases Jason Ekstrand
` (6 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_param.c | 44 ++++++++++++++++++++++++++++++++++++++
1 file changed, 44 insertions(+)
diff --git a/tests/i915/gem_ctx_param.c b/tests/i915/gem_ctx_param.c
index 54bf0cc96..7fba37cbd 100644
--- a/tests/i915/gem_ctx_param.c
+++ b/tests/i915/gem_ctx_param.c
@@ -244,6 +244,35 @@ static void test_vm(int i915)
gem_close(i915, batch.handle);
}
+static void test_set_invalid_param(int fd, uint64_t param, uint64_t value)
+{
+ /* Create a fresh context */
+ struct drm_i915_gem_context_param arg = {
+ .ctx_id = gem_context_create(fd),
+ .param = param,
+ .value = value,
+ };
+ int err;
+
+ err = __gem_context_set_param(fd, &arg);
+ gem_context_destroy(fd, arg.ctx_id);
+ igt_assert_eq(err, -EINVAL);
+}
+
+static void test_get_invalid_param(int fd, uint64_t param)
+{
+ /* Create a fresh context */
+ struct drm_i915_gem_context_param arg = {
+ .ctx_id = gem_context_create(fd),
+ .param = param,
+ };
+ int err;
+
+ err = __gem_context_get_param(fd, &arg);
+ gem_context_destroy(fd, arg.ctx_id);
+ igt_assert_eq(err, -EINVAL);
+}
+
igt_main
{
struct drm_i915_gem_context_param arg;
@@ -406,6 +435,21 @@ igt_main
igt_assert_eq(__gem_context_set_param(fd, &arg), -EINVAL);
}
+ igt_subtest("invalid-set-ringsize")
+ test_set_invalid_param(fd, I915_CONTEXT_PARAM_RINGSIZE, 8192);
+
+ igt_subtest("invalid-get-ringsize")
+ test_get_invalid_param(fd, I915_CONTEXT_PARAM_RINGSIZE);
+
+ igt_subtest("invalid-set-no-zeromap")
+ test_set_invalid_param(fd, I915_CONTEXT_PARAM_NO_ZEROMAP, 1);
+
+ igt_subtest("invalid-get-no-zeromap")
+ test_get_invalid_param(fd, I915_CONTEXT_PARAM_NO_ZEROMAP);
+
+ igt_subtest("invalid-get-engines")
+ test_get_invalid_param(fd, I915_CONTEXT_PARAM_ENGINES);
+
igt_fixture
close(fd);
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 78/81] tests/i915/gem_ctx_param: Add a couple invalid PARAM_VM cases
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (76 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 77/81] tests/i915/gem_ctx_param: Add tests for recently removed params Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 79/81] tests/i915/gem_ctx_engines: Fix the invalid subtest for the new rules Jason Ekstrand
` (5 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_param.c | 17 +++++++++++++++++
1 file changed, 17 insertions(+)
diff --git a/tests/i915/gem_ctx_param.c b/tests/i915/gem_ctx_param.c
index 7fba37cbd..c795f1b45 100644
--- a/tests/i915/gem_ctx_param.c
+++ b/tests/i915/gem_ctx_param.c
@@ -181,6 +181,23 @@ static void test_vm(int i915)
gem_context_destroy(i915, arg.ctx_id);
igt_require(err == -ENOENT);
+ /* Test that we can't set the VM on ctx0 */
+ arg.ctx_id = 0;
+ arg.value = gem_vm_create(i915);
+ err = __gem_context_set_param(i915, &arg);
+ gem_vm_destroy(i915, arg.value);
+ igt_assert_eq(err, -EINVAL);
+
+ /* Test that we can't set the VM after we've done an execbuf */
+ arg.ctx_id = gem_context_create(i915);
+ spin = igt_spin_new(i915, .ctx_id = arg.ctx_id);
+ igt_spin_free(i915, spin);
+ arg.value = gem_vm_create(i915);
+ err = __gem_context_set_param(i915, &arg);
+ gem_context_destroy(i915, arg.ctx_id);
+ gem_vm_destroy(i915, arg.value);
+ igt_assert_eq(err, -EINVAL);
+
parent = gem_context_create(i915);
child = gem_context_create(i915);
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 79/81] tests/i915/gem_ctx_engines: Fix the invalid subtest for the new rules
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (77 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 78/81] tests/i915/gem_ctx_param: Add a couple invalid PARAM_VM cases Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 80/81] tests/i915/gem_exec_balancer: Fix invalid-balancer for the set-once rule Jason Ekstrand
` (4 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Since we no longer allow setting engines multiple times on one context,
we need to create a new context every time. We've also got more invalid
cases that need testing.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_ctx_engines.c | 88 ++++++++++++++++++++++++------------
1 file changed, 58 insertions(+), 30 deletions(-)
diff --git a/tests/i915/gem_ctx_engines.c b/tests/i915/gem_ctx_engines.c
index bd622b0e5..bfa83f7e5 100644
--- a/tests/i915/gem_ctx_engines.c
+++ b/tests/i915/gem_ctx_engines.c
@@ -46,31 +46,44 @@
#define engine_class(e, n) ((e)->engines[(n)].engine_class)
#define engine_instance(e, n) ((e)->engines[(n)].engine_instance)
+static int
+__set_param_fresh_context(int i915, struct drm_i915_gem_context_param param)
+{
+ int err;
+
+ igt_assert_eq(param.ctx_id, 0);
+ param.ctx_id = gem_context_create(i915);
+ err = __gem_context_set_param(i915, &param);
+ gem_context_destroy(i915, param.ctx_id);
+
+ return err;
+}
+
static void invalid_engines(int i915)
{
struct i915_context_param_engines stack = {}, *engines;
struct drm_i915_gem_context_param param = {
- .ctx_id = gem_context_create(i915),
.param = I915_CONTEXT_PARAM_ENGINES,
.value = to_user_pointer(&stack),
};
uint32_t handle;
+ igt_spin_t *spin;
void *ptr;
param.size = 0;
- igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
param.size = 1;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
param.size = sizeof(stack) - 1;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
param.size = sizeof(stack) + 1;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
- param.size = 0;
- igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+ param.size = sizeof(*engines) + (I915_EXEC_RING_MASK + 2) * sizeof(*engines->engines);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EINVAL);
/* Create a single page surrounded by inaccessible nothingness */
ptr = mmap(NULL, 3 * 4096, PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
@@ -84,57 +97,57 @@ static void invalid_engines(int i915)
param.value = to_user_pointer(engines);
engines->engines[0].engine_class = -1;
- igt_assert_eq(__gem_context_set_param(i915, &param), -ENOENT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -ENOENT);
mprotect(engines, 4096, PROT_READ);
- igt_assert_eq(__gem_context_set_param(i915, &param), -ENOENT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -ENOENT);
mprotect(engines, 4096, PROT_WRITE);
engines->engines[0].engine_class = 0;
- if (__gem_context_set_param(i915, &param)) /* XXX needs RCS */
+ if (__set_param_fresh_context(i915, param)) /* XXX needs RCS */
goto out;
engines->extensions = to_user_pointer(ptr);
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
engines->extensions = 0;
- igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+ igt_assert_eq(__set_param_fresh_context(i915, param), 0);
param.value = to_user_pointer(engines - 1);
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines) - 1;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines) - param.size + 1;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines) + 4096;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines) - param.size + 4096;
- igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+ igt_assert_eq(__set_param_fresh_context(i915, param), 0);
param.value = to_user_pointer(engines) - param.size + 4096 + 1;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines) + 4096;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines) + 4096 - 1;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines) - 1;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines - 1);
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines - 1) + 4096;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(engines - 1) + 4096 - sizeof(*engines->engines) / 2;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
handle = gem_create(i915, 4096 * 3);
ptr = gem_mmap__device_coherent(i915, handle, 0, 4096 * 3, PROT_READ);
@@ -144,25 +157,40 @@ static void invalid_engines(int i915)
munmap(ptr + 8192, 4096);
param.value = to_user_pointer(ptr + 4096);
- igt_assert_eq(__gem_context_set_param(i915, &param), 0);
+ igt_assert_eq(__set_param_fresh_context(i915, param), 0);
param.value = to_user_pointer(ptr);
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(ptr) + 4095;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(ptr) + 8192;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
param.value = to_user_pointer(ptr) + 12287;
- igt_assert_eq(__gem_context_set_param(i915, &param), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, param), -EFAULT);
munmap(ptr + 4096, 4096);
+ /* Reset back to a known-good param struct */
+ param.size = sizeof(*engines) + sizeof(*engines->engines);
+ param.value = to_user_pointer(engines);
+ igt_assert_eq(__set_param_fresh_context(i915, param), 0);
+
+ /* Test that we can't set engines after we've done an execbuf */
+ param.ctx_id = gem_context_create(i915);
+ spin = igt_spin_new(i915, .ctx_id = param.ctx_id);
+ igt_spin_free(i915, spin);
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+ gem_context_destroy(i915, param.ctx_id);
+
+ /* Test that we can't set engines on ctx0 */
+ param.ctx_id = 0;
+ igt_assert_eq(__gem_context_set_param(i915, &param), -EINVAL);
+
out:
munmap(engines, 4096);
- gem_context_destroy(i915, param.ctx_id);
}
static uint32_t batch_create(int i915)
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 80/81] tests/i915/gem_exec_balancer: Fix invalid-balancer for the set-once rule
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (78 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 79/81] tests/i915/gem_ctx_engines: Fix the invalid subtest for the new rules Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 81/81] tests/i915/gem_exec_balancer: Add a test for combined balancing and bonding (v2) Jason Ekstrand
` (3 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
Upstream i915 is going to start only allowing CONTEXT_PARAM_ENGINES to
be set once on any given context. This means we need to create a new
context for every setparam in the test.
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_balancer.c | 56 ++++++++++++++++++++--------------
1 file changed, 33 insertions(+), 23 deletions(-)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 85f94d795..28fc2d33a 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -165,6 +165,19 @@ static uint32_t batch_create(int i915)
return __batch_create(i915, 0);
}
+static int
+__set_param_fresh_context(int i915, struct drm_i915_gem_context_param param)
+{
+ int err;
+
+ igt_assert_eq(param.ctx_id, 0);
+ param.ctx_id = gem_context_create(i915);
+ err = __gem_context_set_param(i915, &param);
+ gem_context_destroy(i915, param.ctx_id);
+
+ return err;
+}
+
static void invalid_balancer(int i915)
{
I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balancer, 64);
@@ -192,7 +205,6 @@ static void invalid_balancer(int i915)
igt_assert_lte(count, 64);
- p.ctx_id = gem_context_create(i915);
p.size = (sizeof(struct i915_context_param_engines) +
(count + 1) * sizeof(*engines.engines));
@@ -200,13 +212,13 @@ static void invalid_balancer(int i915)
engines.engines[0].engine_class = I915_ENGINE_CLASS_INVALID;
engines.engines[0].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
memcpy(engines.engines + 1, ci, count * sizeof(*ci));
- gem_context_set_param(i915, &p);
+ igt_assert_eq(__set_param_fresh_context(i915, p), 0);
engines.extensions = -1ull;
- igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
engines.extensions = 1ull;
- igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
memset(&balancer, 0, sizeof(balancer));
balancer.base.name = I915_CONTEXT_ENGINES_EXT_LOAD_BALANCE;
@@ -214,25 +226,25 @@ static void invalid_balancer(int i915)
memcpy(balancer.engines, ci, count * sizeof(*ci));
engines.extensions = to_user_pointer(&balancer);
- gem_context_set_param(i915, &p);
+ igt_assert_eq(__set_param_fresh_context(i915, p), 0);
balancer.engine_index = 1;
- igt_assert_eq(__gem_context_set_param(i915, &p), -EEXIST);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EEXIST);
balancer.engine_index = count;
- igt_assert_eq(__gem_context_set_param(i915, &p), -EEXIST);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EEXIST);
balancer.engine_index = count + 1;
- igt_assert_eq(__gem_context_set_param(i915, &p), -EINVAL);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EINVAL);
balancer.engine_index = 0;
- gem_context_set_param(i915, &p);
+ igt_assert_eq(__set_param_fresh_context(i915, p), 0);
balancer.base.next_extension = to_user_pointer(&balancer);
- igt_assert_eq(__gem_context_set_param(i915, &p), -EEXIST);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EEXIST);
balancer.base.next_extension = -1ull;
- igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
handle = gem_create(i915, 4096 * 3);
ptr = gem_mmap__device_coherent(i915, handle, 0, 4096 * 3,
@@ -247,44 +259,42 @@ static void invalid_balancer(int i915)
memcpy(engines.engines + 2, ci, count * sizeof(ci));
p.size = (sizeof(struct i915_context_param_engines) +
(count + 2) * sizeof(*engines.engines));
- gem_context_set_param(i915, &p);
+ igt_assert_eq(__set_param_fresh_context(i915, p), 0);
balancer.base.next_extension = 0;
balancer.engine_index = 1;
engines.extensions = to_user_pointer(&balancer);
- gem_context_set_param(i915, &p);
+ igt_assert_eq(__set_param_fresh_context(i915, p), 0);
memcpy(ptr + 4096 - 8, &balancer, sizeof(balancer));
memcpy(ptr + 8192 - 8, &balancer, sizeof(balancer));
balancer.engine_index = 0;
engines.extensions = to_user_pointer(ptr) + 4096 - 8;
- gem_context_set_param(i915, &p);
+ igt_assert_eq(__set_param_fresh_context(i915, p), 0);
balancer.base.next_extension = engines.extensions;
engines.extensions = to_user_pointer(&balancer);
- gem_context_set_param(i915, &p);
+ igt_assert_eq(__set_param_fresh_context(i915, p), 0);
munmap(ptr, 4096);
- igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
engines.extensions = to_user_pointer(ptr) + 4096 - 8;
- igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
engines.extensions = to_user_pointer(ptr) + 8192 - 8;
- gem_context_set_param(i915, &p);
+ igt_assert_eq(__set_param_fresh_context(i915, p), 0);
balancer.base.next_extension = engines.extensions;
engines.extensions = to_user_pointer(&balancer);
- gem_context_set_param(i915, &p);
+ igt_assert_eq(__set_param_fresh_context(i915, p), 0);
munmap(ptr + 8192, 4096);
- igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
engines.extensions = to_user_pointer(ptr) + 8192 - 8;
- igt_assert_eq(__gem_context_set_param(i915, &p), -EFAULT);
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
munmap(ptr + 4096, 4096);
-
- gem_context_destroy(i915, p.ctx_id);
free(ci);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] [PATCH i-g-t 81/81] tests/i915/gem_exec_balancer: Add a test for combined balancing and bonding (v2)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (79 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 80/81] tests/i915/gem_exec_balancer: Fix invalid-balancer for the set-once rule Jason Ekstrand
@ 2021-07-07 14:46 ` Jason Ekstrand
2021-07-07 15:26 ` [igt-dev] ✓ Fi.CI.BAT: success for Stop depending on context mutation (rev14) Patchwork
` (2 subsequent siblings)
83 siblings, 0 replies; 87+ messages in thread
From: Jason Ekstrand @ 2021-07-07 14:46 UTC (permalink / raw)
To: igt-dev
v2 (Jason Ekstrand):
- Use __set_param_fresh_context
Signed-off-by: Jason Ekstrand <jason@jlekstrand.net>
Reviewed-by: Ashutosh Dixit <ashutosh.dixit@intel.com>
---
tests/i915/gem_exec_balancer.c | 27 +++++++++++++++++++++++++++
1 file changed, 27 insertions(+)
diff --git a/tests/i915/gem_exec_balancer.c b/tests/i915/gem_exec_balancer.c
index 28fc2d33a..40077aced 100644
--- a/tests/i915/gem_exec_balancer.c
+++ b/tests/i915/gem_exec_balancer.c
@@ -181,6 +181,7 @@ __set_param_fresh_context(int i915, struct drm_i915_gem_context_param param)
static void invalid_balancer(int i915)
{
I915_DEFINE_CONTEXT_ENGINES_LOAD_BALANCE(balancer, 64);
+ I915_DEFINE_CONTEXT_ENGINES_BOND(bond, 1);
I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 64);
struct drm_i915_gem_context_param p = {
.param = I915_CONTEXT_PARAM_ENGINES,
@@ -295,6 +296,32 @@ static void invalid_balancer(int i915)
igt_assert_eq(__set_param_fresh_context(i915, p), -EFAULT);
munmap(ptr + 4096, 4096);
+
+ if (count >= 2) {
+ /* You can't bond to a balanced engine */
+ memset(&bond, 0, sizeof(bond));
+ bond.base.name = I915_CONTEXT_ENGINES_EXT_BOND;
+ bond.master = ci[0];
+ bond.virtual_index = 0;
+ bond.num_bonds = 1;
+ bond.engines[0] = ci[1];
+
+ balancer.base.next_extension = to_user_pointer(&bond);
+ balancer.engine_index = 0;
+ balancer.num_siblings = count;
+ memcpy(balancer.engines, ci, count * sizeof(*ci));
+
+ memset(&engines, 0, sizeof(engines));
+ engines.engines[0].engine_class = I915_ENGINE_CLASS_INVALID;
+ engines.engines[0].engine_instance = I915_ENGINE_CLASS_INVALID_NONE;
+ engines.extensions = to_user_pointer(&balancer);
+
+ p.size = (sizeof(struct i915_context_param_engines) +
+ sizeof(*engines.engines));
+
+ igt_assert_eq(__set_param_fresh_context(i915, p), -EINVAL);
+ }
+
free(ci);
}
}
--
2.31.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 87+ messages in thread
* [igt-dev] ✓ Fi.CI.BAT: success for Stop depending on context mutation (rev14)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (80 preceding siblings ...)
2021-07-07 14:46 ` [igt-dev] [PATCH i-g-t 81/81] tests/i915/gem_exec_balancer: Add a test for combined balancing and bonding (v2) Jason Ekstrand
@ 2021-07-07 15:26 ` Patchwork
2021-07-07 19:11 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
2021-07-08 7:45 ` [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Maarten Lankhorst
83 siblings, 0 replies; 87+ messages in thread
From: Patchwork @ 2021-07-07 15:26 UTC (permalink / raw)
To: Jason Ekstrand; +Cc: igt-dev
[-- Attachment #1.1: Type: text/plain, Size: 2761 bytes --]
== Series Details ==
Series: Stop depending on context mutation (rev14)
URL : https://patchwork.freedesktop.org/series/88986/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_10309 -> IGTPW_5990
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/index.html
Known issues
------------
Here are the changes found in IGTPW_5990 that come from known issues:
### IGT changes ###
#### Possible fixes ####
* igt@gem_exec_suspend@basic-s0:
- {fi-tgl-1115g4}: [FAIL][1] ([i915#1888]) -> [PASS][2]
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/fi-tgl-1115g4/igt@gem_exec_suspend@basic-s0.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/fi-tgl-1115g4/igt@gem_exec_suspend@basic-s0.html
* igt@kms_chamelium@dp-crc-fast:
- fi-kbl-7500u: [FAIL][3] ([i915#1372]) -> [PASS][4]
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/fi-kbl-7500u/igt@kms_chamelium@dp-crc-fast.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[i915#1372]: https://gitlab.freedesktop.org/drm/intel/issues/1372
[i915#1888]: https://gitlab.freedesktop.org/drm/intel/issues/1888
Participating hosts (44 -> 39)
------------------------------
Missing (5): fi-ilk-m540 fi-hsw-4200u fi-skl-guc fi-bsw-cyan fi-ctg-p8600
Build changes
-------------
* CI: CI-20190529 -> None
* IGT: IGT_6130 -> IGTPW_5990
CI-20190529: 20190529
CI_DRM_10309: 6a5db0d08c45a29cebcfd39b53a15be664b9369c @ git://anongit.freedesktop.org/gfx-ci/linux
IGTPW_5990: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/index.html
IGT_6130: 390edfb703c346f06b0850db71bd3cc1342a3c02 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
== Testlist changes ==
+igt@gem_ctx_param@invalid-get-engines
+igt@gem_ctx_param@invalid-get-no-zeromap
+igt@gem_ctx_param@invalid-get-ringsize
+igt@gem_ctx_param@invalid-set-no-zeromap
+igt@gem_ctx_param@invalid-set-ringsize
-igt@api_intel_bb@bb-with-vm
-igt@gem_ctx_engines@idempotent
-igt@gem_ctx_engines@libapi
-igt@gem_ctx_persistence@clone
-igt@gem_ctx_persistence@close-replace-race
-igt@gem_ctx_persistence@replace
-igt@gem_ctx_persistence@replace-hostile
-igt@gem_exec_balancer@bonded-cork
-igt@gem_exec_balancer@bonded-early
-igt@gem_exec_balancer@bonded-imm
-igt@gem_vm_create@async-destroy
-igt@gem_vm_create@destroy-race
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/index.html
[-- Attachment #1.2: Type: text/html, Size: 3499 bytes --]
[-- Attachment #2: Type: text/plain, Size: 154 bytes --]
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 87+ messages in thread
* [igt-dev] ✗ Fi.CI.IGT: failure for Stop depending on context mutation (rev14)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (81 preceding siblings ...)
2021-07-07 15:26 ` [igt-dev] ✓ Fi.CI.BAT: success for Stop depending on context mutation (rev14) Patchwork
@ 2021-07-07 19:11 ` Patchwork
2021-07-08 7:45 ` [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Maarten Lankhorst
83 siblings, 0 replies; 87+ messages in thread
From: Patchwork @ 2021-07-07 19:11 UTC (permalink / raw)
To: Jason Ekstrand; +Cc: igt-dev
[-- Attachment #1.1: Type: text/plain, Size: 30260 bytes --]
== Series Details ==
Series: Stop depending on context mutation (rev14)
URL : https://patchwork.freedesktop.org/series/88986/
State : failure
== Summary ==
CI Bug Log - changes from CI_DRM_10309_full -> IGTPW_5990_full
====================================================
Summary
-------
**FAILURE**
Serious unknown changes coming with IGTPW_5990_full absolutely need to be
verified manually.
If you think the reported changes have nothing to do with the changes
introduced in IGTPW_5990_full, please notify your bug team to allow them
to document this new failure mode, which will reduce false positives in CI.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/index.html
Possible new issues
-------------------
Here are the unknown changes that may have been introduced in IGTPW_5990_full:
### IGT changes ###
#### Possible regressions ####
* igt@drm_import_export@import-close-race-flink:
- shard-snb: NOTRUN -> [TIMEOUT][1] +3 similar issues
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb2/igt@drm_import_export@import-close-race-flink.html
* {igt@gem_ctx_param@invalid-get-engines} (NEW):
- shard-glk: NOTRUN -> [FAIL][2] +7 similar issues
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk9/igt@gem_ctx_param@invalid-get-engines.html
* {igt@gem_ctx_param@invalid-set-no-zeromap} (NEW):
- shard-iclb: NOTRUN -> [FAIL][3] +9 similar issues
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb5/igt@gem_ctx_param@invalid-set-no-zeromap.html
- shard-kbl: NOTRUN -> [FAIL][4] +7 similar issues
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl4/igt@gem_ctx_param@invalid-set-no-zeromap.html
- shard-tglb: NOTRUN -> [FAIL][5] +4 similar issues
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb7/igt@gem_ctx_param@invalid-set-no-zeromap.html
* igt@gem_eio@in-flight-contexts-10ms:
- shard-apl: NOTRUN -> [FAIL][6] +6 similar issues
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl2/igt@gem_eio@in-flight-contexts-10ms.html
* igt@gem_eio@in-flight-immediate:
- shard-snb: NOTRUN -> [FAIL][7] +7 similar issues
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb7/igt@gem_eio@in-flight-immediate.html
* igt@gem_eio@in-flight-internal-immediate:
- shard-tglb: [PASS][8] -> [FAIL][9] +25 similar issues
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-tglb3/igt@gem_eio@in-flight-internal-immediate.html
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb5/igt@gem_eio@in-flight-internal-immediate.html
* igt@gem_eio@unwedge-stress:
- shard-glk: [PASS][10] -> [FAIL][11] +24 similar issues
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-glk7/igt@gem_eio@unwedge-stress.html
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk5/igt@gem_eio@unwedge-stress.html
* igt@gem_eio@wait-immediate:
- shard-snb: [PASS][12] -> [FAIL][13] +8 similar issues
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-snb7/igt@gem_eio@wait-immediate.html
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb7/igt@gem_eio@wait-immediate.html
* igt@gem_eio@wait-wedge-10ms:
- shard-apl: [PASS][14] -> [FAIL][15] +5 similar issues
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-apl6/igt@gem_eio@wait-wedge-10ms.html
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl8/igt@gem_eio@wait-wedge-10ms.html
* igt@gem_partial_pwrite_pread@write:
- shard-snb: [PASS][16] -> [TIMEOUT][17] +4 similar issues
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-snb5/igt@gem_partial_pwrite_pread@write.html
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb2/igt@gem_partial_pwrite_pread@write.html
* igt@i915_pm_rpm@gem-idle:
- shard-iclb: [PASS][18] -> [FAIL][19] +22 similar issues
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-iclb7/igt@i915_pm_rpm@gem-idle.html
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb2/igt@i915_pm_rpm@gem-idle.html
- shard-kbl: [PASS][20] -> [FAIL][21] +20 similar issues
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-kbl6/igt@i915_pm_rpm@gem-idle.html
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl2/igt@i915_pm_rpm@gem-idle.html
#### Warnings ####
* igt@gem_eio@unwedge-stress:
- shard-tglb: [TIMEOUT][22] ([i915#2369] / [i915#3063] / [i915#3648]) -> [FAIL][23]
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-tglb2/igt@gem_eio@unwedge-stress.html
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb3/igt@gem_eio@unwedge-stress.html
* igt@i915_pm_dc@dc5-psr:
- shard-snb: [SKIP][24] ([fdo#109271]) -> [TIMEOUT][25] +4 similar issues
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-snb2/igt@i915_pm_dc@dc5-psr.html
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb2/igt@i915_pm_dc@dc5-psr.html
New tests
---------
New tests have been introduced between CI_DRM_10309_full and IGTPW_5990_full:
### New IGT tests (5) ###
* igt@gem_ctx_param@invalid-get-engines:
- Statuses : 4 fail(s)
- Exec time: [0.01, 0.11] s
* igt@gem_ctx_param@invalid-get-no-zeromap:
- Statuses : 4 fail(s)
- Exec time: [0.01, 0.04] s
* igt@gem_ctx_param@invalid-get-ringsize:
- Statuses : 4 fail(s)
- Exec time: [0.01, 0.03] s
* igt@gem_ctx_param@invalid-set-no-zeromap:
- Statuses : 5 fail(s)
- Exec time: [0.01, 0.11] s
* igt@gem_ctx_param@invalid-set-ringsize:
- Statuses : 5 fail(s)
- Exec time: [0.00, 0.03] s
Known issues
------------
Here are the changes found in IGTPW_5990_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@feature_discovery@display-3x:
- shard-iclb: NOTRUN -> [SKIP][26] ([i915#1839])
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb1/igt@feature_discovery@display-3x.html
* igt@gem_ctx_engines@invalid-engines:
- shard-glk: [PASS][27] -> [FAIL][28] ([i915#3468]) +1 similar issue
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-glk3/igt@gem_ctx_engines@invalid-engines.html
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk6/igt@gem_ctx_engines@invalid-engines.html
* igt@gem_ctx_persistence@legacy-engines-mixed:
- shard-snb: NOTRUN -> [SKIP][29] ([fdo#109271] / [i915#1099]) +3 similar issues
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb7/igt@gem_ctx_persistence@legacy-engines-mixed.html
* igt@gem_ctx_shared@create-shared-gtt:
- shard-snb: [PASS][30] -> [SKIP][31] ([fdo#109271])
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-snb7/igt@gem_ctx_shared@create-shared-gtt.html
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb5/igt@gem_ctx_shared@create-shared-gtt.html
* igt@gem_eio@in-flight-suspend:
- shard-tglb: [PASS][32] -> [FAIL][33] ([i915#2411]) +1 similar issue
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-tglb2/igt@gem_eio@in-flight-suspend.html
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb2/igt@gem_eio@in-flight-suspend.html
* igt@gem_eio@wait-1us:
- shard-apl: [PASS][34] -> [FAIL][35] ([i915#3468])
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-apl1/igt@gem_eio@wait-1us.html
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl7/igt@gem_eio@wait-1us.html
* igt@gem_exec_fair@basic-none-solo@rcs0:
- shard-kbl: NOTRUN -> [FAIL][36] ([i915#2842])
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl1/igt@gem_exec_fair@basic-none-solo@rcs0.html
* igt@gem_exec_fair@basic-none@vcs1:
- shard-iclb: NOTRUN -> [FAIL][37] ([i915#2842])
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb4/igt@gem_exec_fair@basic-none@vcs1.html
* igt@gem_exec_fair@basic-throttle@rcs0:
- shard-glk: [PASS][38] -> [FAIL][39] ([i915#2842])
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-glk3/igt@gem_exec_fair@basic-throttle@rcs0.html
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk8/igt@gem_exec_fair@basic-throttle@rcs0.html
* igt@gem_exec_reloc@basic-wide-active@bcs0:
- shard-apl: NOTRUN -> [FAIL][40] ([i915#3633]) +3 similar issues
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl3/igt@gem_exec_reloc@basic-wide-active@bcs0.html
* igt@gem_exec_reloc@basic-wide-active@vcs1:
- shard-iclb: NOTRUN -> [FAIL][41] ([i915#3633])
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb1/igt@gem_exec_reloc@basic-wide-active@vcs1.html
* igt@gem_huc_copy@huc-copy:
- shard-apl: NOTRUN -> [SKIP][42] ([fdo#109271] / [i915#2190])
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl3/igt@gem_huc_copy@huc-copy.html
* igt@gem_pwrite@basic-exhaustion:
- shard-snb: NOTRUN -> [WARN][43] ([i915#2658])
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb5/igt@gem_pwrite@basic-exhaustion.html
- shard-apl: NOTRUN -> [WARN][44] ([i915#2658])
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl7/igt@gem_pwrite@basic-exhaustion.html
* igt@gem_render_copy@x-tiled-to-vebox-yf-tiled:
- shard-kbl: NOTRUN -> [SKIP][45] ([fdo#109271]) +148 similar issues
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl3/igt@gem_render_copy@x-tiled-to-vebox-yf-tiled.html
* igt@gem_render_copy@y-tiled-mc-ccs-to-vebox-y-tiled:
- shard-iclb: NOTRUN -> [SKIP][46] ([i915#768]) +1 similar issue
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb3/igt@gem_render_copy@y-tiled-mc-ccs-to-vebox-y-tiled.html
* igt@gem_softpin@noreloc-s3:
- shard-kbl: NOTRUN -> [DMESG-WARN][47] ([i915#180])
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl3/igt@gem_softpin@noreloc-s3.html
* igt@gem_userptr_blits@unsync-unmap-cycles:
- shard-iclb: NOTRUN -> [SKIP][48] ([i915#3297])
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb5/igt@gem_userptr_blits@unsync-unmap-cycles.html
* igt@gen7_exec_parse@chained-batch:
- shard-iclb: NOTRUN -> [SKIP][49] ([fdo#109289]) +2 similar issues
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb4/igt@gen7_exec_parse@chained-batch.html
* igt@gen9_exec_parse@allowed-single:
- shard-iclb: NOTRUN -> [SKIP][50] ([fdo#112306])
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb5/igt@gen9_exec_parse@allowed-single.html
* igt@gen9_exec_parse@batch-invalid-length:
- shard-snb: NOTRUN -> [SKIP][51] ([fdo#109271]) +326 similar issues
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb5/igt@gen9_exec_parse@batch-invalid-length.html
* igt@i915_pm_dc@dc6-psr:
- shard-iclb: [PASS][52] -> [INCOMPLETE][53] ([i915#1982] / [i915#3698])
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-iclb2/igt@i915_pm_dc@dc6-psr.html
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb1/igt@i915_pm_dc@dc6-psr.html
* igt@i915_pm_dc@dc9-dpms:
- shard-iclb: NOTRUN -> [FAIL][54] ([i915#3343])
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb4/igt@i915_pm_dc@dc9-dpms.html
* igt@i915_pm_lpsp@kms-lpsp@kms-lpsp-dp:
- shard-kbl: NOTRUN -> [SKIP][55] ([fdo#109271] / [i915#1937])
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl4/igt@i915_pm_lpsp@kms-lpsp@kms-lpsp-dp.html
- shard-apl: NOTRUN -> [SKIP][56] ([fdo#109271] / [i915#1937])
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl8/igt@i915_pm_lpsp@kms-lpsp@kms-lpsp-dp.html
* igt@i915_pm_lpsp@kms-lpsp@kms-lpsp-hdmi-a:
- shard-glk: NOTRUN -> [SKIP][57] ([fdo#109271] / [i915#1937])
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk4/igt@i915_pm_lpsp@kms-lpsp@kms-lpsp-hdmi-a.html
* igt@i915_pm_rc6_residency@rc6-idle:
- shard-iclb: NOTRUN -> [WARN][58] ([i915#2684])
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb2/igt@i915_pm_rc6_residency@rc6-idle.html
* igt@i915_selftest@live@hangcheck:
- shard-snb: [PASS][59] -> [INCOMPLETE][60] ([i915#2782])
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-snb7/igt@i915_selftest@live@hangcheck.html
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb6/igt@i915_selftest@live@hangcheck.html
* igt@kms_addfb_basic@invalid-smem-bo-on-discrete:
- shard-apl: NOTRUN -> [FAIL][61] ([i915#3745])
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl6/igt@kms_addfb_basic@invalid-smem-bo-on-discrete.html
* igt@kms_big_fb@linear-16bpp-rotate-90:
- shard-iclb: NOTRUN -> [SKIP][62] ([fdo#110725] / [fdo#111614]) +1 similar issue
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb3/igt@kms_big_fb@linear-16bpp-rotate-90.html
* igt@kms_big_fb@linear-32bpp-rotate-180:
- shard-glk: [PASS][63] -> [DMESG-WARN][64] ([i915#118] / [i915#95]) +1 similar issue
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-glk8/igt@kms_big_fb@linear-32bpp-rotate-180.html
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk5/igt@kms_big_fb@linear-32bpp-rotate-180.html
* igt@kms_big_fb@linear-64bpp-rotate-0:
- shard-iclb: [PASS][65] -> [DMESG-WARN][66] ([i915#3621])
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-iclb4/igt@kms_big_fb@linear-64bpp-rotate-0.html
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb1/igt@kms_big_fb@linear-64bpp-rotate-0.html
* igt@kms_big_fb@yf-tiled-8bpp-rotate-0:
- shard-iclb: NOTRUN -> [SKIP][67] ([fdo#110723])
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb7/igt@kms_big_fb@yf-tiled-8bpp-rotate-0.html
* igt@kms_big_joiner@basic:
- shard-iclb: NOTRUN -> [SKIP][68] ([i915#2705])
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb2/igt@kms_big_joiner@basic.html
* igt@kms_ccs@pipe-d-bad-rotation-90-y_tiled_ccs:
- shard-tglb: NOTRUN -> [SKIP][69] ([i915#3689]) +1 similar issue
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb5/igt@kms_ccs@pipe-d-bad-rotation-90-y_tiled_ccs.html
* igt@kms_cdclk@plane-scaling:
- shard-iclb: NOTRUN -> [SKIP][70] ([i915#3742])
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb7/igt@kms_cdclk@plane-scaling.html
* igt@kms_chamelium@hdmi-hpd-storm-disable:
- shard-kbl: NOTRUN -> [SKIP][71] ([fdo#109271] / [fdo#111827]) +12 similar issues
[71]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl2/igt@kms_chamelium@hdmi-hpd-storm-disable.html
* igt@kms_chamelium@hdmi-mode-timings:
- shard-snb: NOTRUN -> [SKIP][72] ([fdo#109271] / [fdo#111827]) +13 similar issues
[72]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-snb2/igt@kms_chamelium@hdmi-mode-timings.html
* igt@kms_color@pipe-d-degamma:
- shard-iclb: NOTRUN -> [SKIP][73] ([fdo#109278] / [i915#1149])
[73]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb3/igt@kms_color@pipe-d-degamma.html
* igt@kms_color_chamelium@pipe-a-ctm-red-to-blue:
- shard-iclb: NOTRUN -> [SKIP][74] ([fdo#109284] / [fdo#111827]) +5 similar issues
[74]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb8/igt@kms_color_chamelium@pipe-a-ctm-red-to-blue.html
* igt@kms_color_chamelium@pipe-c-ctm-0-25:
- shard-apl: NOTRUN -> [SKIP][75] ([fdo#109271] / [fdo#111827]) +15 similar issues
[75]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl1/igt@kms_color_chamelium@pipe-c-ctm-0-25.html
* igt@kms_color_chamelium@pipe-d-ctm-max:
- shard-tglb: NOTRUN -> [SKIP][76] ([fdo#109284] / [fdo#111827]) +1 similar issue
[76]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb6/igt@kms_color_chamelium@pipe-d-ctm-max.html
- shard-glk: NOTRUN -> [SKIP][77] ([fdo#109271] / [fdo#111827]) +2 similar issues
[77]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk8/igt@kms_color_chamelium@pipe-d-ctm-max.html
- shard-iclb: NOTRUN -> [SKIP][78] ([fdo#109278] / [fdo#109284] / [fdo#111827])
[78]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb8/igt@kms_color_chamelium@pipe-d-ctm-max.html
* igt@kms_content_protection@atomic:
- shard-kbl: NOTRUN -> [TIMEOUT][79] ([i915#1319])
[79]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl3/igt@kms_content_protection@atomic.html
* igt@kms_content_protection@lic:
- shard-apl: NOTRUN -> [TIMEOUT][80] ([i915#1319])
[80]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl7/igt@kms_content_protection@lic.html
* igt@kms_cursor_crc@pipe-a-cursor-512x512-offscreen:
- shard-iclb: NOTRUN -> [SKIP][81] ([fdo#109278] / [fdo#109279]) +2 similar issues
[81]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb8/igt@kms_cursor_crc@pipe-a-cursor-512x512-offscreen.html
* igt@kms_cursor_crc@pipe-a-cursor-suspend:
- shard-kbl: [PASS][82] -> [DMESG-WARN][83] ([i915#180]) +8 similar issues
[82]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-kbl2/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
[83]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl4/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
* igt@kms_cursor_crc@pipe-c-cursor-512x512-sliding:
- shard-tglb: NOTRUN -> [SKIP][84] ([fdo#109279] / [i915#3359])
[84]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb6/igt@kms_cursor_crc@pipe-c-cursor-512x512-sliding.html
* igt@kms_cursor_crc@pipe-d-cursor-64x21-sliding:
- shard-iclb: NOTRUN -> [SKIP][85] ([fdo#109278]) +29 similar issues
[85]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb2/igt@kms_cursor_crc@pipe-d-cursor-64x21-sliding.html
* igt@kms_cursor_legacy@cursorb-vs-flipb-toggle:
- shard-iclb: NOTRUN -> [SKIP][86] ([fdo#109274] / [fdo#109278]) +1 similar issue
[86]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb7/igt@kms_cursor_legacy@cursorb-vs-flipb-toggle.html
* igt@kms_dp_dsc@basic-dsc-enable-edp:
- shard-iclb: NOTRUN -> [SKIP][87] ([fdo#109349])
[87]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb4/igt@kms_dp_dsc@basic-dsc-enable-edp.html
* igt@kms_flip@2x-flip-vs-expired-vblank@bc-hdmi-a1-hdmi-a2:
- shard-glk: [PASS][88] -> [FAIL][89] ([i915#2122])
[88]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-glk9/igt@kms_flip@2x-flip-vs-expired-vblank@bc-hdmi-a1-hdmi-a2.html
[89]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk2/igt@kms_flip@2x-flip-vs-expired-vblank@bc-hdmi-a1-hdmi-a2.html
* igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs:
- shard-iclb: NOTRUN -> [SKIP][90] ([i915#2587])
[90]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb4/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytilercccs.html
* igt@kms_frontbuffer_tracking@fbc-2p-primscrn-shrfb-pgflip-blt:
- shard-tglb: NOTRUN -> [SKIP][91] ([fdo#111825]) +8 similar issues
[91]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb2/igt@kms_frontbuffer_tracking@fbc-2p-primscrn-shrfb-pgflip-blt.html
* igt@kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-blt:
- shard-iclb: NOTRUN -> [SKIP][92] ([fdo#109280]) +24 similar issues
[92]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb1/igt@kms_frontbuffer_tracking@psr-2p-primscrn-pri-shrfb-draw-blt.html
* igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-render:
- shard-glk: NOTRUN -> [SKIP][93] ([fdo#109271]) +38 similar issues
[93]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk1/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-indfb-draw-render.html
* igt@kms_hdr@static-toggle:
- shard-iclb: NOTRUN -> [SKIP][94] ([i915#1187]) +1 similar issue
[94]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb3/igt@kms_hdr@static-toggle.html
- shard-tglb: NOTRUN -> [SKIP][95] ([i915#1187])
[95]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb2/igt@kms_hdr@static-toggle.html
* igt@kms_pipe_crc_basic@nonblocking-crc-pipe-d-frame-sequence:
- shard-kbl: NOTRUN -> [SKIP][96] ([fdo#109271] / [i915#533]) +1 similar issue
[96]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl6/igt@kms_pipe_crc_basic@nonblocking-crc-pipe-d-frame-sequence.html
- shard-apl: NOTRUN -> [SKIP][97] ([fdo#109271] / [i915#533]) +1 similar issue
[97]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl2/igt@kms_pipe_crc_basic@nonblocking-crc-pipe-d-frame-sequence.html
- shard-glk: NOTRUN -> [SKIP][98] ([fdo#109271] / [i915#533])
[98]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk8/igt@kms_pipe_crc_basic@nonblocking-crc-pipe-d-frame-sequence.html
* igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b:
- shard-apl: [PASS][99] -> [DMESG-WARN][100] ([i915#180])
[99]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-apl2/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b.html
[100]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl2/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b.html
* igt@kms_pipe_crc_basic@suspend-read-crc-pipe-c:
- shard-apl: NOTRUN -> [DMESG-WARN][101] ([i915#180])
[101]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl8/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-c.html
* igt@kms_plane_alpha_blend@pipe-a-alpha-opaque-fb:
- shard-apl: NOTRUN -> [FAIL][102] ([fdo#108145] / [i915#265]) +3 similar issues
[102]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl6/igt@kms_plane_alpha_blend@pipe-a-alpha-opaque-fb.html
* igt@kms_plane_multiple@atomic-pipe-c-tiling-yf:
- shard-tglb: NOTRUN -> [SKIP][103] ([fdo#112054])
[103]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb3/igt@kms_plane_multiple@atomic-pipe-c-tiling-yf.html
* igt@kms_plane_scaling@2x-scaler-multi-pipe:
- shard-iclb: NOTRUN -> [SKIP][104] ([fdo#109274]) +6 similar issues
[104]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb1/igt@kms_plane_scaling@2x-scaler-multi-pipe.html
* igt@kms_psr2_sf@overlay-plane-update-sf-dmg-area-4:
- shard-apl: NOTRUN -> [SKIP][105] ([fdo#109271] / [i915#658]) +2 similar issues
[105]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl6/igt@kms_psr2_sf@overlay-plane-update-sf-dmg-area-4.html
* igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-4:
- shard-iclb: NOTRUN -> [SKIP][106] ([i915#2920])
[106]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb2/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-4.html
- shard-glk: NOTRUN -> [SKIP][107] ([fdo#109271] / [i915#658])
[107]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk4/igt@kms_psr2_sf@overlay-primary-update-sf-dmg-area-4.html
* igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-2:
- shard-iclb: NOTRUN -> [SKIP][108] ([i915#658]) +1 similar issue
[108]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb3/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-2.html
* igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-5:
- shard-kbl: NOTRUN -> [SKIP][109] ([fdo#109271] / [i915#658]) +3 similar issues
[109]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl2/igt@kms_psr2_sf@primary-plane-update-sf-dmg-area-5.html
* igt@kms_psr@psr2_cursor_mmap_cpu:
- shard-iclb: [PASS][110] -> [SKIP][111] ([fdo#109441]) +1 similar issue
[110]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-iclb2/igt@kms_psr@psr2_cursor_mmap_cpu.html
[111]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb7/igt@kms_psr@psr2_cursor_mmap_cpu.html
* igt@kms_psr@psr2_sprite_plane_move:
- shard-iclb: NOTRUN -> [SKIP][112] ([fdo#109441]) +2 similar issues
[112]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb1/igt@kms_psr@psr2_sprite_plane_move.html
* igt@kms_sysfs_edid_timing:
- shard-apl: NOTRUN -> [FAIL][113] ([IGT#2])
[113]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl3/igt@kms_sysfs_edid_timing.html
* igt@kms_vrr@flipline:
- shard-iclb: NOTRUN -> [SKIP][114] ([fdo#109502])
[114]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb7/igt@kms_vrr@flipline.html
* igt@kms_writeback@writeback-fb-id:
- shard-apl: NOTRUN -> [SKIP][115] ([fdo#109271] / [i915#2437])
[115]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl1/igt@kms_writeback@writeback-fb-id.html
* igt@nouveau_crc@pipe-a-ctx-flip-skip-current-frame:
- shard-tglb: NOTRUN -> [SKIP][116] ([i915#2530])
[116]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb2/igt@nouveau_crc@pipe-a-ctx-flip-skip-current-frame.html
* igt@nouveau_crc@pipe-b-ctx-flip-skip-current-frame:
- shard-apl: NOTRUN -> [SKIP][117] ([fdo#109271]) +257 similar issues
[117]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl6/igt@nouveau_crc@pipe-b-ctx-flip-skip-current-frame.html
* igt@nouveau_crc@pipe-c-ctx-flip-skip-current-frame:
- shard-iclb: NOTRUN -> [SKIP][118] ([i915#2530]) +2 similar issues
[118]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb6/igt@nouveau_crc@pipe-c-ctx-flip-skip-current-frame.html
* igt@perf@per-context-mode-unprivileged:
- shard-tglb: NOTRUN -> [SKIP][119] ([fdo#109289])
[119]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb5/igt@perf@per-context-mode-unprivileged.html
* igt@prime_nv_api@nv_i915_reimport_twice_check_flink_name:
- shard-iclb: NOTRUN -> [SKIP][120] ([fdo#109291]) +2 similar issues
[120]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb3/igt@prime_nv_api@nv_i915_reimport_twice_check_flink_name.html
* igt@prime_nv_pcopy@test3_5:
- shard-tglb: NOTRUN -> [SKIP][121] ([fdo#109291])
[121]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-tglb2/igt@prime_nv_pcopy@test3_5.html
* igt@sysfs_clients@create:
- shard-apl: NOTRUN -> [SKIP][122] ([fdo#109271] / [i915#2994]) +2 similar issues
[122]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl1/igt@sysfs_clients@create.html
* igt@sysfs_clients@pidname:
- shard-iclb: NOTRUN -> [SKIP][123] ([i915#2994])
[123]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb7/igt@sysfs_clients@pidname.html
* igt@sysfs_clients@sema-25:
- shard-kbl: NOTRUN -> [SKIP][124] ([fdo#109271] / [i915#2994]) +1 similar issue
[124]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl1/igt@sysfs_clients@sema-25.html
#### Possible fixes ####
* igt@core_hotunplug@unbind-rebind:
- shard-apl: [DMESG-WARN][125] ([i915#2283]) -> [PASS][126]
[125]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-apl1/igt@core_hotunplug@unbind-rebind.html
[126]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl3/igt@core_hotunplug@unbind-rebind.html
- shard-glk: [DMESG-WARN][127] ([i915#2283]) -> [PASS][128]
[127]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-glk3/igt@core_hotunplug@unbind-rebind.html
[128]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk2/igt@core_hotunplug@unbind-rebind.html
- shard-kbl: [DMESG-WARN][129] ([i915#2283]) -> [PASS][130]
[129]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-kbl2/igt@core_hotunplug@unbind-rebind.html
[130]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl1/igt@core_hotunplug@unbind-rebind.html
- shard-iclb: [DMESG-WARN][131] ([i915#1602] / [i915#2283]) -> [PASS][132]
[131]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-iclb5/igt@core_hotunplug@unbind-rebind.html
[132]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-iclb1/igt@core_hotunplug@unbind-rebind.html
* igt@gem_create@create-clear:
- shard-glk: [FAIL][133] ([i915#3160]) -> [PASS][134]
[133]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-glk6/igt@gem_create@create-clear.html
[134]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-glk5/igt@gem_create@create-clear.html
* igt@gem_ctx_isolation@preservation-s3@bcs0:
- shard-kbl: [DMESG-WARN][135] ([i915#180]) -> [PASS][136] +1 similar issue
[135]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-kbl1/igt@gem_ctx_isolation@preservation-s3@bcs0.html
[136]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-kbl1/igt@gem_ctx_isolation@preservation-s3@bcs0.html
- shard-apl: [DMESG-WARN][137] ([i915#180]) -> [PASS][138]
[137]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_10309/shard-apl8/igt@gem_ctx_isolation@preservation-s3@bcs0.html
[138]: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/shard-apl1/igt@gem_ctx_isolation@preservation-s3@bcs0.html
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_5990/index.html
[-- Attachment #1.2: Type: text/html, Size: 33930 bytes --]
[-- Attachment #2: Type: text/plain, Size: 154 bytes --]
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 87+ messages in thread
* Re: [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4)
2021-07-07 14:42 [igt-dev] [PATCH i-g-t 00/81] Stop depending on context mutation (v4) Jason Ekstrand
` (82 preceding siblings ...)
2021-07-07 19:11 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
@ 2021-07-08 7:45 ` Maarten Lankhorst
83 siblings, 0 replies; 87+ messages in thread
From: Maarten Lankhorst @ 2021-07-08 7:45 UTC (permalink / raw)
To: Jason Ekstrand, igt-dev
Op 07-07-2021 om 16:42 schreef Jason Ekstrand:
> I'm trying to clean up some of our uAPI technical debt in i915. One of the
> biggest areas we have right now is context mutability. There's no good
> reason why things like the set of engines or the VM should be able to be
> changed on the fly and no "real" userspace actually relies on this
> functionality. It does, however, make for a good excuse for tests and lots
> of bug reports as things like swapping out the set of engines under load
> break randomly. The solution here is to stop allowing that behavior and
> simplify the i915 internals.
>
> In particular, we'd like to remove the following from the i915 API:
>
> 1. I915_CONTEXT_CLONE_*. These are only used by IGT and have never been
> used by any "real" userspace.
>
> 2. Changing the VM or set of engines via SETPARAM after they've been
> "used" by an execbuf or similar. This would effectively make those
> parameters create params rather than mutable state. We can't drop
> setparam entirely for those because media does use it but we can
> enforce some rules.
>
> 3. Unused (by non-IGT userspace) GETPARAM for things like engines.
>
> As much as we'd love to do that, we have a bit of a problem in IGT. The
> way we handle multi-engine testing today relies heavily on this soon-to-be-
> deprecated functionality. In particular, the standard flow is usually
> something like this:
>
> static void run_test1(int fd, uint32_t engine)
> {
> igt_spin_t *spin;
>
> ctx = gem_context_clone_with_engines(fd, 0);
> spin = __igt_spin_new(fd, ctx, .engine = engine);
>
> /* do some testing with ctx */
>
> igt_spin_free(fd, spin);
> gem_destroy_context(fd, ctx);
> }
>
> igt_main
> {
> struct intel_execution_engine2 *e;
>
> /* Usual fixture code */
>
> __for_each_physical_engine(fd, e)
> run_test1(fd, e->flags);
>
> __for_each_physical_engine(fd, e)
> run_test2(fd, e->flags);
> }
>
> Let's walk through what this does:
>
> 1. __for_each_physical_engine calls intel_init_engine_list() which resets
> the set of engines on ctx0 to the full set of engines available as per
> the engine query. On older kernels/hardware where we don't have the
> engines query, it leaves the set alone.
>
> 2. intel_init_engine_list() also returns a set of engines for iteration
> and __for_each_physical_engine() sets up a for loop to walk the set.
>
> 3. gem_context_clone_with_engines() creates a new context using
> I915_CONTEXT_CLONE_ENGINES (not used by anything other than
> IGT) to ask that the newly created context has the same set of engines
> as ctx0. Remember we changed that at the start of loop iteration!
>
> 4. When the context is passed to __igt_spin_new(), it calls
> gem_context_lookup_engine which does a GETPARAM to introspect the set of
> engines on the context and figure out the engine class.
>
> If you've been keeping track, this trivial and extremely common example
> uses every single one of these soon-to-be-deprecated APIs even though the
> test author may be completely oblivious to it. It also means that getting
> rid of IGT's use of them is going to require some fairly deep surgery.
>
> The approach proposed and partially implemented here is to add a new
> wrapper struct intel_ctx_t which wraps a GEM context handle as well as the
> full set of parameters used to create it, represented by intel_ctx_cfg_t.
> We can then use the context anywhere we would regularly use a context, we
> just have to do ctx->id. If we want to clone it, we can do so by re-using
> the create parameters by calling intel_ctx_create(fd, &old_ctx->cfg);
>
> Along with the above rework (which got long, sorry) I've got a few other
> patches in here which delete tests which exist expressly to test APIs that
> are on the chopping block.
Seems some tests start failing. I like this whole removal of uapi though, some of the complicated code related to ww mutex inside the kernel was related to it.
I roughly looked at all the patches, so I can only give this for the whole series:
Acked-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Although it seems some tests in the ci full run might still be breaking?
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 87+ messages in thread