* [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
@ 2018-02-13 9:26 ` Chris Wilson
0 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2018-02-13 9:26 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev
ICL offers a much reduced context space, and in its simplest setup we
cannot allocate one context per priority level, so trim the number of
contexts and reuse the same context for multiple priority requests.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index 05f7ae38..ec02d994 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -43,6 +43,8 @@
#define BUSY_QLEN 8
#define MAX_ELSP_QLEN 16
+#define MAX_CONTEXTS 256
+
IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
static void store_dword(int fd, uint32_t ctx, unsigned ring,
@@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
static void deep(int fd, unsigned ring)
{
#define XS 8
- const unsigned int nctx = MAX_PRIO - MIN_PRIO;
- const unsigned size = ALIGN(4*nctx, 4096);
+ const unsigned int nreq = MAX_PRIO - MIN_PRIO;
+ const unsigned size = ALIGN(4*nreq, 4096);
struct timespec tv = {};
struct cork cork;
uint32_t result, dep[XS];
@@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
uint32_t *ptr;
uint32_t *ctx;
- ctx = malloc(sizeof(*ctx) * nctx);
- for (int n = 0; n < nctx; n++) {
+ ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
+ for (int n = 0; n < MAX_CONTEXTS; n++) {
ctx[n] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
}
result = gem_create(fd, size);
@@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = XS + 2;
execbuf.flags = ring;
- for (int n = 0; n < nctx; n++) {
+ for (int n = 0; n < MAX_CONTEXTS; n++) {
execbuf.rsvd1 = ctx[n];
gem_execbuf(fd, &execbuf);
}
@@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
plug(fd, &cork);
/* Create a deep dependency chain, with a few branches */
- for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
+ for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
+ uint32_t context = ctx[n % MAX_CONTEXTS];
+ gem_context_set_priority(fd, context, MAX_PRIO - nreq + n);
+
for (int m = 0; m < XS; m++)
- store_dword(fd, ctx[n], ring, dep[m], 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, context, ring, dep[m], 4*n, context, cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
+ }
+
+ for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 6; n++) {
+ uint32_t context = ctx[n % MAX_CONTEXTS];
+ gem_context_set_priority(fd, context, MAX_PRIO - nreq + n);
- for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 6; n++) {
for (int m = 0; m < XS; m++) {
- store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], dep[m], 0);
- store_dword(fd, ctx[n], ring, result, 4*m, ctx[n], 0, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, context, ring, result, 4*n, context, dep[m], 0);
+ store_dword(fd, context, ring, result, 4*m, context, 0, I915_GEM_DOMAIN_INSTRUCTION);
}
- expected = ctx[n];
+ expected = context;
}
unplug_show_queue(fd, &cork, ring);
igt_require(expected); /* too slow */
- for (int n = 0; n < nctx; n++)
+ for (int n = 0; n < MAX_CONTEXTS; n++)
gem_context_destroy(fd, ctx[n]);
for (int m = 0; m < XS; m++) {
@@ -627,8 +635,8 @@ static void deep(int fd, unsigned ring)
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
gem_close(fd, dep[m]);
- for (int n = 0; n < nctx; n++)
- igt_assert_eq_u32(ptr[n], ctx[n]);
+ for (int n = 0; n < nreq; n++)
+ igt_assert_eq_u32(ptr[n], ctx[n % MAX_CONTEXTS]);
munmap(ptr, size);
}
--
2.16.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [Intel-gfx] [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
@ 2018-02-13 9:26 ` Chris Wilson
0 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2018-02-13 9:26 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev
ICL offers a much reduced context space, and in its simplest setup we
cannot allocate one context per priority level, so trim the number of
contexts and reuse the same context for multiple priority requests.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index 05f7ae38..ec02d994 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -43,6 +43,8 @@
#define BUSY_QLEN 8
#define MAX_ELSP_QLEN 16
+#define MAX_CONTEXTS 256
+
IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
static void store_dword(int fd, uint32_t ctx, unsigned ring,
@@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
static void deep(int fd, unsigned ring)
{
#define XS 8
- const unsigned int nctx = MAX_PRIO - MIN_PRIO;
- const unsigned size = ALIGN(4*nctx, 4096);
+ const unsigned int nreq = MAX_PRIO - MIN_PRIO;
+ const unsigned size = ALIGN(4*nreq, 4096);
struct timespec tv = {};
struct cork cork;
uint32_t result, dep[XS];
@@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
uint32_t *ptr;
uint32_t *ctx;
- ctx = malloc(sizeof(*ctx) * nctx);
- for (int n = 0; n < nctx; n++) {
+ ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
+ for (int n = 0; n < MAX_CONTEXTS; n++) {
ctx[n] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
}
result = gem_create(fd, size);
@@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = XS + 2;
execbuf.flags = ring;
- for (int n = 0; n < nctx; n++) {
+ for (int n = 0; n < MAX_CONTEXTS; n++) {
execbuf.rsvd1 = ctx[n];
gem_execbuf(fd, &execbuf);
}
@@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
plug(fd, &cork);
/* Create a deep dependency chain, with a few branches */
- for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
+ for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
+ uint32_t context = ctx[n % MAX_CONTEXTS];
+ gem_context_set_priority(fd, context, MAX_PRIO - nreq + n);
+
for (int m = 0; m < XS; m++)
- store_dword(fd, ctx[n], ring, dep[m], 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, context, ring, dep[m], 4*n, context, cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
+ }
+
+ for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 6; n++) {
+ uint32_t context = ctx[n % MAX_CONTEXTS];
+ gem_context_set_priority(fd, context, MAX_PRIO - nreq + n);
- for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 6; n++) {
for (int m = 0; m < XS; m++) {
- store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], dep[m], 0);
- store_dword(fd, ctx[n], ring, result, 4*m, ctx[n], 0, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, context, ring, result, 4*n, context, dep[m], 0);
+ store_dword(fd, context, ring, result, 4*m, context, 0, I915_GEM_DOMAIN_INSTRUCTION);
}
- expected = ctx[n];
+ expected = context;
}
unplug_show_queue(fd, &cork, ring);
igt_require(expected); /* too slow */
- for (int n = 0; n < nctx; n++)
+ for (int n = 0; n < MAX_CONTEXTS; n++)
gem_context_destroy(fd, ctx[n]);
for (int m = 0; m < XS; m++) {
@@ -627,8 +635,8 @@ static void deep(int fd, unsigned ring)
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
gem_close(fd, dep[m]);
- for (int n = 0; n < nctx; n++)
- igt_assert_eq_u32(ptr[n], ctx[n]);
+ for (int n = 0; n < nreq; n++)
+ igt_assert_eq_u32(ptr[n], ctx[n % MAX_CONTEXTS]);
munmap(ptr, size);
}
--
2.16.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [igt-dev] ✓ Fi.CI.BAT: success for igt/gem_exec_schedule: Trim max number of contexts used
2018-02-13 9:26 ` [Intel-gfx] " Chris Wilson
(?)
@ 2018-02-13 10:01 ` Patchwork
-1 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2018-02-13 10:01 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
== Series Details ==
Series: igt/gem_exec_schedule: Trim max number of contexts used
URL : https://patchwork.freedesktop.org/series/38143/
State : success
== Summary ==
IGT patchset tested on top of latest successful build
7a02d3056fe93fd2ddd2986f7d3db5f384ee22a0 lib: force a reset on an uncooperative i915 device
with latest DRM-Tip kernel build CI_DRM_3759
013536408c7f drm-tip: 2018y-02m-13d-07h-39m-27s UTC integration manifest
No testlist changes.
fi-bdw-5557u total:288 pass:265 dwarn:0 dfail:0 fail:2 skip:21 time:441s
fi-bdw-gvtdvm total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:427s
fi-blb-e6850 total:288 pass:223 dwarn:1 dfail:0 fail:0 skip:64 time:382s
fi-bsw-n3050 total:288 pass:242 dwarn:0 dfail:0 fail:0 skip:46 time:493s
fi-bwr-2160 total:288 pass:183 dwarn:0 dfail:0 fail:0 skip:105 time:290s
fi-bxt-dsi total:288 pass:258 dwarn:0 dfail:0 fail:0 skip:30 time:486s
fi-bxt-j4205 total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:488s
fi-byt-j1900 total:288 pass:253 dwarn:0 dfail:0 fail:0 skip:35 time:477s
fi-byt-n2820 total:288 pass:249 dwarn:0 dfail:0 fail:0 skip:39 time:462s
fi-cfl-s2 total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:578s
fi-cnl-y3 total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:586s
fi-elk-e7500 total:288 pass:229 dwarn:0 dfail:0 fail:0 skip:59 time:417s
fi-gdg-551 total:288 pass:179 dwarn:0 dfail:0 fail:1 skip:108 time:286s
fi-glk-1 total:288 pass:260 dwarn:0 dfail:0 fail:0 skip:28 time:518s
fi-hsw-4770 total:288 pass:259 dwarn:0 dfail:0 fail:2 skip:27 time:412s
fi-ilk-650 total:288 pass:228 dwarn:0 dfail:0 fail:0 skip:60 time:416s
fi-ivb-3520m total:288 pass:259 dwarn:0 dfail:0 fail:0 skip:29 time:456s
fi-ivb-3770 total:288 pass:255 dwarn:0 dfail:0 fail:0 skip:33 time:417s
fi-kbl-7500u total:288 pass:263 dwarn:1 dfail:0 fail:0 skip:24 time:460s
fi-kbl-7560u total:288 pass:269 dwarn:0 dfail:0 fail:0 skip:19 time:502s
fi-kbl-r total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:500s
fi-pnv-d510 total:288 pass:222 dwarn:1 dfail:0 fail:0 skip:65 time:594s
fi-skl-6260u total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:437s
fi-skl-6600u total:288 pass:261 dwarn:0 dfail:0 fail:0 skip:27 time:516s
fi-skl-6700hq total:288 pass:262 dwarn:0 dfail:0 fail:0 skip:26 time:534s
fi-skl-6700k2 total:288 pass:264 dwarn:0 dfail:0 fail:0 skip:24 time:496s
fi-skl-6770hq total:288 pass:268 dwarn:0 dfail:0 fail:0 skip:20 time:488s
fi-skl-guc total:288 pass:260 dwarn:0 dfail:0 fail:0 skip:28 time:417s
fi-skl-gvtdvm total:288 pass:265 dwarn:0 dfail:0 fail:0 skip:23 time:434s
fi-snb-2520m total:288 pass:248 dwarn:0 dfail:0 fail:0 skip:40 time:523s
fi-snb-2600 total:288 pass:248 dwarn:0 dfail:0 fail:0 skip:40 time:399s
Blacklisted hosts:
fi-glk-dsi total:117 pass:104 dwarn:0 dfail:0 fail:0 skip:12
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_908/issues.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 13+ messages in thread
* [igt-dev] ✗ Fi.CI.IGT: failure for igt/gem_exec_schedule: Trim max number of contexts used
2018-02-13 9:26 ` [Intel-gfx] " Chris Wilson
(?)
(?)
@ 2018-02-13 12:59 ` Patchwork
-1 siblings, 0 replies; 13+ messages in thread
From: Patchwork @ 2018-02-13 12:59 UTC (permalink / raw)
To: Chris Wilson; +Cc: igt-dev
== Series Details ==
Series: igt/gem_exec_schedule: Trim max number of contexts used
URL : https://patchwork.freedesktop.org/series/38143/
State : failure
== Summary ==
Warning: bzip CI_DRM_3759/shard-glkb6/results2.json.bz2 wasn't in correct JSON format
Test pm_rpm:
Subgroup legacy-planes-dpms:
fail -> PASS (shard-hsw) fdo#105069 +3
Test kms_frontbuffer_tracking:
Subgroup fbc-1p-primscrn-pri-shrfb-draw-mmap-gtt:
fail -> PASS (shard-apl) fdo#101623 +3
Subgroup fbc-1p-primscrn-cur-indfb-draw-blt:
fail -> PASS (shard-apl) fdo#103167
Test kms_vblank:
Subgroup pipe-b-ts-continuation-dpms-suspend:
pass -> SKIP (shard-snb)
Subgroup pipe-b-ts-continuation-suspend:
pass -> INCOMPLETE (shard-hsw)
Test pm_lpsp:
Subgroup screens-disabled:
fail -> PASS (shard-hsw) fdo#104941
Test kms_flip:
Subgroup 2x-flip-vs-wf_vblank-interruptible:
fail -> PASS (shard-hsw) fdo#100368 +1
Test gem_eio:
Subgroup in-flight-suspend:
fail -> PASS (shard-hsw) fdo#104676 +1
Test perf_pmu:
Subgroup rc6-runtime-pm-long:
pass -> SKIP (shard-hsw) fdo#105010
Test perf:
Subgroup enable-disable:
pass -> FAIL (shard-apl) fdo#103715
fdo#105069 https://bugs.freedesktop.org/show_bug.cgi?id=105069
fdo#101623 https://bugs.freedesktop.org/show_bug.cgi?id=101623
fdo#103167 https://bugs.freedesktop.org/show_bug.cgi?id=103167
fdo#104941 https://bugs.freedesktop.org/show_bug.cgi?id=104941
fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
fdo#104676 https://bugs.freedesktop.org/show_bug.cgi?id=104676
fdo#105010 https://bugs.freedesktop.org/show_bug.cgi?id=105010
fdo#103715 https://bugs.freedesktop.org/show_bug.cgi?id=103715
shard-apl total:3358 pass:1741 dwarn:1 dfail:0 fail:21 skip:1594 time:13780s
shard-hsw total:3384 pass:1694 dwarn:1 dfail:0 fail:53 skip:1634 time:13964s
shard-snb total:3427 pass:1348 dwarn:1 dfail:0 fail:10 skip:2068 time:7629s
Blacklisted hosts:
shard-kbl total:3350 pass:1861 dwarn:1 dfail:0 fail:21 skip:1466 time:10777s
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_908/shards.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [igt-dev] [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
2018-02-13 9:26 ` [Intel-gfx] " Chris Wilson
@ 2018-02-13 22:47 ` Antonio Argenziano
-1 siblings, 0 replies; 13+ messages in thread
From: Antonio Argenziano @ 2018-02-13 22:47 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 13/02/18 01:26, Chris Wilson wrote:
> icl offers a much reduced context space, and in its simplest setup we
> cannot allocate one context per priority level, so trim the number and
> reuse the same context for multiple priority requests.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
> 1 file changed, 23 insertions(+), 15 deletions(-)
>
> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
> index 05f7ae38..ec02d994 100644
> --- a/tests/gem_exec_schedule.c
> +++ b/tests/gem_exec_schedule.c
> @@ -43,6 +43,8 @@
> #define BUSY_QLEN 8
> #define MAX_ELSP_QLEN 16
>
> +#define MAX_CONTEXTS 256
> +
> IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
>
> static void store_dword(int fd, uint32_t ctx, unsigned ring,
> @@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
> static void deep(int fd, unsigned ring)
> {
> #define XS 8
> - const unsigned int nctx = MAX_PRIO - MIN_PRIO;
> - const unsigned size = ALIGN(4*nctx, 4096);
> + const unsigned int nreq = MAX_PRIO - MIN_PRIO;
> + const unsigned size = ALIGN(4*nreq, 4096);
> struct timespec tv = {};
> struct cork cork;
> uint32_t result, dep[XS];
> @@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
> uint32_t *ptr;
> uint32_t *ctx;
>
> - ctx = malloc(sizeof(*ctx) * nctx);
> - for (int n = 0; n < nctx; n++) {
> + ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
> + for (int n = 0; n < MAX_CONTEXTS; n++) {
> ctx[n] = gem_context_create(fd);
> - gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
> }
>
> result = gem_create(fd, size);
> @@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
> execbuf.buffers_ptr = to_user_pointer(obj);
> execbuf.buffer_count = XS + 2;
> execbuf.flags = ring;
> - for (int n = 0; n < nctx; n++) {
> + for (int n = 0; n < MAX_CONTEXTS; n++) {
> execbuf.rsvd1 = ctx[n];
> gem_execbuf(fd, &execbuf);
> }
> @@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
> plug(fd, &cork);
>
> /* Create a deep dependency chain, with a few branches */
> - for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
> + for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
> + uint32_t context = ctx[n % MAX_CONTEXTS];
Doesn't this introduce some intra-context dependency we didn't have
before? Do we care?
Thanks,
Antonio
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [igt-dev] [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
@ 2018-02-13 22:47 ` Antonio Argenziano
0 siblings, 0 replies; 13+ messages in thread
From: Antonio Argenziano @ 2018-02-13 22:47 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 13/02/18 01:26, Chris Wilson wrote:
> icl offers a much reduced context space, and in its simplest setup we
> cannot allocate one context per priority level, so trim the number and
> reuse the same context for multiple priority requests.
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
> 1 file changed, 23 insertions(+), 15 deletions(-)
>
> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
> index 05f7ae38..ec02d994 100644
> --- a/tests/gem_exec_schedule.c
> +++ b/tests/gem_exec_schedule.c
> @@ -43,6 +43,8 @@
> #define BUSY_QLEN 8
> #define MAX_ELSP_QLEN 16
>
> +#define MAX_CONTEXTS 256
> +
> IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
>
> static void store_dword(int fd, uint32_t ctx, unsigned ring,
> @@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
> static void deep(int fd, unsigned ring)
> {
> #define XS 8
> - const unsigned int nctx = MAX_PRIO - MIN_PRIO;
> - const unsigned size = ALIGN(4*nctx, 4096);
> + const unsigned int nreq = MAX_PRIO - MIN_PRIO;
> + const unsigned size = ALIGN(4*nreq, 4096);
> struct timespec tv = {};
> struct cork cork;
> uint32_t result, dep[XS];
> @@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
> uint32_t *ptr;
> uint32_t *ctx;
>
> - ctx = malloc(sizeof(*ctx) * nctx);
> - for (int n = 0; n < nctx; n++) {
> + ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
> + for (int n = 0; n < MAX_CONTEXTS; n++) {
> ctx[n] = gem_context_create(fd);
> - gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
> }
>
> result = gem_create(fd, size);
> @@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
> execbuf.buffers_ptr = to_user_pointer(obj);
> execbuf.buffer_count = XS + 2;
> execbuf.flags = ring;
> - for (int n = 0; n < nctx; n++) {
> + for (int n = 0; n < MAX_CONTEXTS; n++) {
> execbuf.rsvd1 = ctx[n];
> gem_execbuf(fd, &execbuf);
> }
> @@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
> plug(fd, &cork);
>
> /* Create a deep dependency chain, with a few branches */
> - for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
> + for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
> + uint32_t context = ctx[n % MAX_CONTEXTS];
Doesn't this introduce some intra-context dependency we didn't have
before? Do we care?
Thanks,
Antonio
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [igt-dev] [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
2018-02-13 22:47 ` Antonio Argenziano
@ 2018-02-13 23:10 ` Chris Wilson
-1 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2018-02-13 23:10 UTC (permalink / raw)
To: Antonio Argenziano, intel-gfx; +Cc: igt-dev
Quoting Antonio Argenziano (2018-02-13 22:47:35)
>
>
> On 13/02/18 01:26, Chris Wilson wrote:
> > icl offers a much reduced context space, and in its simplest setup we
> > cannot allocate one context per priority level, so trim the number and
> > reuse the same context for multiple priority requests.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> > tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
> > 1 file changed, 23 insertions(+), 15 deletions(-)
> >
> > diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
> > index 05f7ae38..ec02d994 100644
> > --- a/tests/gem_exec_schedule.c
> > +++ b/tests/gem_exec_schedule.c
> > @@ -43,6 +43,8 @@
> > #define BUSY_QLEN 8
> > #define MAX_ELSP_QLEN 16
> >
> > +#define MAX_CONTEXTS 256
> > +
> > IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
> >
> > static void store_dword(int fd, uint32_t ctx, unsigned ring,
> > @@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
> > static void deep(int fd, unsigned ring)
> > {
> > #define XS 8
> > - const unsigned int nctx = MAX_PRIO - MIN_PRIO;
> > - const unsigned size = ALIGN(4*nctx, 4096);
> > + const unsigned int nreq = MAX_PRIO - MIN_PRIO;
> > + const unsigned size = ALIGN(4*nreq, 4096);
> > struct timespec tv = {};
> > struct cork cork;
> > uint32_t result, dep[XS];
> > @@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
> > uint32_t *ptr;
> > uint32_t *ctx;
> >
> > - ctx = malloc(sizeof(*ctx) * nctx);
> > - for (int n = 0; n < nctx; n++) {
> > + ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
> > + for (int n = 0; n < MAX_CONTEXTS; n++) {
> > ctx[n] = gem_context_create(fd);
> > - gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
> > }
> >
> > result = gem_create(fd, size);
> > @@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
> > execbuf.buffers_ptr = to_user_pointer(obj);
> > execbuf.buffer_count = XS + 2;
> > execbuf.flags = ring;
> > - for (int n = 0; n < nctx; n++) {
> > + for (int n = 0; n < MAX_CONTEXTS; n++) {
> > execbuf.rsvd1 = ctx[n];
> > gem_execbuf(fd, &execbuf);
> > }
> > @@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
> > plug(fd, &cork);
> >
> > /* Create a deep dependency chain, with a few branches */
> > - for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
> > + for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
> > + uint32_t context = ctx[n % MAX_CONTEXTS];
>
> Doesn't this introduce some intra-context dependency we didn't have
> before? Do we care?
Yes, it reduces the re-ordering possibilities (it will also exercise
priority inversion within the context as well). We have to reduce the
test somehow, and this preserves the essence in that the kernel has to
manage the same long dependencies chains, just now with a few more
branches.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [igt-dev] [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
@ 2018-02-13 23:10 ` Chris Wilson
0 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2018-02-13 23:10 UTC (permalink / raw)
To: Antonio Argenziano, intel-gfx; +Cc: igt-dev
Quoting Antonio Argenziano (2018-02-13 22:47:35)
>
>
> On 13/02/18 01:26, Chris Wilson wrote:
> > icl offers a much reduced context space, and in its simplest setup we
> > cannot allocate one context per priority level, so trim the number and
> > reuse the same context for multiple priority requests.
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> > ---
> > tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
> > 1 file changed, 23 insertions(+), 15 deletions(-)
> >
> > diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
> > index 05f7ae38..ec02d994 100644
> > --- a/tests/gem_exec_schedule.c
> > +++ b/tests/gem_exec_schedule.c
> > @@ -43,6 +43,8 @@
> > #define BUSY_QLEN 8
> > #define MAX_ELSP_QLEN 16
> >
> > +#define MAX_CONTEXTS 256
> > +
> > IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
> >
> > static void store_dword(int fd, uint32_t ctx, unsigned ring,
> > @@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
> > static void deep(int fd, unsigned ring)
> > {
> > #define XS 8
> > - const unsigned int nctx = MAX_PRIO - MIN_PRIO;
> > - const unsigned size = ALIGN(4*nctx, 4096);
> > + const unsigned int nreq = MAX_PRIO - MIN_PRIO;
> > + const unsigned size = ALIGN(4*nreq, 4096);
> > struct timespec tv = {};
> > struct cork cork;
> > uint32_t result, dep[XS];
> > @@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
> > uint32_t *ptr;
> > uint32_t *ctx;
> >
> > - ctx = malloc(sizeof(*ctx) * nctx);
> > - for (int n = 0; n < nctx; n++) {
> > + ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
> > + for (int n = 0; n < MAX_CONTEXTS; n++) {
> > ctx[n] = gem_context_create(fd);
> > - gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
> > }
> >
> > result = gem_create(fd, size);
> > @@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
> > execbuf.buffers_ptr = to_user_pointer(obj);
> > execbuf.buffer_count = XS + 2;
> > execbuf.flags = ring;
> > - for (int n = 0; n < nctx; n++) {
> > + for (int n = 0; n < MAX_CONTEXTS; n++) {
> > execbuf.rsvd1 = ctx[n];
> > gem_execbuf(fd, &execbuf);
> > }
> > @@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
> > plug(fd, &cork);
> >
> > /* Create a deep dependency chain, with a few branches */
> > - for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
> > + for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
> > + uint32_t context = ctx[n % MAX_CONTEXTS];
>
> Doesn't this introduce some intra-context dependency we didn't have
> before? Do we care?
Yes, it reduces the re-ordering possibilities (it will also exercise
priority inversion within the context as well). We have to reduce the
test somehow, and this preserves the essence in that the kernel has to
manage the same long dependencies chains, just now with a few more
branches.
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [igt-dev] [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
2018-02-13 23:10 ` Chris Wilson
@ 2018-02-13 23:51 ` Antonio Argenziano
-1 siblings, 0 replies; 13+ messages in thread
From: Antonio Argenziano @ 2018-02-13 23:51 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 13/02/18 15:10, Chris Wilson wrote:
> Quoting Antonio Argenziano (2018-02-13 22:47:35)
>>
>>
>> On 13/02/18 01:26, Chris Wilson wrote:
>>> icl offers a much reduced context space, and in its simplest setup we
>>> cannot allocate one context per priority level, so trim the number and
>>> reuse the same context for multiple priority requests.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> ---
>>> tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
>>> 1 file changed, 23 insertions(+), 15 deletions(-)
>>>
>>> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
>>> index 05f7ae38..ec02d994 100644
>>> --- a/tests/gem_exec_schedule.c
>>> +++ b/tests/gem_exec_schedule.c
>>> @@ -43,6 +43,8 @@
>>> #define BUSY_QLEN 8
>>> #define MAX_ELSP_QLEN 16
>>>
>>> +#define MAX_CONTEXTS 256
>>> +
>>> IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
>>>
>>> static void store_dword(int fd, uint32_t ctx, unsigned ring,
>>> @@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
>>> static void deep(int fd, unsigned ring)
>>> {
>>> #define XS 8
>>> - const unsigned int nctx = MAX_PRIO - MIN_PRIO;
>>> - const unsigned size = ALIGN(4*nctx, 4096);
>>> + const unsigned int nreq = MAX_PRIO - MIN_PRIO;
>>> + const unsigned size = ALIGN(4*nreq, 4096);
>>> struct timespec tv = {};
>>> struct cork cork;
>>> uint32_t result, dep[XS];
>>> @@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
>>> uint32_t *ptr;
>>> uint32_t *ctx;
>>>
>>> - ctx = malloc(sizeof(*ctx) * nctx);
>>> - for (int n = 0; n < nctx; n++) {
>>> + ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
>>> + for (int n = 0; n < MAX_CONTEXTS; n++) {
>>> ctx[n] = gem_context_create(fd);
>>> - gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
>>> }
>>>
>>> result = gem_create(fd, size);
>>> @@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
>>> execbuf.buffers_ptr = to_user_pointer(obj);
>>> execbuf.buffer_count = XS + 2;
>>> execbuf.flags = ring;
>>> - for (int n = 0; n < nctx; n++) {
>>> + for (int n = 0; n < MAX_CONTEXTS; n++) {
>>> execbuf.rsvd1 = ctx[n];
>>> gem_execbuf(fd, &execbuf);
>>> }
>>> @@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
>>> plug(fd, &cork);
>>>
>>> /* Create a deep dependency chain, with a few branches */
>>> - for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
>>> + for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
>>> + uint32_t context = ctx[n % MAX_CONTEXTS];
>>
>> Doesn't this introduce some intra-context dependency we didn't have
>> before? Do we care?
>
> Yes, it reduces the re-ordering possibilities (it will also exercise
> priority inversion within the context as well). We have to reduce the
> test somehow, and this preserves the essence in that the kernel has to
> manage the same long dependencies chains, just now with a few more
> branches.
OK.
Acked-by: Antonio Argenziano <antonio.argenziano@intel.com>
Thanks,
Antonio
> -Chris
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [Intel-gfx] [igt-dev] [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
@ 2018-02-13 23:51 ` Antonio Argenziano
0 siblings, 0 replies; 13+ messages in thread
From: Antonio Argenziano @ 2018-02-13 23:51 UTC (permalink / raw)
To: Chris Wilson, intel-gfx; +Cc: igt-dev
On 13/02/18 15:10, Chris Wilson wrote:
> Quoting Antonio Argenziano (2018-02-13 22:47:35)
>>
>>
>> On 13/02/18 01:26, Chris Wilson wrote:
>>> icl offers a much reduced context space, and in its simplest setup we
>>> cannot allocate one context per priority level, so trim the number and
>>> reuse the same context for multiple priority requests.
>>>
>>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>>> ---
>>> tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
>>> 1 file changed, 23 insertions(+), 15 deletions(-)
>>>
>>> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
>>> index 05f7ae38..ec02d994 100644
>>> --- a/tests/gem_exec_schedule.c
>>> +++ b/tests/gem_exec_schedule.c
>>> @@ -43,6 +43,8 @@
>>> #define BUSY_QLEN 8
>>> #define MAX_ELSP_QLEN 16
>>>
>>> +#define MAX_CONTEXTS 256
>>> +
>>> IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
>>>
>>> static void store_dword(int fd, uint32_t ctx, unsigned ring,
>>> @@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
>>> static void deep(int fd, unsigned ring)
>>> {
>>> #define XS 8
>>> - const unsigned int nctx = MAX_PRIO - MIN_PRIO;
>>> - const unsigned size = ALIGN(4*nctx, 4096);
>>> + const unsigned int nreq = MAX_PRIO - MIN_PRIO;
>>> + const unsigned size = ALIGN(4*nreq, 4096);
>>> struct timespec tv = {};
>>> struct cork cork;
>>> uint32_t result, dep[XS];
>>> @@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
>>> uint32_t *ptr;
>>> uint32_t *ctx;
>>>
>>> - ctx = malloc(sizeof(*ctx) * nctx);
>>> - for (int n = 0; n < nctx; n++) {
>>> + ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
>>> + for (int n = 0; n < MAX_CONTEXTS; n++) {
>>> ctx[n] = gem_context_create(fd);
>>> - gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
>>> }
>>>
>>> result = gem_create(fd, size);
>>> @@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
>>> execbuf.buffers_ptr = to_user_pointer(obj);
>>> execbuf.buffer_count = XS + 2;
>>> execbuf.flags = ring;
>>> - for (int n = 0; n < nctx; n++) {
>>> + for (int n = 0; n < MAX_CONTEXTS; n++) {
>>> execbuf.rsvd1 = ctx[n];
>>> gem_execbuf(fd, &execbuf);
>>> }
>>> @@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
>>> plug(fd, &cork);
>>>
>>> /* Create a deep dependency chain, with a few branches */
>>> - for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
>>> + for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
>>> + uint32_t context = ctx[n % MAX_CONTEXTS];
>>
>> Doesn't this introduce some intra-context dependency we didn't have
>> before? Do we care?
>
> Yes, it reduces the re-ordering possibilities (it will also exercise
> priority inversion within the context as well). We have to reduce the
> test somehow, and this preserves the essence in that the kernel has to
> manage the same long dependencies chains, just now with a few more
> branches.
OK.
Acked-by: Antonio Argenziano <antonio.argenziano@intel.com>
Thanks,
Antonio
> -Chris
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [igt-dev] [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
2018-02-13 23:51 ` [Intel-gfx] " Antonio Argenziano
@ 2018-02-14 9:00 ` Chris Wilson
-1 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2018-02-14 9:00 UTC (permalink / raw)
To: Antonio Argenziano, intel-gfx; +Cc: igt-dev
Quoting Antonio Argenziano (2018-02-13 23:51:00)
>
>
> On 13/02/18 15:10, Chris Wilson wrote:
> > Quoting Antonio Argenziano (2018-02-13 22:47:35)
> >>
> >>
> >> On 13/02/18 01:26, Chris Wilson wrote:
> >>> icl offers a much reduced context space, and in its simplest setup we
> >>> cannot allocate one context per priority level, so trim the number and
> >>> reuse the same context for multiple priority requests.
> >>>
> >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>> ---
> >>> tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
> >>> 1 file changed, 23 insertions(+), 15 deletions(-)
> >>>
> >>> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
> >>> index 05f7ae38..ec02d994 100644
> >>> --- a/tests/gem_exec_schedule.c
> >>> +++ b/tests/gem_exec_schedule.c
> >>> @@ -43,6 +43,8 @@
> >>> #define BUSY_QLEN 8
> >>> #define MAX_ELSP_QLEN 16
> >>>
> >>> +#define MAX_CONTEXTS 256
> >>> +
> >>> IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
> >>>
> >>> static void store_dword(int fd, uint32_t ctx, unsigned ring,
> >>> @@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
> >>> static void deep(int fd, unsigned ring)
> >>> {
> >>> #define XS 8
> >>> - const unsigned int nctx = MAX_PRIO - MIN_PRIO;
> >>> - const unsigned size = ALIGN(4*nctx, 4096);
> >>> + const unsigned int nreq = MAX_PRIO - MIN_PRIO;
> >>> + const unsigned size = ALIGN(4*nreq, 4096);
> >>> struct timespec tv = {};
> >>> struct cork cork;
> >>> uint32_t result, dep[XS];
> >>> @@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
> >>> uint32_t *ptr;
> >>> uint32_t *ctx;
> >>>
> >>> - ctx = malloc(sizeof(*ctx) * nctx);
> >>> - for (int n = 0; n < nctx; n++) {
> >>> + ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
> >>> + for (int n = 0; n < MAX_CONTEXTS; n++) {
> >>> ctx[n] = gem_context_create(fd);
> >>> - gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
> >>> }
> >>>
> >>> result = gem_create(fd, size);
> >>> @@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
> >>> execbuf.buffers_ptr = to_user_pointer(obj);
> >>> execbuf.buffer_count = XS + 2;
> >>> execbuf.flags = ring;
> >>> - for (int n = 0; n < nctx; n++) {
> >>> + for (int n = 0; n < MAX_CONTEXTS; n++) {
> >>> execbuf.rsvd1 = ctx[n];
> >>> gem_execbuf(fd, &execbuf);
> >>> }
> >>> @@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
> >>> plug(fd, &cork);
> >>>
> >>> /* Create a deep dependency chain, with a few branches */
> >>> - for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
> >>> + for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
> >>> + uint32_t context = ctx[n % MAX_CONTEXTS];
> >>
> >> Doesn't this introduce some intra-context dependency we didn't have
> >> before? Do we care?
> >
> > Yes, it reduces the re-ordering possibilities (it will also exercise
> > priority inversion within the context as well). We have to reduce the
> > test somehow, and this preserves the essence in that the kernel has to
> > manage the same long dependency chains, just now with a few more
> > branches.
>
> OK.
>
> Acked-by: Antonio Argenziano <antonio.argenziano@intel.com>
Fwiw, we can bump MAX_CONTEXTS to 1024.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [Intel-gfx] [igt-dev] [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
@ 2018-02-14 9:00 ` Chris Wilson
0 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2018-02-14 9:00 UTC (permalink / raw)
To: Antonio Argenziano, intel-gfx; +Cc: igt-dev
Quoting Antonio Argenziano (2018-02-13 23:51:00)
>
>
> On 13/02/18 15:10, Chris Wilson wrote:
> > Quoting Antonio Argenziano (2018-02-13 22:47:35)
> >>
> >>
> >> On 13/02/18 01:26, Chris Wilson wrote:
> >>> icl offers a much reduced context space, and in its simplest setup we
> >>> cannot allocate one context per priority level, so trim the number and
> >>> reuse the same context for multiple priority requests.
> >>>
> >>> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >>> ---
> >>> tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
> >>> 1 file changed, 23 insertions(+), 15 deletions(-)
> >>>
> >>> diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
> >>> index 05f7ae38..ec02d994 100644
> >>> --- a/tests/gem_exec_schedule.c
> >>> +++ b/tests/gem_exec_schedule.c
> >>> @@ -43,6 +43,8 @@
> >>> #define BUSY_QLEN 8
> >>> #define MAX_ELSP_QLEN 16
> >>>
> >>> +#define MAX_CONTEXTS 256
> >>> +
> >>> IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
> >>>
> >>> static void store_dword(int fd, uint32_t ctx, unsigned ring,
> >>> @@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
> >>> static void deep(int fd, unsigned ring)
> >>> {
> >>> #define XS 8
> >>> - const unsigned int nctx = MAX_PRIO - MIN_PRIO;
> >>> - const unsigned size = ALIGN(4*nctx, 4096);
> >>> + const unsigned int nreq = MAX_PRIO - MIN_PRIO;
> >>> + const unsigned size = ALIGN(4*nreq, 4096);
> >>> struct timespec tv = {};
> >>> struct cork cork;
> >>> uint32_t result, dep[XS];
> >>> @@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
> >>> uint32_t *ptr;
> >>> uint32_t *ctx;
> >>>
> >>> - ctx = malloc(sizeof(*ctx) * nctx);
> >>> - for (int n = 0; n < nctx; n++) {
> >>> + ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
> >>> + for (int n = 0; n < MAX_CONTEXTS; n++) {
> >>> ctx[n] = gem_context_create(fd);
> >>> - gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
> >>> }
> >>>
> >>> result = gem_create(fd, size);
> >>> @@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
> >>> execbuf.buffers_ptr = to_user_pointer(obj);
> >>> execbuf.buffer_count = XS + 2;
> >>> execbuf.flags = ring;
> >>> - for (int n = 0; n < nctx; n++) {
> >>> + for (int n = 0; n < MAX_CONTEXTS; n++) {
> >>> execbuf.rsvd1 = ctx[n];
> >>> gem_execbuf(fd, &execbuf);
> >>> }
> >>> @@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
> >>> plug(fd, &cork);
> >>>
> >>> /* Create a deep dependency chain, with a few branches */
> >>> - for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
> >>> + for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
> >>> + uint32_t context = ctx[n % MAX_CONTEXTS];
> >>
> >> Doesn't this introduce some intra-context dependency we didn't have
> >> before? Do we care?
> >
> > Yes, it reduces the re-ordering possibilities (it will also exercise
> > priority inversion within the context as well). We have to reduce the
> > test somehow, and this preserves the essence in that the kernel has to
> > manage the same long dependency chains, just now with a few more
> > branches.
>
> OK.
>
> Acked-by: Antonio Argenziano <antonio.argenziano@intel.com>
Fwiw, we can bump MAX_CONTEXTS to 1024.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used
@ 2018-02-14 9:58 Chris Wilson
0 siblings, 0 replies; 13+ messages in thread
From: Chris Wilson @ 2018-02-14 9:58 UTC (permalink / raw)
To: intel-gfx; +Cc: igt-dev
icl offers a much reduced context space, and in its simplest setup we
cannot allocate one context per priority level, so trim the number and
reuse the same context for multiple priority requests.
v2: Bump the MAX to 1024 (still lower than the ~4096 previously in use)
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Acked-by: Antonio Argenziano <antonio.argenziano@intel.com>
---
tests/gem_exec_schedule.c | 38 +++++++++++++++++++++++---------------
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/tests/gem_exec_schedule.c b/tests/gem_exec_schedule.c
index 05f7ae38..8cdbd888 100644
--- a/tests/gem_exec_schedule.c
+++ b/tests/gem_exec_schedule.c
@@ -43,6 +43,8 @@
#define BUSY_QLEN 8
#define MAX_ELSP_QLEN 16
+#define MAX_CONTEXTS 1024
+
IGT_TEST_DESCRIPTION("Check that we can control the order of execution");
static void store_dword(int fd, uint32_t ctx, unsigned ring,
@@ -556,8 +558,8 @@ static void preemptive_hang(int fd, unsigned ring)
static void deep(int fd, unsigned ring)
{
#define XS 8
- const unsigned int nctx = MAX_PRIO - MIN_PRIO;
- const unsigned size = ALIGN(4*nctx, 4096);
+ const unsigned int nreq = MAX_PRIO - MIN_PRIO;
+ const unsigned size = ALIGN(4*nreq, 4096);
struct timespec tv = {};
struct cork cork;
uint32_t result, dep[XS];
@@ -565,10 +567,9 @@ static void deep(int fd, unsigned ring)
uint32_t *ptr;
uint32_t *ctx;
- ctx = malloc(sizeof(*ctx) * nctx);
- for (int n = 0; n < nctx; n++) {
+ ctx = malloc(sizeof(*ctx) * MAX_CONTEXTS);
+ for (int n = 0; n < MAX_CONTEXTS; n++) {
ctx[n] = gem_context_create(fd);
- gem_context_set_priority(fd, ctx[n], MAX_PRIO - nctx + n);
}
result = gem_create(fd, size);
@@ -592,7 +593,7 @@ static void deep(int fd, unsigned ring)
execbuf.buffers_ptr = to_user_pointer(obj);
execbuf.buffer_count = XS + 2;
execbuf.flags = ring;
- for (int n = 0; n < nctx; n++) {
+ for (int n = 0; n < MAX_CONTEXTS; n++) {
execbuf.rsvd1 = ctx[n];
gem_execbuf(fd, &execbuf);
}
@@ -603,22 +604,29 @@ static void deep(int fd, unsigned ring)
plug(fd, &cork);
/* Create a deep dependency chain, with a few branches */
- for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 8; n++)
+ for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 8; n++) {
+ uint32_t context = ctx[n % MAX_CONTEXTS];
+ gem_context_set_priority(fd, context, MAX_PRIO - nreq + n);
+
for (int m = 0; m < XS; m++)
- store_dword(fd, ctx[n], ring, dep[m], 4*n, ctx[n], cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, context, ring, dep[m], 4*n, context, cork.handle, I915_GEM_DOMAIN_INSTRUCTION);
+ }
+
+ for (int n = 0; n < nreq && igt_seconds_elapsed(&tv) < 6; n++) {
+ uint32_t context = ctx[n % MAX_CONTEXTS];
+ gem_context_set_priority(fd, context, MAX_PRIO - nreq + n);
- for (int n = 0; n < nctx && igt_seconds_elapsed(&tv) < 6; n++) {
for (int m = 0; m < XS; m++) {
- store_dword(fd, ctx[n], ring, result, 4*n, ctx[n], dep[m], 0);
- store_dword(fd, ctx[n], ring, result, 4*m, ctx[n], 0, I915_GEM_DOMAIN_INSTRUCTION);
+ store_dword(fd, context, ring, result, 4*n, context, dep[m], 0);
+ store_dword(fd, context, ring, result, 4*m, context, 0, I915_GEM_DOMAIN_INSTRUCTION);
}
- expected = ctx[n];
+ expected = context;
}
unplug_show_queue(fd, &cork, ring);
igt_require(expected); /* too slow */
- for (int n = 0; n < nctx; n++)
+ for (int n = 0; n < MAX_CONTEXTS; n++)
gem_context_destroy(fd, ctx[n]);
for (int m = 0; m < XS; m++) {
@@ -627,8 +635,8 @@ static void deep(int fd, unsigned ring)
I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
gem_close(fd, dep[m]);
- for (int n = 0; n < nctx; n++)
- igt_assert_eq_u32(ptr[n], ctx[n]);
+ for (int n = 0; n < nreq; n++)
+ igt_assert_eq_u32(ptr[n], ctx[n % MAX_CONTEXTS]);
munmap(ptr, size);
}
--
2.16.1
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 13+ messages in thread
end of thread, other threads:[~2018-02-14 9:58 UTC | newest]
Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-02-13 9:26 [PATCH igt] igt/gem_exec_schedule: Trim max number of contexts used Chris Wilson
2018-02-13 9:26 ` [Intel-gfx] " Chris Wilson
2018-02-13 10:01 ` [igt-dev] ✓ Fi.CI.BAT: success for " Patchwork
2018-02-13 12:59 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
2018-02-13 22:47 ` [igt-dev] [PATCH igt] " Antonio Argenziano
2018-02-13 22:47 ` Antonio Argenziano
2018-02-13 23:10 ` Chris Wilson
2018-02-13 23:10 ` Chris Wilson
2018-02-13 23:51 ` Antonio Argenziano
2018-02-13 23:51 ` [Intel-gfx] " Antonio Argenziano
2018-02-14 9:00 ` Chris Wilson
2018-02-14 9:00 ` [Intel-gfx] " Chris Wilson
2018-02-14 9:58 Chris Wilson
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.