* [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size
@ 2020-02-10 23:10 Chris Wilson
2020-02-11 0:10 ` [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915/selftests: Trim blitter block size (rev5) Patchwork
2020-02-12 10:49 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
0 siblings, 2 replies; 10+ messages in thread
From: Chris Wilson @ 2020-02-10 23:10 UTC (permalink / raw)
To: intel-gfx; +Cc: Matthew Auld
Reduce the amount of work we do to verify client blt correctness as
currently our 0.5s subtest takes about 15s on slower devices!
v2: Grow the maximum block size until we run out of time
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
---
.../i915/gem/selftests/i915_gem_object_blt.c | 72 ++++++++++++-------
1 file changed, 46 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe46715..f29da4560dc0 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total, max;
int err;
ctx = thread->ctx;
@@ -225,24 +226,28 @@ static int igt_fill_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ /*
+ * If we have a tiny shared address space, like for the GGTT
+ * then we can't be too greedy.
+ */
+ max = ce->vm->total;
+ if (i915_is_ggtt(ce->vm) || thread->ctx)
+ max = div_u64(max, thread->n_cpus);
+ max >>= 4;
+
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- /*
- * If we have a tiny shared address space, like for the GGTT
- * then we can't be too greedy.
- */
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, max);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -276,13 +281,14 @@ static int igt_fill_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -293,6 +299,8 @@ static int igt_fill_blt_thread(void *arg)
i915_gem_object_unpin_map(obj);
i915_gem_object_put(obj);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
@@ -319,6 +327,7 @@ static int igt_copy_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total, max;
int err;
ctx = thread->ctx;
@@ -334,20 +343,28 @@ static int igt_copy_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ /*
+ * If we have a tiny shared address space, like for the GGTT
+ * then we can't be too greedy.
+ */
+ max = ce->vm->total;
+ if (i915_is_ggtt(ce->vm) || thread->ctx)
+ max = div_u64(max, thread->n_cpus);
+ max >>= 4;
+
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, max);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -397,13 +414,14 @@ static int igt_copy_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(dst);
- err = i915_gem_object_set_to_cpu_domain(dst, false);
- i915_gem_object_unlock(dst);
+ err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -416,6 +434,8 @@ static int igt_copy_blt_thread(void *arg)
i915_gem_object_put(src);
i915_gem_object_put(dst);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
--
2.25.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915/selftests: Trim blitter block size (rev5)
2020-02-10 23:10 [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size Chris Wilson
@ 2020-02-11 0:10 ` Patchwork
2020-02-12 10:49 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
1 sibling, 0 replies; 10+ messages in thread
From: Patchwork @ 2020-02-11 0:10 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/i915/selftests: Trim blitter block size (rev5)
URL : https://patchwork.freedesktop.org/series/73066/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_7903 -> Patchwork_16512
====================================================
Summary
-------
**SUCCESS**
No regressions found.
External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/index.html
Known issues
------------
Here are the changes found in Patchwork_16512 that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@kms_chamelium@hdmi-hpd-fast:
- fi-kbl-7500u: [PASS][1] -> [FAIL][2] ([fdo#111407])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/fi-kbl-7500u/igt@kms_chamelium@hdmi-hpd-fast.html
#### Possible fixes ####
* igt@i915_selftest@live_blt:
- fi-bsw-n3050: [INCOMPLETE][3] ([i915#392]) -> [PASS][4]
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/fi-bsw-n3050/igt@i915_selftest@live_blt.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/fi-bsw-n3050/igt@i915_selftest@live_blt.html
- fi-ivb-3770: [DMESG-FAIL][5] ([i915#725]) -> [PASS][6]
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/fi-ivb-3770/igt@i915_selftest@live_blt.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/fi-ivb-3770/igt@i915_selftest@live_blt.html
- fi-hsw-4770: [DMESG-FAIL][7] ([i915#553] / [i915#725]) -> [PASS][8]
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/fi-hsw-4770/igt@i915_selftest@live_blt.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/fi-hsw-4770/igt@i915_selftest@live_blt.html
* igt@i915_selftest@live_gem_contexts:
- fi-cfl-8700k: [DMESG-FAIL][9] ([i915#623]) -> [PASS][10]
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/fi-cfl-8700k/igt@i915_selftest@live_gem_contexts.html
- fi-byt-n2820: [DMESG-FAIL][11] ([i915#1052]) -> [PASS][12]
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/fi-byt-n2820/igt@i915_selftest@live_gem_contexts.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/fi-byt-n2820/igt@i915_selftest@live_gem_contexts.html
- fi-cfl-guc: [INCOMPLETE][13] ([CI#80] / [fdo#106070] / [i915#424]) -> [PASS][14]
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/fi-cfl-guc/igt@i915_selftest@live_gem_contexts.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/fi-cfl-guc/igt@i915_selftest@live_gem_contexts.html
- fi-cml-s: [DMESG-FAIL][15] ([i915#877]) -> [PASS][16]
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/fi-cml-s/igt@i915_selftest@live_gem_contexts.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/fi-cml-s/igt@i915_selftest@live_gem_contexts.html
#### Warnings ####
* igt@gem_exec_parallel@fds:
- fi-byt-n2820: [FAIL][17] ([i915#694]) -> [TIMEOUT][18] ([fdo#112271])
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/fi-byt-n2820/igt@gem_exec_parallel@fds.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/fi-byt-n2820/igt@gem_exec_parallel@fds.html
[CI#80]: https://gitlab.freedesktop.org/gfx-ci/i915-infra/issues/80
[fdo#106070]: https://bugs.freedesktop.org/show_bug.cgi?id=106070
[fdo#111407]: https://bugs.freedesktop.org/show_bug.cgi?id=111407
[fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271
[i915#1052]: https://gitlab.freedesktop.org/drm/intel/issues/1052
[i915#392]: https://gitlab.freedesktop.org/drm/intel/issues/392
[i915#424]: https://gitlab.freedesktop.org/drm/intel/issues/424
[i915#553]: https://gitlab.freedesktop.org/drm/intel/issues/553
[i915#623]: https://gitlab.freedesktop.org/drm/intel/issues/623
[i915#694]: https://gitlab.freedesktop.org/drm/intel/issues/694
[i915#725]: https://gitlab.freedesktop.org/drm/intel/issues/725
[i915#877]: https://gitlab.freedesktop.org/drm/intel/issues/877
Participating hosts (47 -> 43)
------------------------------
Additional (4): fi-hsw-peppy fi-skl-lmem fi-gdg-551 fi-snb-2600
Missing (8): fi-ilk-m540 fi-bdw-5557u fi-hsw-4200u fi-byt-squawks fi-kbl-guc fi-byt-clapper fi-bsw-nick fi-bdw-samus
Build changes
-------------
* CI: CI-20190529 -> None
* Linux: CI_DRM_7903 -> Patchwork_16512
CI-20190529: 20190529
CI_DRM_7903: 47b768c475f4a11a48bc43e6228660f8b26a542b @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_5433: 6a96c17f3a1b4e1f90b1a0b0ce42a7219875d1a4 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
Patchwork_16512: 1b0c23fd95fa68e8be067333049dd94c47346579 @ git://anongit.freedesktop.org/gfx-ci/linux
== Linux commits ==
1b0c23fd95fa drm/i915/selftests: Trim blitter block size
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* [Intel-gfx] ✓ Fi.CI.IGT: success for drm/i915/selftests: Trim blitter block size (rev5)
2020-02-10 23:10 [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size Chris Wilson
2020-02-11 0:10 ` [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915/selftests: Trim blitter block size (rev5) Patchwork
@ 2020-02-12 10:49 ` Patchwork
1 sibling, 0 replies; 10+ messages in thread
From: Patchwork @ 2020-02-12 10:49 UTC (permalink / raw)
To: Chris Wilson; +Cc: intel-gfx
== Series Details ==
Series: drm/i915/selftests: Trim blitter block size (rev5)
URL : https://patchwork.freedesktop.org/series/73066/
State : success
== Summary ==
CI Bug Log - changes from CI_DRM_7903_full -> Patchwork_16512_full
====================================================
Summary
-------
**SUCCESS**
No regressions found.
Known issues
------------
Here are the changes found in Patchwork_16512_full that come from known issues:
### IGT changes ###
#### Issues hit ####
* igt@gem_blits@basic:
- shard-skl: [PASS][1] -> [DMESG-WARN][2] ([i915#836])
[1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-skl3/igt@gem_blits@basic.html
[2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-skl8/igt@gem_blits@basic.html
* igt@gem_ctx_shared@exec-shared-gtt-blt:
- shard-tglb: [PASS][3] -> [FAIL][4] ([i915#616])
[3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-tglb3/igt@gem_ctx_shared@exec-shared-gtt-blt.html
[4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-tglb1/igt@gem_ctx_shared@exec-shared-gtt-blt.html
* igt@gem_ctx_shared@exec-single-timeline-bsd:
- shard-iclb: [PASS][5] -> [SKIP][6] ([fdo#110841])
[5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb7/igt@gem_ctx_shared@exec-single-timeline-bsd.html
[6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb2/igt@gem_ctx_shared@exec-single-timeline-bsd.html
* igt@gem_exec_parallel@vcs1-fds:
- shard-iclb: [PASS][7] -> [SKIP][8] ([fdo#112080]) +8 similar issues
[7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb2/igt@gem_exec_parallel@vcs1-fds.html
[8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb5/igt@gem_exec_parallel@vcs1-fds.html
* igt@gem_exec_schedule@in-order-bsd:
- shard-iclb: [PASS][9] -> [SKIP][10] ([fdo#112146]) +2 similar issues
[9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb8/igt@gem_exec_schedule@in-order-bsd.html
[10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb2/igt@gem_exec_schedule@in-order-bsd.html
* igt@gem_exec_schedule@preempt-queue-bsd1:
- shard-iclb: [PASS][11] -> [SKIP][12] ([fdo#109276]) +12 similar issues
[11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb1/igt@gem_exec_schedule@preempt-queue-bsd1.html
[12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb7/igt@gem_exec_schedule@preempt-queue-bsd1.html
* igt@gem_partial_pwrite_pread@write-uncached:
- shard-hsw: [PASS][13] -> [FAIL][14] ([i915#694]) +2 similar issues
[13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-hsw7/igt@gem_partial_pwrite_pread@write-uncached.html
[14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-hsw8/igt@gem_partial_pwrite_pread@write-uncached.html
* igt@i915_selftest@live_gtt:
- shard-kbl: [PASS][15] -> [TIMEOUT][16] ([fdo#112271])
[15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-kbl4/igt@i915_selftest@live_gtt.html
[16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-kbl2/igt@i915_selftest@live_gtt.html
* igt@kms_cursor_crc@pipe-a-cursor-suspend:
- shard-kbl: [PASS][17] -> [DMESG-WARN][18] ([i915#180]) +3 similar issues
[17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-kbl4/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
[18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-kbl2/igt@kms_cursor_crc@pipe-a-cursor-suspend.html
* igt@kms_flip@flip-vs-suspend-interruptible:
- shard-apl: [PASS][19] -> [DMESG-WARN][20] ([i915#180]) +5 similar issues
[19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-apl2/igt@kms_flip@flip-vs-suspend-interruptible.html
[20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-apl8/igt@kms_flip@flip-vs-suspend-interruptible.html
* igt@kms_plane_alpha_blend@pipe-a-coverage-7efc:
- shard-skl: [PASS][21] -> [FAIL][22] ([fdo#108145])
[21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-skl8/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
[22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-skl9/igt@kms_plane_alpha_blend@pipe-a-coverage-7efc.html
* igt@kms_psr2_su@frontbuffer:
- shard-iclb: [PASS][23] -> [SKIP][24] ([fdo#109642] / [fdo#111068])
[23]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb2/igt@kms_psr2_su@frontbuffer.html
[24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb8/igt@kms_psr2_su@frontbuffer.html
* igt@kms_psr@psr2_sprite_mmap_cpu:
- shard-iclb: [PASS][25] -> [SKIP][26] ([fdo#109441])
[25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb2/igt@kms_psr@psr2_sprite_mmap_cpu.html
[26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb5/igt@kms_psr@psr2_sprite_mmap_cpu.html
* igt@kms_setmode@basic:
- shard-kbl: [PASS][27] -> [FAIL][28] ([i915#31])
[27]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-kbl6/igt@kms_setmode@basic.html
[28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-kbl6/igt@kms_setmode@basic.html
* igt@prime_mmap_coherency@ioctl-errors:
- shard-hsw: [PASS][29] -> [FAIL][30] ([i915#831])
[29]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-hsw8/igt@prime_mmap_coherency@ioctl-errors.html
[30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-hsw6/igt@prime_mmap_coherency@ioctl-errors.html
#### Possible fixes ####
* igt@gem_ctx_isolation@rcs0-s3:
- shard-apl: [DMESG-WARN][31] ([i915#180]) -> [PASS][32] +2 similar issues
[31]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-apl1/igt@gem_ctx_isolation@rcs0-s3.html
[32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-apl2/igt@gem_ctx_isolation@rcs0-s3.html
* igt@gem_ctx_isolation@vcs0-s3:
- shard-skl: [INCOMPLETE][33] ([i915#69]) -> [PASS][34] +1 similar issue
[33]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-skl3/igt@gem_ctx_isolation@vcs0-s3.html
[34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-skl6/igt@gem_ctx_isolation@vcs0-s3.html
* igt@gem_exec_schedule@preemptive-hang-bsd:
- shard-iclb: [SKIP][35] ([fdo#112146]) -> [PASS][36] +10 similar issues
[35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb1/igt@gem_exec_schedule@preemptive-hang-bsd.html
[36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb7/igt@gem_exec_schedule@preemptive-hang-bsd.html
* igt@gem_partial_pwrite_pread@writes-after-reads-uncached:
- shard-hsw: [FAIL][37] ([i915#694]) -> [PASS][38] +2 similar issues
[37]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-hsw2/igt@gem_partial_pwrite_pread@writes-after-reads-uncached.html
[38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-hsw2/igt@gem_partial_pwrite_pread@writes-after-reads-uncached.html
* igt@gem_softpin@noreloc-s3:
- shard-kbl: [DMESG-WARN][39] ([i915#180]) -> [PASS][40]
[39]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-kbl7/igt@gem_softpin@noreloc-s3.html
[40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-kbl6/igt@gem_softpin@noreloc-s3.html
* igt@gen9_exec_parse@allowed-all:
- shard-glk: [DMESG-WARN][41] ([i915#716]) -> [PASS][42]
[41]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-glk7/igt@gen9_exec_parse@allowed-all.html
[42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-glk3/igt@gen9_exec_parse@allowed-all.html
* igt@i915_pm_rps@waitboost:
- shard-iclb: [FAIL][43] ([i915#413]) -> [PASS][44]
[43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb5/igt@i915_pm_rps@waitboost.html
[44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb6/igt@i915_pm_rps@waitboost.html
* igt@kms_busy@basic-flip-pipe-a:
- shard-snb: [SKIP][45] ([fdo#109271]) -> [PASS][46] +3 similar issues
[45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-snb4/igt@kms_busy@basic-flip-pipe-a.html
[46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-snb4/igt@kms_busy@basic-flip-pipe-a.html
* igt@kms_cursor_crc@pipe-b-cursor-suspend:
- shard-skl: [INCOMPLETE][47] ([i915#300]) -> [PASS][48]
[47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-skl1/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
[48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-skl8/igt@kms_cursor_crc@pipe-b-cursor-suspend.html
* igt@kms_cursor_crc@pipe-c-cursor-128x128-random:
- shard-skl: [FAIL][49] ([i915#54]) -> [PASS][50]
[49]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-skl10/igt@kms_cursor_crc@pipe-c-cursor-128x128-random.html
[50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-skl7/igt@kms_cursor_crc@pipe-c-cursor-128x128-random.html
* igt@kms_flip@2x-flip-vs-expired-vblank-interruptible:
- shard-glk: [FAIL][51] ([i915#79]) -> [PASS][52]
[51]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-glk6/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible.html
[52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-glk5/igt@kms_flip@2x-flip-vs-expired-vblank-interruptible.html
* igt@kms_flip@flip-vs-expired-vblank-interruptible:
- shard-skl: [FAIL][53] ([i915#79]) -> [PASS][54]
[53]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-skl8/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
[54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-skl9/igt@kms_flip@flip-vs-expired-vblank-interruptible.html
* igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min:
- shard-skl: [FAIL][55] ([fdo#108145]) -> [PASS][56]
[55]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-skl10/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html
[56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-skl7/igt@kms_plane_alpha_blend@pipe-c-constant-alpha-min.html
* igt@kms_plane_lowres@pipe-a-tiling-y:
- shard-glk: [FAIL][57] ([i915#899]) -> [PASS][58]
[57]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-glk3/igt@kms_plane_lowres@pipe-a-tiling-y.html
[58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-glk1/igt@kms_plane_lowres@pipe-a-tiling-y.html
* igt@kms_plane_multiple@atomic-pipe-b-tiling-yf:
- shard-skl: [DMESG-WARN][59] ([IGT#6]) -> [PASS][60]
[59]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-skl3/igt@kms_plane_multiple@atomic-pipe-b-tiling-yf.html
[60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-skl6/igt@kms_plane_multiple@atomic-pipe-b-tiling-yf.html
* igt@kms_psr@psr2_sprite_plane_move:
- shard-iclb: [SKIP][61] ([fdo#109441]) -> [PASS][62] +3 similar issues
[61]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb8/igt@kms_psr@psr2_sprite_plane_move.html
[62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb2/igt@kms_psr@psr2_sprite_plane_move.html
* igt@perf_pmu@busy-no-semaphores-vcs1:
- shard-iclb: [SKIP][63] ([fdo#112080]) -> [PASS][64] +16 similar issues
[63]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb3/igt@perf_pmu@busy-no-semaphores-vcs1.html
[64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb2/igt@perf_pmu@busy-no-semaphores-vcs1.html
* igt@prime_busy@hang-bsd2:
- shard-iclb: [SKIP][65] ([fdo#109276]) -> [PASS][66] +25 similar issues
[65]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb7/igt@prime_busy@hang-bsd2.html
[66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb2/igt@prime_busy@hang-bsd2.html
#### Warnings ####
* igt@gem_ctx_isolation@vcs1-nonpriv-switch:
- shard-iclb: [FAIL][67] ([IGT#28]) -> [SKIP][68] ([fdo#112080])
[67]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-iclb2/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html
[68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-iclb8/igt@gem_ctx_isolation@vcs1-nonpriv-switch.html
* igt@i915_pm_rpm@gem-idle:
- shard-snb: [INCOMPLETE][69] ([i915#82]) -> [SKIP][70] ([fdo#109271])
[69]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_7903/shard-snb5/igt@i915_pm_rpm@gem-idle.html
[70]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/shard-snb5/igt@i915_pm_rpm@gem-idle.html
{name}: This element is suppressed. This means it is ignored when computing
the status of the difference (SUCCESS, WARNING, or FAILURE).
[IGT#28]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/28
[IGT#6]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/6
[fdo#108145]: https://bugs.freedesktop.org/show_bug.cgi?id=108145
[fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
[fdo#109276]: https://bugs.freedesktop.org/show_bug.cgi?id=109276
[fdo#109441]: https://bugs.freedesktop.org/show_bug.cgi?id=109441
[fdo#109642]: https://bugs.freedesktop.org/show_bug.cgi?id=109642
[fdo#110841]: https://bugs.freedesktop.org/show_bug.cgi?id=110841
[fdo#111068]: https://bugs.freedesktop.org/show_bug.cgi?id=111068
[fdo#112080]: https://bugs.freedesktop.org/show_bug.cgi?id=112080
[fdo#112146]: https://bugs.freedesktop.org/show_bug.cgi?id=112146
[fdo#112271]: https://bugs.freedesktop.org/show_bug.cgi?id=112271
[i915#1188]: https://gitlab.freedesktop.org/drm/intel/issues/1188
[i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
[i915#300]: https://gitlab.freedesktop.org/drm/intel/issues/300
[i915#31]: https://gitlab.freedesktop.org/drm/intel/issues/31
[i915#413]: https://gitlab.freedesktop.org/drm/intel/issues/413
[i915#54]: https://gitlab.freedesktop.org/drm/intel/issues/54
[i915#616]: https://gitlab.freedesktop.org/drm/intel/issues/616
[i915#69]: https://gitlab.freedesktop.org/drm/intel/issues/69
[i915#694]: https://gitlab.freedesktop.org/drm/intel/issues/694
[i915#716]: https://gitlab.freedesktop.org/drm/intel/issues/716
[i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
[i915#82]: https://gitlab.freedesktop.org/drm/intel/issues/82
[i915#831]: https://gitlab.freedesktop.org/drm/intel/issues/831
[i915#836]: https://gitlab.freedesktop.org/drm/intel/issues/836
[i915#899]: https://gitlab.freedesktop.org/drm/intel/issues/899
Participating hosts (10 -> 10)
------------------------------
No changes in participating hosts
Build changes
-------------
* CI: CI-20190529 -> None
* Linux: CI_DRM_7903 -> Patchwork_16512
CI-20190529: 20190529
CI_DRM_7903: 47b768c475f4a11a48bc43e6228660f8b26a542b @ git://anongit.freedesktop.org/gfx-ci/linux
IGT_5433: 6a96c17f3a1b4e1f90b1a0b0ce42a7219875d1a4 @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools
Patchwork_16512: 1b0c23fd95fa68e8be067333049dd94c47346579 @ git://anongit.freedesktop.org/gfx-ci/linux
piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit
== Logs ==
For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_16512/index.html
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size
2020-02-10 12:52 ` Chris Wilson
@ 2020-02-10 23:02 ` Chris Wilson
0 siblings, 0 replies; 10+ messages in thread
From: Chris Wilson @ 2020-02-10 23:02 UTC (permalink / raw)
To: Matthew Auld; +Cc: Intel Graphics Development
Quoting Chris Wilson (2020-02-10 12:52:26)
> Quoting Matthew Auld (2020-02-10 12:47:35)
> > On Thu, 6 Feb 2020 at 14:10, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> > >
> > > Reduce the amount of work we do to verify client blt correctness as
> > > currently our 0.5s subtests takes about 15s on slower devices!
> > >
> > > v2: Grow the maximum block size until we run out of time
> > >
> > > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
> >
> > Is that bsw-kefka issue(coherency?) something new?
>
> It's something to worry about.
Oh, it's just the vlv/chv clflush bug. Nothing to see here...
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size
2020-02-10 12:47 ` Matthew Auld
@ 2020-02-10 12:52 ` Chris Wilson
2020-02-10 23:02 ` Chris Wilson
0 siblings, 1 reply; 10+ messages in thread
From: Chris Wilson @ 2020-02-10 12:52 UTC (permalink / raw)
To: Matthew Auld; +Cc: Intel Graphics Development
Quoting Matthew Auld (2020-02-10 12:47:35)
> On Thu, 6 Feb 2020 at 14:10, Chris Wilson <chris@chris-wilson.co.uk> wrote:
> >
> > Reduce the amount of work we do to verify client blt correctness as
> > currently our 0.5s subtests takes about 15s on slower devices!
> >
> > v2: Grow the maximum block size until we run out of time
> >
> > Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
>
> Is that bsw-kefka issue(coherency?) something new?
It's something to worry about.
This version is much harsher on the shared-gtt as it doesn't try to
relax our requests to allow parallelism between threads. We can reduce
the per-thread sizes again to sweep the problem under the carpet for a
bit longer.
-Chris
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size
2020-02-06 14:09 [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size Chris Wilson
@ 2020-02-10 12:47 ` Matthew Auld
2020-02-10 12:52 ` Chris Wilson
0 siblings, 1 reply; 10+ messages in thread
From: Matthew Auld @ 2020-02-10 12:47 UTC (permalink / raw)
To: Chris Wilson; +Cc: Intel Graphics Development
On Thu, 6 Feb 2020 at 14:10, Chris Wilson <chris@chris-wilson.co.uk> wrote:
>
> Reduce the amount of work we do to verify client blt correctness as
> currently our 0.5s subtests takes about 15s on slower devices!
>
> v2: Grow the maximum block size until we run out of time
>
> Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Is that bsw-kefka issue(coherency?) something new?
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply [flat|nested] 10+ messages in thread
* [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size
@ 2020-02-06 14:09 Chris Wilson
2020-02-10 12:47 ` Matthew Auld
0 siblings, 1 reply; 10+ messages in thread
From: Chris Wilson @ 2020-02-06 14:09 UTC (permalink / raw)
To: intel-gfx
Reduce the amount of work we do to verify client blt correctness as
currently our 0.5s subtest takes about 15s on slower devices!
v2: Grow the maximum block size until we run out of time
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
.../i915/gem/selftests/i915_gem_object_blt.c | 54 +++++++++++--------
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe46715..b98705821b84 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total;
int err;
ctx = thread->ctx;
@@ -225,10 +226,11 @@ static int igt_fill_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
@@ -238,11 +240,9 @@ static int igt_fill_blt_thread(void *arg)
* If we have a tiny shared address space, like for the GGTT
* then we can't be too greedy.
*/
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, ce->vm->total / 2);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -276,13 +276,16 @@ static int igt_fill_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -293,6 +296,8 @@ static int igt_fill_blt_thread(void *arg)
i915_gem_object_unpin_map(obj);
i915_gem_object_put(obj);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
@@ -319,6 +324,7 @@ static int igt_copy_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total;
int err;
ctx = thread->ctx;
@@ -334,20 +340,19 @@ static int igt_copy_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, ce->vm->total / 2);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -397,13 +402,16 @@ static int igt_copy_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(dst);
- err = i915_gem_object_set_to_cpu_domain(dst, false);
- i915_gem_object_unlock(dst);
+ err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -416,6 +424,8 @@ static int igt_copy_blt_thread(void *arg)
i915_gem_object_put(src);
i915_gem_object_put(dst);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
--
2.25.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size
2020-02-06 0:33 Chris Wilson
@ 2020-02-06 12:52 ` Chris Wilson
0 siblings, 0 replies; 10+ messages in thread
From: Chris Wilson @ 2020-02-06 12:52 UTC (permalink / raw)
To: intel-gfx
Reduce the amount of work we do to verify client blt correctness as
currently our 0.5s subtests take about 15s on slower devices!
v2: Grow the maximum block size until we run out of time
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
.../i915/gem/selftests/i915_gem_object_blt.c | 54 +++++++++++--------
1 file changed, 32 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe46715..b98705821b84 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total;
int err;
ctx = thread->ctx;
@@ -225,10 +226,11 @@ static int igt_fill_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
@@ -238,11 +240,9 @@ static int igt_fill_blt_thread(void *arg)
* If we have a tiny shared address space, like for the GGTT
* then we can't be too greedy.
*/
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, ce->vm->total / 2);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -276,13 +276,16 @@ static int igt_fill_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -293,6 +296,8 @@ static int igt_fill_blt_thread(void *arg)
i915_gem_object_unpin_map(obj);
i915_gem_object_put(obj);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
@@ -319,6 +324,7 @@ static int igt_copy_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total;
int err;
ctx = thread->ctx;
@@ -334,20 +340,19 @@ static int igt_copy_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, ce->vm->total / 2);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -397,13 +402,16 @@ static int igt_copy_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(dst);
- err = i915_gem_object_set_to_cpu_domain(dst, false);
- i915_gem_object_unlock(dst);
+ err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -416,6 +424,8 @@ static int igt_copy_blt_thread(void *arg)
i915_gem_object_put(src);
i915_gem_object_put(dst);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
--
2.25.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size
2020-02-06 3:33 [Intel-gfx] ✗ Fi.CI.BAT: failure for " Patchwork
@ 2020-02-06 9:20 ` Chris Wilson
0 siblings, 0 replies; 10+ messages in thread
From: Chris Wilson @ 2020-02-06 9:20 UTC (permalink / raw)
To: intel-gfx
Reduce the amount of work we do to verify client blt correctness as
currently our 0.5s subtests take about 15s on slower devices!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
.../i915/gem/selftests/i915_gem_object_blt.c | 40 ++++++++++++-------
1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe46715..674f4298f9a6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -226,7 +226,9 @@ static int igt_fill_blt_thread(void *arg)
GEM_BUG_ON(IS_ERR(ce));
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_block_size = SZ_64M; /* max S16_MAX * PAGE_SIZE */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
u64 total = ce->vm->total;
u32 phys_sz;
@@ -240,9 +242,10 @@ static int igt_fill_blt_thread(void *arg)
*/
if (i915_is_ggtt(ce->vm))
total = div64_u64(total, thread->n_cpus);
+ total = min_t(u64, total, max_block_size);
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ sz = i915_prandom_u32_max_state(total, prng);
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -276,13 +279,16 @@ static int igt_fill_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, I915_WAIT_ALL, HZ / 2);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -335,7 +341,9 @@ static int igt_copy_blt_thread(void *arg)
GEM_BUG_ON(IS_ERR(ce));
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_block_size = SZ_64M; /* max S16_MAX * PAGE_SIZE */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
u64 total = ce->vm->total;
u32 phys_sz;
@@ -345,9 +353,10 @@ static int igt_copy_blt_thread(void *arg)
if (i915_is_ggtt(ce->vm))
total = div64_u64(total, thread->n_cpus);
+ total = min_t(u64, total, max_block_size);
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ sz = i915_prandom_u32_max_state(total, prng);
+ phys_sz = sz % max_phys_size;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
@@ -397,13 +406,16 @@ static int igt_copy_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(dst);
- err = i915_gem_object_set_to_cpu_domain(dst, false);
- i915_gem_object_unlock(dst);
+ err = i915_gem_object_wait(dst, I915_WAIT_ALL, HZ / 2);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
--
2.25.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size
@ 2020-02-06 0:33 Chris Wilson
2020-02-06 12:52 ` Chris Wilson
0 siblings, 1 reply; 10+ messages in thread
From: Chris Wilson @ 2020-02-06 0:33 UTC (permalink / raw)
To: intel-gfx
Reduce the amount of work we do to verify client blt correctness as
currently our 0.5s subtests take about 15s on slower devices!
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
.../i915/gem/selftests/i915_gem_object_blt.c | 24 ++++++++++++-------
1 file changed, 15 insertions(+), 9 deletions(-)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe46715..cebbe3c3ca86 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -226,7 +226,7 @@ static int igt_fill_blt_thread(void *arg)
GEM_BUG_ON(IS_ERR(ce));
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ const u32 max_block_size = SZ_64M; /* max S16_MAX * PAGE_SIZE */
u32 val = prandom_u32_state(prng);
u64 total = ce->vm->total;
u32 phys_sz;
@@ -276,13 +276,16 @@ static int igt_fill_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, I915_WAIT_ALL, HZ / 2);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -335,7 +338,7 @@ static int igt_copy_blt_thread(void *arg)
GEM_BUG_ON(IS_ERR(ce));
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ const u32 max_block_size = SZ_64M; /* max S16_MAX * PAGE_SIZE */
u32 val = prandom_u32_state(prng);
u64 total = ce->vm->total;
u32 phys_sz;
@@ -397,13 +400,16 @@ static int igt_copy_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(dst);
- err = i915_gem_object_set_to_cpu_domain(dst, false);
- i915_gem_object_unlock(dst);
+ err = i915_gem_object_wait(dst, I915_WAIT_ALL, HZ / 2);
if (err)
goto err_unpin;
for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) {
+ clflush(&vaddr[i]);
+ mb();
+ }
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
--
2.25.0
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx
^ permalink raw reply related [flat|nested] 10+ messages in thread
end of thread, other threads:[~2020-02-12 10:49 UTC | newest]
Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-02-10 23:10 [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size Chris Wilson
2020-02-11 0:10 ` [Intel-gfx] ✓ Fi.CI.BAT: success for drm/i915/selftests: Trim blitter block size (rev5) Patchwork
2020-02-12 10:49 ` [Intel-gfx] ✓ Fi.CI.IGT: " Patchwork
-- strict thread matches above, loose matches on Subject: below --
2020-02-06 14:09 [Intel-gfx] [PATCH] drm/i915/selftests: Trim blitter block size Chris Wilson
2020-02-10 12:47 ` Matthew Auld
2020-02-10 12:52 ` Chris Wilson
2020-02-10 23:02 ` Chris Wilson
2020-02-06 3:33 [Intel-gfx] ✗ Fi.CI.BAT: failure for " Patchwork
2020-02-06 9:20 ` [Intel-gfx] [PATCH] " Chris Wilson
2020-02-06 0:33 Chris Wilson
2020-02-06 12:52 ` Chris Wilson
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).