* [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge
@ 2017-11-23 13:22 Matthew Auld
  2017-11-23 13:22 ` [PATCH 2/2] drm/i915/selftests: test descending addresses Matthew Auld
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Matthew Auld @ 2017-11-23 13:22 UTC (permalink / raw)
  To: intel-gfx

Rather than repeat the test for each engine, which takes a long time,
let's try alternating between the engines in some randomized
order.

v2: fix gen2 blunder
    fix !order blunder
    more cunning permutation construction!
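
For anyone skimming, a rough userspace model of the selection scheme the
diff below implements (illustrative only: NUM_ENGINES, the srand() shuffle
and the printf() stand in for I915_NUM_ENGINES, i915_random_order() and the
real submission path):

  /* Build one permutation of n * NUM_ENGINES indices up front, then walk
   * it, folding each entry back onto a real engine with '% n'.
   */
  #include <stdio.h>
  #include <stdlib.h>

  #define NUM_ENGINES 5

  int main(void)
  {
      const int n = 3;                 /* engines that can store a dword */
      const int len = n * NUM_ENGINES; /* enlarged permutation */
      int order[NUM_ENGINES * NUM_ENGINES];
      int i;

      for (i = 0; i < len; i++)
          order[i] = i;
      srand(42);
      for (i = len - 1; i > 0; i--) {  /* Fisher-Yates shuffle */
          int j = rand() % (i + 1);
          int tmp = order[i]; order[i] = order[j]; order[j] = tmp;
      }

      /* each test iteration takes the next slot; '% n' means the same
       * engine can be fed several times in succession */
      for (i = 0; i < len; i++)
          printf("iteration %2d -> engine %d\n", i, order[i] % n);

      return 0;
  }

In the test itself the index wraps with i = (i + 1) % (n * I915_NUM_ENGINES),
since the offset loop below can outlast a single pass over the permutation.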

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/huge_pages.c | 111 +++++++++++++++++-----------
 1 file changed, 66 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index db7a0a1f2960..83b3a27370a4 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -27,6 +27,7 @@
 #include <linux/prime_numbers.h>
 
 #include "mock_drm.h"
+#include "i915_random.h"
 
 static const unsigned int page_sizes[] = {
 	I915_GTT_PAGE_SIZE_2M,
@@ -1044,7 +1045,10 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	unsigned int max_page_size;
@@ -1052,6 +1056,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	u64 max;
 	u64 num;
 	u64 size;
+	int *order;
+	int i, n;
 	int err = 0;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -1067,67 +1073,81 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
+	n = 0;
 	for_each_engine(engine, i915, id) {
-		IGT_TIMEOUT(end_time);
-
 		if (!intel_engine_can_store_dword(engine)) {
-			pr_info("store-dword-imm not supported on engine=%u\n",
-				id);
+			pr_info("store-dword-imm not supported on engine=%u\n", id);
 			continue;
 		}
+		engines[n++] = engine;
+	}
 
-		/*
-		 * Try various offsets until we timeout -- we want to avoid
-		 * issues hidden by effectively always using offset = 0.
-		 */
-		for_each_prime_number_from(num, 0, max) {
-			u64 offset = num * max_page_size;
-			u32 dword;
+	if (!n)
+		return 0;
 
-			err = i915_vma_unbind(vma);
-			if (err)
-				goto out_vma_close;
+	/*
+	 * To keep things interesting when alternating between engines in our
+	 * randomized order, lets also make feeding to the same engine a few
+	 * times in succession a possibility by enlarging the permutation array.
+	 */
+	order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+	if (!order)
+		return -ENOMEM;
 
-			err = i915_vma_pin(vma, size, max_page_size, flags | offset);
-			if (err) {
-				/*
-				 * The ggtt may have some pages reserved so
-				 * refrain from erroring out.
-				 */
-				if (err == -ENOSPC && i915_is_ggtt(vm)) {
-					err = 0;
-					continue;
-				}
+	/*
+	 * Try various offsets until we timeout -- we want to avoid
+	 * issues hidden by effectively always using offset = 0.
+	 */
+	i = 0;
+	for_each_prime_number_from(num, 0, max) {
+		u64 offset = num * max_page_size;
+		u32 dword;
 
-				goto out_vma_close;
+		err = i915_vma_unbind(vma);
+		if (err)
+			goto out_vma_close;
+
+		err = i915_vma_pin(vma, size, max_page_size, flags | offset);
+		if (err) {
+			/*
+			 * The ggtt may have some pages reserved so
+			 * refrain from erroring out.
+			 */
+			if (err == -ENOSPC && i915_is_ggtt(vm)) {
+				err = 0;
+				continue;
 			}
 
-			err = igt_check_page_sizes(vma);
-			if (err)
-				goto out_vma_unpin;
+			goto out_vma_close;
+		}
 
-			dword = offset_in_page(num) / 4;
+		err = igt_check_page_sizes(vma);
+		if (err)
+			goto out_vma_unpin;
 
-			err = gpu_write(vma, ctx, engine, dword, num + 1);
-			if (err) {
-				pr_err("gpu-write failed at offset=%llx", offset);
-				goto out_vma_unpin;
-			}
+		dword = offset_in_page(num) / 4;
 
-			err = cpu_check(obj, dword, num + 1);
-			if (err) {
-				pr_err("cpu-check failed at offset=%llx", offset);
-				goto out_vma_unpin;
-			}
+		engine = engines[order[i] % n];
+		i = (i + 1) % (n * I915_NUM_ENGINES);
 
-			i915_vma_unpin(vma);
+		err = gpu_write(vma, ctx, engine, dword, num + 1);
+		if (err) {
+			pr_err("gpu-write failed at offset=%llx", offset);
+			goto out_vma_unpin;
+		}
 
-			if (num > 0 &&
-			    igt_timeout(end_time,
-					"%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
-					__func__, id, offset, max_page_size))
-				break;
+		err = cpu_check(obj, dword, num + 1);
+		if (err) {
+			pr_err("cpu-check failed at offset=%llx", offset);
+			goto out_vma_unpin;
 		}
+
+		i915_vma_unpin(vma);
+
+		if (igt_timeout(end_time,
+				"%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
+				__func__, engine->id, offset, max_page_size))
+			break;
 	}
 
 out_vma_unpin:
@@ -1135,6 +1155,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 		i915_vma_unpin(vma);
 out_vma_close:
 	i915_vma_close(vma);
+	kfree(order);
 
 	return err;
 }
-- 
2.14.3


* [PATCH 2/2] drm/i915/selftests: test descending addresses
  2017-11-23 13:22 [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge Matthew Auld
@ 2017-11-23 13:22 ` Matthew Auld
  2017-11-23 13:30   ` Chris Wilson
  2017-11-23 13:27 ` [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge Chris Wilson
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 9+ messages in thread
From: Matthew Auld @ 2017-11-23 13:22 UTC (permalink / raw)
  To: intel-gfx

For igt_write_huge, make sure the higher gtt offsets don't feel left out,
which is especially true when dealing with the 48b PPGTT, where we
time out long before we are able to exhaust the address space.
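
To make the new pattern concrete, a small standalone sketch of the offsets
each iteration now touches (all numbers purely illustrative; the nums[]
array stands in for for_each_prime_number_from(num, 0, max)): every value
of num probes both the bottom and the top of the usable range, so the high
end of a 48b address space gets exercised even if the timeout fires early.

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
      const uint64_t max_page_size = 2ull << 20; /* 2M */
      const uint64_t vm_total = 1ull << 48;      /* 48b PPGTT */
      const uint64_t obj_size = 4ull << 20;
      const uint64_t max = (vm_total - obj_size) / max_page_size;
      const uint64_t nums[] = { 0, 2, 3, 5, 7, 11, 13 };
      unsigned int i;

      for (i = 0; i < sizeof(nums) / sizeof(nums[0]); i++) {
          uint64_t num = nums[i];

          printf("num=%2llu offset_low=%#16llx offset_high=%#16llx\n",
                 (unsigned long long)num,
                 (unsigned long long)(num * max_page_size),
                 (unsigned long long)((max - num) * max_page_size));
      }

      return 0;
  }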

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/huge_pages.c | 125 ++++++++++++++++------------
 1 file changed, 71 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index 83b3a27370a4..4522ce709157 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -1040,6 +1040,62 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val)
 	return err;
 }
 
+static int __igt_write_huge(struct i915_gem_context *ctx,
+			    struct intel_engine_cs *engine,
+			    struct drm_i915_gem_object *obj,
+			    u64 size, u64 offset,
+			    u32 dword, u32 val)
+{
+	struct drm_i915_private *i915 = to_i915(obj->base.dev);
+	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+	struct i915_vma *vma;
+	int err;
+
+	vma = i915_vma_instance(obj, vm, NULL);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
+
+	err = i915_vma_unbind(vma);
+	if (err)
+		goto out_vma_close;
+
+	err = i915_vma_pin(vma, size, 0, flags | offset);
+	if (err) {
+		/*
+		 * The ggtt may have some pages reserved so
+		 * refrain from erroring out.
+		 */
+		if (err == -ENOSPC && i915_is_ggtt(vm))
+			err = 0;
+
+		goto out_vma_close;
+	}
+
+	err = igt_check_page_sizes(vma);
+	if (err)
+		goto out_vma_unpin;
+
+	err = gpu_write(vma, ctx, engine, dword, val);
+	if (err) {
+		pr_err("gpu-write failed at offset=%llx\n", offset);
+		goto out_vma_unpin;
+	}
+
+	err = cpu_check(obj, dword, val);
+	if (err) {
+		pr_err("cpu-check failed at offset=%llx\n", offset);
+		goto out_vma_unpin;
+	}
+
+out_vma_unpin:
+	i915_vma_unpin(vma);
+out_vma_close:
+	i915_vma_close(vma);
+
+	return err;
+}
+
 static int igt_write_huge(struct i915_gem_context *ctx,
 			  struct drm_i915_gem_object *obj)
 {
@@ -1048,9 +1104,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
 	I915_RND_STATE(prng);
-	IGT_TIMEOUT(end_time);
-	struct i915_vma *vma;
-	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
+	unsigned long end_time = jiffies + i915_selftest.timeout_jiffies * 2;
 	unsigned int max_page_size;
 	unsigned int id;
 	u64 max;
@@ -1069,10 +1123,6 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg);
 	max = div_u64((vm->total - size), max_page_size);
 
-	vma = i915_vma_instance(obj, vm, NULL);
-	if (IS_ERR(vma))
-		return PTR_ERR(vma);
-
 	n = 0;
 	for_each_engine(engine, i915, id) {
 		if (!intel_engine_can_store_dword(engine)) {
@@ -1095,66 +1145,33 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 		return -ENOMEM;
 
 	/*
-	 * Try various offsets until we timeout -- we want to avoid
-	 * issues hidden by effectively always using offset = 0.
+	 * Try various offsets in an ascending/descending fashion until we
+	 * timeout -- we want to avoid issues hidden by effectively always using
+	 * offset = 0.
 	 */
 	i = 0;
 	for_each_prime_number_from(num, 0, max) {
-		u64 offset = num * max_page_size;
-		u32 dword;
-
-		err = i915_vma_unbind(vma);
-		if (err)
-			goto out_vma_close;
-
-		err = i915_vma_pin(vma, size, max_page_size, flags | offset);
-		if (err) {
-			/*
-			 * The ggtt may have some pages reserved so
-			 * refrain from erroring out.
-			 */
-			if (err == -ENOSPC && i915_is_ggtt(vm)) {
-				err = 0;
-				continue;
-			}
-
-			goto out_vma_close;
-		}
-
-		err = igt_check_page_sizes(vma);
-		if (err)
-			goto out_vma_unpin;
-
-		dword = offset_in_page(num) / 4;
+		u64 offset_low = num * max_page_size;
+		u64 offset_high = (max - num) * max_page_size;
+		u32 dword = offset_in_page(num) / 4;
 
 		engine = engines[order[i] % n];
 		i = (i + 1) % (n * I915_NUM_ENGINES);
 
-		err = gpu_write(vma, ctx, engine, dword, num + 1);
-		if (err) {
-			pr_err("gpu-write failed at offset=%llx", offset);
-			goto out_vma_unpin;
-		}
-
-		err = cpu_check(obj, dword, num + 1);
-		if (err) {
-			pr_err("cpu-check failed at offset=%llx", offset);
-			goto out_vma_unpin;
-		}
+		err = __igt_write_huge(ctx, engine, obj, size, offset_low, dword, num + 1);
+		if (err)
+			break;
 
-		i915_vma_unpin(vma);
+		err = __igt_write_huge(ctx, engine, obj, size, offset_high, dword, num + 1);
+		if (err)
+			break;
 
 		if (igt_timeout(end_time,
-				"%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
-				__func__, engine->id, offset, max_page_size))
+				"%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n",
+				__func__, engine->id, offset_low, offset_high, max_page_size))
 			break;
 	}
 
-out_vma_unpin:
-	if (i915_vma_is_pinned(vma))
-		i915_vma_unpin(vma);
-out_vma_close:
-	i915_vma_close(vma);
 	kfree(order);
 
 	return err;
-- 
2.14.3


* Re: [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge
  2017-11-23 13:22 [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge Matthew Auld
  2017-11-23 13:22 ` [PATCH 2/2] drm/i915/selftests: test descending addresses Matthew Auld
@ 2017-11-23 13:27 ` Chris Wilson
  2017-11-23 14:10 ` ✓ Fi.CI.BAT: success for series starting with [1/2] " Patchwork
  2017-11-23 16:01 ` ✓ Fi.CI.IGT: " Patchwork
  3 siblings, 0 replies; 9+ messages in thread
From: Chris Wilson @ 2017-11-23 13:27 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx

Quoting Matthew Auld (2017-11-23 13:22:57)
> Rather than repeat the test for each engine, which takes a long time,
> let's try alternating between the engines in some randomized
> order.
> 
> v2: fix gen2 blunder
>     fix !order blunder
>     more cunning permutation construction!
> 
> Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>

lgtm,
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris

* Re: [PATCH 2/2] drm/i915/selftests: test descending addresses
  2017-11-23 13:22 ` [PATCH 2/2] drm/i915/selftests: test descending addresses Matthew Auld
@ 2017-11-23 13:30   ` Chris Wilson
  0 siblings, 0 replies; 9+ messages in thread
From: Chris Wilson @ 2017-11-23 13:30 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx

Quoting Matthew Auld (2017-11-23 13:22:58)
> For igt_write_huge, make sure the higher gtt offsets don't feel left out,
> which is especially true when dealing with the 48b PPGTT, where we
> time out long before we are able to exhaust the address space.
> 
> Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> ---
> @@ -1048,9 +1104,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
>         static struct intel_engine_cs *engines[I915_NUM_ENGINES];
>         struct intel_engine_cs *engine;
>         I915_RND_STATE(prng);
> -       IGT_TIMEOUT(end_time);
> -       struct i915_vma *vma;
> -       unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
> +       unsigned long end_time = jiffies + i915_selftest.timeout_jiffies * 2;

I'm still unconvinced about the need to bump the timeout here, and would
stick with IGT_TIMEOUT() until proven otherwise. (Yes, for the plans to
use various boundaries, we should try to generalise the patterns
employed in selftests/i915_gem_gtt.c. Hmm)
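
For reference, IGT_TIMEOUT() boils down to roughly the following (quoting
the i915_selftest.h definition from memory, so the exact body may differ):

  /* assumed expansion of IGT_TIMEOUT(end_time): */
  unsigned long end_time = jiffies + i915_selftest.timeout_jiffies;

  /* whereas the hunk above open-codes a doubled budget:
   * unsigned long end_time = jiffies + i915_selftest.timeout_jiffies * 2;
   */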

If you use IGT_TIMEOUT() here or explain your reason otherwise,
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
-Chris

* ✓ Fi.CI.BAT: success for series starting with [1/2] drm/i915/selftests: rein in igt_write_huge
  2017-11-23 13:22 [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge Matthew Auld
  2017-11-23 13:22 ` [PATCH 2/2] drm/i915/selftests: test descending addresses Matthew Auld
  2017-11-23 13:27 ` [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge Chris Wilson
@ 2017-11-23 14:10 ` Patchwork
  2017-11-23 16:01 ` ✓ Fi.CI.IGT: " Patchwork
  3 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2017-11-23 14:10 UTC (permalink / raw)
  To: Matthew Auld; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/2] drm/i915/selftests: rein in igt_write_huge
URL   : https://patchwork.freedesktop.org/series/34302/
State : success

== Summary ==

Series 34302v1 series starting with [1/2] drm/i915/selftests: rein in igt_write_huge
https://patchwork.freedesktop.org/api/1.0/series/34302/revisions/1/mbox/

Test kms_pipe_crc_basic:
        Subgroup suspend-read-crc-pipe-b:
                pass       -> INCOMPLETE (fi-snb-2520m) fdo#103713

fdo#103713 https://bugs.freedesktop.org/show_bug.cgi?id=103713

fi-bdw-5557u     total:289  pass:268  dwarn:0   dfail:0   fail:0   skip:21  time:445s
fi-bdw-gvtdvm    total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:460s
fi-blb-e6850     total:289  pass:223  dwarn:1   dfail:0   fail:0   skip:65  time:383s
fi-bsw-n3050     total:289  pass:243  dwarn:0   dfail:0   fail:0   skip:46  time:536s
fi-bwr-2160      total:289  pass:183  dwarn:0   dfail:0   fail:0   skip:106 time:278s
fi-bxt-dsi       total:289  pass:259  dwarn:0   dfail:0   fail:0   skip:30  time:505s
fi-bxt-j4205     total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:508s
fi-byt-j1900     total:289  pass:254  dwarn:0   dfail:0   fail:0   skip:35  time:501s
fi-byt-n2820     total:289  pass:250  dwarn:0   dfail:0   fail:0   skip:39  time:492s
fi-cfl-s2        total:289  pass:263  dwarn:0   dfail:0   fail:0   skip:26  time:623s
fi-elk-e7500     total:289  pass:229  dwarn:0   dfail:0   fail:0   skip:60  time:436s
fi-gdg-551       total:289  pass:178  dwarn:1   dfail:0   fail:1   skip:109 time:267s
fi-glk-1         total:289  pass:261  dwarn:0   dfail:0   fail:0   skip:28  time:540s
fi-hsw-4770      total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:432s
fi-hsw-4770r     total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:439s
fi-ilk-650       total:289  pass:228  dwarn:0   dfail:0   fail:0   skip:61  time:428s
fi-ivb-3520m     total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:484s
fi-ivb-3770      total:289  pass:260  dwarn:0   dfail:0   fail:0   skip:29  time:461s
fi-pnv-d510      total:289  pass:222  dwarn:1   dfail:0   fail:0   skip:66  time:582s
fi-skl-6260u     total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:451s
fi-skl-6600u     total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:555s
fi-skl-6700hq    total:289  pass:263  dwarn:0   dfail:0   fail:0   skip:26  time:564s
fi-skl-6700k     total:289  pass:265  dwarn:0   dfail:0   fail:0   skip:24  time:525s
fi-skl-6770hq    total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:498s
fi-skl-gvtdvm    total:289  pass:266  dwarn:0   dfail:0   fail:0   skip:23  time:461s
fi-snb-2520m     total:246  pass:212  dwarn:0   dfail:0   fail:0   skip:33 
fi-snb-2600      total:289  pass:249  dwarn:0   dfail:0   fail:0   skip:40  time:423s
Blacklisted hosts:
fi-cnl-y         total:238  pass:213  dwarn:0   dfail:0   fail:0   skip:24 
fi-glk-dsi       total:289  pass:259  dwarn:0   dfail:0   fail:0   skip:30  time:498s
fi-kbl-7500u     total:289  pass:264  dwarn:1   dfail:0   fail:0   skip:24  time:481s
fi-kbl-7560u     total:289  pass:270  dwarn:0   dfail:0   fail:0   skip:19  time:534s
fi-kbl-7567u     total:289  pass:269  dwarn:0   dfail:0   fail:0   skip:20  time:480s
fi-kbl-r         total:289  pass:262  dwarn:0   dfail:0   fail:0   skip:27  time:538s

9d399f81694538228a539de9a908cdf9ef00f814 drm-tip: 2017y-11m-23d-12h-28m-12s UTC integration manifest
5210885c40ed drm/i915/selftests: test descending addresses
5af6a3513c9a drm/i915/selftests: rein in igt_write_huge

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_7259/

* ✓ Fi.CI.IGT: success for series starting with [1/2] drm/i915/selftests: rein in igt_write_huge
  2017-11-23 13:22 [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge Matthew Auld
                   ` (2 preceding siblings ...)
  2017-11-23 14:10 ` ✓ Fi.CI.BAT: success for series starting with [1/2] " Patchwork
@ 2017-11-23 16:01 ` Patchwork
  3 siblings, 0 replies; 9+ messages in thread
From: Patchwork @ 2017-11-23 16:01 UTC (permalink / raw)
  To: Matthew Auld; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/2] drm/i915/selftests: rein in igt_write_huge
URL   : https://patchwork.freedesktop.org/series/34302/
State : success

== Summary ==

Test kms_setmode:
        Subgroup basic:
                pass       -> FAIL       (shard-hsw) fdo#99912
Test drv_module_reload:
        Subgroup basic-no-display:
                dmesg-warn -> PASS       (shard-hsw) fdo#102707
Test kms_cursor_legacy:
        Subgroup flip-vs-cursor-legacy:
                pass       -> FAIL       (shard-hsw) fdo#102670 +1
Test drv_suspend:
        Subgroup fence-restore-tiled2untiled-hibernate:
                fail       -> SKIP       (shard-hsw) fdo#103375
Test kms_flip:
        Subgroup blt-flip-vs-panning-interruptible:
                dmesg-warn -> PASS       (shard-hsw)

fdo#99912 https://bugs.freedesktop.org/show_bug.cgi?id=99912
fdo#102707 https://bugs.freedesktop.org/show_bug.cgi?id=102707
fdo#102670 https://bugs.freedesktop.org/show_bug.cgi?id=102670
fdo#103375 https://bugs.freedesktop.org/show_bug.cgi?id=103375

shard-hsw        total:2667 pass:1533 dwarn:1   dfail:0   fail:11  skip:1122 time:9556s
shard-snb        total:2667 pass:1311 dwarn:2   dfail:0   fail:13  skip:1341 time:8108s

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_7259/shards.html

* [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge
@ 2017-11-23 13:54 Matthew Auld
  0 siblings, 0 replies; 9+ messages in thread
From: Matthew Auld @ 2017-11-23 13:54 UTC (permalink / raw)
  To: intel-gfx

Rather than repeat the test for each engine, which takes a long time,
let's try alternating between the engines in some randomized
order.

v2: fix gen2 blunder
    fix !order blunder
    more cunning permutation construction!

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/huge_pages.c | 111 +++++++++++++++++-----------
 1 file changed, 66 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index db7a0a1f2960..83b3a27370a4 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -27,6 +27,7 @@
 #include <linux/prime_numbers.h>
 
 #include "mock_drm.h"
+#include "i915_random.h"
 
 static const unsigned int page_sizes[] = {
 	I915_GTT_PAGE_SIZE_2M,
@@ -1044,7 +1045,10 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	unsigned int max_page_size;
@@ -1052,6 +1056,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	u64 max;
 	u64 num;
 	u64 size;
+	int *order;
+	int i, n;
 	int err = 0;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -1067,67 +1073,81 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
+	n = 0;
 	for_each_engine(engine, i915, id) {
-		IGT_TIMEOUT(end_time);
-
 		if (!intel_engine_can_store_dword(engine)) {
-			pr_info("store-dword-imm not supported on engine=%u\n",
-				id);
+			pr_info("store-dword-imm not supported on engine=%u\n", id);
 			continue;
 		}
+		engines[n++] = engine;
+	}
 
-		/*
-		 * Try various offsets until we timeout -- we want to avoid
-		 * issues hidden by effectively always using offset = 0.
-		 */
-		for_each_prime_number_from(num, 0, max) {
-			u64 offset = num * max_page_size;
-			u32 dword;
+	if (!n)
+		return 0;
 
-			err = i915_vma_unbind(vma);
-			if (err)
-				goto out_vma_close;
+	/*
+	 * To keep things interesting when alternating between engines in our
+	 * randomized order, lets also make feeding to the same engine a few
+	 * times in succession a possibility by enlarging the permutation array.
+	 */
+	order = i915_random_order(n * I915_NUM_ENGINES, &prng);
+	if (!order)
+		return -ENOMEM;
 
-			err = i915_vma_pin(vma, size, max_page_size, flags | offset);
-			if (err) {
-				/*
-				 * The ggtt may have some pages reserved so
-				 * refrain from erroring out.
-				 */
-				if (err == -ENOSPC && i915_is_ggtt(vm)) {
-					err = 0;
-					continue;
-				}
+	/*
+	 * Try various offsets until we timeout -- we want to avoid
+	 * issues hidden by effectively always using offset = 0.
+	 */
+	i = 0;
+	for_each_prime_number_from(num, 0, max) {
+		u64 offset = num * max_page_size;
+		u32 dword;
 
-				goto out_vma_close;
+		err = i915_vma_unbind(vma);
+		if (err)
+			goto out_vma_close;
+
+		err = i915_vma_pin(vma, size, max_page_size, flags | offset);
+		if (err) {
+			/*
+			 * The ggtt may have some pages reserved so
+			 * refrain from erroring out.
+			 */
+			if (err == -ENOSPC && i915_is_ggtt(vm)) {
+				err = 0;
+				continue;
 			}
 
-			err = igt_check_page_sizes(vma);
-			if (err)
-				goto out_vma_unpin;
+			goto out_vma_close;
+		}
 
-			dword = offset_in_page(num) / 4;
+		err = igt_check_page_sizes(vma);
+		if (err)
+			goto out_vma_unpin;
 
-			err = gpu_write(vma, ctx, engine, dword, num + 1);
-			if (err) {
-				pr_err("gpu-write failed at offset=%llx", offset);
-				goto out_vma_unpin;
-			}
+		dword = offset_in_page(num) / 4;
 
-			err = cpu_check(obj, dword, num + 1);
-			if (err) {
-				pr_err("cpu-check failed at offset=%llx", offset);
-				goto out_vma_unpin;
-			}
+		engine = engines[order[i] % n];
+		i = (i + 1) % (n * I915_NUM_ENGINES);
 
-			i915_vma_unpin(vma);
+		err = gpu_write(vma, ctx, engine, dword, num + 1);
+		if (err) {
+			pr_err("gpu-write failed at offset=%llx", offset);
+			goto out_vma_unpin;
+		}
 
-			if (num > 0 &&
-			    igt_timeout(end_time,
-					"%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
-					__func__, id, offset, max_page_size))
-				break;
+		err = cpu_check(obj, dword, num + 1);
+		if (err) {
+			pr_err("cpu-check failed at offset=%llx", offset);
+			goto out_vma_unpin;
 		}
+
+		i915_vma_unpin(vma);
+
+		if (igt_timeout(end_time,
+				"%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
+				__func__, engine->id, offset, max_page_size))
+			break;
 	}
 
 out_vma_unpin:
@@ -1135,6 +1155,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 		i915_vma_unpin(vma);
 out_vma_close:
 	i915_vma_close(vma);
+	kfree(order);
 
 	return err;
 }
-- 
2.14.3


* Re: [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge
  2017-11-22 21:21 Matthew Auld
@ 2017-11-22 21:27 ` Chris Wilson
  0 siblings, 0 replies; 9+ messages in thread
From: Chris Wilson @ 2017-11-22 21:27 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx

Quoting Matthew Auld (2017-11-22 21:21:39)
> Rather than repeat the test for each engine, which takes a long time,
> let's try alternating between the engines in some randomized
> order.
> 
> Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> ---
>  drivers/gpu/drm/i915/selftests/huge_pages.c | 102 ++++++++++++++++------------
>  1 file changed, 57 insertions(+), 45 deletions(-)
> 
> diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
> index db7a0a1f2960..4809368dfbbc 100644
> --- a/drivers/gpu/drm/i915/selftests/huge_pages.c
> +++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
> @@ -27,6 +27,7 @@
>  #include <linux/prime_numbers.h>
>  
>  #include "mock_drm.h"
> +#include "i915_random.h"
>  
>  static const unsigned int page_sizes[] = {
>         I915_GTT_PAGE_SIZE_2M,
> @@ -1044,7 +1045,10 @@ static int igt_write_huge(struct i915_gem_context *ctx,
>  {
>         struct drm_i915_private *i915 = to_i915(obj->base.dev);
>         struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
> +       static struct intel_engine_cs *engines[I915_NUM_ENGINES];
>         struct intel_engine_cs *engine;
> +       I915_RND_STATE(prng);
> +       IGT_TIMEOUT(end_time);
>         struct i915_vma *vma;
>         unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
>         unsigned int max_page_size;
> @@ -1052,6 +1056,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
>         u64 max;
>         u64 num;
>         u64 size;
> +       int *order;
> +       int i, n;
>         int err = 0;
>  
>         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
> @@ -1067,67 +1073,72 @@ static int igt_write_huge(struct i915_gem_context *ctx,
>         if (IS_ERR(vma))
>                 return PTR_ERR(vma);
>  
> +       n = 0;
>         for_each_engine(engine, i915, id) {
> -               IGT_TIMEOUT(end_time);
> -
>                 if (!intel_engine_can_store_dword(engine)) {
> -                       pr_info("store-dword-imm not supported on engine=%u\n",
> -                               id);
> +                       pr_info("store-dword-imm not supported on engine=%u\n", id);
>                         continue;
>                 }
> +               engines[n++] = engine;
> +       }
>  
> -               /*
> -                * Try various offsets until we timeout -- we want to avoid
> -                * issues hidden by effectively always using offset = 0.
> -                */
> -               for_each_prime_number_from(num, 0, max) {
> -                       u64 offset = num * max_page_size;
> -                       u32 dword;
> +       GEM_BUG_ON(!n);

Think gen2! if (!n) return 0;

> +       order = i915_random_order(n, &prng);

if (!order) return -ENOMEM;

Now, I would be a little more cunning in the construction of the
permutation. Use some_number*n, then engine = order[i] % n;
That way we may test feeding to the same engine a few times in succession,
again exploiting the random pattern.
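
Spelled out (just a sketch of the suggestion; the v2 posted elsewhere in
this thread does exactly this):

  /* as posted: a permutation of n entries, each engine once per pass */
  order = i915_random_order(n, &prng);
  ...
  engine = engines[order[i]];
  i = (i + 1) % n;

  /* suggested: enlarge the permutation so that '% n' can land on the same
   * engine several times in a row */
  order = i915_random_order(n * I915_NUM_ENGINES, &prng);
  ...
  engine = engines[order[i] % n];
  i = (i + 1) % (n * I915_NUM_ENGINES);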

Overall makes a lot of sense, and does seem to be a good compromise.
-Chris

* [PATCH 1/2] drm/i915/selftests: rein in igt_write_huge
@ 2017-11-22 21:21 Matthew Auld
  2017-11-22 21:27 ` Chris Wilson
  0 siblings, 1 reply; 9+ messages in thread
From: Matthew Auld @ 2017-11-22 21:21 UTC (permalink / raw)
  To: intel-gfx

Rather than repeat the test for each engine, which takes a long time,
let's try alternating between the engines in some randomized
order.

Suggested-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 drivers/gpu/drm/i915/selftests/huge_pages.c | 102 ++++++++++++++++------------
 1 file changed, 57 insertions(+), 45 deletions(-)

diff --git a/drivers/gpu/drm/i915/selftests/huge_pages.c b/drivers/gpu/drm/i915/selftests/huge_pages.c
index db7a0a1f2960..4809368dfbbc 100644
--- a/drivers/gpu/drm/i915/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/selftests/huge_pages.c
@@ -27,6 +27,7 @@
 #include <linux/prime_numbers.h>
 
 #include "mock_drm.h"
+#include "i915_random.h"
 
 static const unsigned int page_sizes[] = {
 	I915_GTT_PAGE_SIZE_2M,
@@ -1044,7 +1045,10 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_address_space *vm = ctx->ppgtt ? &ctx->ppgtt->base : &i915->ggtt.base;
+	static struct intel_engine_cs *engines[I915_NUM_ENGINES];
 	struct intel_engine_cs *engine;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
 	struct i915_vma *vma;
 	unsigned int flags = PIN_USER | PIN_OFFSET_FIXED;
 	unsigned int max_page_size;
@@ -1052,6 +1056,8 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	u64 max;
 	u64 num;
 	u64 size;
+	int *order;
+	int i, n;
 	int err = 0;
 
 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
@@ -1067,67 +1073,72 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 	if (IS_ERR(vma))
 		return PTR_ERR(vma);
 
+	n = 0;
 	for_each_engine(engine, i915, id) {
-		IGT_TIMEOUT(end_time);
-
 		if (!intel_engine_can_store_dword(engine)) {
-			pr_info("store-dword-imm not supported on engine=%u\n",
-				id);
+			pr_info("store-dword-imm not supported on engine=%u\n", id);
 			continue;
 		}
+		engines[n++] = engine;
+	}
 
-		/*
-		 * Try various offsets until we timeout -- we want to avoid
-		 * issues hidden by effectively always using offset = 0.
-		 */
-		for_each_prime_number_from(num, 0, max) {
-			u64 offset = num * max_page_size;
-			u32 dword;
+	GEM_BUG_ON(!n);
+	order = i915_random_order(n, &prng);
 
-			err = i915_vma_unbind(vma);
-			if (err)
-				goto out_vma_close;
+	/*
+	 * Try various offsets until we timeout -- we want to avoid
+	 * issues hidden by effectively always using offset = 0.
+	 */
+	i = 0;
+	for_each_prime_number_from(num, 0, max) {
+		u64 offset = num * max_page_size;
+		u32 dword;
 
-			err = i915_vma_pin(vma, size, max_page_size, flags | offset);
-			if (err) {
-				/*
-				 * The ggtt may have some pages reserved so
-				 * refrain from erroring out.
-				 */
-				if (err == -ENOSPC && i915_is_ggtt(vm)) {
-					err = 0;
-					continue;
-				}
+		err = i915_vma_unbind(vma);
+		if (err)
+			goto out_vma_close;
 
-				goto out_vma_close;
+		err = i915_vma_pin(vma, size, max_page_size, flags | offset);
+		if (err) {
+			/*
+			 * The ggtt may have some pages reserved so
+			 * refrain from erroring out.
+			 */
+			if (err == -ENOSPC && i915_is_ggtt(vm)) {
+				err = 0;
+				continue;
 			}
 
-			err = igt_check_page_sizes(vma);
-			if (err)
-				goto out_vma_unpin;
+			goto out_vma_close;
+		}
 
-			dword = offset_in_page(num) / 4;
+		err = igt_check_page_sizes(vma);
+		if (err)
+			goto out_vma_unpin;
 
-			err = gpu_write(vma, ctx, engine, dword, num + 1);
-			if (err) {
-				pr_err("gpu-write failed at offset=%llx", offset);
-				goto out_vma_unpin;
-			}
+		dword = offset_in_page(num) / 4;
 
-			err = cpu_check(obj, dword, num + 1);
-			if (err) {
-				pr_err("cpu-check failed at offset=%llx", offset);
-				goto out_vma_unpin;
-			}
+		engine = engines[order[i]];
+		i = (i + 1) % n;
 
-			i915_vma_unpin(vma);
+		err = gpu_write(vma, ctx, engine, dword, num + 1);
+		if (err) {
+			pr_err("gpu-write failed at offset=%llx", offset);
+			goto out_vma_unpin;
+		}
 
-			if (num > 0 &&
-			    igt_timeout(end_time,
-					"%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
-					__func__, id, offset, max_page_size))
-				break;
+		err = cpu_check(obj, dword, num + 1);
+		if (err) {
+			pr_err("cpu-check failed at offset=%llx", offset);
+			goto out_vma_unpin;
 		}
+
+		i915_vma_unpin(vma);
+
+		if (igt_timeout(end_time,
+				"%s timed out on engine=%u at offset=%llx, max_page_size=%x\n",
+				__func__, engine->id, offset, max_page_size))
+			break;
 	}
 
 out_vma_unpin:
@@ -1135,6 +1146,7 @@ static int igt_write_huge(struct i915_gem_context *ctx,
 		i915_vma_unpin(vma);
 out_vma_close:
 	i915_vma_close(vma);
+	kfree(order);
 
 	return err;
 }
-- 
2.14.3
