All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH i-g-t] igt/gem_fence_upload: Stabilise the test for CI
@ 2017-05-31 10:28 Tvrtko Ursulin
  2017-05-31 12:45 ` Chris Wilson
  0 siblings, 1 reply; 3+ messages in thread
From: Tvrtko Ursulin @ 2017-05-31 10:28 UTC (permalink / raw)
  To: Intel-gfx

From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

Most of the subtest were failing on my SKL GT2 and on the
various CI systems as well. Try to fix that by different
tweaks per subtests:

performance:

We cannot say how big the performance drop will be once the
fences are contented, just that there will be one, so modify
the assert accordingly.

thread-performance-*:

Similar to "performance", but also the peak performance to
compare against seems to be num_cpu dependant which the
biggest aggregate throughput with num_cpu clients.

Also, writes are much faster than reads which was making
the "-write" subtest a bit unstable. Increased the number
of iterations to hopefully stabilise this.

thread-contention:

Same as "performance" - we don't know how big the drop to
expect.

wc-contention:

This subtest should not expect a tiled performance drop since
CPU WC maps are under test. Change a few things:

1.
Number of iterations was to small causing result instability.

2.
There seems to be a performance valley with num_threads ==
num_cpus so do a pre-warm step in those cases which seem to fix
it.

3.
Only assert that the final step is slower than the reference,
and for the reference take the single thred result which looks
to be the fastest.

4.
Check that linear vs tiled results in each step are within
15% tolerance to verify the above attempts to stabilise the
test.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/gem_fence_upload.c | 51 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 33 insertions(+), 18 deletions(-)

diff --git a/tests/gem_fence_upload.c b/tests/gem_fence_upload.c
index 7d9acdc0fcb3..d656d4ea70f8 100644
--- a/tests/gem_fence_upload.c
+++ b/tests/gem_fence_upload.c
@@ -103,7 +103,7 @@ static void performance(void)
 
 	errno = 0;
 	igt_assert(linear[1] > 0.75 * linear[0]);
-	igt_assert(tiled[1] > 0.75 * tiled[0]);
+	igt_assert(tiled[1] < tiled[0]);
 }
 
 struct thread_performance {
@@ -155,7 +155,8 @@ static const char *direction_string(unsigned mask)
 }
 static void thread_performance(unsigned mask)
 {
-	const int loops = 4096;
+	const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+	const int loops = (mask & READ) ? 4096 : 32768;
 	int n, count;
 	int fd, num_fences;
 	double linear[2], tiled[2];
@@ -164,9 +165,11 @@ static void thread_performance(unsigned mask)
 
 	num_fences = gem_available_fences(fd);
 	igt_require(num_fences > 0);
+	igt_require(num_fences > ncpus);
 
 	for (count = 2; count < 4*num_fences; count *= 2) {
 		const int nthreads = (mask & READ ? count : 0) + (mask & WRITE ? count : 0);
+		const int idx = ncpus != count;
 		struct timeval start, end;
 		struct thread_performance readers[count];
 		struct thread_performance writers[count];
@@ -209,8 +212,8 @@ static void thread_performance(unsigned mask)
 		}
 		gettimeofday(&end, NULL);
 
-		linear[count != 2] = nthreads * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
-		igt_info("%s rate for %d linear surfaces, %d threads:	%7.3fMiB/s\n", direction_string(mask), count, nthreads, linear[count != 2]);
+		linear[idx] = nthreads * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
+		igt_info("%s rate for %d linear surfaces, %d threads:	%7.3fMiB/s\n", direction_string(mask), count, nthreads, linear[idx]);
 
 		for (n = 0; n < count; n++)
 			gem_set_tiling(fd, handle[n], I915_TILING_X, 1024);
@@ -230,8 +233,8 @@ static void thread_performance(unsigned mask)
 		}
 		gettimeofday(&end, NULL);
 
-		tiled[count != 2] = nthreads * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
-		igt_info("%s rate for %d tiled surfaces, %d threads:	%7.3fMiB/s\n", direction_string(mask), count, nthreads, tiled[count != 2]);
+		tiled[idx] = nthreads * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
+		igt_info("%s rate for %d tiled surfaces, %d threads:	%7.3fMiB/s\n", direction_string(mask), count, nthreads, tiled[idx]);
 
 		for (n = 0; n < count; n++) {
 			munmap(ptr[n], OBJECT_SIZE);
@@ -241,7 +244,7 @@ static void thread_performance(unsigned mask)
 
 	errno = 0;
 	igt_assert(linear[1] > 0.75 * linear[0]);
-	igt_assert(tiled[1] > 0.75 * tiled[0]);
+	igt_assert(tiled[1] < tiled[0]);
 }
 
 struct thread_contention {
@@ -329,13 +332,13 @@ static void thread_contention(void)
 
 	errno = 0;
 	igt_assert(linear[1] > 0.75 * linear[0]);
-	igt_assert(tiled[1] > 0.75 * tiled[0]);
+	igt_assert(tiled[1] < tiled[0]);
 }
 
 static void wc_contention(void)
 {
-	const int loops = 4096;
-	int n, count;
+	const int ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+	int n, count, order;
 	int fd, num_fences;
 	double linear[2], tiled[2];
 
@@ -344,8 +347,13 @@ static void wc_contention(void)
 
 	num_fences = gem_available_fences(fd);
 	igt_require(num_fences > 0);
+	igt_require(num_fences > ncpus);
 
-	for (count = 1; count < 4*num_fences; count *= 2) {
+	for (count = 1, order = log2(num_fences * 2) + 1;
+	     count < 4 * num_fences;
+	     count *= 2, order--) {
+		const int loops = 4096 * order * 2;
+		const int idx = 1 != count;
 		struct timeval start, end;
 		struct thread_contention threads[count];
 
@@ -355,15 +363,20 @@ static void wc_contention(void)
 			threads[n].fd = fd;
 		}
 
+		if (count <= ncpus / 2) {
+			for (n = 0; n < count; n++)
+				pthread_create(&threads[n].thread, NULL, wc_mmap, &threads[n]);
+			for (n = 0; n < count; n++)
+				pthread_join(threads[n].thread, NULL);
+		}
 		gettimeofday(&start, NULL);
 		for (n = 0; n < count; n++)
 			pthread_create(&threads[n].thread, NULL, wc_mmap, &threads[n]);
 		for (n = 0; n < count; n++)
 			pthread_join(threads[n].thread, NULL);
 		gettimeofday(&end, NULL);
-
-		linear[count != 2] = count * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
-		igt_info("Contended upload rate for %d linear threads/wc:	%7.3fMiB/s\n", count, linear[count != 2]);
+		linear[idx] = count * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
+		igt_info("Contended upload rate for %d linear threads/wc:	%7.3fMiB/s\n", count, linear[idx]);
 
 		for (n = 0; n < count; n++)
 			gem_set_tiling(fd, threads[n].handle, I915_TILING_X, 1024);
@@ -375,17 +388,19 @@ static void wc_contention(void)
 			pthread_join(threads[n].thread, NULL);
 		gettimeofday(&end, NULL);
 
-		tiled[count != 2] = count * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
-		igt_info("Contended upload rate for %d tiled threads/wc:	%7.3fMiB/s\n", count, tiled[count != 2]);
+		tiled[idx] = count * loops / elapsed(&start, &end) / (OBJECT_SIZE / 4096);
+		igt_info("Contended upload rate for %d tiled threads/wc:	%7.3fMiB/s\n", count, tiled[idx]);
 
 		for (n = 0; n < count; n++) {
 			gem_close(fd, threads[n].handle);
 		}
+
+		igt_require(fabs((linear[idx] - tiled[idx]) / linear[idx]) < 0.15);
 	}
 
 	errno = 0;
-	igt_assert(linear[1] > 0.75 * linear[0]);
-	igt_assert(tiled[1] > 0.75 * tiled[0]);
+	igt_assert(linear[1] < linear[0]);
+	igt_assert(tiled[1] < tiled[0]);
 }
 
 igt_main
-- 
2.9.4

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2017-05-31 13:30 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-31 10:28 [PATCH i-g-t] igt/gem_fence_upload: Stabilise the test for CI Tvrtko Ursulin
2017-05-31 12:45 ` Chris Wilson
2017-05-31 13:30   ` Tvrtko Ursulin

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.