All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH I-g-t V2 0/2] Tests: Add test cases based on multi drm_fd to test sync
@ 2014-04-15  2:38 Zhao Yakui
  2014-04-15  2:38 ` [PATCH I-g-t V2 1/2] tests: Add one ring sync case based on multi drm_fd to test ring semaphore sync under multi BSD rings Zhao Yakui
  2014-04-15  2:38 ` [PATCH I-g-t V2 2/2] tests/gem_dummy_reloc_loop: Add one subtest based on multi drm_fd to test CPU<->GPU " Zhao Yakui
  0 siblings, 2 replies; 10+ messages in thread
From: Zhao Yakui @ 2014-04-15  2:38 UTC (permalink / raw)
  To: intel-gfx

This follows Daniel's advice to add the two test cases based on multi drm_fd to 
test the ring sync and CPU<->GPU sync.
The Broadwell GT3 machine has two independent BSD rings that can be used
to process the video commands. This is implemented in kernel driver and transparent
to the user-space. But we still need to check the ring sync and CPU<->GPU sync for
the second BSD ring. Two tests are created based on the multi drm_fds to
test the sync. Multi drm_fd can assure that the second BSD ring has the opportunity
to dispatch the GPU command. 

V1->V2: Follow Daniel's comment to add one subtest instead of one individual
test case, which is used to test the CPU<->GPU sync under multi BSD rings.

Zhao Yakui (2):
  tests: Add one ring sync case based on multi drm_fd to test ring
    semaphore sync under multi BSD rings
  tests/gem_dummy_reloc_loop: Add one subtest based on multi drm_fd to
    test CPU<->GPU sync under multi BSD rings

 tests/Makefile.sources          |    1 +
 tests/gem_dummy_reloc_loop.c    |  102 ++++++++++++++++++++++-
 tests/gem_multi_bsd_sync_loop.c |  172 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 274 insertions(+), 1 deletion(-)
 create mode 100644 tests/gem_multi_bsd_sync_loop.c

-- 
1.7.10.1

^ permalink raw reply	[flat|nested] 10+ messages in thread

* [PATCH I-g-t V2 1/2] tests: Add one ring sync case based on multi drm_fd to test ring semaphore sync under multi BSD rings
  2014-04-15  2:38 [PATCH I-g-t V2 0/2] Tests: Add test cases based on multi drm_fd to test sync Zhao Yakui
@ 2014-04-15  2:38 ` Zhao Yakui
  2014-04-22 11:52   ` Imre Deak
  2014-04-15  2:38 ` [PATCH I-g-t V2 2/2] tests/gem_dummy_reloc_loop: Add one subtest based on multi drm_fd to test CPU<->GPU " Zhao Yakui
  1 sibling, 1 reply; 10+ messages in thread
From: Zhao Yakui @ 2014-04-15  2:38 UTC (permalink / raw)
  To: intel-gfx

The Broadwell GT3 machine has two independent BSD rings in kernel driver while
it is transparent to the user-space driver. In such case it needs to check
the ring sync between the two BSD rings. At the same time it also needs to
check the sync among the second BSD ring and the other rings.

Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
---
 tests/Makefile.sources          |    1 +
 tests/gem_multi_bsd_sync_loop.c |  172 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 173 insertions(+)
 create mode 100644 tests/gem_multi_bsd_sync_loop.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index c957ace..7cd9ca8 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -105,6 +105,7 @@ TESTS_progs = \
 	gem_render_tiled_blits \
 	gem_ring_sync_copy \
 	gem_ring_sync_loop \
+	gem_multi_bsd_sync_loop \
 	gem_seqno_wrap \
 	gem_set_tiling_vs_gtt \
 	gem_set_tiling_vs_pwrite \
diff --git a/tests/gem_multi_bsd_sync_loop.c b/tests/gem_multi_bsd_sync_loop.c
new file mode 100644
index 0000000..7f5b832
--- /dev/null
+++ b/tests/gem_multi_bsd_sync_loop.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Daniel Vetter <daniel.vetter@ffwll.ch> (based on gem_ring_sync_loop_*.c)
+ *    Zhao Yakui <yakui.zhao@intel.com>
+ *
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include "drm.h"
+#include "ioctl_wrappers.h"
+#include "drmtest.h"
+#include "intel_bufmgr.h"
+#include "intel_batchbuffer.h"
+#include "intel_io.h"
+#include "i830_reg.h"
+#include "intel_chipset.h"
+
+static drm_intel_bufmgr *bufmgr;
+struct intel_batchbuffer *batch;
+static drm_intel_bo *target_buffer;
+
+#define NUM_FD	50
+
+static int mfd[NUM_FD];
+static drm_intel_bufmgr *mbufmgr[NUM_FD];
+static struct intel_batchbuffer *mbatch[NUM_FD];
+static drm_intel_bo *mbuffer[NUM_FD];
+
+
+/*
+ * Testcase: Basic check of ring<->ring sync using a dummy reloc
+ *
+ * Extremely efficient at catching missed irqs with semaphores=0 ...
+ */
+
+#define MI_COND_BATCH_BUFFER_END	(0x36<<23 | 1)
+#define MI_DO_COMPARE			(1<<21)
+
+static void
+store_dword_loop(int fd)
+{
+	int i;
+	int num_rings = gem_get_num_rings(fd);
+
+	srandom(0xdeadbeef);
+
+	for (i = 0; i < SLOW_QUICK(0x100000, 10); i++) {
+		int ring, mindex;
+		ring = random() % num_rings + 1;
+		mindex = random() % NUM_FD;
+		batch = mbatch[mindex];
+		if (ring == I915_EXEC_RENDER) {
+			BEGIN_BATCH(4);
+			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
+			OUT_BATCH(0xffffffff); /* compare dword */
+			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
+					I915_GEM_DOMAIN_RENDER, 0);
+			OUT_BATCH(MI_NOOP);
+			ADVANCE_BATCH();
+		} else {
+			BEGIN_BATCH(4);
+			OUT_BATCH(MI_FLUSH_DW | 1);
+			OUT_BATCH(0); /* reserved */
+			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
+					I915_GEM_DOMAIN_RENDER, 0);
+			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
+			ADVANCE_BATCH();
+		}
+		intel_batchbuffer_flush_on_ring(batch, ring);
+	}
+
+	drm_intel_bo_map(target_buffer, 0);
+	// map to force waiting on rendering
+	drm_intel_bo_unmap(target_buffer);
+}
+
+igt_simple_main
+{
+	int fd;
+	int devid;
+	int i;
+
+	fd = drm_open_any();
+	devid = intel_get_drm_devid(fd);
+	gem_require_ring(fd, I915_EXEC_BLT);
+
+
+	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
+	igt_assert(bufmgr);
+	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
+
+
+	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
+	igt_assert(target_buffer);
+
+	/* Create multiple drm_fd and map one gem_object among multi drm_fd */
+	{
+		unsigned int target_flink;
+		char buffer_name[32];
+		if (dri_bo_flink(target_buffer, &target_flink)) {
+			igt_assert(0);
+			printf("fail to get flink for target buffer\n");
+			goto fail_flink;
+		}
+		for (i = 0; i < NUM_FD; i++) {
+			mfd[i] = 0;
+			mbufmgr[i] = NULL;
+			mbuffer[i] = NULL;
+		}
+		for (i = 0; i < NUM_FD; i++) {
+			sprintf(buffer_name, "Target buffer %d\n", i);
+			mfd[i] = drm_open_any();
+			mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
+			igt_assert(mbufmgr[i]);
+			drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
+			mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
+			igt_assert(mbufmgr[i]);
+			mbuffer[i] = intel_bo_gem_create_from_name(mbufmgr[i], buffer_name, target_flink);
+			igt_assert(mbuffer[i]);
+		}
+	}
+
+	store_dword_loop(fd);
+
+	{
+		for (i = 0; i < NUM_FD; i++) {
+			dri_bo_unreference(mbuffer[i]);
+			intel_batchbuffer_free(mbatch[i]);
+			drm_intel_bufmgr_destroy(mbufmgr[i]);
+			close(mfd[i]);
+		}
+	}
+	drm_intel_bo_unreference(target_buffer);
+	drm_intel_bufmgr_destroy(bufmgr);
+
+	close(fd);
+	return;
+
+fail_flink:
+	drm_intel_bo_unreference(target_buffer);
+	drm_intel_bufmgr_destroy(bufmgr);
+
+	close(fd);
+}
-- 
1.7.10.1

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* [PATCH I-g-t V2 2/2] tests/gem_dummy_reloc_loop: Add one subtest based on multi drm_fd to test CPU<->GPU sync under multi BSD rings
  2014-04-15  2:38 [PATCH I-g-t V2 0/2] Tests: Add test cases based on multi drm_fd to test sync Zhao Yakui
  2014-04-15  2:38 ` [PATCH I-g-t V2 1/2] tests: Add one ring sync case based on multi drm_fd to test ring semaphore sync under multi BSD rings Zhao Yakui
@ 2014-04-15  2:38 ` Zhao Yakui
  2014-04-22 12:05   ` Imre Deak
  1 sibling, 1 reply; 10+ messages in thread
From: Zhao Yakui @ 2014-04-15  2:38 UTC (permalink / raw)
  To: intel-gfx

The Broadwell GT3 machine has two independent BSD rings in kernel driver while
it is transparent to the user-space driver. In such case it needs to check
the CPU<->GPU sync for the second BSD ring.

V1->V2: Follow Daniel's comment to add one subtest instead of one individual
test case, which is used to test the CPU<->GPU sync under multi BSD rings

Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
---
 tests/gem_dummy_reloc_loop.c |  102 +++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 101 insertions(+), 1 deletion(-)

diff --git a/tests/gem_dummy_reloc_loop.c b/tests/gem_dummy_reloc_loop.c
index a61b59b..660d8e1 100644
--- a/tests/gem_dummy_reloc_loop.c
+++ b/tests/gem_dummy_reloc_loop.c
@@ -48,6 +48,13 @@ static drm_intel_bufmgr *bufmgr;
 struct intel_batchbuffer *batch;
 static drm_intel_bo *target_buffer;
 
+#define NUM_FD	50
+
+static int mfd[NUM_FD];
+static drm_intel_bufmgr *mbufmgr[NUM_FD];
+static struct intel_batchbuffer *mbatch[NUM_FD];
+static drm_intel_bo *mbuffer[NUM_FD];
+
 /*
  * Testcase: Basic check of ring<->cpu sync using a dummy reloc
  *
@@ -124,6 +131,50 @@ dummy_reloc_loop_random_ring(int num_rings)
 	}
 }
 
+static void
+dummy_reloc_loop_random_ring_multi_fd(int num_rings)
+{
+	int i;
+	struct intel_batchbuffer *saved_batch;
+
+	saved_batch = batch;
+
+	srandom(0xdeadbeef);
+
+	for (i = 0; i < 0x100000; i++) {
+		int mindex;
+		int ring = random() % num_rings + 1;
+
+		mindex = random() % NUM_FD;
+		batch = mbatch[mindex];
+
+		if (ring == I915_EXEC_RENDER) {
+			BEGIN_BATCH(4);
+			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
+			OUT_BATCH(0xffffffff); /* compare dword */
+			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
+					I915_GEM_DOMAIN_RENDER, 0);
+			OUT_BATCH(MI_NOOP);
+			ADVANCE_BATCH();
+		} else {
+			BEGIN_BATCH(4);
+			OUT_BATCH(MI_FLUSH_DW | 1);
+			OUT_BATCH(0); /* reserved */
+			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
+					I915_GEM_DOMAIN_RENDER, 0);
+			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
+			ADVANCE_BATCH();
+		}
+		intel_batchbuffer_flush_on_ring(batch, ring);
+
+		drm_intel_bo_map(target_buffer, 0);
+		// map to force waiting on rendering
+		drm_intel_bo_unmap(target_buffer);
+	}
+
+	batch = saved_batch;
+}
+
 int fd;
 int devid;
 int num_rings;
@@ -133,6 +184,7 @@ igt_main
 	igt_skip_on_simulation();
 
 	igt_fixture {
+		int i;
 		fd = drm_open_any();
 		devid = intel_get_drm_devid(fd);
 		num_rings = gem_get_num_rings(fd);
@@ -148,6 +200,35 @@ igt_main
 
 		target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
 		igt_assert(target_buffer);
+
+		/* Create multi drm_fd and map one gem object to multi gem_contexts */
+		{
+			unsigned int target_flink;
+			char buffer_name[32];
+			if (dri_bo_flink(target_buffer, &target_flink)) {
+				printf("fail to get flink for target buffer\n");
+				igt_assert(0);
+			}
+			for (i = 0; i < NUM_FD; i++) {
+				mfd[i] = 0;
+				mbufmgr[i] = NULL;
+				mbuffer[i] = NULL;
+			}
+			for (i = 0; i < NUM_FD; i++) {
+				sprintf(buffer_name, "Target buffer %d\n", i);
+				mfd[i] = drm_open_any();
+				mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
+				igt_assert(mbufmgr[i]);
+				drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
+				mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
+				igt_assert(mbufmgr[i]);
+				mbuffer[i] = intel_bo_gem_create_from_name(
+								mbufmgr[i],
+								buffer_name,
+								target_flink);
+				igt_assert(mbuffer[i]);
+			}
+		}
 	}
 
 	igt_subtest("render") {
@@ -190,8 +271,27 @@ igt_main
 			printf("dummy loop run on random rings completed\n");
 		}
 	}
-
+	igt_subtest("mixed_multi_fd") {
+		if (num_rings > 1) {
+			sleep(2);
+			printf("running dummy loop on random rings based on "
+					"multi drm_fd\n");
+			dummy_reloc_loop_random_ring_multi_fd(num_rings);
+			printf("dummy loop run on random rings based on "
+					"multi drm_fd completed\n");
+		}
+	}
 	igt_fixture {
+		int i;
+		/* Free the buffer/batchbuffer/buffer mgr for multi-fd */
+		{
+			for (i = 0; i < NUM_FD; i++) {
+				dri_bo_unreference(mbuffer[i]);
+				intel_batchbuffer_free(mbatch[i]);
+				drm_intel_bufmgr_destroy(mbufmgr[i]);
+				close(mfd[i]);
+			}
+		}
 		drm_intel_bo_unreference(target_buffer);
 		intel_batchbuffer_free(batch);
 		drm_intel_bufmgr_destroy(bufmgr);
-- 
1.7.10.1

^ permalink raw reply related	[flat|nested] 10+ messages in thread

* Re: [PATCH I-g-t V2 1/2] tests: Add one ring sync case based on multi drm_fd to test ring semaphore sync under multi BSD rings
  2014-04-15  2:38 ` [PATCH I-g-t V2 1/2] tests: Add one ring sync case based on multi drm_fd to test ring semaphore sync under multi BSD rings Zhao Yakui
@ 2014-04-22 11:52   ` Imre Deak
  2014-04-22 19:44     ` Daniel Vetter
  0 siblings, 1 reply; 10+ messages in thread
From: Imre Deak @ 2014-04-22 11:52 UTC (permalink / raw)
  To: Zhao Yakui; +Cc: intel-gfx


[-- Attachment #1.1: Type: text/plain, Size: 6962 bytes --]

On Tue, 2014-04-15 at 10:38 +0800, Zhao Yakui wrote:
> The Broadwell GT3 machine has two independent BSD rings in kernel driver while
> it is transparent to the user-space driver. In such case it needs to check
> the ring sync between the two BSD rings. At the same time it also needs to
> check the sync among the second BSD ring and the other rings.
> 
> Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
> ---
>  tests/Makefile.sources          |    1 +
>  tests/gem_multi_bsd_sync_loop.c |  172 +++++++++++++++++++++++++++++++++++++++
>  2 files changed, 173 insertions(+)
>  create mode 100644 tests/gem_multi_bsd_sync_loop.c
> 
> diff --git a/tests/Makefile.sources b/tests/Makefile.sources
> index c957ace..7cd9ca8 100644
> --- a/tests/Makefile.sources
> +++ b/tests/Makefile.sources
> @@ -105,6 +105,7 @@ TESTS_progs = \
>  	gem_render_tiled_blits \
>  	gem_ring_sync_copy \
>  	gem_ring_sync_loop \
> +	gem_multi_bsd_sync_loop \
>  	gem_seqno_wrap \
>  	gem_set_tiling_vs_gtt \
>  	gem_set_tiling_vs_pwrite \
> diff --git a/tests/gem_multi_bsd_sync_loop.c b/tests/gem_multi_bsd_sync_loop.c
> new file mode 100644
> index 0000000..7f5b832
> --- /dev/null
> +++ b/tests/gem_multi_bsd_sync_loop.c
> @@ -0,0 +1,172 @@
> +/*
> + * Copyright © 2014 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + * Authors:
> + *    Daniel Vetter <daniel.vetter@ffwll.ch> (based on gem_ring_sync_loop_*.c)
> + *    Zhao Yakui <yakui.zhao@intel.com>
> + *
> + */
> +
> +#include <stdlib.h>
> +#include <stdio.h>
> +#include <string.h>
> +#include <fcntl.h>
> +#include <inttypes.h>
> +#include <errno.h>
> +#include <sys/stat.h>
> +#include <sys/time.h>
> +#include "drm.h"
> +#include "ioctl_wrappers.h"
> +#include "drmtest.h"
> +#include "intel_bufmgr.h"
> +#include "intel_batchbuffer.h"
> +#include "intel_io.h"
> +#include "i830_reg.h"
> +#include "intel_chipset.h"
> +
> +static drm_intel_bufmgr *bufmgr;
> +struct intel_batchbuffer *batch;
> +static drm_intel_bo *target_buffer;
> +
> +#define NUM_FD	50
> +
> +static int mfd[NUM_FD];
> +static drm_intel_bufmgr *mbufmgr[NUM_FD];
> +static struct intel_batchbuffer *mbatch[NUM_FD];
> +static drm_intel_bo *mbuffer[NUM_FD];
> +
> +
> +/*
> + * Testcase: Basic check of ring<->ring sync using a dummy reloc
> + *
> + * Extremely efficient at catching missed irqs with semaphores=0 ...
> + */
> +
> +#define MI_COND_BATCH_BUFFER_END	(0x36<<23 | 1)
> +#define MI_DO_COMPARE			(1<<21)
> +
> +static void
> +store_dword_loop(int fd)
> +{
> +	int i;
> +	int num_rings = gem_get_num_rings(fd);
> +
> +	srandom(0xdeadbeef);
> +
> +	for (i = 0; i < SLOW_QUICK(0x100000, 10); i++) {
> +		int ring, mindex;
> +		ring = random() % num_rings + 1;
> +		mindex = random() % NUM_FD;
> +		batch = mbatch[mindex];
> +		if (ring == I915_EXEC_RENDER) {
> +			BEGIN_BATCH(4);
> +			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
> +			OUT_BATCH(0xffffffff); /* compare dword */
> +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> +					I915_GEM_DOMAIN_RENDER, 0);
> +			OUT_BATCH(MI_NOOP);
> +			ADVANCE_BATCH();
> +		} else {
> +			BEGIN_BATCH(4);
> +			OUT_BATCH(MI_FLUSH_DW | 1);
> +			OUT_BATCH(0); /* reserved */
> +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> +					I915_GEM_DOMAIN_RENDER, 0);
> +			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
> +			ADVANCE_BATCH();
> +		}
> +		intel_batchbuffer_flush_on_ring(batch, ring);
> +	}
> +
> +	drm_intel_bo_map(target_buffer, 0);
> +	// map to force waiting on rendering
> +	drm_intel_bo_unmap(target_buffer);

This test looks the same as dummy_reloc_loop_random_ring_multi_fd() that
you add in patch 2/2, except the above two calls. Unless I'm missing
something else .. Is there any reason why we don't want to make this
also a subtest of gem_dummy_reloc_loop.c to avoid duplicating all the
setup here?

--Imre


> +}
> +
> +igt_simple_main
> +{
> +	int fd;
> +	int devid;
> +	int i;
> +
> +	fd = drm_open_any();
> +	devid = intel_get_drm_devid(fd);
> +	gem_require_ring(fd, I915_EXEC_BLT);
> +
> +
> +	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
> +	igt_assert(bufmgr);
> +	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
> +
> +
> +	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
> +	igt_assert(target_buffer);
> +
> +	/* Create multiple drm_fd and map one gem_object among multi drm_fd */
> +	{
> +		unsigned int target_flink;
> +		char buffer_name[32];
> +		if (dri_bo_flink(target_buffer, &target_flink)) {
> +			igt_assert(0);
> +			printf("fail to get flink for target buffer\n");
> +			goto fail_flink;
> +		}
> +		for (i = 0; i < NUM_FD; i++) {
> +			mfd[i] = 0;
> +			mbufmgr[i] = NULL;
> +			mbuffer[i] = NULL;
> +		}
> +		for (i = 0; i < NUM_FD; i++) {
> +			sprintf(buffer_name, "Target buffer %d\n", i);
> +			mfd[i] = drm_open_any();
> +			mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
> +			igt_assert(mbufmgr[i]);
> +			drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
> +			mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
> +			igt_assert(mbufmgr[i]);
> +			mbuffer[i] = intel_bo_gem_create_from_name(mbufmgr[i], buffer_name, target_flink);
> +			igt_assert(mbuffer[i]);
> +		}
> +	}
> +
> +	store_dword_loop(fd);
> +
> +	{
> +		for (i = 0; i < NUM_FD; i++) {
> +			dri_bo_unreference(mbuffer[i]);
> +			intel_batchbuffer_free(mbatch[i]);
> +			drm_intel_bufmgr_destroy(mbufmgr[i]);
> +			close(mfd[i]);
> +		}
> +	}
> +	drm_intel_bo_unreference(target_buffer);
> +	drm_intel_bufmgr_destroy(bufmgr);
> +
> +	close(fd);
> +	return;
> +
> +fail_flink:
> +	drm_intel_bo_unreference(target_buffer);
> +	drm_intel_bufmgr_destroy(bufmgr);
> +
> +	close(fd);
> +}


[-- Attachment #1.2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 490 bytes --]

[-- Attachment #2: Type: text/plain, Size: 159 bytes --]

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH I-g-t V2 2/2] tests/gem_dummy_reloc_loop: Add one subtest based on multi drm_fd to test CPU<->GPU sync under multi BSD rings
  2014-04-15  2:38 ` [PATCH I-g-t V2 2/2] tests/gem_dummy_reloc_loop: Add one subtest based on multi drm_fd to test CPU<->GPU " Zhao Yakui
@ 2014-04-22 12:05   ` Imre Deak
  2014-04-22 19:48     ` Daniel Vetter
  0 siblings, 1 reply; 10+ messages in thread
From: Imre Deak @ 2014-04-22 12:05 UTC (permalink / raw)
  To: Zhao Yakui; +Cc: intel-gfx


[-- Attachment #1.1: Type: text/plain, Size: 5021 bytes --]

On Tue, 2014-04-15 at 10:38 +0800, Zhao Yakui wrote:
> The Broadwell GT3 machine has two independent BSD rings in kernel driver while
> it is transparent to the user-space driver. In such case it needs to check
> the CPU<->GPU sync for the second BSD ring.
> 
> V1->V2: Follow Daniel's comment to add one subtest instead of one individual
> test case, which is used to test the CPU<->GPU sync under multi BSD rings
> 
> Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
> ---
>  tests/gem_dummy_reloc_loop.c |  102 +++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 101 insertions(+), 1 deletion(-)
> 
> diff --git a/tests/gem_dummy_reloc_loop.c b/tests/gem_dummy_reloc_loop.c
> index a61b59b..660d8e1 100644
> --- a/tests/gem_dummy_reloc_loop.c
> +++ b/tests/gem_dummy_reloc_loop.c
> @@ -48,6 +48,13 @@ static drm_intel_bufmgr *bufmgr;
>  struct intel_batchbuffer *batch;
>  static drm_intel_bo *target_buffer;
>  
> +#define NUM_FD	50
> +
> +static int mfd[NUM_FD];
> +static drm_intel_bufmgr *mbufmgr[NUM_FD];
> +static struct intel_batchbuffer *mbatch[NUM_FD];
> +static drm_intel_bo *mbuffer[NUM_FD];
> +
>  /*
>   * Testcase: Basic check of ring<->cpu sync using a dummy reloc
>   *
> @@ -124,6 +131,50 @@ dummy_reloc_loop_random_ring(int num_rings)
>  	}
>  }
>  
> +static void
> +dummy_reloc_loop_random_ring_multi_fd(int num_rings)
> +{
> +	int i;
> +	struct intel_batchbuffer *saved_batch;
> +
> +	saved_batch = batch;
> +
> +	srandom(0xdeadbeef);
> +
> +	for (i = 0; i < 0x100000; i++) {
> +		int mindex;
> +		int ring = random() % num_rings + 1;
> +
> +		mindex = random() % NUM_FD;
> +		batch = mbatch[mindex];
> +
> +		if (ring == I915_EXEC_RENDER) {
> +			BEGIN_BATCH(4);
> +			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
> +			OUT_BATCH(0xffffffff); /* compare dword */
> +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> +					I915_GEM_DOMAIN_RENDER, 0);
> +			OUT_BATCH(MI_NOOP);
> +			ADVANCE_BATCH();
> +		} else {
> +			BEGIN_BATCH(4);
> +			OUT_BATCH(MI_FLUSH_DW | 1);
> +			OUT_BATCH(0); /* reserved */
> +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> +					I915_GEM_DOMAIN_RENDER, 0);
> +			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
> +			ADVANCE_BATCH();
> +		}
> +		intel_batchbuffer_flush_on_ring(batch, ring);
> +
> +		drm_intel_bo_map(target_buffer, 0);
> +		// map to force waiting on rendering
> +		drm_intel_bo_unmap(target_buffer);
> +	}
> +
> +	batch = saved_batch;
> +}
> +
>  int fd;
>  int devid;
>  int num_rings;
> @@ -133,6 +184,7 @@ igt_main
>  	igt_skip_on_simulation();
>  
>  	igt_fixture {
> +		int i;
>  		fd = drm_open_any();
>  		devid = intel_get_drm_devid(fd);
>  		num_rings = gem_get_num_rings(fd);
> @@ -148,6 +200,35 @@ igt_main
>  
>  		target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
>  		igt_assert(target_buffer);
> +
> +		/* Create multi drm_fd and map one gem object to multi gem_contexts */
> +		{
> +			unsigned int target_flink;
> +			char buffer_name[32];
> +			if (dri_bo_flink(target_buffer, &target_flink)) {
> +				printf("fail to get flink for target buffer\n");
> +				igt_assert(0);

For the future: could be just igt_assert_f().

> +			}
> +			for (i = 0; i < NUM_FD; i++) {
> +				mfd[i] = 0;
> +				mbufmgr[i] = NULL;
> +				mbuffer[i] = NULL;
> +			}

Nitpick: the above are all statics, so no need to init them.

Other than the above this looks good:
Reviewed-by: Imre Deak <imre.deak@intel.com>

> +			for (i = 0; i < NUM_FD; i++) {
> +				sprintf(buffer_name, "Target buffer %d\n", i);
> +				mfd[i] = drm_open_any();
> +				mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
> +				igt_assert(mbufmgr[i]);
> +				drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
> +				mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
> +				igt_assert(mbufmgr[i]);
> +				mbuffer[i] = intel_bo_gem_create_from_name(
> +								mbufmgr[i],
> +								buffer_name,
> +								target_flink);
> +				igt_assert(mbuffer[i]);
> +			}
> +		}
>  	}
>  
>  	igt_subtest("render") {
> @@ -190,8 +271,27 @@ igt_main
>  			printf("dummy loop run on random rings completed\n");
>  		}
>  	}
> -
> +	igt_subtest("mixed_multi_fd") {
> +		if (num_rings > 1) {
> +			sleep(2);
> +			printf("running dummy loop on random rings based on "
> +					"multi drm_fd\n");
> +			dummy_reloc_loop_random_ring_multi_fd(num_rings);
> +			printf("dummy loop run on random rings based on "
> +					"multi drm_fd completed\n");
> +		}
> +	}
>  	igt_fixture {
> +		int i;
> +		/* Free the buffer/batchbuffer/buffer mgr for multi-fd */
> +		{
> +			for (i = 0; i < NUM_FD; i++) {
> +				dri_bo_unreference(mbuffer[i]);
> +				intel_batchbuffer_free(mbatch[i]);
> +				drm_intel_bufmgr_destroy(mbufmgr[i]);
> +				close(mfd[i]);
> +			}
> +		}
>  		drm_intel_bo_unreference(target_buffer);
>  		intel_batchbuffer_free(batch);
>  		drm_intel_bufmgr_destroy(bufmgr);


[-- Attachment #1.2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 490 bytes --]

[-- Attachment #2: Type: text/plain, Size: 159 bytes --]

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH I-g-t V2 1/2] tests: Add one ring sync case based on multi drm_fd to test ring semaphore sync under multi BSD rings
  2014-04-22 11:52   ` Imre Deak
@ 2014-04-22 19:44     ` Daniel Vetter
  2014-04-23  1:13       ` Zhao Yakui
  0 siblings, 1 reply; 10+ messages in thread
From: Daniel Vetter @ 2014-04-22 19:44 UTC (permalink / raw)
  To: Imre Deak; +Cc: intel-gfx

On Tue, Apr 22, 2014 at 02:52:04PM +0300, Imre Deak wrote:
> On Tue, 2014-04-15 at 10:38 +0800, Zhao Yakui wrote:
> > The Broadwell GT3 machine has two independent BSD rings in kernel driver while
> > it is transparent to the user-space driver. In such case it needs to check
> > the ring sync between the two BSD rings. At the same time it also needs to
> > check the sync among the second BSD ring and the other rings.
> > 
> > Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
> > ---
> >  tests/Makefile.sources          |    1 +
> >  tests/gem_multi_bsd_sync_loop.c |  172 +++++++++++++++++++++++++++++++++++++++
> >  2 files changed, 173 insertions(+)
> >  create mode 100644 tests/gem_multi_bsd_sync_loop.c
> > 
> > diff --git a/tests/Makefile.sources b/tests/Makefile.sources
> > index c957ace..7cd9ca8 100644
> > --- a/tests/Makefile.sources
> > +++ b/tests/Makefile.sources
> > @@ -105,6 +105,7 @@ TESTS_progs = \
> >  	gem_render_tiled_blits \
> >  	gem_ring_sync_copy \
> >  	gem_ring_sync_loop \
> > +	gem_multi_bsd_sync_loop \
> >  	gem_seqno_wrap \
> >  	gem_set_tiling_vs_gtt \
> >  	gem_set_tiling_vs_pwrite \
> > diff --git a/tests/gem_multi_bsd_sync_loop.c b/tests/gem_multi_bsd_sync_loop.c
> > new file mode 100644
> > index 0000000..7f5b832
> > --- /dev/null
> > +++ b/tests/gem_multi_bsd_sync_loop.c
> > @@ -0,0 +1,172 @@
> > +/*
> > + * Copyright © 2014 Intel Corporation
> > + *
> > + * Permission is hereby granted, free of charge, to any person obtaining a
> > + * copy of this software and associated documentation files (the "Software"),
> > + * to deal in the Software without restriction, including without limitation
> > + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> > + * and/or sell copies of the Software, and to permit persons to whom the
> > + * Software is furnished to do so, subject to the following conditions:
> > + *
> > + * The above copyright notice and this permission notice (including the next
> > + * paragraph) shall be included in all copies or substantial portions of the
> > + * Software.
> > + *
> > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> > + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> > + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> > + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> > + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> > + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> > + * IN THE SOFTWARE.
> > + *
> > + * Authors:
> > + *    Daniel Vetter <daniel.vetter@ffwll.ch> (based on gem_ring_sync_loop_*.c)
> > + *    Zhao Yakui <yakui.zhao@intel.com>
> > + *
> > + */
> > +
> > +#include <stdlib.h>
> > +#include <stdio.h>
> > +#include <string.h>
> > +#include <fcntl.h>
> > +#include <inttypes.h>
> > +#include <errno.h>
> > +#include <sys/stat.h>
> > +#include <sys/time.h>
> > +#include "drm.h"
> > +#include "ioctl_wrappers.h"
> > +#include "drmtest.h"
> > +#include "intel_bufmgr.h"
> > +#include "intel_batchbuffer.h"
> > +#include "intel_io.h"
> > +#include "i830_reg.h"
> > +#include "intel_chipset.h"
> > +
> > +static drm_intel_bufmgr *bufmgr;
> > +struct intel_batchbuffer *batch;
> > +static drm_intel_bo *target_buffer;
> > +
> > +#define NUM_FD	50
> > +
> > +static int mfd[NUM_FD];
> > +static drm_intel_bufmgr *mbufmgr[NUM_FD];
> > +static struct intel_batchbuffer *mbatch[NUM_FD];
> > +static drm_intel_bo *mbuffer[NUM_FD];
> > +
> > +
> > +/*
> > + * Testcase: Basic check of ring<->ring sync using a dummy reloc
> > + *
> > + * Extremely efficient at catching missed irqs with semaphores=0 ...
> > + */
> > +
> > +#define MI_COND_BATCH_BUFFER_END	(0x36<<23 | 1)
> > +#define MI_DO_COMPARE			(1<<21)
> > +
> > +static void
> > +store_dword_loop(int fd)
> > +{
> > +	int i;
> > +	int num_rings = gem_get_num_rings(fd);
> > +
> > +	srandom(0xdeadbeef);
> > +
> > +	for (i = 0; i < SLOW_QUICK(0x100000, 10); i++) {
> > +		int ring, mindex;
> > +		ring = random() % num_rings + 1;
> > +		mindex = random() % NUM_FD;
> > +		batch = mbatch[mindex];
> > +		if (ring == I915_EXEC_RENDER) {
> > +			BEGIN_BATCH(4);
> > +			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
> > +			OUT_BATCH(0xffffffff); /* compare dword */
> > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > +					I915_GEM_DOMAIN_RENDER, 0);
> > +			OUT_BATCH(MI_NOOP);
> > +			ADVANCE_BATCH();
> > +		} else {
> > +			BEGIN_BATCH(4);
> > +			OUT_BATCH(MI_FLUSH_DW | 1);
> > +			OUT_BATCH(0); /* reserved */
> > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > +					I915_GEM_DOMAIN_RENDER, 0);
> > +			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
> > +			ADVANCE_BATCH();
> > +		}
> > +		intel_batchbuffer_flush_on_ring(batch, ring);
> > +	}
> > +
> > +	drm_intel_bo_map(target_buffer, 0);
> > +	// map to force waiting on rendering
> > +	drm_intel_bo_unmap(target_buffer);
> 
> This test looks the same as dummy_reloc_loop_random_ring_multi_fd() that
> you add in patch 2/2, except the above two calls. Unless I'm missing
> something else .. Is there any reason why we don't want to make this
> also a subtest of gem_dummy_reloc_loop.c to avoid duplicating all the
> setup here?

Historical accident since for the other rings we also have this
duplication between inter-ring sync tests and ring/cpu sync tests with
dummy relocs. I don't mind really ;-)
-Daniel

> 
> --Imre
> 
> 
> > +}
> > +
> > +igt_simple_main
> > +{
> > +	int fd;
> > +	int devid;
> > +	int i;
> > +
> > +	fd = drm_open_any();
> > +	devid = intel_get_drm_devid(fd);
> > +	gem_require_ring(fd, I915_EXEC_BLT);
> > +
> > +
> > +	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
> > +	igt_assert(bufmgr);
> > +	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
> > +
> > +
> > +	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
> > +	igt_assert(target_buffer);
> > +
> > +	/* Create multiple drm_fd and map one gem_object among multi drm_fd */
> > +	{
> > +		unsigned int target_flink;
> > +		char buffer_name[32];
> > +		if (dri_bo_flink(target_buffer, &target_flink)) {
> > +			igt_assert(0);
> > +			printf("fail to get flink for target buffer\n");
> > +			goto fail_flink;
> > +		}
> > +		for (i = 0; i < NUM_FD; i++) {
> > +			mfd[i] = 0;
> > +			mbufmgr[i] = NULL;
> > +			mbuffer[i] = NULL;
> > +		}
> > +		for (i = 0; i < NUM_FD; i++) {
> > +			sprintf(buffer_name, "Target buffer %d\n", i);
> > +			mfd[i] = drm_open_any();
> > +			mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
> > +			igt_assert(mbufmgr[i]);
> > +			drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
> > +			mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
> > +			igt_assert(mbufmgr[i]);
> > +			mbuffer[i] = intel_bo_gem_create_from_name(mbufmgr[i], buffer_name, target_flink);
> > +			igt_assert(mbuffer[i]);
> > +		}
> > +	}
> > +
> > +	store_dword_loop(fd);
> > +
> > +	{
> > +		for (i = 0; i < NUM_FD; i++) {
> > +			dri_bo_unreference(mbuffer[i]);
> > +			intel_batchbuffer_free(mbatch[i]);
> > +			drm_intel_bufmgr_destroy(mbufmgr[i]);
> > +			close(mfd[i]);
> > +		}
> > +	}
> > +	drm_intel_bo_unreference(target_buffer);
> > +	drm_intel_bufmgr_destroy(bufmgr);
> > +
> > +	close(fd);
> > +	return;
> > +
> > +fail_flink:
> > +	drm_intel_bo_unreference(target_buffer);
> > +	drm_intel_bufmgr_destroy(bufmgr);
> > +
> > +	close(fd);
> > +}
> 



> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx


-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH I-g-t V2 2/2] tests/gem_dummy_reloc_loop: Add one subtest based on multi drm_fd to test CPU<->GPU sync under multi BSD rings
  2014-04-22 12:05   ` Imre Deak
@ 2014-04-22 19:48     ` Daniel Vetter
  2014-04-23  0:26       ` Zhao Yakui
  0 siblings, 1 reply; 10+ messages in thread
From: Daniel Vetter @ 2014-04-22 19:48 UTC (permalink / raw)
  To: Imre Deak; +Cc: intel-gfx

On Tue, Apr 22, 2014 at 03:05:03PM +0300, Imre Deak wrote:
> On Tue, 2014-04-15 at 10:38 +0800, Zhao Yakui wrote:
> > The Broadwell GT3 machine has two independent BSD rings in kernel driver while
> > it is transparent to the user-space driver. In such case it needs to check
> > the CPU<->GPU sync for the second BSD ring.
> > 
> > V1->V2: Follow Daniel's comment to add one subtext instead of one individual
> > test case, which is used to test the CPU<->GPU sync under multi BSD rings
> > 
> > Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
> > ---
> >  tests/gem_dummy_reloc_loop.c |  102 +++++++++++++++++++++++++++++++++++++++++-
> >  1 file changed, 101 insertions(+), 1 deletion(-)
> > 
> > diff --git a/tests/gem_dummy_reloc_loop.c b/tests/gem_dummy_reloc_loop.c
> > index a61b59b..660d8e1 100644
> > --- a/tests/gem_dummy_reloc_loop.c
> > +++ b/tests/gem_dummy_reloc_loop.c
> > @@ -48,6 +48,13 @@ static drm_intel_bufmgr *bufmgr;
> >  struct intel_batchbuffer *batch;
> >  static drm_intel_bo *target_buffer;
> >  
> > +#define NUM_FD	50
> > +
> > +static int mfd[NUM_FD];
> > +static drm_intel_bufmgr *mbufmgr[NUM_FD];
> > +static struct intel_batchbuffer *mbatch[NUM_FD];
> > +static drm_intel_bo *mbuffer[NUM_FD];
> > +
> >  /*
> >   * Testcase: Basic check of ring<->cpu sync using a dummy reloc
> >   *
> > @@ -124,6 +131,50 @@ dummy_reloc_loop_random_ring(int num_rings)
> >  	}
> >  }
> >  
> > +static void
> > +dummy_reloc_loop_random_ring_multi_fd(int num_rings)
> > +{
> > +	int i;
> > +	struct intel_batchbuffer *saved_batch;
> > +
> > +	saved_batch = batch;
> > +
> > +	srandom(0xdeadbeef);
> > +
> > +	for (i = 0; i < 0x100000; i++) {
> > +		int mindex;
> > +		int ring = random() % num_rings + 1;
> > +
> > +		mindex = random() % NUM_FD;
> > +		batch = mbatch[mindex];
> > +
> > +		if (ring == I915_EXEC_RENDER) {
> > +			BEGIN_BATCH(4);
> > +			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
> > +			OUT_BATCH(0xffffffff); /* compare dword */
> > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > +					I915_GEM_DOMAIN_RENDER, 0);
> > +			OUT_BATCH(MI_NOOP);
> > +			ADVANCE_BATCH();
> > +		} else {
> > +			BEGIN_BATCH(4);
> > +			OUT_BATCH(MI_FLUSH_DW | 1);
> > +			OUT_BATCH(0); /* reserved */
> > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > +					I915_GEM_DOMAIN_RENDER, 0);
> > +			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
> > +			ADVANCE_BATCH();
> > +		}
> > +		intel_batchbuffer_flush_on_ring(batch, ring);
> > +
> > +		drm_intel_bo_map(target_buffer, 0);
> > +		// map to force waiting on rendering
> > +		drm_intel_bo_unmap(target_buffer);
> > +	}
> > +
> > +	batch = saved_batch;
> > +}
> > +
> >  int fd;
> >  int devid;
> >  int num_rings;
> > @@ -133,6 +184,7 @@ igt_main
> >  	igt_skip_on_simulation();
> >  
> >  	igt_fixture {
> > +		int i;
> >  		fd = drm_open_any();
> >  		devid = intel_get_drm_devid(fd);
> >  		num_rings = gem_get_num_rings(fd);
> > @@ -148,6 +200,35 @@ igt_main
> >  
> >  		target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
> >  		igt_assert(target_buffer);
> > +
> > +		/* Create multi drm_fd and map one gem object to multi gem_contexts */
> > +		{
> > +			unsigned int target_flink;
> > +			char buffer_name[32];
> > +			if (dri_bo_flink(target_buffer, &target_flink)) {
> > +				printf("fail to get flink for target buffer\n");
> > +				igt_assert(0);
> 
> For the future: could be just igt_assert_f().

Yeah I think for new testcases we should try to use the latest igt_*
macros and helpers as much as possible. Reducing control flow and
replacing it by the right igt_assert/require/... macro imo really helps
the readability of testcases.
-Daniel
> 
> > +			}
> > +			for (i = 0; i < NUM_FD; i++) {
> > +				mfd[i] = 0;
> > +				mbufmgr[i] = NULL;
> > +				mbuffer[i] = NULL;
> > +			}
> 
> Nitpick: the above are all statics, so no need to init them.
> 
> Other than the above this looks good:
> Reviewed-by: Imre Deak <imre.deak@intel.com>
> 
> > +			for (i = 0; i < NUM_FD; i++) {
> > +				sprintf(buffer_name, "Target buffer %d\n", i);
> > +				mfd[i] = drm_open_any();
> > +				mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
> > +				igt_assert(mbufmgr[i]);
> > +				drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
> > +				mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
> > +				igt_assert(mbufmgr[i]);
> > +				mbuffer[i] = intel_bo_gem_create_from_name(
> > +								mbufmgr[i],
> > +								buffer_name,
> > +								target_flink);
> > +				igt_assert(mbuffer[i]);
> > +			}
> > +		}
> >  	}
> >  
> >  	igt_subtest("render") {
> > @@ -190,8 +271,27 @@ igt_main
> >  			printf("dummy loop run on random rings completed\n");
> >  		}
> >  	}
> > -
> > +	igt_subtest("mixed_multi_fd") {
> > +		if (num_rings > 1) {
> > +			sleep(2);
> > +			printf("running dummy loop on random rings based on "
> > +					"multi drm_fd\n");
> > +			dummy_reloc_loop_random_ring_multi_fd(num_rings);
> > +			printf("dummy loop run on random rings based on "
> > +					"multi drm_fd completed\n");
> > +		}
> > +	}
> >  	igt_fixture {
> > +		int i;
> > +		/* Free the buffer/batchbuffer/buffer mgr for multi-fd */
> > +		{
> > +			for (i = 0; i < NUM_FD; i++) {
> > +				dri_bo_unreference(mbuffer[i]);
> > +				intel_batchbuffer_free(mbatch[i]);
> > +				drm_intel_bufmgr_destroy(mbufmgr[i]);
> > +				close(mfd[i]);
> > +			}
> > +		}
> >  		drm_intel_bo_unreference(target_buffer);
> >  		intel_batchbuffer_free(batch);
> >  		drm_intel_bufmgr_destroy(bufmgr);
> 



> _______________________________________________
> Intel-gfx mailing list
> Intel-gfx@lists.freedesktop.org
> http://lists.freedesktop.org/mailman/listinfo/intel-gfx


-- 
Daniel Vetter
Software Engineer, Intel Corporation
+41 (0) 79 365 57 48 - http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH I-g-t V2 2/2] tests/gem_dummy_reloc_loop: Add one subtest based on multi drm_fd to test CPU<->GPU sync under multi BSD rings
  2014-04-22 19:48     ` Daniel Vetter
@ 2014-04-23  0:26       ` Zhao Yakui
  0 siblings, 0 replies; 10+ messages in thread
From: Zhao Yakui @ 2014-04-23  0:26 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: intel-gfx

On Tue, 2014-04-22 at 13:48 -0600, Daniel Vetter wrote:
> On Tue, Apr 22, 2014 at 03:05:03PM +0300, Imre Deak wrote:
> > On Tue, 2014-04-15 at 10:38 +0800, Zhao Yakui wrote:
> > > The Broadwell GT3 machine has two independent BSD rings in kernel driver while
> > > it is transparent to the user-space driver. In such case it needs to check
> > > the CPU<->GPU sync for the second BSD ring.
> > > 
> > > V1->V2: Follow Daniel's comment to add one subtext instead of one individual
> > > test case, which is used to test the CPU<->GPU sync under multi BSD rings
> > > 
> > > Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
> > > ---
> > >  tests/gem_dummy_reloc_loop.c |  102 +++++++++++++++++++++++++++++++++++++++++-
> > >  1 file changed, 101 insertions(+), 1 deletion(-)
> > > 
> > > diff --git a/tests/gem_dummy_reloc_loop.c b/tests/gem_dummy_reloc_loop.c
> > > index a61b59b..660d8e1 100644
> > > --- a/tests/gem_dummy_reloc_loop.c
> > > +++ b/tests/gem_dummy_reloc_loop.c
> > > @@ -48,6 +48,13 @@ static drm_intel_bufmgr *bufmgr;
> > >  struct intel_batchbuffer *batch;
> > >  static drm_intel_bo *target_buffer;
> > >  
> > > +#define NUM_FD	50
> > > +
> > > +static int mfd[NUM_FD];
> > > +static drm_intel_bufmgr *mbufmgr[NUM_FD];
> > > +static struct intel_batchbuffer *mbatch[NUM_FD];
> > > +static drm_intel_bo *mbuffer[NUM_FD];
> > > +
> > >  /*
> > >   * Testcase: Basic check of ring<->cpu sync using a dummy reloc
> > >   *
> > > @@ -124,6 +131,50 @@ dummy_reloc_loop_random_ring(int num_rings)
> > >  	}
> > >  }
> > >  
> > > +static void
> > > +dummy_reloc_loop_random_ring_multi_fd(int num_rings)
> > > +{
> > > +	int i;
> > > +	struct intel_batchbuffer *saved_batch;
> > > +
> > > +	saved_batch = batch;
> > > +
> > > +	srandom(0xdeadbeef);
> > > +
> > > +	for (i = 0; i < 0x100000; i++) {
> > > +		int mindex;
> > > +		int ring = random() % num_rings + 1;
> > > +
> > > +		mindex = random() % NUM_FD;
> > > +		batch = mbatch[mindex];
> > > +
> > > +		if (ring == I915_EXEC_RENDER) {
> > > +			BEGIN_BATCH(4);
> > > +			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
> > > +			OUT_BATCH(0xffffffff); /* compare dword */
> > > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > > +					I915_GEM_DOMAIN_RENDER, 0);
> > > +			OUT_BATCH(MI_NOOP);
> > > +			ADVANCE_BATCH();
> > > +		} else {
> > > +			BEGIN_BATCH(4);
> > > +			OUT_BATCH(MI_FLUSH_DW | 1);
> > > +			OUT_BATCH(0); /* reserved */
> > > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > > +					I915_GEM_DOMAIN_RENDER, 0);
> > > +			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
> > > +			ADVANCE_BATCH();
> > > +		}
> > > +		intel_batchbuffer_flush_on_ring(batch, ring);
> > > +
> > > +		drm_intel_bo_map(target_buffer, 0);
> > > +		// map to force waiting on rendering
> > > +		drm_intel_bo_unmap(target_buffer);
> > > +	}
> > > +
> > > +	batch = saved_batch;
> > > +}
> > > +
> > >  int fd;
> > >  int devid;
> > >  int num_rings;
> > > @@ -133,6 +184,7 @@ igt_main
> > >  	igt_skip_on_simulation();
> > >  
> > >  	igt_fixture {
> > > +		int i;
> > >  		fd = drm_open_any();
> > >  		devid = intel_get_drm_devid(fd);
> > >  		num_rings = gem_get_num_rings(fd);
> > > @@ -148,6 +200,35 @@ igt_main
> > >  
> > >  		target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
> > >  		igt_assert(target_buffer);
> > > +
> > > +		/* Create multi drm_fd and map one gem object to multi gem_contexts */
> > > +		{
> > > +			unsigned int target_flink;
> > > +			char buffer_name[32];
> > > +			if (dri_bo_flink(target_buffer, &target_flink)) {
> > > +				printf("fail to get flink for target buffer\n");
> > > +				igt_assert(0);
> > 
> > For the future: could be just igt_assert_f().
> 
> Yeah I think for new testcases we should try to use the latest igt_*
> macros and helpers as much as possible. Reducing control flow and
> replacing it by the right igt_assert/require/... macro imo really helps
> the readability of testcases.

Hi, Daniel/Imre

    Thanks for your comments and advice.
    I will update it.

Thanks.
    Yakui

> -Daniel
> > 
> > > +			}
> > > +			for (i = 0; i < NUM_FD; i++) {
> > > +				mfd[i] = 0;
> > > +				mbufmgr[i] = NULL;
> > > +				mbuffer[i] = NULL;
> > > +			}
> > 
> > Nitpick: the above are all statics, so no need to init them.
> > 
> > Other than the above this looks good:
> > Reviewed-by: Imre Deak <imre.deak@intel.com>
> > 
> > > +			for (i = 0; i < NUM_FD; i++) {
> > > +				sprintf(buffer_name, "Target buffer %d\n", i);
> > > +				mfd[i] = drm_open_any();
> > > +				mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
> > > +				igt_assert(mbufmgr[i]);
> > > +				drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
> > > +				mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
> > > +				igt_assert(mbufmgr[i]);
> > > +				mbuffer[i] = intel_bo_gem_create_from_name(
> > > +								mbufmgr[i],
> > > +								buffer_name,
> > > +								target_flink);
> > > +				igt_assert(mbuffer[i]);
> > > +			}
> > > +		}
> > >  	}
> > >  
> > >  	igt_subtest("render") {
> > > @@ -190,8 +271,27 @@ igt_main
> > >  			printf("dummy loop run on random rings completed\n");
> > >  		}
> > >  	}
> > > -
> > > +	igt_subtest("mixed_multi_fd") {
> > > +		if (num_rings > 1) {
> > > +			sleep(2);
> > > +			printf("running dummy loop on random rings based on "
> > > +					"multi drm_fd\n");
> > > +			dummy_reloc_loop_random_ring_multi_fd(num_rings);
> > > +			printf("dummy loop run on random rings based on "
> > > +					"multi drm_fd completed\n");
> > > +		}
> > > +	}
> > >  	igt_fixture {
> > > +		int i;
> > > +		/* Free the buffer/batchbuffer/buffer mgr for multi-fd */
> > > +		{
> > > +			for (i = 0; i < NUM_FD; i++) {
> > > +				dri_bo_unreference(mbuffer[i]);
> > > +				intel_batchbuffer_free(mbatch[i]);
> > > +				drm_intel_bufmgr_destroy(mbufmgr[i]);
> > > +				close(mfd[i]);
> > > +			}
> > > +		}
> > >  		drm_intel_bo_unreference(target_buffer);
> > >  		intel_batchbuffer_free(batch);
> > >  		drm_intel_bufmgr_destroy(bufmgr);
> > 
> 
> 
> 
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx@lists.freedesktop.org
> > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
> 

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH I-g-t V2 1/2] tests: Add one ring sync case based on multi drm_fd to test ring semaphore sync under multi BSD rings
  2014-04-22 19:44     ` Daniel Vetter
@ 2014-04-23  1:13       ` Zhao Yakui
  2014-04-23  9:17         ` Imre Deak
  0 siblings, 1 reply; 10+ messages in thread
From: Zhao Yakui @ 2014-04-23  1:13 UTC (permalink / raw)
  To: Daniel Vetter; +Cc: intel-gfx

On Tue, 2014-04-22 at 13:44 -0600, Daniel Vetter wrote:
> On Tue, Apr 22, 2014 at 02:52:04PM +0300, Imre Deak wrote:
> > On Tue, 2014-04-15 at 10:38 +0800, Zhao Yakui wrote:
> > > The Broadwell GT3 machine has two independent BSD rings in kernel driver while
> > > it is transparent to the user-space driver. In such case it needs to check
> > > the ring sync between the two BSD rings. At the same time it also needs to
> > > check the sync among the second BSD ring and the other rings.
> > > 
> > > Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
> > > ---
> > >  tests/Makefile.sources          |    1 +
> > >  tests/gem_multi_bsd_sync_loop.c |  172 +++++++++++++++++++++++++++++++++++++++
> > >  2 files changed, 173 insertions(+)
> > >  create mode 100644 tests/gem_multi_bsd_sync_loop.c
> > > 
> > > diff --git a/tests/Makefile.sources b/tests/Makefile.sources
> > > index c957ace..7cd9ca8 100644
> > > --- a/tests/Makefile.sources
> > > +++ b/tests/Makefile.sources
> > > @@ -105,6 +105,7 @@ TESTS_progs = \
> > >  	gem_render_tiled_blits \
> > >  	gem_ring_sync_copy \
> > >  	gem_ring_sync_loop \
> > > +	gem_multi_bsd_sync_loop \
> > >  	gem_seqno_wrap \
> > >  	gem_set_tiling_vs_gtt \
> > >  	gem_set_tiling_vs_pwrite \
> > > diff --git a/tests/gem_multi_bsd_sync_loop.c b/tests/gem_multi_bsd_sync_loop.c
> > > new file mode 100644
> > > index 0000000..7f5b832
> > > --- /dev/null
> > > +++ b/tests/gem_multi_bsd_sync_loop.c
> > > @@ -0,0 +1,172 @@
> > > +/*
> > > + * Copyright © 2014 Intel Corporation
> > > + *
> > > + * Permission is hereby granted, free of charge, to any person obtaining a
> > > + * copy of this software and associated documentation files (the "Software"),
> > > + * to deal in the Software without restriction, including without limitation
> > > + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> > > + * and/or sell copies of the Software, and to permit persons to whom the
> > > + * Software is furnished to do so, subject to the following conditions:
> > > + *
> > > + * The above copyright notice and this permission notice (including the next
> > > + * paragraph) shall be included in all copies or substantial portions of the
> > > + * Software.
> > > + *
> > > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> > > + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> > > + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> > > + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> > > + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> > > + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> > > + * IN THE SOFTWARE.
> > > + *
> > > + * Authors:
> > > + *    Daniel Vetter <daniel.vetter@ffwll.ch> (based on gem_ring_sync_loop_*.c)
> > > + *    Zhao Yakui <yakui.zhao@intel.com>
> > > + *
> > > + */
> > > +
> > > +#include <stdlib.h>
> > > +#include <stdio.h>
> > > +#include <string.h>
> > > +#include <fcntl.h>
> > > +#include <inttypes.h>
> > > +#include <errno.h>
> > > +#include <sys/stat.h>
> > > +#include <sys/time.h>
> > > +#include "drm.h"
> > > +#include "ioctl_wrappers.h"
> > > +#include "drmtest.h"
> > > +#include "intel_bufmgr.h"
> > > +#include "intel_batchbuffer.h"
> > > +#include "intel_io.h"
> > > +#include "i830_reg.h"
> > > +#include "intel_chipset.h"
> > > +
> > > +static drm_intel_bufmgr *bufmgr;
> > > +struct intel_batchbuffer *batch;
> > > +static drm_intel_bo *target_buffer;
> > > +
> > > +#define NUM_FD	50
> > > +
> > > +static int mfd[NUM_FD];
> > > +static drm_intel_bufmgr *mbufmgr[NUM_FD];
> > > +static struct intel_batchbuffer *mbatch[NUM_FD];
> > > +static drm_intel_bo *mbuffer[NUM_FD];
> > > +
> > > +
> > > +/*
> > > + * Testcase: Basic check of ring<->ring sync using a dummy reloc
> > > + *
> > > + * Extremely efficient at catching missed irqs with semaphores=0 ...
> > > + */
> > > +
> > > +#define MI_COND_BATCH_BUFFER_END	(0x36<<23 | 1)
> > > +#define MI_DO_COMPARE			(1<<21)
> > > +
> > > +static void
> > > +store_dword_loop(int fd)
> > > +{
> > > +	int i;
> > > +	int num_rings = gem_get_num_rings(fd);
> > > +
> > > +	srandom(0xdeadbeef);
> > > +
> > > +	for (i = 0; i < SLOW_QUICK(0x100000, 10); i++) {
> > > +		int ring, mindex;
> > > +		ring = random() % num_rings + 1;
> > > +		mindex = random() % NUM_FD;
> > > +		batch = mbatch[mindex];
> > > +		if (ring == I915_EXEC_RENDER) {
> > > +			BEGIN_BATCH(4);
> > > +			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
> > > +			OUT_BATCH(0xffffffff); /* compare dword */
> > > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > > +					I915_GEM_DOMAIN_RENDER, 0);
> > > +			OUT_BATCH(MI_NOOP);
> > > +			ADVANCE_BATCH();
> > > +		} else {
> > > +			BEGIN_BATCH(4);
> > > +			OUT_BATCH(MI_FLUSH_DW | 1);
> > > +			OUT_BATCH(0); /* reserved */
> > > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > > +					I915_GEM_DOMAIN_RENDER, 0);
> > > +			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
> > > +			ADVANCE_BATCH();
> > > +		}
> > > +		intel_batchbuffer_flush_on_ring(batch, ring);
> > > +	}
> > > +
> > > +	drm_intel_bo_map(target_buffer, 0);
> > > +	// map to force waiting on rendering
> > > +	drm_intel_bo_unmap(target_buffer);
> > 
> > This test looks the same as dummy_reloc_loop_random_ring_multi_fd() that
> > you add in patch 2/2, except the above two calls. Unless I'm missing
> > something else .. Is there any reason why we don't want to make this
> > also a subtest of gem_dummy_reloc_loop.c to avoid duplicating all the
> > setup here?
> 
> Historical accident since for the other rings we also have this
> duplication between inter-ring sync tests and ring/cpu sync tests with
> dummy relocs. I don't mind really ;-)

Hi, Imre

    So based on Daniel's reply, we will use a separate test case to
do the inter-ring sync test.

Anyway, thanks for your comment.

Thanks.
    Yakui

> -Daniel
> 
> > 
> > --Imre
> > 
> > 
> > > +}
> > > +
> > > +igt_simple_main
> > > +{
> > > +	int fd;
> > > +	int devid;
> > > +	int i;
> > > +
> > > +	fd = drm_open_any();
> > > +	devid = intel_get_drm_devid(fd);
> > > +	gem_require_ring(fd, I915_EXEC_BLT);
> > > +
> > > +
> > > +	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
> > > +	igt_assert(bufmgr);
> > > +	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
> > > +
> > > +
> > > +	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
> > > +	igt_assert(target_buffer);
> > > +
> > > +	/* Create multiple drm_fd and map one gem_object among multi drm_fd */
> > > +	{
> > > +		unsigned int target_flink;
> > > +		char buffer_name[32];
> > > +		if (dri_bo_flink(target_buffer, &target_flink)) {
> > > +			igt_assert(0);
> > > +			printf("fail to get flink for target buffer\n");
> > > +			goto fail_flink;
> > > +		}
> > > +		for (i = 0; i < NUM_FD; i++) {
> > > +			mfd[i] = 0;
> > > +			mbufmgr[i] = NULL;
> > > +			mbuffer[i] = NULL;
> > > +		}
> > > +		for (i = 0; i < NUM_FD; i++) {
> > > +			sprintf(buffer_name, "Target buffer %d\n", i);
> > > +			mfd[i] = drm_open_any();
> > > +			mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
> > > +			igt_assert(mbufmgr[i]);
> > > +			drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
> > > +			mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
> > > +			igt_assert(mbufmgr[i]);
> > > +			mbuffer[i] = intel_bo_gem_create_from_name(mbufmgr[i], buffer_name, target_flink);
> > > +			igt_assert(mbuffer[i]);
> > > +		}
> > > +	}
> > > +
> > > +	store_dword_loop(fd);
> > > +
> > > +	{
> > > +		for (i = 0; i < NUM_FD; i++) {
> > > +			dri_bo_unreference(mbuffer[i]);
> > > +			intel_batchbuffer_free(mbatch[i]);
> > > +			drm_intel_bufmgr_destroy(mbufmgr[i]);
> > > +			close(mfd[i]);
> > > +		}
> > > +	}
> > > +	drm_intel_bo_unreference(target_buffer);
> > > +	drm_intel_bufmgr_destroy(bufmgr);
> > > +
> > > +	close(fd);
> > > +	return;
> > > +
> > > +fail_flink:
> > > +	drm_intel_bo_unreference(target_buffer);
> > > +	drm_intel_bufmgr_destroy(bufmgr);
> > > +
> > > +	close(fd);
> > > +}
> > 
> 
> 
> 
> > _______________________________________________
> > Intel-gfx mailing list
> > Intel-gfx@lists.freedesktop.org
> > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
> 
> 


_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

* Re: [PATCH I-g-t V2 1/2] tests: Add one ring sync case based on multi drm_fd to test ring semaphore sync under multi BSD rings
  2014-04-23  1:13       ` Zhao Yakui
@ 2014-04-23  9:17         ` Imre Deak
  0 siblings, 0 replies; 10+ messages in thread
From: Imre Deak @ 2014-04-23  9:17 UTC (permalink / raw)
  To: Zhao Yakui; +Cc: intel-gfx


[-- Attachment #1.1: Type: text/plain, Size: 9145 bytes --]

On Wed, 2014-04-23 at 09:13 +0800, Zhao Yakui wrote:
> On Tue, 2014-04-22 at 13:44 -0600, Daniel Vetter wrote:
> > On Tue, Apr 22, 2014 at 02:52:04PM +0300, Imre Deak wrote:
> > > On Tue, 2014-04-15 at 10:38 +0800, Zhao Yakui wrote:
> > > > The Broadwell GT3 machine has two independent BSD rings in kernel driver while
> > > > it is transparent to the user-space driver. In such case it needs to check
> > > > the ring sync between the two BSD rings. At the same time it also needs to
> > > > check the sync among the second BSD ring and the other rings.
> > > > 
> > > > Signed-off-by: Zhao Yakui <yakui.zhao@intel.com>
> > > > ---
> > > >  tests/Makefile.sources          |    1 +
> > > >  tests/gem_multi_bsd_sync_loop.c |  172 +++++++++++++++++++++++++++++++++++++++
> > > >  2 files changed, 173 insertions(+)
> > > >  create mode 100644 tests/gem_multi_bsd_sync_loop.c
> > > > 
> > > > diff --git a/tests/Makefile.sources b/tests/Makefile.sources
> > > > index c957ace..7cd9ca8 100644
> > > > --- a/tests/Makefile.sources
> > > > +++ b/tests/Makefile.sources
> > > > @@ -105,6 +105,7 @@ TESTS_progs = \
> > > >  	gem_render_tiled_blits \
> > > >  	gem_ring_sync_copy \
> > > >  	gem_ring_sync_loop \
> > > > +	gem_multi_bsd_sync_loop \
> > > >  	gem_seqno_wrap \
> > > >  	gem_set_tiling_vs_gtt \
> > > >  	gem_set_tiling_vs_pwrite \
> > > > diff --git a/tests/gem_multi_bsd_sync_loop.c b/tests/gem_multi_bsd_sync_loop.c
> > > > new file mode 100644
> > > > index 0000000..7f5b832
> > > > --- /dev/null
> > > > +++ b/tests/gem_multi_bsd_sync_loop.c
> > > > @@ -0,0 +1,172 @@
> > > > +/*
> > > > + * Copyright © 2014 Intel Corporation
> > > > + *
> > > > + * Permission is hereby granted, free of charge, to any person obtaining a
> > > > + * copy of this software and associated documentation files (the "Software"),
> > > > + * to deal in the Software without restriction, including without limitation
> > > > + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> > > > + * and/or sell copies of the Software, and to permit persons to whom the
> > > > + * Software is furnished to do so, subject to the following conditions:
> > > > + *
> > > > + * The above copyright notice and this permission notice (including the next
> > > > + * paragraph) shall be included in all copies or substantial portions of the
> > > > + * Software.
> > > > + *
> > > > + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> > > > + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> > > > + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> > > > + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> > > > + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> > > > + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> > > > + * IN THE SOFTWARE.
> > > > + *
> > > > + * Authors:
> > > > + *    Daniel Vetter <daniel.vetter@ffwll.ch> (based on gem_ring_sync_loop_*.c)
> > > > + *    Zhao Yakui <yakui.zhao@intel.com>
> > > > + *
> > > > + */
> > > > +
> > > > +#include <stdlib.h>
> > > > +#include <stdio.h>
> > > > +#include <string.h>
> > > > +#include <fcntl.h>
> > > > +#include <inttypes.h>
> > > > +#include <errno.h>
> > > > +#include <sys/stat.h>
> > > > +#include <sys/time.h>
> > > > +#include "drm.h"
> > > > +#include "ioctl_wrappers.h"
> > > > +#include "drmtest.h"
> > > > +#include "intel_bufmgr.h"
> > > > +#include "intel_batchbuffer.h"
> > > > +#include "intel_io.h"
> > > > +#include "i830_reg.h"
> > > > +#include "intel_chipset.h"
> > > > +
> > > > +static drm_intel_bufmgr *bufmgr;
> > > > +struct intel_batchbuffer *batch;
> > > > +static drm_intel_bo *target_buffer;
> > > > +
> > > > +#define NUM_FD	50
> > > > +
> > > > +static int mfd[NUM_FD];
> > > > +static drm_intel_bufmgr *mbufmgr[NUM_FD];
> > > > +static struct intel_batchbuffer *mbatch[NUM_FD];
> > > > +static drm_intel_bo *mbuffer[NUM_FD];
> > > > +
> > > > +
> > > > +/*
> > > > + * Testcase: Basic check of ring<->ring sync using a dummy reloc
> > > > + *
> > > > + * Extremely efficient at catching missed irqs with semaphores=0 ...
> > > > + */
> > > > +
> > > > +#define MI_COND_BATCH_BUFFER_END	(0x36<<23 | 1)
> > > > +#define MI_DO_COMPARE			(1<<21)
> > > > +
> > > > +static void
> > > > +store_dword_loop(int fd)
> > > > +{
> > > > +	int i;
> > > > +	int num_rings = gem_get_num_rings(fd);
> > > > +
> > > > +	srandom(0xdeadbeef);
> > > > +
> > > > +	for (i = 0; i < SLOW_QUICK(0x100000, 10); i++) {
> > > > +		int ring, mindex;
> > > > +		ring = random() % num_rings + 1;
> > > > +		mindex = random() % NUM_FD;
> > > > +		batch = mbatch[mindex];
> > > > +		if (ring == I915_EXEC_RENDER) {
> > > > +			BEGIN_BATCH(4);
> > > > +			OUT_BATCH(MI_COND_BATCH_BUFFER_END | MI_DO_COMPARE);
> > > > +			OUT_BATCH(0xffffffff); /* compare dword */
> > > > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > > > +					I915_GEM_DOMAIN_RENDER, 0);
> > > > +			OUT_BATCH(MI_NOOP);
> > > > +			ADVANCE_BATCH();
> > > > +		} else {
> > > > +			BEGIN_BATCH(4);
> > > > +			OUT_BATCH(MI_FLUSH_DW | 1);
> > > > +			OUT_BATCH(0); /* reserved */
> > > > +			OUT_RELOC(mbuffer[mindex], I915_GEM_DOMAIN_RENDER,
> > > > +					I915_GEM_DOMAIN_RENDER, 0);
> > > > +			OUT_BATCH(MI_NOOP | (1<<22) | (0xf));
> > > > +			ADVANCE_BATCH();
> > > > +		}
> > > > +		intel_batchbuffer_flush_on_ring(batch, ring);
> > > > +	}
> > > > +
> > > > +	drm_intel_bo_map(target_buffer, 0);
> > > > +	// map to force waiting on rendering
> > > > +	drm_intel_bo_unmap(target_buffer);
> > > 
> > > This test looks the same as dummy_reloc_loop_random_ring_multi_fd() that
> > > you add in patch 2/2, except the above two calls. Unless I'm missing
> > > something else .. Is there any reason why we don't want to make this
> > > also a subtest of gem_dummy_reloc_loop.c to avoid duplicating all the
> > > setup here?
> > 
> > Historical accident since for the other rings we also have this
> > duplication between inter-ring sync tests and ring/cpu sync tests with
> > dummy relocs. I don't mind really ;-)
> 
> Hi, Imre
> 
>     So based on Daniel's reply, we will use the separated test case to
> do inter-ring sync test.
> 
> Anyway, thanks for your comment.

Ok, based on the above this is also:
Reviewed-by: Imre Deak <imre.deak@intel.com>

> 
> Thanks.
>     Yakui
> 
> > -Daniel
> > 
> > > 
> > > --Imre
> > > 
> > > 
> > > > +}
> > > > +
> > > > +igt_simple_main
> > > > +{
> > > > +	int fd;
> > > > +	int devid;
> > > > +	int i;
> > > > +
> > > > +	fd = drm_open_any();
> > > > +	devid = intel_get_drm_devid(fd);
> > > > +	gem_require_ring(fd, I915_EXEC_BLT);
> > > > +
> > > > +
> > > > +	bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
> > > > +	igt_assert(bufmgr);
> > > > +	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
> > > > +
> > > > +
> > > > +	target_buffer = drm_intel_bo_alloc(bufmgr, "target bo", 4096, 4096);
> > > > +	igt_assert(target_buffer);
> > > > +
> > > > +	/* Create multiple drm_fd and map one gem_object among multi drm_fd */
> > > > +	{
> > > > +		unsigned int target_flink;
> > > > +		char buffer_name[32];
> > > > +		if (dri_bo_flink(target_buffer, &target_flink)) {
> > > > +			igt_assert(0);
> > > > +			printf("fail to get flink for target buffer\n");
> > > > +			goto fail_flink;
> > > > +		}
> > > > +		for (i = 0; i < NUM_FD; i++) {
> > > > +			mfd[i] = 0;
> > > > +			mbufmgr[i] = NULL;
> > > > +			mbuffer[i] = NULL;
> > > > +		}
> > > > +		for (i = 0; i < NUM_FD; i++) {
> > > > +			sprintf(buffer_name, "Target buffer %d\n", i);
> > > > +			mfd[i] = drm_open_any();
> > > > +			mbufmgr[i] = drm_intel_bufmgr_gem_init(mfd[i], 4096);
> > > > +			igt_assert(mbufmgr[i]);
> > > > +			drm_intel_bufmgr_gem_enable_reuse(mbufmgr[i]);
> > > > +			mbatch[i] = intel_batchbuffer_alloc(mbufmgr[i], devid);
> > > > +			igt_assert(mbufmgr[i]);
> > > > +			mbuffer[i] = intel_bo_gem_create_from_name(mbufmgr[i], buffer_name, target_flink);
> > > > +			igt_assert(mbuffer[i]);
> > > > +		}
> > > > +	}
> > > > +
> > > > +	store_dword_loop(fd);
> > > > +
> > > > +	{
> > > > +		for (i = 0; i < NUM_FD; i++) {
> > > > +			dri_bo_unreference(mbuffer[i]);
> > > > +			intel_batchbuffer_free(mbatch[i]);
> > > > +			drm_intel_bufmgr_destroy(mbufmgr[i]);
> > > > +			close(mfd[i]);
> > > > +		}
> > > > +	}
> > > > +	drm_intel_bo_unreference(target_buffer);
> > > > +	drm_intel_bufmgr_destroy(bufmgr);
> > > > +
> > > > +	close(fd);
> > > > +	return;
> > > > +
> > > > +fail_flink:
> > > > +	drm_intel_bo_unreference(target_buffer);
> > > > +	drm_intel_bufmgr_destroy(bufmgr);
> > > > +
> > > > +	close(fd);
> > > > +}
> > > 
> > 
> > 
> > 
> > > _______________________________________________
> > > Intel-gfx mailing list
> > > Intel-gfx@lists.freedesktop.org
> > > http://lists.freedesktop.org/mailman/listinfo/intel-gfx
> > 
> > 
> 
> 


[-- Attachment #1.2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 490 bytes --]

[-- Attachment #2: Type: text/plain, Size: 159 bytes --]

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
http://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 10+ messages in thread

end of thread, other threads:[~2014-04-23  9:17 UTC | newest]

Thread overview: 10+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2014-04-15  2:38 [PATCH I-g-t V2 0/2] Tests: Add test cases based on multi drm_fd to test sync Zhao Yakui
2014-04-15  2:38 ` [PATCH I-g-t V2 1/2] tests: Add one ring sync case based on multi drm_fd to test ring semaphore sync under multi BSD rings Zhao Yakui
2014-04-22 11:52   ` Imre Deak
2014-04-22 19:44     ` Daniel Vetter
2014-04-23  1:13       ` Zhao Yakui
2014-04-23  9:17         ` Imre Deak
2014-04-15  2:38 ` [PATCH I-g-t V2 2/2] tests/gem_dummy_reloc_loop: Add one subtest based on multi drm_fd to test CPU<->GPU " Zhao Yakui
2014-04-22 12:05   ` Imre Deak
2014-04-22 19:48     ` Daniel Vetter
2014-04-23  0:26       ` Zhao Yakui

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.