* [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
@ 2015-11-24 17:42 marius.c.vlad
  2015-11-24 22:57 ` Imre Deak
                   ` (2 more replies)
  0 siblings, 3 replies; 12+ messages in thread
From: marius.c.vlad @ 2015-11-24 17:42 UTC (permalink / raw)
  To: intel-gfx, imre.deak

From: Marius Vlad <marius.c.vlad@intel.com>

Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
---
 tests/pm_rpm.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 90 insertions(+)

diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
index c4fb19c..86d16ad 100644
--- a/tests/pm_rpm.c
+++ b/tests/pm_rpm.c
@@ -1729,6 +1729,90 @@ static void planes_subtest(bool universal, bool dpms)
 	}
 }
 
+static void pm_test_tiling(void)
+{
+	uint32_t handle;
+	uint8_t *gem_buf;
+	uint32_t i, tiling_modes[3] = {
+		I915_TILING_NONE,
+		I915_TILING_X,
+		I915_TILING_Y,
+	};
+	uint32_t ti, sw, j;
+	uint32_t obj_size = (8 * 1024 * 1024);
+
+	handle = gem_create(drm_fd, obj_size);
+
+	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
+		disable_all_screens_and_wait(&ms_data);
+
+		if (tiling_modes[i] == 0) {
+			gem_set_tiling(drm_fd, handle, tiling_modes[i], 0);
+
+			gem_buf = gem_mmap__cpu(drm_fd, handle, 0,
+					obj_size, PROT_WRITE);
+
+			for (j = 0; j < obj_size; j++)
+				gem_buf[j] = j & 0xff;
+
+			igt_assert(munmap(gem_buf, obj_size) == 0);
+
+			gem_get_tiling(drm_fd, handle, &ti, &sw);
+			igt_assert(tiling_modes[i] == ti);
+		} else {
+			gem_set_tiling(drm_fd, handle, tiling_modes[i], 512);
+			gem_buf = gem_mmap__cpu(drm_fd, handle, 0,
+					obj_size, PROT_WRITE);
+
+			for (j = 0; j < obj_size; j++)
+				gem_buf[j] = j & 0xff;
+
+			igt_assert(munmap(gem_buf, obj_size) == 0);
+
+			gem_get_tiling(drm_fd, handle, &ti, &sw);
+			igt_assert(tiling_modes[i] == ti);
+		}
+
+		enable_one_screen_and_wait(&ms_data);
+	}
+
+	gem_close(drm_fd, handle);
+}
+
+static void pm_test_caching(void)
+{
+	uint32_t handle, got_caching, obj_size = (8 * 1024 * 1024);
+	void *src_buf;
+	uint32_t i, cache_levels[3] = {
+		I915_CACHING_NONE,
+		I915_CACHING_CACHED,
+		I915_CACHING_DISPLAY,
+	};
+
+	handle = gem_create(drm_fd, obj_size);
+	src_buf = malloc(obj_size);
+
+	memset(src_buf, 0x65, obj_size);
+
+	for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
+		disable_all_screens_and_wait(&ms_data);
+
+		gem_set_caching(drm_fd, handle, cache_levels[i]);
+		gem_write(drm_fd, handle, 0, src_buf, obj_size);
+
+		got_caching = gem_get_caching(drm_fd, handle);
+
+		enable_one_screen_and_wait(&ms_data);
+
+		/* skip CACHING_DISPLAY, some platforms do not have it */
+		if (i != 2)
+			igt_assert(got_caching == cache_levels[i]);
+	}
+
+	free(src_buf);
+	gem_close(drm_fd, handle);
+}
+
 static void fences_subtest(bool dpms)
 {
 	int i;
@@ -1927,6 +2011,12 @@ int main(int argc, char *argv[])
 	igt_subtest("gem-execbuf-stress-extra-wait")
 		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
 
+	/* power-wake reference tests */
+	igt_subtest("pm-tiling")
+		pm_test_tiling();
+	igt_subtest("pm-caching")
+		pm_test_caching();
+
 	igt_fixture
 		teardown_environment();
 
-- 
2.6.2


* Re: [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-24 17:42 [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s) marius.c.vlad
@ 2015-11-24 22:57 ` Imre Deak
  2015-11-25 17:16   ` [PATCH i-g-t v2] " marius.c.vlad
  2015-11-26 16:32 ` Marius Vlad
  2015-11-27 18:08 ` [PATCH i-g-t v4] tests/pm_rpm tests for set_caching and set_tiling Marius Vlad
  2 siblings, 1 reply; 12+ messages in thread
From: Imre Deak @ 2015-11-24 22:57 UTC (permalink / raw)
  To: marius.c.vlad, intel-gfx

Hi,

thanks for the patch. Looks ok in general; I have a few comments below.

On Tue, 2015-11-24 at 19:42 +0200, marius.c.vlad@intel.com wrote:
> From: Marius Vlad <marius.c.vlad@intel.com>
> 
> Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
> ---
>  tests/pm_rpm.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 90 insertions(+)
> 
> diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
> index c4fb19c..86d16ad 100644
> --- a/tests/pm_rpm.c
> +++ b/tests/pm_rpm.c
> @@ -1729,6 +1729,90 @@ static void planes_subtest(bool universal, bool dpms)
>  	}
>  }
>  
> +static void pm_test_tiling(void)
> +{
> +	uint32_t handle;
> +	uint8_t *gem_buf;
> +	uint32_t i, tiling_modes[3] = {
> +		I915_TILING_NONE,
> +		I915_TILING_X,
> +		I915_TILING_Y,
> +	};
> +	uint32_t ti, sw, j;
> +	uint32_t obj_size = (8 * 1024 * 1024);
> +
> +	handle = gem_create(drm_fd, obj_size);
> +
> +	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
> +		disable_all_screens_and_wait(&ms_data);
> +
> +		if (tiling_modes[i] == 0) {

Better not to hardcode.

> +			gem_set_tiling(drm_fd, handle, tiling_modes[i], 0);

You can just pass the same stride always, flattening the if-else.

> +
> +			gem_buf = gem_mmap__cpu(drm_fd, handle, 0,
> +					obj_size, PROT_WRITE);

Mapping to the CPU doesn't make a difference in this test case. On old
HW we could make sure that there will be an unbind during the IOCTL;
for that you need an alignment that isn't valid for the tiling mode.
The easiest would be to map a few smaller-sized objects to GGTT and do
a memset on each; not sure if there is a more precise way. Also this
should be done before calling gem_set_tiling()
and disable_all_screens_and_wait().
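
A minimal sketch of that idea, reusing the drm_fd/ms_data globals and
the GEM helpers already used in this file (object sizes purely
illustrative):

	uint32_t small_handles[4];
	uint8_t *small_bufs[4];
	uint32_t k, sz = 16 * 1024;

	/* bind a few small objects to the GGTT and touch them */
	for (k = 0; k < ARRAY_SIZE(small_handles); k++, sz <<= 1) {
		small_handles[k] = gem_create(drm_fd, sz);
		small_bufs[k] = gem_mmap__gtt(drm_fd, small_handles[k],
					      sz, PROT_WRITE);
		memset(small_bufs[k], 0x65, sz);
	}

	/* only now turn the screens off and exercise gem_set_tiling() */
	disable_all_screens_and_wait(&ms_data);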

> +
> +			for (j = 0; j < obj_size; j++)
> +				gem_buf[j] = j & 0xff;
> +
> +			igt_assert(munmap(gem_buf, obj_size) == 0);
> +
> +			gem_get_tiling(drm_fd, handle, &ti, &sw);
> +			igt_assert(tiling_modes[i] == ti);
> +		} else {
> +			gem_set_tiling(drm_fd, handle, tiling_modes[i], 512);
> +			gem_buf = gem_mmap__cpu(drm_fd, handle, 0,
> +					obj_size, PROT_WRITE);
> +
> +			for (j = 0; j < obj_size; j++)
> +				gem_buf[j] = j & 0xff;
> +
> +			igt_assert(munmap(gem_buf, obj_size) == 0);
> +
> +			gem_get_tiling(drm_fd, handle, &ti, &sw);
> +			igt_assert(tiling_modes[i] == ti);
> +		}
> +
> +		enable_one_screen_and_wait(&ms_data);
> +	}
> +
> +	gem_close(drm_fd, handle);
> +}
> +
> +static void pm_test_caching(void)
> +{
> +	uint32_t handle, got_caching, obj_size = (8 * 1024 * 1024);
> +	void *src_buf;
> +	uint32_t i, cache_levels[3] = {
> +		I915_CACHING_NONE,
> +		I915_CACHING_CACHED,
> +		I915_CACHING_DISPLAY,
> +	};
> +
> +	handle = gem_create(drm_fd, obj_size);
> +	src_buf = malloc(obj_size);
> +
> +	memset(src_buf, 0x65, obj_size);
> +
> +	for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
> +		disable_all_screens_and_wait(&ms_data);
> +
> +		gem_set_caching(drm_fd, handle, cache_levels[i]);
> +		gem_write(drm_fd, handle, 0, src_buf, obj_size);

As above, we need to force an unbind here, by mapping the object to
GGTT and doing a memset on it before calling
disable_all_screens_and_wait(). We don't need a specific object
alignment here.
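
In sketch form (handle and obj_size as in the patch above; the GTT
mapping is the part being added, for illustration only):

	uint8_t *gem_buf;

	/* bind the object to the GGTT and touch it first ... */
	gem_buf = gem_mmap__gtt(drm_fd, handle, obj_size, PROT_WRITE);
	memset(gem_buf, 0x65, obj_size);

	/* ... so the set_caching IOCTL below can trigger an unbind */
	disable_all_screens_and_wait(&ms_data);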

> +
> +		got_caching = gem_get_caching(drm_fd, handle);
> +
> +		enable_one_screen_and_wait(&ms_data);
> +
> +		/* skip CACHING_DISPLAY, some platforms do not have it */
> +		if (i != 2)

Better not to hardcode.

> +			igt_assert(got_caching == cache_levels[i]);

You could make it more precise by requiring either CACHING_DISPLAY or
CACHING_NONE in this case.
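
I.e. something along these lines (this is what the later revisions in
this thread end up doing):

	if (cache_levels[i] == I915_CACHING_DISPLAY)
		igt_assert(got_caching == I915_CACHING_NONE ||
			   got_caching == I915_CACHING_DISPLAY);
	else
		igt_assert(got_caching == cache_levels[i]);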

--Imre

> +	}
> +
> +	free(src_buf);
> +	gem_close(drm_fd, handle);
> +}
> +
>  static void fences_subtest(bool dpms)
>  {
>  	int i;
> @@ -1927,6 +2011,12 @@ int main(int argc, char *argv[])
>  	igt_subtest("gem-execbuf-stress-extra-wait")
>  		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
>  
> +	/* power-wake reference tests */
> +	igt_subtest("pm-tiling")
> +		pm_test_tiling();
> +	igt_subtest("pm-caching")
> +		pm_test_caching();
> +
>  	igt_fixture
>  		teardown_environment();
>  

* [PATCH i-g-t v2] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-24 22:57 ` Imre Deak
@ 2015-11-25 17:16   ` marius.c.vlad
  2015-11-25 17:16     ` [PATCH i-g-t] " marius.c.vlad
  0 siblings, 1 reply; 12+ messages in thread
From: marius.c.vlad @ 2015-11-25 17:16 UTC (permalink / raw)
  To: intel-gfx, imre.deak

Second attempt using Imre's hints.

* [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-25 17:16   ` [PATCH i-g-t v2] " marius.c.vlad
@ 2015-11-25 17:16     ` marius.c.vlad
  2015-11-25 20:08       ` Imre Deak
  0 siblings, 1 reply; 12+ messages in thread
From: marius.c.vlad @ 2015-11-25 17:16 UTC (permalink / raw)
  To: intel-gfx, imre.deak

From: Marius Vlad <marius.c.vlad@intel.com>

Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
---
 tests/pm_rpm.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 120 insertions(+)

diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
index c4fb19c..157cf29 100644
--- a/tests/pm_rpm.c
+++ b/tests/pm_rpm.c
@@ -1729,6 +1729,120 @@ static void planes_subtest(bool universal, bool dpms)
 	}
 }
 
+static void pm_test_tiling(void)
+{
+	uint32_t *handles;
+	uint8_t **gem_bufs;
+
+	int max_gem_objs = 0;
+	uint8_t off_bit = 20;
+	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
+
+	uint32_t i, j, tiling_modes[3] = {
+		I915_TILING_NONE,
+		I915_TILING_X,
+		I915_TILING_Y,
+	};
+	uint32_t ti, sw;
+
+	/* default value */
+	uint32_t stride = 1024;
+
+	/* calculate how many objects we can map */
+	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
+		;
+
+	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
+	handles = malloc(sizeof(uint32_t) * max_gem_objs);
+
+	/* map to gtt and store some random data */
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		handles[i] = gem_create(drm_fd, j);
+		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
+		memset(gem_bufs[i], 0x65, j);
+	}
+
+	/* try to set different tiling for each handle */
+	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
+		disable_all_screens_and_wait(&ms_data);
+
+		for (j = 0; j < max_gem_objs; j++) {
+			gem_set_tiling(drm_fd, handles[j], tiling_modes[i], stride);
+
+			gem_get_tiling(drm_fd, handles[j], &ti, &sw);
+			igt_assert(tiling_modes[i] == ti);
+		}
+
+		enable_one_screen_and_wait(&ms_data);
+	}
+
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		igt_assert(munmap(gem_bufs[i], j) == 0);
+		gem_close(drm_fd, handles[i]);
+	}
+
+	free(gem_bufs);
+	free(handles);
+}
+
+static void pm_test_caching(void)
+{
+	uint32_t *handles;
+	uint8_t **gem_bufs;
+	int8_t has_caching_display = -1;
+
+	uint32_t i, j, got_caching;
+	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
+	uint32_t cache_levels[3] = {
+		I915_CACHING_NONE,
+		I915_CACHING_CACHED,		/* LLC caching */
+		I915_CACHING_DISPLAY,		/* eDRAM caching */
+	};
+
+	int max_gem_objs = 0;
+	uint8_t off_bit = 20;
+
+	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
+		;
+
+	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
+	handles = malloc(sizeof(uint32_t) * max_gem_objs);
+
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		handles[i] = gem_create(drm_fd, j);
+		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
+		memset(gem_bufs[i], 0x65, j);
+	}
+
+	/* figure out if we have cache display available on the platform */
+	gem_set_caching(drm_fd, handles[0], I915_CACHING_DISPLAY);
+	if (gem_get_caching(drm_fd, handles[0]))
+		has_caching_display++;
+
+	for (i = 0; i < ARRAY_SIZE(cache_levels) + has_caching_display; i++) {
+		disable_all_screens_and_wait(&ms_data);
+
+		for (j = 0; j < max_gem_objs; j++) {
+			gem_set_caching(drm_fd, handles[j], cache_levels[i]);
+
+			igt_debug("Verifying cache for handle %u, level %u\n", j, i);
+			got_caching = gem_get_caching(drm_fd, handles[j]);
+
+			igt_assert(got_caching == cache_levels[i]);
+		}
+
+		enable_one_screen_and_wait(&ms_data);
+	}
+
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		igt_assert(munmap(gem_bufs[i], j) == 0);
+		gem_close(drm_fd, handles[i]);
+	}
+
+	free(handles);
+	free(gem_bufs);
+}
+
 static void fences_subtest(bool dpms)
 {
 	int i;
@@ -1927,6 +2041,12 @@ int main(int argc, char *argv[])
 	igt_subtest("gem-execbuf-stress-extra-wait")
 		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
 
+	/* power-wake reference tests */
+	igt_subtest("pm-tiling")
+		pm_test_tiling();
+	igt_subtest("pm-caching")
+		pm_test_caching();
+
 	igt_fixture
 		teardown_environment();
 
-- 
2.6.2


* Re: [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-25 17:16     ` [PATCH i-g-t] " marius.c.vlad
@ 2015-11-25 20:08       ` Imre Deak
  2015-11-26 10:55         ` Marius Vlad
  0 siblings, 1 reply; 12+ messages in thread
From: Imre Deak @ 2015-11-25 20:08 UTC (permalink / raw)
  To: marius.c.vlad, intel-gfx

On ke, 2015-11-25 at 19:16 +0200, marius.c.vlad@intel.com wrote:
> From: Marius Vlad <marius.c.vlad@intel.com>
> 
> Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
> ---
>  tests/pm_rpm.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 120 insertions(+)
> 
> diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
> index c4fb19c..157cf29 100644
> --- a/tests/pm_rpm.c
> +++ b/tests/pm_rpm.c
> @@ -1729,6 +1729,120 @@ static void planes_subtest(bool universal, bool dpms)
>  	}
>  }
>  
> +static void pm_test_tiling(void)
> +{
> +	uint32_t *handles;
> +	uint8_t **gem_bufs;
> +
> +	int max_gem_objs = 0;
> +	uint8_t off_bit = 20;
> +	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
> +
> +	uint32_t i, j, tiling_modes[3] = {
> +		I915_TILING_NONE,
> +		I915_TILING_X,
> +		I915_TILING_Y,
> +	};
> +	uint32_t ti, sw;
> +
> +	/* default value */
> +	uint32_t stride = 1024;
> +
> +	/* calculate how many objects we can map */
> +	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
> +		;

With these sizes we may end up with all objects properly aligned;
that's why I suggested smaller objects. Based on I830_FENCE_START_MASK
we could allocate, for example, sizes from 16kB to 256kB.
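
E.g. the sizing loop could become something like this (matching what a
later revision in this thread switches to):

	uint8_t off_bit = 14;				/* 16kB */
	uint32_t gtt_obj_max_size = (256 * 1024);	/* 256kB */

	/* calculate how many objects we can map */
	for (j = 1 << off_bit; j <= gtt_obj_max_size;
	     j <<= 1, max_gem_objs++)
		;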

> +
> +	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
> +	handles = malloc(sizeof(uint32_t) * max_gem_objs);

Nitpick: sizeof(*ptr) is safer and you could've used calloc in both
cases.
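
That is, roughly:

	gem_bufs = calloc(max_gem_objs, sizeof(*gem_bufs));
	handles = calloc(max_gem_objs, sizeof(*handles));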

> +
> +	/* map to gtt and store some random data */
> +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> +		handles[i] = gem_create(drm_fd, j);
> +		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
> +		memset(gem_bufs[i], 0x65, j);
> +	}
> +
> +	/* try to set different tiling for each handle */
> +	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
> +		disable_all_screens_and_wait(&ms_data);
> +
> +		for (j = 0; j < max_gem_objs; j++) {
> +			gem_set_tiling(drm_fd, handles[j], tiling_modes[i], stride);
> +
> +			gem_get_tiling(drm_fd, handles[j], &ti, &sw);
> +			igt_assert(tiling_modes[i] == ti);
> +		}
> +
> +		enable_one_screen_and_wait(&ms_data);

Ok, but after the second iteration all objects could be properly
aligned, so it's better to close/realloc/memset the objects in each
iteration.
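
A sketch of the reshuffled loop, roughly as the final revision of the
patch ends up doing it (k is an extra size variable; the set/get
tiling asserts stay as before):

	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
		/* recreate, mmap and dirty the objects each iteration */
		for (j = 0, k = 1 << off_bit;
		     k <= gtt_obj_max_size; k <<= 1, j++) {
			handles[j] = gem_create(drm_fd, k);
			gem_bufs[j] = gem_mmap__gtt(drm_fd, handles[j],
						    k, PROT_WRITE);
			memset(gem_bufs[j], 0x0, k);
		}

		disable_all_screens_and_wait(&ms_data);
		/* set/get tiling and assert as before */
		enable_one_screen_and_wait(&ms_data);

		for (j = 0, k = 1 << off_bit;
		     k <= gtt_obj_max_size; k <<= 1, j++) {
			igt_assert(munmap(gem_bufs[j], k) == 0);
			gem_close(drm_fd, handles[j]);
		}
	}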

> +	}
> +
> +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> +		igt_assert(munmap(gem_bufs[i], j) == 0);
> +		gem_close(drm_fd, handles[i]);
> +	}
> +
> +	free(gem_bufs);
> +	free(handles);
> +}
> +
> +static void pm_test_caching(void)
> +{
> +	uint32_t *handles;
> +	uint8_t **gem_bufs;
> +	int8_t has_caching_display = -1;
> +
> +	uint32_t i, j, got_caching;
> +	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
> +	uint32_t cache_levels[3] = {
> +		I915_CACHING_NONE,
> +		I915_CACHING_CACHED,		/* LLC caching */
> +		I915_CACHING_DISPLAY,		/* eDRAM caching */
> +	};
> +
> +	int max_gem_objs = 0;
> +	uint8_t off_bit = 20;
> +
> +	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
> +		;

No need to bother about alignment here, so we can just use a single
16kB object for example.

> +
> +	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
> +	handles = malloc(sizeof(uint32_t) * max_gem_objs);
> +
> +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> +		handles[i] = gem_create(drm_fd, j);
> +		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
> +		memset(gem_bufs[i], 0x65, j);
> +	}
> +
> +	/* figure out if we have cache display available on the platform */
> +	gem_set_caching(drm_fd, handles[0], I915_CACHING_DISPLAY);
> +	if (gem_get_caching(drm_fd, handles[0]))

No need to hardcode I915_CACHING_NONE here. Also, I liked the original
version better: check this everywhere, accepting both CACHING_DISPLAY
and CACHING_NONE as a result.

> +		has_caching_display++;
> +
> +	for (i = 0; i < ARRAY_SIZE(cache_levels) + has_caching_display; i++) {
> +		disable_all_screens_and_wait(&ms_data);
> +
> +		for (j = 0; j < max_gem_objs; j++) {
> +			gem_set_caching(drm_fd, handles[j], cache_levels[i]);
> +
> +			igt_debug("Verifying cache for handle %u, level %u\n", j, i);
> +			got_caching = gem_get_caching(drm_fd, handles[j]);
> +
> +			igt_assert(got_caching == cache_levels[i]);
> +		}
> +
> +		enable_one_screen_and_wait(&ms_data);

The object can be unbound after the IOCTL, so you need to do a memset
at the beginning of each iteration.
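
For example, in the caching loop (names follow the later revision of
the patch):

	for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
		/* touch the object again so it gets rebound before suspend */
		memset(gem_buf, 16 << i, gtt_obj_max_size);

		disable_all_screens_and_wait(&ms_data);
		gem_set_caching(drm_fd, handle, cache_levels[i]);
		got_caching = gem_get_caching(drm_fd, handle);
		/* assert on got_caching as discussed above */
		enable_one_screen_and_wait(&ms_data);
	}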

> +	}
> +
> +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> +		igt_assert(munmap(gem_bufs[i], j) == 0);
> +		gem_close(drm_fd, handles[i]);
> +	}
> +
> +	free(handles);
> +	free(gem_bufs);
> +}
> +
>  static void fences_subtest(bool dpms)
>  {
>  	int i;
> @@ -1927,6 +2041,12 @@ int main(int argc, char *argv[])
>  	igt_subtest("gem-execbuf-stress-extra-wait")
>  		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
>  
> +	/* power-wake reference tests */
> +	igt_subtest("pm-tiling")
> +		pm_test_tiling();
> +	igt_subtest("pm-caching")
> +		pm_test_caching();
> +
>  	igt_fixture
>  		teardown_environment();
>  

* Re: [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-25 20:08       ` Imre Deak
@ 2015-11-26 10:55         ` Marius Vlad
  2015-11-26 11:57           ` Imre Deak
  0 siblings, 1 reply; 12+ messages in thread
From: Marius Vlad @ 2015-11-26 10:55 UTC (permalink / raw)
  To: Imre Deak; +Cc: intel-gfx



On Wed, Nov 25, 2015 at 10:08:21PM +0200, Imre Deak wrote:
> On ke, 2015-11-25 at 19:16 +0200, marius.c.vlad@intel.com wrote:
> > From: Marius Vlad <marius.c.vlad@intel.com>
> > 
> > Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
> > ---
> >  tests/pm_rpm.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> >  1 file changed, 120 insertions(+)
> > 
> > diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
> > index c4fb19c..157cf29 100644
> > --- a/tests/pm_rpm.c
> > +++ b/tests/pm_rpm.c
> > @@ -1729,6 +1729,120 @@ static void planes_subtest(bool universal, bool dpms)
> >  	}
> >  }
> >  
> > +static void pm_test_tiling(void)
> > +{
> > +	uint32_t *handles;
> > +	uint8_t **gem_bufs;
> > +
> > +	int max_gem_objs = 0;
> > +	uint8_t off_bit = 20;
> > +	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
> > +
> > +	uint32_t i, j, tiling_modes[3] = {
> > +		I915_TILING_NONE,
> > +		I915_TILING_X,
> > +		I915_TILING_Y,
> > +	};
> > +	uint32_t ti, sw;
> > +
> > +	/* default value */
> > +	uint32_t stride = 1024;
> > +
> > +	/* calculate how many objects we can map */
> > +	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
> > +		;
> 
> With these sizes we may end up with all objects properly aligned,
> that's why I suggested smaller objects. Based on I830_FENCE_START_MASK
> we could allocate for example starting from 16kB to 256kB.
> 

Initially, I tried smaller sizes, but the assertion(s) failed.
I'll try as you suggested.

> > +
> > +	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
> > +	handles = malloc(sizeof(uint32_t) * max_gem_objs);
> 
> Nitpick: sizeof(*ptr) is safer and you could've used calloc in both
> cases.

Indeed.

> 
> > +
> > +	/* map to gtt and store some random data */
> > +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> > +		handles[i] = gem_create(drm_fd, j);
> > +		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
> > +		memset(gem_bufs[i], 0x65, j);
> > +	}
> > +
> > +	/* try to set different tiling for each handle */
> > +	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
> > +		disable_all_screens_and_wait(&ms_data);
> > +
> > +		for (j = 0; j < max_gem_objs; j++) {
> > +			gem_set_tiling(drm_fd, handles[j], tiling_modes[i], stride);
> > +
> > +			gem_get_tiling(drm_fd, handles[j], &ti, &sw);
> > +			igt_assert(tiling_modes[i] == ti);
> > +		}
> > +
> > +		enable_one_screen_and_wait(&ms_data);
> 
> Ok, but after the second iteration all objects could be properly
> aligned, so it's better to close/realloc/memset the objects in each
> iteration.

Alright. I'll do that.

> 
> > +	}
> > +
> > +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> > +		igt_assert(munmap(gem_bufs[i], j) == 0);
> > +		gem_close(drm_fd, handles[i]);
> > +	}
> > +
> > +	free(gem_bufs);
> > +	free(handles);
> > +}
> > +
> > +static void pm_test_caching(void)
> > +{
> > +	uint32_t *handles;
> > +	uint8_t **gem_bufs;
> > +	int8_t has_caching_display = -1;
> > +
> > +	uint32_t i, j, got_caching;
> > +	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
> > +	uint32_t cache_levels[3] = {
> > +		I915_CACHING_NONE,
> > +		I915_CACHING_CACHED,		/* LLC caching */
> > +		I915_CACHING_DISPLAY,		/* eDRAM caching */
> > +	};
> > +
> > +	int max_gem_objs = 0;
> > +	uint8_t off_bit = 20;
> > +
> > +	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
> > +		;
> 
> No need to bother about alignment here, so we can just use a single
> 16kB object for example.

Alright.

> 
> > +
> > +	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
> > +	handles = malloc(sizeof(uint32_t) * max_gem_objs);
> > +
> > +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> > +		handles[i] = gem_create(drm_fd, j);
> > +		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
> > +		memset(gem_bufs[i], 0x65, j);
> > +	}
> > +
> > +	/* figure out if we have cache display available on the platform */
> > +	gem_set_caching(drm_fd, handles[0], I915_CACHING_DISPLAY);
> > +	if (gem_get_caching(drm_fd, handles[0]))
> 
> No need to hardcode I915_CACHING_NONE here. Also I liked the original
> version to check this everywhere better, by accepting both
> CACHING_DISPLAY and CACHING_NONE as a result.

I don't think I get it.

As far as I understand, CACHING_DISPLAY will fall back to CACHING_NONE
if the platform doesn't have support for it. Is there a proper way to
check for this?
igt_require()/igt_skip_on()/igt_require_f() can indeed be used to bypass
certain test(s), but there has to be a way to determine a priori whether
the platform indeed supports CACHING_DISPLAY, before asserting the
status.

Most likely this kind of issue has come up in other circumstances...

> 
> > +		has_caching_display++;
> > +
> > +	for (i = 0; i < ARRAY_SIZE(cache_levels) + has_caching_display; i++) {
> > +		disable_all_screens_and_wait(&ms_data);
> > +
> > +		for (j = 0; j < max_gem_objs; j++) {
> > +			gem_set_caching(drm_fd, handles[j], cache_levels[i]);
> > +
> > +			igt_debug("Verying cache for handle %u, level %u\n", j, i);
> > +			got_caching = gem_get_caching(drm_fd, handles[j]);
> > +
> > +			igt_assert(got_caching == cache_levels[i]);
> > +		}
> > +
> > +		enable_one_screen_and_wait(&ms_data);
> 
> The object can be unbound after the IOCTL so you need to do a memset at
> the begin of each iteration.

Okay. Will redo and send another try. Thanks for taking the time to
review!

> 
> > +	}
> > +
> > +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> > +		igt_assert(munmap(gem_bufs[i], j) == 0);
> > +		gem_close(drm_fd, handles[i]);
> > +	}
> > +
> > +	free(handles);
> > +	free(gem_bufs);
> > +}
> > +
> >  static void fences_subtest(bool dpms)
> >  {
> >  	int i;
> > @@ -1927,6 +2041,12 @@ int main(int argc, char *argv[])
> >  	igt_subtest("gem-execbuf-stress-extra-wait")
> >  		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
> >  
> > +	/* power-wake reference tests */
> > +	igt_subtest("pm-tiling")
> > +		pm_test_tiling();
> > +	igt_subtest("pm-caching")
> > +		pm_test_caching();
> > +
> >  	igt_fixture
> >  		teardown_environment();
> >  


* Re: [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-26 10:55         ` Marius Vlad
@ 2015-11-26 11:57           ` Imre Deak
  0 siblings, 0 replies; 12+ messages in thread
From: Imre Deak @ 2015-11-26 11:57 UTC (permalink / raw)
  To: Marius Vlad; +Cc: intel-gfx

On to, 2015-11-26 at 12:55 +0200, Marius Vlad wrote:
> On Wed, Nov 25, 2015 at 10:08:21PM +0200, Imre Deak wrote:
> > On ke, 2015-11-25 at 19:16 +0200, marius.c.vlad@intel.com wrote:
> > > From: Marius Vlad <marius.c.vlad@intel.com>
> > > 
> > > Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
> > > ---
> > >  tests/pm_rpm.c | 120
> > > +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
> > >  1 file changed, 120 insertions(+)
> > > 
> > > diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
> > > index c4fb19c..157cf29 100644
> > > --- a/tests/pm_rpm.c
> > > +++ b/tests/pm_rpm.c
> > > @@ -1729,6 +1729,120 @@ static void planes_subtest(bool
> > > universal, bool dpms)
> > >  	}
> > >  }
> > >  
> > > +static void pm_test_tiling(void)
> > > +{
> > > +	uint32_t *handles;
> > > +	uint8_t **gem_bufs;
> > > +
> > > +	int max_gem_objs = 0;
> > > +	uint8_t off_bit = 20;
> > > +	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
> > > +
> > > +	uint32_t i, j, tiling_modes[3] = {
> > > +		I915_TILING_NONE,
> > > +		I915_TILING_X,
> > > +		I915_TILING_Y,
> > > +	};
> > > +	uint32_t ti, sw;
> > > +
> > > +	/* default value */
> > > +	uint32_t stride = 1024;
> > > +
> > > +	/* calculate how many objects we can map */
> > > +	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
> > > +		;
> > 
> > With these sizes we may end up with all objects properly aligned,
> > that's why I suggested smaller objects. Based on
> > I830_FENCE_START_MASK
> > we could allocate for example starting from 16kB to 256kB.
> > 
> 
> Initially, I've tried with smaller sizes, but the assertion(s)
> failed.
> I'll try as you suggested.

Hm, there shouldn't be any asserts. The only practical restriction is
on the stride, which on new platforms should be a multiple of 128 or
512 bytes depending on the tiling mode, and a power of two on GEN < 4.
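
For reference, the stride value the later revisions settle on satisfies
both constraints:

	/* multiple of both 128 and 512 bytes, and a power of two */
	uint32_t stride = 512;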

> > > +
> > > +	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
> > > +	handles = malloc(sizeof(uint32_t) * max_gem_objs);
> > 
> > Nitpick: sizeof(*ptr) is safer and you could've used calloc in both
> > cases.
> 
> Indeed.
> 
> > 
> > > +
> > > +	/* map to gtt and store some random data */
> > > +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> > > +		handles[i] = gem_create(drm_fd, j);
> > > +		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
> > > +		memset(gem_bufs[i], 0x65, j);
> > > +	}
> > > +
> > > +	/* try to set different tiling for each handle */
> > > +	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
> > > +		disable_all_screens_and_wait(&ms_data);
> > > +
> > > +		for (j = 0; j < max_gem_objs; j++) {
> > > +			gem_set_tiling(drm_fd, handles[j], tiling_modes[i], stride);
> > > +
> > > +			gem_get_tiling(drm_fd, handles[j], &ti, &sw);
> > > +			igt_assert(tiling_modes[i] == ti);
> > > +		}
> > > +
> > > +		enable_one_screen_and_wait(&ms_data);
> > 
> > Ok, but after the second iteration all objects could be properly
> > aligned, so it's better to close/realloc/memset the objects in each
> > iteration.
> 
> Alright. I'll do that.
> 
> > 
> > > +	}
> > > +
> > > +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j
> > > <<= 1, i++) {
> > > +		igt_assert(munmap(gem_bufs[i], j) == 0);
> > > +		gem_close(drm_fd, handles[i]);
> > > +	}
> > > +
> > > +	free(gem_bufs);
> > > +	free(handles);
> > > +}
> > > +
> > > +static void pm_test_caching(void)
> > > +{
> > > +	uint32_t *handles;
> > > +	uint8_t **gem_bufs;
> > > +	int8_t has_caching_display = -1;
> > > +
> > > +	uint32_t i, j, got_caching;
> > > +	uint32_t gtt_obj_max_size = (16 * 1024 * 1024);
> > > +	uint32_t cache_levels[3] = {
> > > +		I915_CACHING_NONE,
> > > +		I915_CACHING_CACHED,		/* LLC
> > > caching */
> > > +		I915_CACHING_DISPLAY,		/* eDRAM
> > > caching */
> > > +	};
> > > +
> > > +	int max_gem_objs = 0;
> > > +	uint8_t off_bit = 20;
> > > +
> > > +	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1,
> > > max_gem_objs++)
> > > +		;
> > 
> > No need to bother about alignment here, so we can just use a single
> > 16kB object for example.
> 
> Alright.
> 
> > 
> > > +
> > > +	gem_bufs = calloc(max_gem_objs, sizeof(uint8_t *));
> > > +	handles = malloc(sizeof(uint32_t) * max_gem_objs);
> > > +
> > > +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j
> > > <<= 1, i++) {
> > > +		handles[i] = gem_create(drm_fd, j);
> > > +		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i],
> > > j, PROT_WRITE);
> > > +		memset(gem_bufs[i], 0x65, j);
> > > +	}
> > > +
> > > +	/* figure out if we have cache display available on the
> > > platform */
> > > +	gem_set_caching(drm_fd, handles[0],
> > > I915_CACHING_DISPLAY);
> > > +	if (gem_get_caching(drm_fd, handles[0]))
> > 
> > No need to hardcode I915_CACHING_NONE here. Also I liked the
> > original
> > version to check this everywhere better, by accepting both
> > CACHING_DISPLAY and CACHING_NONE as a result.
> 
> I don't think I get it.
> 
> As far as I understand CACHING_DISPLAY will fall-back to CACHING_NONE
> if
> the platform doesn't have support for it. Is there a proper way to
> check
> for this?
> igt_require()/igt_skip_on()/igt_require_f() can indeed be used to
> bypass
> certain test(s), but there has to be a way to determine apriori if
> (indeed)
> the platform supports CACHING_DISPLAY, before asserting the status.

We don't need to skip the test: it's valid on any platform to call the
IOCTL with CACHING_DISPLAY; it may just fall back to CACHING_NONE as
you said. So instead of skipping the test, just call the IOCTL
everywhere but allow for both CACHING_DISPLAY and CACHING_NONE as the
result of gem_get_caching().

--Imre

> Most likely this kind of issue has come up in other circumstances...
> 
> > 
> > > +		has_caching_display++;
> > > +
> > > +	for (i = 0; i < ARRAY_SIZE(cache_levels) + has_caching_display; i++) {
> > > +		disable_all_screens_and_wait(&ms_data);
> > > +
> > > +		for (j = 0; j < max_gem_objs; j++) {
> > > +			gem_set_caching(drm_fd, handles[j], cache_levels[i]);
> > > +
> > > +			igt_debug("Verifying cache for handle %u, level %u\n", j, i);
> > > +			got_caching = gem_get_caching(drm_fd, handles[j]);
> > > +
> > > +			igt_assert(got_caching == cache_levels[i]);
> > > +		}
> > > +
> > > +		enable_one_screen_and_wait(&ms_data);
> > 
> > The object can be unbound after the IOCTL so you need to do a
> > memset at
> > the begin of each iteration.
> 
> Okay. Will redo and send another try. Thanks for taking to time to
> review!.
> 
> > 
> > > +	}
> > > +
> > > +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j
> > > <<= 1, i++) {
> > > +		igt_assert(munmap(gem_bufs[i], j) == 0);
> > > +		gem_close(drm_fd, handles[i]);
> > > +	}
> > > +
> > > +	free(handles);
> > > +	free(gem_bufs);
> > > +}
> > > +
> > >  static void fences_subtest(bool dpms)
> > >  {
> > >  	int i;
> > > @@ -1927,6 +2041,12 @@ int main(int argc, char *argv[])
> > >  	igt_subtest("gem-execbuf-stress-extra-wait")
> > >  		gem_execbuf_stress_subtest(rounds, WAIT_STATUS |
> > > WAIT_EXTRA);
> > >  
> > > +	/* power-wake reference tests */
> > > +	igt_subtest("pm-tiling")
> > > +		pm_test_tiling();
> > > +	igt_subtest("pm-caching")
> > > +		pm_test_caching();
> > > +
> > >  	igt_fixture
> > >  		teardown_environment();
> > >  

* [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-24 17:42 [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s) marius.c.vlad
  2015-11-24 22:57 ` Imre Deak
@ 2015-11-26 16:32 ` Marius Vlad
  2015-11-26 18:23   ` Imre Deak
  2015-11-27 18:08 ` [PATCH i-g-t v4] tests/pm_rpm tests for set_caching and set_tiling Marius Vlad
  2 siblings, 1 reply; 12+ messages in thread
From: Marius Vlad @ 2015-11-26 16:32 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
---
 tests/pm_rpm.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 120 insertions(+)

diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
index c4fb19c..d34b2b2 100644
--- a/tests/pm_rpm.c
+++ b/tests/pm_rpm.c
@@ -1729,6 +1729,120 @@ static void planes_subtest(bool universal, bool dpms)
 	}
 }
 
+static void pm_test_tiling(void)
+{
+	uint32_t *handles;
+	uint8_t **gem_bufs;
+
+	int max_gem_objs = 0;
+	uint8_t off_bit = 14;
+	uint32_t gtt_obj_max_size = (256 * 1024);
+
+	uint32_t i, j, p, tiling_modes[3] = {
+		I915_TILING_NONE,
+		I915_TILING_X,
+		I915_TILING_Y,
+	};
+	uint32_t ti, sw;
+
+	/* default stride value */
+	uint32_t stride = 512;
+
+	/* calculate how many objects we can map */
+	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
+		;
+
+	gem_bufs = calloc(max_gem_objs, sizeof(*gem_bufs));
+	handles = calloc(max_gem_objs, sizeof(*handles));
+
+	/* map to gtt */
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		handles[i] = gem_create(drm_fd, j);
+		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
+	}
+
+	/* try to set different tiling for each handle */
+	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
+		disable_all_screens_and_wait(&ms_data);
+
+		for (j = 0, p = 1 << off_bit; j < max_gem_objs; j++, p <<= 1) {
+
+			/* modify the contents each time */
+			memset(gem_bufs[j], 16 << j, p);
+
+			igt_debug("Testing tiling mode %u, gem %u, "
+				   "size=%ukB (d=0x%x)\n", i, j,
+				   (p / (1 << 10)), (16 << j));
+
+			gem_set_tiling(drm_fd, handles[j],
+					tiling_modes[i], stride);
+
+			gem_get_tiling(drm_fd, handles[j], &ti, &sw);
+			igt_assert(tiling_modes[i] == ti);
+
+		}
+
+		enable_one_screen_and_wait(&ms_data);
+	}
+
+	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
+		igt_assert(munmap(gem_bufs[i], j) == 0);
+		gem_close(drm_fd, handles[i]);
+	}
+
+	free(gem_bufs);
+	free(handles);
+}
+
+static void pm_test_caching(void)
+{
+	uint32_t handle;
+	uint8_t *gem_buf;
+
+	uint32_t i, got_caching;
+	uint32_t gtt_obj_max_size = (16 * 1024);
+	uint32_t cache_levels[3] = {
+		I915_CACHING_NONE,
+		I915_CACHING_CACHED,            /* LLC caching */
+		I915_CACHING_DISPLAY,           /* eDRAM caching */
+	};
+
+
+	handle = gem_create(drm_fd, gtt_obj_max_size);
+	gem_buf = gem_mmap__gtt(drm_fd, handle, gtt_obj_max_size, PROT_WRITE);
+
+	for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
+		memset(gem_buf, 16 << i, gtt_obj_max_size);
+
+		disable_all_screens_and_wait(&ms_data);
+
+		igt_debug("Setting cache level %u\n", cache_levels[i]);
+
+		gem_set_caching(drm_fd, handle, cache_levels[i]);
+
+		got_caching = gem_get_caching(drm_fd, handle);
+
+		igt_debug("Got back %u\n", got_caching);
+
+		/*
+		 * Allow fall-back to CACHING_NONE in case the platform does
+		 * not support it.
+		 */
+		if (cache_levels[i] == I915_CACHING_DISPLAY)
+			igt_assert(got_caching == I915_CACHING_NONE ||
+				   got_caching == I915_CACHING_DISPLAY);
+		else
+			igt_assert(got_caching == cache_levels[i]);
+
+		enable_one_screen_and_wait(&ms_data);
+	}
+
+	igt_assert(munmap(gem_buf, gtt_obj_max_size) == 0);
+	gem_close(drm_fd, handle);
+}
+
+
+
 static void fences_subtest(bool dpms)
 {
 	int i;
@@ -1927,6 +2041,12 @@ int main(int argc, char *argv[])
 	igt_subtest("gem-execbuf-stress-extra-wait")
 		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
 
+	/* power-wake reference tests */
+	igt_subtest("pm-tiling")
+		pm_test_tiling();
+	igt_subtest("pm-caching")
+		pm_test_caching();
+
 	igt_fixture
 		teardown_environment();
 
-- 
2.6.2


* Re: [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-26 16:32 ` Marius Vlad
@ 2015-11-26 18:23   ` Imre Deak
  0 siblings, 0 replies; 12+ messages in thread
From: Imre Deak @ 2015-11-26 18:23 UTC (permalink / raw)
  To: Marius Vlad, intel-gfx

On to, 2015-11-26 at 18:32 +0200, Marius Vlad wrote:
> Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
> ---
>  tests/pm_rpm.c | 120 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 120 insertions(+)
> 
> diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
> index c4fb19c..d34b2b2 100644
> --- a/tests/pm_rpm.c
> +++ b/tests/pm_rpm.c
> @@ -1729,6 +1729,120 @@ static void planes_subtest(bool universal, bool dpms)
>  	}
>  }
>  
> +static void pm_test_tiling(void)
> +{
> +	uint32_t *handles;
> +	uint8_t **gem_bufs;
> +
> +	int max_gem_objs = 0;
> +	uint8_t off_bit = 14;
> +	uint32_t gtt_obj_max_size = (256 * 1024);
> +
> +	uint32_t i, j, p, tiling_modes[3] = {
> +		I915_TILING_NONE,
> +		I915_TILING_X,
> +		I915_TILING_Y,
> +	};
> +	uint32_t ti, sw;
> +
> +	/* default stride value */
> +	uint32_t stride = 512;
> +
> +	/* calculate how many objects we can map */
> +	for (j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, max_gem_objs++)
> +		;
> +
> +	gem_bufs = calloc(max_gem_objs, sizeof(*gem_bufs));
> +	handles = calloc(max_gem_objs, sizeof(*handles));
> +
> +	/* map to gtt */
> +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> +		handles[i] = gem_create(drm_fd, j);
> +		gem_bufs[i] = gem_mmap__gtt(drm_fd, handles[i], j, PROT_WRITE);
> +	}
> +
> +	/* try to set different tiling for each handle */
> +	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {

This still has the problem that we don't rebind the objects in each
iteration before disable_all_screens_and_wait(). So please move the
above "map to gtt" loop allocating and mmapping the objects, and the
memset(gem_bufs[]), below here, and ...

> +		disable_all_screens_and_wait(&ms_data);
> +
> +		for (j = 0, p = 1 << off_bit; j < max_gem_objs; j++, p <<= 1) {
> +
> +			/* modify the contents each time */
> +			memset(gem_bufs[j], 16 << j, p);
> +
> +			igt_debug("Testing tiling mode %u, gem %u, "
> +				   "size=%ukB (d=0x%x)\n", i, j,
> +				   (p / (1 << 10)), (16 << j));
> +
> +			gem_set_tiling(drm_fd, handles[j],
> +					tiling_modes[i], stride);
> +
> +			gem_get_tiling(drm_fd, handles[j], &ti, &sw);
> +			igt_assert(tiling_modes[i] == ti);
> +
> +		}
> +
> +		enable_one_screen_and_wait(&ms_data);

move the loop below doing the munmap() and gem_close() on the objects
here. With that fixed, this looks ok to me:
Reviewed-by: Imre Deak <imre.deak@intel.com>

> +	}
> +
> +	for (i = 0, j = 1 << off_bit; j <= gtt_obj_max_size; j <<= 1, i++) {
> +		igt_assert(munmap(gem_bufs[i], j) == 0);
> +		gem_close(drm_fd, handles[i]);
> +	}
> +
> +	free(gem_bufs);
> +	free(handles);
> +}
> +
> +static void pm_test_caching(void)
> +{
> +	uint32_t handle;
> +	uint8_t *gem_buf;
> +
> +	uint32_t i, got_caching;
> +	uint32_t gtt_obj_max_size = (16 * 1024);
> +	uint32_t cache_levels[3] = {
> +		I915_CACHING_NONE,
> +		I915_CACHING_CACHED,            /* LLC caching */
> +		I915_CACHING_DISPLAY,           /* eDRAM caching */
> +	};
> +
> +
> +	handle = gem_create(drm_fd, gtt_obj_max_size);
> +	gem_buf = gem_mmap__gtt(drm_fd, handle, gtt_obj_max_size, PROT_WRITE);
> +
> +	for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
> +		memset(gem_buf, 16 << i, gtt_obj_max_size);
> +
> +		disable_all_screens_and_wait(&ms_data);
> +
> +		igt_debug("Setting cache level %u\n", cache_levels[i]);
> +
> +		gem_set_caching(drm_fd, handle, cache_levels[i]);
> +
> +		got_caching = gem_get_caching(drm_fd, handle);
> +
> +		igt_debug("Got back %u\n", got_caching);
> +
> +		/*
> +		 * Allow fall-back to CACHING_NONE in case the platform does
> +		 * not support it.
> +		 */
> +		if (cache_levels[i] == I915_CACHING_DISPLAY)
> +			igt_assert(got_caching == I915_CACHING_NONE ||
> +				   got_caching == I915_CACHING_DISPLAY);
> +		else
> +			igt_assert(got_caching == cache_levels[i]);
> +
> +		enable_one_screen_and_wait(&ms_data);
> +	}
> +
> +	igt_assert(munmap(gem_buf, gtt_obj_max_size) == 0);
> +	gem_close(drm_fd, handle);
> +}
> +
> +
> +
>  static void fences_subtest(bool dpms)
>  {
>  	int i;
> @@ -1927,6 +2041,12 @@ int main(int argc, char *argv[])
>  	igt_subtest("gem-execbuf-stress-extra-wait")
>  		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
>  
> +	/* power-wake reference tests */
> +	igt_subtest("pm-tiling")
> +		pm_test_tiling();
> +	igt_subtest("pm-caching")
> +		pm_test_caching();
> +
>  	igt_fixture
>  		teardown_environment();
>  

* [PATCH i-g-t v4] tests/pm_rpm tests for set_caching and set_tiling
  2015-11-24 17:42 [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s) marius.c.vlad
  2015-11-24 22:57 ` Imre Deak
  2015-11-26 16:32 ` Marius Vlad
@ 2015-11-27 18:08 ` Marius Vlad
  2015-11-27 18:08   ` [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s) Marius Vlad
  2 siblings, 1 reply; 12+ messages in thread
From: Marius Vlad @ 2015-11-27 18:08 UTC (permalink / raw)
  To: intel-gfx

v4: Re-bind the gem objects each time before calling
disable_all_screens_and_wait().

v3: Use smaller sizes when allocating gem objects for caching tests.

v2: Use mmap to gtt instead of cpu and various style changes.

Reviewed-by: Imre Deak <imre.deak@intel.com>

* [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-27 18:08 ` [PATCH i-g-t v4] tests/pm_rpm tests for set_caching and set_tiling Marius Vlad
@ 2015-11-27 18:08   ` Marius Vlad
  2015-11-27 19:51     ` Imre Deak
  0 siblings, 1 reply; 12+ messages in thread
From: Marius Vlad @ 2015-11-27 18:08 UTC (permalink / raw)
  To: intel-gfx

Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>
---
 tests/pm_rpm.c | 114 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 114 insertions(+)

diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
index c4fb19c..e9ba9ea 100644
--- a/tests/pm_rpm.c
+++ b/tests/pm_rpm.c
@@ -1729,6 +1729,114 @@ static void planes_subtest(bool universal, bool dpms)
 	}
 }
 
+static void pm_test_tiling(void)
+{
+	uint32_t *handles;
+	uint8_t **gem_bufs;
+
+	int max_gem_objs = 0;
+	uint8_t off_bit = 14;
+	uint32_t gtt_obj_max_size = (256 * 1024);
+
+	uint32_t i, j, k, tiling_modes[3] = {
+		I915_TILING_NONE,
+		I915_TILING_X,
+		I915_TILING_Y,
+	};
+	uint32_t ti, sw;
+
+	/* default stride value */
+	uint32_t stride = 512;
+
+	/* calculate how many objects we can map */
+	for (i = 1 << off_bit; i <= gtt_obj_max_size; i <<= 1, max_gem_objs++)
+		;
+
+	gem_bufs = calloc(max_gem_objs, sizeof(*gem_bufs));
+	handles = calloc(max_gem_objs, sizeof(*handles));
+
+	/* try to set different tiling for each handle */
+	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
+
+		for (j = 0, k = 1 << off_bit;
+		     k <= gtt_obj_max_size; k <<= 1, j++) {
+			handles[j] = gem_create(drm_fd, k);
+			gem_bufs[j] = gem_mmap__gtt(drm_fd, handles[j],
+						    k, PROT_WRITE);
+			memset(gem_bufs[j], 0x0, k);
+		}
+
+		disable_all_screens_and_wait(&ms_data);
+
+		for (j = 0; j < max_gem_objs; j++) {
+			gem_set_tiling(drm_fd, handles[j],
+					tiling_modes[i], stride);
+			gem_get_tiling(drm_fd, handles[j], &ti, &sw);
+			igt_assert(tiling_modes[i] == ti);
+		}
+
+		enable_one_screen_and_wait(&ms_data);
+
+		for (j = 0, k = 1 << off_bit;
+		     k <= gtt_obj_max_size; k <<= 1, j++) {
+			igt_assert(munmap(gem_bufs[j], k) == 0);
+			gem_close(drm_fd, handles[j]);
+		}
+	}
+
+	free(gem_bufs);
+	free(handles);
+}
+
+static void pm_test_caching(void)
+{
+	uint32_t handle;
+	uint8_t *gem_buf;
+
+	uint32_t i, got_caching;
+	uint32_t gtt_obj_max_size = (16 * 1024);
+	uint32_t cache_levels[3] = {
+		I915_CACHING_NONE,
+		I915_CACHING_CACHED,            /* LLC caching */
+		I915_CACHING_DISPLAY,           /* eDRAM caching */
+	};
+
+
+	handle = gem_create(drm_fd, gtt_obj_max_size);
+	gem_buf = gem_mmap__gtt(drm_fd, handle, gtt_obj_max_size, PROT_WRITE);
+
+	for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
+		memset(gem_buf, 16 << i, gtt_obj_max_size);
+
+		disable_all_screens_and_wait(&ms_data);
+
+		igt_debug("Setting cache level %u\n", cache_levels[i]);
+
+		gem_set_caching(drm_fd, handle, cache_levels[i]);
+
+		got_caching = gem_get_caching(drm_fd, handle);
+
+		igt_debug("Got back %u\n", got_caching);
+
+		/*
+		 * Allow fall-back to CACHING_NONE in case the platform does
+		 * not support it.
+		 */
+		if (cache_levels[i] == I915_CACHING_DISPLAY)
+			igt_assert(got_caching == I915_CACHING_NONE ||
+				   got_caching == I915_CACHING_DISPLAY);
+		else
+			igt_assert(got_caching == cache_levels[i]);
+
+		enable_one_screen_and_wait(&ms_data);
+	}
+
+	igt_assert(munmap(gem_buf, gtt_obj_max_size) == 0);
+	gem_close(drm_fd, handle);
+}
+
+
+
 static void fences_subtest(bool dpms)
 {
 	int i;
@@ -1927,6 +2035,12 @@ int main(int argc, char *argv[])
 	igt_subtest("gem-execbuf-stress-extra-wait")
 		gem_execbuf_stress_subtest(rounds, WAIT_STATUS | WAIT_EXTRA);
 
+	/* power-wake reference tests */
+	igt_subtest("pm-tiling")
+		pm_test_tiling();
+	igt_subtest("pm-caching")
+		pm_test_caching();
+
 	igt_fixture
 		teardown_environment();
 
-- 
2.6.2


* Re: [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s)
  2015-11-27 18:08   ` [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s) Marius Vlad
@ 2015-11-27 19:51     ` Imre Deak
  0 siblings, 0 replies; 12+ messages in thread
From: Imre Deak @ 2015-11-27 19:51 UTC (permalink / raw)
  To: Marius Vlad, intel-gfx; +Cc: Thomas Wood

On pe, 2015-11-27 at 20:08 +0200, Marius Vlad wrote:
> Signed-off-by: Marius Vlad <marius.c.vlad@intel.com>

For the future: please send the patch revision log and commit tags
together with the patch itself, not as a separate email. The patch
looks ok, I pushed it to igt with the amended commit log.

> ---
>  tests/pm_rpm.c | 114
> +++++++++++++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 114 insertions(+)
> 
> diff --git a/tests/pm_rpm.c b/tests/pm_rpm.c
> index c4fb19c..e9ba9ea 100644
> --- a/tests/pm_rpm.c
> +++ b/tests/pm_rpm.c
> @@ -1729,6 +1729,114 @@ static void planes_subtest(bool universal,
> bool dpms)
>  	}
>  }
>  
> +static void pm_test_tiling(void)
> +{
> +	uint32_t *handles;
> +	uint8_t **gem_bufs;
> +
> +	int max_gem_objs = 0;
> +	uint8_t off_bit = 14;
> +	uint32_t gtt_obj_max_size = (256 * 1024);
> +
> +	uint32_t i, j, k, tiling_modes[3] = {
> +		I915_TILING_NONE,
> +		I915_TILING_X,
> +		I915_TILING_Y,
> +	};
> +	uint32_t ti, sw;
> +
> +	/* default stride value */
> +	uint32_t stride = 512;
> +
> +	/* calculate how many objects we can map */
> +	for (i = 1 << off_bit; i <= gtt_obj_max_size; i <<= 1,
> max_gem_objs++)
> +		;
> +
> +	gem_bufs = calloc(max_gem_objs, sizeof(*gem_bufs));
> +	handles = calloc(max_gem_objs, sizeof(*handles));
> +
> +	/* try to set different tiling for each handle */
> +	for (i = 0; i < ARRAY_SIZE(tiling_modes); i++) {
> +
> +		for (j = 0, k = 1 << off_bit;
> +		     k <= gtt_obj_max_size; k <<= 1, j++) {
> +			handles[j] = gem_create(drm_fd, k);
> +			gem_bufs[j] = gem_mmap__gtt(drm_fd,
> handles[j],
> +						    k, PROT_WRITE);
> +			memset(gem_bufs[j], 0x0, k);
> +		}
> +
> +		disable_all_screens_and_wait(&ms_data);
> +
> +		for (j = 0; j < max_gem_objs; j++) {
> +			gem_set_tiling(drm_fd, handles[j],
> +					tiling_modes[i], stride);
> +			gem_get_tiling(drm_fd, handles[j], &ti,
> &sw);
> +			igt_assert(tiling_modes[i] == ti);
> +		}
> +
> +		enable_one_screen_and_wait(&ms_data);
> +
> +		for (j = 0, k = 1 << off_bit;
> +		     k <= gtt_obj_max_size; k <<= 1, j++) {
> +			igt_assert(munmap(gem_bufs[j], k) == 0);
> +			gem_close(drm_fd, handles[j]);
> +		}
> +	}
> +
> +	free(gem_bufs);
> +	free(handles);
> +}
> +
> +static void pm_test_caching(void)
> +{
> +	uint32_t handle;
> +	uint8_t *gem_buf;
> +
> +	uint32_t i, got_caching;
> +	uint32_t gtt_obj_max_size = (16 * 1024);
> +	uint32_t cache_levels[3] = {
> +		I915_CACHING_NONE,
> +		I915_CACHING_CACHED,            /* LLC caching */
> +		I915_CACHING_DISPLAY,           /* eDRAM caching */
> +	};
> +
> +
> +	handle = gem_create(drm_fd, gtt_obj_max_size);
> +	gem_buf = gem_mmap__gtt(drm_fd, handle, gtt_obj_max_size,
> PROT_WRITE);
> +
> +	for (i = 0; i < ARRAY_SIZE(cache_levels); i++) {
> +		memset(gem_buf, 16 << i, gtt_obj_max_size);
> +
> +		disable_all_screens_and_wait(&ms_data);
> +
> +		igt_debug("Setting cache level %u\n",
> cache_levels[i]);
> +
> +		gem_set_caching(drm_fd, handle, cache_levels[i]);
> +
> +		got_caching = gem_get_caching(drm_fd, handle);
> +
> +		igt_debug("Got back %u\n", got_caching);
> +
> +		/*
> +		 * Allow fall-back to CACHING_NONE in case the
> platform does
> +		 * not support it.
> +		 */
> +		if (cache_levels[i] == I915_CACHING_DISPLAY)
> +			igt_assert(got_caching == I915_CACHING_NONE
> ||
> +				   got_caching ==
> I915_CACHING_DISPLAY);
> +		else
> +			igt_assert(got_caching == cache_levels[i]);
> +
> +		enable_one_screen_and_wait(&ms_data);
> +	}
> +
> +	igt_assert(munmap(gem_buf, gtt_obj_max_size) == 0);
> +	gem_close(drm_fd, handle);
> +}
> +
> +
> +
>  static void fences_subtest(bool dpms)
>  {
>  	int i;
> @@ -1927,6 +2035,12 @@ int main(int argc, char *argv[])
>  	igt_subtest("gem-execbuf-stress-extra-wait")
>  		gem_execbuf_stress_subtest(rounds, WAIT_STATUS |
> WAIT_EXTRA);
>  
> +	/* power-wake reference tests */
> +	igt_subtest("pm-tiling")
> +		pm_test_tiling();
> +	igt_subtest("pm-caching")
> +		pm_test_caching();
> +
>  	igt_fixture
>  		teardown_environment();
>  

end of thread

Thread overview: 12+ messages
2015-11-24 17:42 [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s) marius.c.vlad
2015-11-24 22:57 ` Imre Deak
2015-11-25 17:16   ` [PATCH i-g-t v2] " marius.c.vlad
2015-11-25 17:16     ` [PATCH i-g-t] " marius.c.vlad
2015-11-25 20:08       ` Imre Deak
2015-11-26 10:55         ` Marius Vlad
2015-11-26 11:57           ` Imre Deak
2015-11-26 16:32 ` Marius Vlad
2015-11-26 18:23   ` Imre Deak
2015-11-27 18:08 ` [PATCH i-g-t v4] tests/pm_rpm tests for set_caching and set_tiling Marius Vlad
2015-11-27 18:08   ` [PATCH i-g-t] tests/pm_rpm tests for set_caching and set_tiling ioctl(s) Marius Vlad
2015-11-27 19:51     ` Imre Deak
