* [PATCH] igt/gem_trtt: Exercise the TRTT hardware
@ 2016-01-09 11:31 akash.goel
  2016-01-11 12:32 ` Chris Wilson
  2016-01-12  6:00 ` [PATCH] " Tian, Kevin
  0 siblings, 2 replies; 30+ messages in thread
From: akash.goel @ 2016-01-09 11:31 UTC (permalink / raw)
  To: intel-gfx; +Cc: Akash Goel

From: Akash Goel <akash.goel@intel.com>

This patch provides a testcase to exercise the TRTT hardware.

Some platforms have additional address translation hardware support in the
form of a Tiled Resource Translation Table (TR-TT), which provides an extra
level of abstraction over PPGTT.
This is useful for mapping Sparse/Tiled texture resources.

TR-TT is tightly coupled with PPGTT: a new TR-TT instance is required for
each new PPGTT instance, but TR-TT need not be enabled for every context.
1/16th of the 48-bit PPGTT space is earmarked for translation by TR-TT;
which chunk to use is conveyed to HW through a register.
Any GFX address which lies in that reserved 44-bit range will be translated
through TR-TT first and then through PPGTT to get the actual physical address.
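
A minimal sketch of the arithmetic above (illustrative only, not part of the
patch; the two macros match the ones the v2 test later in this thread defines):

    #include <stdint.h>

    #define PPGTT_SIZE        (1ULL << 48)
    #define TRTT_SEGMENT_SIZE (PPGTT_SIZE / 16)     /* == 1ULL << 44, 16TB */

    /* The register can select one of the 16 possible segment bases. */
    static inline uint64_t trtt_segment_base(unsigned int n)
    {
            return n * TRTT_SEGMENT_SIZE;            /* n in [0, 15] */
    }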

TR-TT is constructed as a 3-level tile table. Each tile is 64KB in size,
which leaves behind 44-16=28 address bits. The 28 bits are partitioned as
9+9+10, and each level is contained within a 4KB page, hence L3 and L2 are
composed of 512 64-bit entries and L1 is composed of 1024 32-bit entries.
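
A sketch of how a TR-TT segment offset decomposes under this 9+9+10 split
(illustrative only; these helpers are hypothetical and not part of the patch):

    #include <stdint.h>

    /* tile = 64KB -> bits 0..15 are the byte offset within the tile;
     * bits 16..25 index L1 (10 bits, 1024 entries),
     * bits 26..34 index L2 (9 bits, 512 entries),
     * bits 35..43 index L3 (9 bits, 512 entries). */
    static inline uint32_t trtt_l1_index(uint64_t offset)
    {
            return (offset >> 16) & 0x3ff;
    }
    static inline uint32_t trtt_l2_index(uint64_t offset)
    {
            return (offset >> 26) & 0x1ff;
    }
    static inline uint32_t trtt_l3_index(uint64_t offset)
    {
            return (offset >> 35) & 0x1ff;
    }

For example the last tile of a segment, at offset 0xFFFFFFF0000, maps to
L3[511], L2[511] and L1[1023], which is exactly what the test below programs.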

There is a provision to keep the TR-TT tables in virtual space, where the
pages of the TR-TT tables are mapped into the PPGTT. This is the adopted
mode, as in this mode the UMD has full control over TR-TT management, with
bare minimum support from the KMD.
So the entries of the L3 table will contain the PPGTT offset of the L2 table
pages, similarly the entries of the L2 table will contain the PPGTT offset
of the L1 table pages. The entries of the L1 table will contain the PPGTT
offset of the BOs actually backing the Sparse resources.
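
To make the entry formats concrete, here is a hypothetical CPU-side sketch of
filling one translation chain (the test below does the equivalent from the GPU
with MI_STORE_DWORD_IMM; this helper does not appear in the patch):

    #include <stdint.h>

    /* l3/l2 point at CPU mmaps of 4KB table pages (512 x 64-bit entries),
     * l1 at a 4KB page of 1024 x 32-bit entries. */
    static void program_chain(uint64_t *l3, uint64_t *l2, uint32_t *l1,
                              int l3_idx, int l2_idx, int l1_idx,
                              uint64_t l2_ppgtt, uint64_t l1_ppgtt,
                              uint64_t bo_ppgtt)
    {
            l3[l3_idx] = l2_ppgtt;          /* PPGTT address of the L2 page */
            l2[l2_idx] = l1_ppgtt;          /* PPGTT address of the L1 page */
            l1[l1_idx] = bo_ppgtt >> 16;    /* 64KB-granule address of the BO */
    }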

The I915_GEM_CONTEXT_SETPARAM ioctl is used to request the KMD to enable
TRTT for a given context; a new I915_CONTEXT_PARAM_ENABLE_TRTT param has
been added to the CONTEXT_SETPARAM ioctl for that purpose.

Signed-off-by: Akash Goel <akash.goel@intel.com>
---
 tests/Makefile.sources |   1 +
 tests/gem_trtt.c       | 396 +++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 397 insertions(+)
 create mode 100644 tests/gem_trtt.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index d594038..068a44e 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -64,6 +64,7 @@ TESTS_progs_M = \
 	gem_streaming_writes \
 	gem_tiled_blits \
 	gem_tiled_partial_pwrite_pread \
+	gem_trtt \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
 	kms_addfb_basic \
diff --git a/tests/gem_trtt.c b/tests/gem_trtt.c
new file mode 100644
index 0000000..f652b67
--- /dev/null
+++ b/tests/gem_trtt.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Akash Goel <akash.goel@intel.com>
+ *
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <malloc.h>
+#include "drm.h"
+#include "ioctl_wrappers.h"
+#include "drmtest.h"
+#include "intel_chipset.h"
+#include "intel_io.h"
+#include "i915_drm.h"
+#include <assert.h>
+#include <sys/wait.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include "igt_kms.h"
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#define BO_SIZE 4096
+#define EXEC_OBJECT_PINNED	(1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+
+#define NO_PPGTT 0
+#define ALIASING_PPGTT 1
+#define FULL_32_BIT_PPGTT 2
+#define FULL_48_BIT_PPGTT 3
+/* uses_full_ppgtt
+ * Finds supported PPGTT details.
+ * @fd DRM fd
+ * @min can be
+ * 0 - No PPGTT
+ * 1 - Aliasing PPGTT
+ * 2 - Full PPGTT (32b)
+ * 3 - Full PPGTT (48b)
+ * RETURNS true/false if min support is present
+*/
+static bool uses_full_ppgtt(int fd, int min)
+{
+	struct drm_i915_getparam gp;
+	int val = 0;
+
+	memset(&gp, 0, sizeof(gp));
+	gp.param = 18; /* HAS_ALIASING_PPGTT */
+	gp.value = &val;
+
+	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+		return 0;
+
+	errno = 0;
+	return val >= min;
+}
+
+/* has_softpin_support
+ * Finds if softpin feature is supported
+ * @fd DRM fd
+*/
+static bool has_softpin_support(int fd)
+{
+	struct drm_i915_getparam gp;
+	int val = 0;
+
+	memset(&gp, 0, sizeof(gp));
+	gp.param = 37; /* I915_PARAM_HAS_EXEC_SOFTPIN */
+	gp.value = &val;
+
+	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+		return 0;
+
+	errno = 0;
+	return (val == 1);
+}
+
+/* has_trtt_support
+ * Finds if trtt hw is present
+ * @fd DRM fd
+*/
+static bool has_trtt_support(int fd)
+{
+	struct drm_i915_getparam gp;
+	int val = 0;
+
+	memset(&gp, 0, sizeof(gp));
+	gp.param = 38; /* I915_PARAM_HAS_TRTT */
+	gp.value = &val;
+
+	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+		return 0;
+
+	errno = 0;
+	return (val == 1);
+}
+
+/* mmap_bo
+ * helper for creating a CPU mmapping of the buffer
+ * @fd - drm fd
+ * @handle - handle of the buffer to mmap
+ * @size: size of the buffer
+*/
+static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
+{
+	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	return ptr;
+}
+
+/* setup_exec_obj
+ * populate exec object
+ * @exec - exec object
+ * @handle - handle to gem buffer
+ * @flags - any flags
+ * @offset - requested VMA
+*/
+static void setup_exec_obj(struct drm_i915_gem_exec_object2 *exec,
+			   uint32_t handle, uint32_t flags,
+			   uint64_t offset)
+{
+	memset(exec, 0, sizeof(struct drm_i915_gem_exec_object2));
+	exec->handle = handle;
+	exec->flags = flags;
+	exec->offset = offset;
+}
+
+/* gem_store_data_svm
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: data to be stored at destination
+ * @end: whether to end batch buffer or not
+*/
+static int gem_store_data_svm(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			uint64_t vaddr, uint32_t data, bool end)
+{
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
+	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
+	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
+
+	cmd_buf[dw_offset++] = data;
+	if (end) {
+		cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
+		cmd_buf[dw_offset++] = 0;
+	}
+
+	return dw_offset;
+}
+
+/* setup_execbuffer
+ * helper for buffer execution
+ * @execbuf - pointer to execbuffer
+ * @exec_object - pointer to exec object2 struct
+ * @ring - ring to be used
+ * @buffer_count - how many buffers to submit
+ * @batch_length - length of batch buffer
+*/
+static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
+			     struct drm_i915_gem_exec_object2 *exec_object,
+			     int ring, int buffer_count, int batch_length)
+{
+	execbuf->buffers_ptr = (unsigned long)exec_object;
+	execbuf->buffer_count = buffer_count;
+	execbuf->batch_start_offset = 0;
+	execbuf->batch_len = batch_length;
+	execbuf->cliprects_ptr = 0;
+	execbuf->num_cliprects = 0;
+	execbuf->DR1 = 0;
+	execbuf->DR4 = 0;
+	execbuf->flags = ring;
+	i915_execbuffer2_set_context_id(*execbuf, 0);
+	execbuf->rsvd2 = 0;
+}
+
+/* submit_and_sync
+ * Helper function for exec and sync functions
+ * @fd - drm fd
+ * @execbuf - pointer to execbuffer
+ * @batch_buf_handle - batch buffer handle
+*/
+static void submit_and_sync(int fd, struct drm_i915_gem_execbuffer2 *execbuf,
+			    uint32_t batch_buf_handle)
+{
+	gem_execbuf(fd, execbuf);
+	gem_sync(fd, batch_buf_handle);
+}
+
+struct local_i915_gem_context_trtt_param {
+	uint64_t l3_table_address;
+	uint32_t invd_tile_val;
+	uint32_t null_tile_val;
+};
+
+/* send_trtt_params
+ * Helper function to request KMD to enable TRTT
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+*/
+static void send_trtt_params(int fd, uint32_t ctx_id, uint64_t l3_table_address)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	memset(&ctx_param, 0, sizeof(ctx_param));
+
+	trtt_param.null_tile_val = 0xFFFFFFFF;
+	trtt_param.invd_tile_val = 0xFFFFFFFE;
+	trtt_param.l3_table_address = l3_table_address;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = 4; /* CONTEXT_PARAM_ENABLE_TRTT */
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	gem_context_set_param(fd, &ctx_param);
+}
+
+#define TABLE_SIZE 0x1000
+#define TILE_SIZE 0x10000
+
+#define FIRST_TILE_ADDRESS 0xF00000000000
+#define LAST_TILE_ADDRESS  0xFFFFFFFF0000
+
+#define BO_ALLOC_AND_SETUP(fd, bo_size, bo_handle, bo_offset, idx) \
+	bo_handle = gem_create(fd, bo_size); \
+	bo_offset = current_ppgtt_offset; \
+	setup_exec_obj(&exec_object2[idx], bo_handle, EXEC_OBJECT_PINNED, bo_offset); \
+	current_ppgtt_offset += bo_size;
+
+/* basic test
+ * This test will add a series of MI_STORE_DWORD_IMM commands, first to update
+ * the TR-TT table entries and then to update the data buffers using the
+ * TR-TT VA, exercising the table programming done previously
+*/
+static void gem_basic_trtt_use(void)
+{
+	int fd;
+	int ring, len = 0;
+	uint32_t *ptr;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 exec_object2[8];
+	uint32_t batch_buffer[BO_SIZE];
+
+	uint32_t l3_tbl_handle, l2_tbl1_handle, l2_tbl2_handle;
+	uint32_t l1_tbl1_handle, l1_tbl2_handle, batch_buf_handle;
+	uint32_t buffer1_handle, buffer2_handle;
+
+	uint64_t l3_tbl_offset, l2_tbl1_offset, l2_tbl2_offset;
+	uint64_t l1_tbl1_offset, l1_tbl2_offset;
+	uint64_t buffer1_offset, buffer2_offset;
+
+	uint32_t data;
+	uint64_t address, current_ppgtt_offset = 0x10000;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	igt_require(uses_full_ppgtt(fd, FULL_48_BIT_PPGTT));
+	igt_require(has_softpin_support(fd));
+	igt_require(has_trtt_support(fd));
+
+	/* Allocate a L3 table BO */
+	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l3_tbl_handle, l3_tbl_offset, 0);
+
+	/* Allocate two L2 table BOs */
+	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l2_tbl1_handle, l2_tbl1_offset, 1);
+	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l2_tbl2_handle, l2_tbl2_offset, 2);
+
+	/* Allocate two L1 table BOs */
+	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l1_tbl1_handle, l1_tbl1_offset, 3);
+	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l1_tbl2_handle, l1_tbl2_offset, 4);
+
+	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
+	current_ppgtt_offset = ALIGN(current_ppgtt_offset, TILE_SIZE);
+
+	/* Allocate two Data buffer BOs */
+	BO_ALLOC_AND_SETUP(fd, TILE_SIZE, buffer1_handle, buffer1_offset, 5);
+	BO_ALLOC_AND_SETUP(fd, TILE_SIZE, buffer2_handle, buffer2_offset, 6);
+
+	/* Finally allocate Batch buffer BO */
+	batch_buf_handle = gem_create(fd, BO_SIZE);
+	setup_exec_obj(&exec_object2[7], batch_buf_handle, 0, 0);
+
+	/* Add commands to update the two L3 table entries to point them to the L2 tables*/
+	address = l3_tbl_offset;
+	data = l2_tbl1_offset;
+	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
+
+	address = l3_tbl_offset + 511*sizeof(uint64_t);
+	data = l2_tbl2_offset;
+	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
+
+	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables*/
+	address = l2_tbl1_offset;
+	data = l1_tbl1_offset;
+	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
+
+	address = l2_tbl2_offset + 511*sizeof(uint64_t);
+	data = l1_tbl2_offset;
+	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
+
+	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers*/
+	address = l1_tbl1_offset;
+	data = buffer1_offset >> 16;
+	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
+
+	address = l1_tbl2_offset + 1023*sizeof(uint32_t);
+	data = buffer2_offset >> 16;
+	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
+
+	/* Add commands to update the 2 data buffers, using their TRTT VA */
+	data = 0x12345678;
+	len = gem_store_data_svm(fd, batch_buffer, len, FIRST_TILE_ADDRESS, data, false);
+	len = gem_store_data_svm(fd, batch_buffer, len, LAST_TILE_ADDRESS, data, true);
+
+	gem_write(fd, batch_buf_handle, 0, batch_buffer, len*4);
+
+	/* Request KMD to setup the TR-TT */
+	send_trtt_params(fd, 0, l3_tbl_offset);
+
+	ring = I915_EXEC_RENDER;
+	setup_execbuffer(&execbuf, exec_object2, ring, 8, len*4);
+
+	/* submit command buffer */
+	submit_and_sync(fd, &execbuf, batch_buf_handle);
+
+	/* read the 2 data buffers to check for the value written by the GPU */
+	ptr = mmap_bo(fd, buffer1_handle, TILE_SIZE);
+	igt_fail_on_f(ptr[0] != data,
+		"\nCPU read does not match GPU write,\
+		expected: 0x%x, got: 0x%x\n",
+		data, ptr[0]);
+
+	ptr = mmap_bo(fd, buffer2_handle, TILE_SIZE);
+	igt_fail_on_f(ptr[0] != data,
+		"\nCPU read does not match GPU write,\
+		expected: 0x%x, got: 0x%x\n",
+		data, ptr[0]);
+
+	gem_close(fd, l3_tbl_handle);
+	gem_close(fd, l2_tbl1_handle);
+	gem_close(fd, l2_tbl2_handle);
+	gem_close(fd, l1_tbl1_handle);
+	gem_close(fd, l1_tbl2_handle);
+	gem_close(fd, buffer1_handle);
+	gem_close(fd, buffer2_handle);
+	gem_close(fd, batch_buf_handle);
+	close(fd);
+}
+
+int main(int argc, char* argv[])
+{
+	igt_subtest_init(argc, argv);
+	igt_skip_on_simulation();
+
+	/* test needs 48 PPGTT & Soft Pin support */
+	igt_subtest("basic") {
+		gem_basic_trtt_use();
+	}
+
+	igt_exit();
+}
+
-- 
1.9.2


* Re: [PATCH] igt/gem_trtt: Exercise the TRTT hardware
  2016-01-09 11:31 [PATCH] igt/gem_trtt: Exercise the TRTT hardware akash.goel
@ 2016-01-11 12:32 ` Chris Wilson
  2016-01-11 12:37   ` Chris Wilson
  2016-01-20 10:24   ` Goel, Akash
  2016-01-12  6:00 ` [PATCH] " Tian, Kevin
  1 sibling, 2 replies; 30+ messages in thread
From: Chris Wilson @ 2016-01-11 12:32 UTC (permalink / raw)
  To: akash.goel; +Cc: intel-gfx

On Sat, Jan 09, 2016 at 05:01:30PM +0530, akash.goel@intel.com wrote:
> +static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
> +{
> +	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
> +	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);

read-only mapping, but set to the cpu write domain? Seems inconsistent.

> +	return ptr;
> +}

> +/* gem_store_data_svm
> + * populate batch buffer with MI_STORE_DWORD_IMM command
> + * @fd: drm file descriptor
> + * @cmd_buf: batch buffer
> + * @dw_offset: write offset in batch buffer
> + * @vaddr: destination Virtual address
> + * @data: data to be stored at destination
> + * @end: whether to end batch buffer or not
> +*/
> +static int gem_store_data_svm(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
> +			uint64_t vaddr, uint32_t data, bool end)

Urm, what?

Just call this what it is,
emit_store_dword(cs, offset, vaddr, value);

Don't pass in bool end, since it is going to used exactly once and just
confuses all the other callers.

> +{
> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
> +	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
> +	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
> +
> +	cmd_buf[dw_offset++] = data;

Interesting use of whitespace.

> +	if (end) {
> +		cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
> +		cmd_buf[dw_offset++] = 0;

> +	}
> +
> +	return dw_offset;
> +}
> +
> +/* setup_execbuffer
> + * helper for buffer execution
> + * @execbuf - pointer to execbuffer
> + * @exec_object - pointer to exec object2 struct
> + * @ring - ring to be used
> + * @buffer_count - how many buffers to submit
> + * @batch_length - length of batch buffer
> +*/
> +static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
> +			     struct drm_i915_gem_exec_object2 *exec_object,
> +			     int ring, int buffer_count, int batch_length)
> +{

How about memset(execbuf, 0, sizeof(*execbuf));

> +	execbuf->buffers_ptr = (unsigned long)exec_object;
> +	execbuf->buffer_count = buffer_count;
> +	execbuf->batch_start_offset = 0;
> +	execbuf->batch_len = batch_length;
> +	execbuf->cliprects_ptr = 0;
> +	execbuf->num_cliprects = 0;
> +	execbuf->DR1 = 0;
> +	execbuf->DR4 = 0;
> +	execbuf->flags = ring;
> +	i915_execbuffer2_set_context_id(*execbuf, 0);
> +	execbuf->rsvd2 = 0;
> +}
> +
> +/* submit_and_sync
> + * Helper function for exec and sync functions
> + * @fd - drm fd
> + * @execbuf - pointer to execbuffer
> + * @batch_buf_handle - batch buffer handle
> +*/
> +static void submit_and_sync(int fd, struct drm_i915_gem_execbuffer2 *execbuf,
> +			    uint32_t batch_buf_handle)
> +{
> +	gem_execbuf(fd, execbuf);
> +	gem_sync(fd, batch_buf_handle);

The only caller of this also does its own sync. This seems irrelevant
and serves as a bad example.

> +}
> +
> +struct local_i915_gem_context_trtt_param {
> +	uint64_t l3_table_address;
> +	uint32_t invd_tile_val;
> +	uint32_t null_tile_val;
> +};
> +
> +/* send_trtt_params
> + * Helper function to request KMD to enable TRTT
> + * @fd - drm fd
> + * @ctx_id - id of the context for which TRTT is to be enabled
> + * @l3_table_address - GFX address of the L3 table
> +*/
> +static void send_trtt_params(int fd, uint32_t ctx_id, uint64_t l3_table_address)

It is not a socket, pipe, or other transport medium. Just setup_trtt().

> +#define TABLE_SIZE 0x1000
> +#define TILE_SIZE 0x10000
> +
> +#define FIRST_TILE_ADDRESS 0xF00000000000
> +#define LAST_TILE_ADDRESS  0xFFFFFFFF0000
> +
> +#define BO_ALLOC_AND_SETUP(fd, bo_size, bo_handle, bo_offset, idx) \
> +	bo_handle = gem_create(fd, bo_size); \
> +	bo_offset = current_ppgtt_offset; \
> +	setup_exec_obj(&exec_object2[idx], bo_handle, EXEC_OBJECT_PINNED, bo_offset); \
> +	current_ppgtt_offset += bo_size;

Function!

> +
> +/* basic test
> + * This test will add a series of MI_STORE_DWORD_IMM commands, first to update
> + * the TR-TT table entries and then to update the data buffers using the
> + * TR-TT VA, exercising the table programming done previously
> +*/
> +static void gem_basic_trtt_use(void)
> +{
> +	int fd;
> +	int ring, len = 0;
> +	uint32_t *ptr;
> +	struct drm_i915_gem_execbuffer2 execbuf;
> +	struct drm_i915_gem_exec_object2 exec_object2[8];
> +	uint32_t batch_buffer[BO_SIZE];
> +
> +	uint32_t l3_tbl_handle, l2_tbl1_handle, l2_tbl2_handle;
> +	uint32_t l1_tbl1_handle, l1_tbl2_handle, batch_buf_handle;
> +	uint32_t buffer1_handle, buffer2_handle;
> +
> +	uint64_t l3_tbl_offset, l2_tbl1_offset, l2_tbl2_offset;
> +	uint64_t l1_tbl1_offset, l1_tbl2_offset;
> +	uint64_t buffer1_offset, buffer2_offset;
> +
> +	uint32_t data;
> +	uint64_t address, current_ppgtt_offset = 0x10000;
> +
> +	fd = drm_open_driver(DRIVER_INTEL);
> +	igt_require(uses_full_ppgtt(fd, FULL_48_BIT_PPGTT));
> +	igt_require(has_softpin_support(fd));
> +	igt_require(has_trtt_support(fd));
> +
> +	/* Allocate a L3 table BO */
> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l3_tbl_handle, l3_tbl_offset, 0);
> +
> +	/* Allocate two L2 table BOs */
> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l2_tbl1_handle, l2_tbl1_offset, 1);
> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l2_tbl2_handle, l2_tbl2_offset, 2);
> +
> +	/* Allocate two L1 table BOs */
> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l1_tbl1_handle, l1_tbl1_offset, 3);
> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l1_tbl2_handle, l1_tbl2_offset, 4);
> +
> +	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
> +	current_ppgtt_offset = ALIGN(current_ppgtt_offset, TILE_SIZE);
> +
> +	/* Allocate two Data buffer BOs */
> +	BO_ALLOC_AND_SETUP(fd, TILE_SIZE, buffer1_handle, buffer1_offset, 5);
> +	BO_ALLOC_AND_SETUP(fd, TILE_SIZE, buffer2_handle, buffer2_offset, 6);
> +
> +	/* Finally allocate Batch buffer BO */
> +	batch_buf_handle = gem_create(fd, BO_SIZE);
> +	setup_exec_obj(&exec_object2[7], batch_buf_handle, 0, 0);

Scary jump from idx to 7.
Why not just pin this as well to reduce the code complexity? Afterwards
setup_exec_obj() can allocate an object all by itself.

> +
> +	/* Add commands to update the two L3 table entries to point them to the L2 tables*/
> +	address = l3_tbl_offset;
> +	data = l2_tbl1_offset;
> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
> +
> +	address = l3_tbl_offset + 511*sizeof(uint64_t);
> +	data = l2_tbl2_offset;
> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
> +
> +	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables*/
> +	address = l2_tbl1_offset;
> +	data = l1_tbl1_offset;
> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
> +
> +	address = l2_tbl2_offset + 511*sizeof(uint64_t);
> +	data = l1_tbl2_offset;
> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
> +
> +	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers*/
> +	address = l1_tbl1_offset;
> +	data = buffer1_offset >> 16;
> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
> +
> +	address = l1_tbl2_offset + 1023*sizeof(uint32_t);
> +	data = buffer2_offset >> 16;
> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
> +
> +	/* Add commands to update the 2 data buffers, using their TRTT VA */
> +	data = 0x12345678;
> +	len = gem_store_data_svm(fd, batch_buffer, len, FIRST_TILE_ADDRESS, data, false);
> +	len = gem_store_data_svm(fd, batch_buffer, len, LAST_TILE_ADDRESS, data, true);
> +
> +	gem_write(fd, batch_buf_handle, 0, batch_buffer, len*4);

Or for even shorter code: batch_buffer =
gem_mmap__cpu(exec_object[batch].handle);

> +
> +	/* Request KMD to setup the TR-TT */
> +	send_trtt_params(fd, 0, l3_tbl_offset);
> +
> +	ring = I915_EXEC_RENDER;
> +	setup_execbuffer(&execbuf, exec_object2, ring, 8, len*4);
> +
> +	/* submit command buffer */
> +	submit_and_sync(fd, &execbuf, batch_buf_handle);
> +
> +	/* read the 2 data buffers to check for the value written by the GPU */
> +	ptr = mmap_bo(fd, buffer1_handle, TILE_SIZE);
> +	igt_fail_on_f(ptr[0] != data,
> +		"\nCPU read does not match GPU write,\
> +		expected: 0x%x, got: 0x%x\n",
> +		data, ptr[0]);
> +
> +	ptr = mmap_bo(fd, buffer2_handle, TILE_SIZE);
> +	igt_fail_on_f(ptr[0] != data,
> +		"\nCPU read does not match GPU write,\
> +		expected: 0x%x, got: 0x%x\n",
> +		data, ptr[0]);
> +
> +	gem_close(fd, l3_tbl_handle);
> +	gem_close(fd, l2_tbl1_handle);
> +	gem_close(fd, l2_tbl2_handle);
> +	gem_close(fd, l1_tbl1_handle);
> +	gem_close(fd, l1_tbl2_handle);
> +	gem_close(fd, buffer1_handle);
> +	gem_close(fd, buffer2_handle);
> +	gem_close(fd, batch_buf_handle);
> +	close(fd);
> +}
> +
> +int main(int argc, char* argv[])
> +{
> +	igt_subtest_init(argc, argv);

Together these are igt_main, and then you can also drop igt_exit.

> +	igt_skip_on_simulation();

I think you want this on simulation as well, at least "basic".

> +
> +	/* test needs 48 PPGTT & Soft Pin support */
> +	igt_subtest("basic") {
> +		gem_basic_trtt_use();
> +	}
> +
> +	igt_exit();
> +}
> +
> -- 
> 1.9.2
> 

-- 
Chris Wilson, Intel Open Source Technology Centre

* Re: [PATCH] igt/gem_trtt: Exercise the TRTT hardware
  2016-01-11 12:32 ` Chris Wilson
@ 2016-01-11 12:37   ` Chris Wilson
  2016-01-20 10:24   ` Goel, Akash
  1 sibling, 0 replies; 30+ messages in thread
From: Chris Wilson @ 2016-01-11 12:37 UTC (permalink / raw)
  To: akash.goel, intel-gfx, Piotr.Luc

On Mon, Jan 11, 2016 at 12:32:08PM +0000, Chris Wilson wrote:
> On Sat, Jan 09, 2016 at 05:01:30PM +0530, akash.goel@intel.com wrote:
> > +static void submit_and_sync(int fd, struct drm_i915_gem_execbuffer2 *execbuf,
> > +			    uint32_t batch_buf_handle)
> > +{
> > +	gem_execbuf(fd, execbuf);
> > +	gem_sync(fd, batch_buf_handle);
> 
> The only caller of this also does its own sync. This seems irrelevant
> and serves as a bad example.

Oh, I see why. Your code is broken...

...
...
...


You didn't mark the output buffers as being written.
-Chris
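
For reference, the fix amounts to setting the write flag on the buffers the
GPU writes, so that the kernel tracks the write and a later set-domain/mmap
waits for it. A minimal sketch against the v1 names (v2 below folds exactly
this into exec_flags):

    #define EXEC_OBJECT_WRITE (1<<2)

    exec_object2[5].flags |= EXEC_OBJECT_WRITE;     /* buffer1 */
    exec_object2[6].flags |= EXEC_OBJECT_WRITE;     /* buffer2 */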

-- 
Chris Wilson, Intel Open Source Technology Centre

* Re: [PATCH] igt/gem_trtt: Exercise the TRTT hardware
  2016-01-09 11:31 [PATCH] igt/gem_trtt: Exercise the TRTT hardware akash.goel
  2016-01-11 12:32 ` Chris Wilson
@ 2016-01-12  6:00 ` Tian, Kevin
  1 sibling, 0 replies; 30+ messages in thread
From: Tian, Kevin @ 2016-01-12  6:00 UTC (permalink / raw)
  To: intel-gfx; +Cc: Goel, Akash

> From: akash.goel@intel.com
> Sent: Saturday, January 09, 2016 7:32 PM
> 

[...]

> 
> There is a provision to keep TR-TT Tables in virtual space, where the pages of
> TRTT tables will be mapped to PPGTT. This is the adopted mode, as in this mode
> UMD will have a full control on TR-TT management, with bare minimum support
> from KMD.
> So the entries of L3 table will contain the PPGTT offset of L2 Table pages,
> similarly entries of L2 table will contain the PPGTT offset of L1 Table pages.
> The entries of L1 table will contain the PPGTT offset of BOs actually backing
> the Sparse resources.
> 

Just a side note. Using virtual addresses for the TRTT tables also benefits
the virtualization side. This way we can let the guest own TRTT completely.
Otherwise we would have to virtualize the TRTT tables if physical addresses
were used, which is very complex and could bring an obvious performance impact.

It would be appreciated if you could add this virtualization requirement in a
code comment, so others can catch this limitation if they want to make changes
in the future. :-)

Thanks
Kevin

* Re: [PATCH] igt/gem_trtt: Exercise the TRTT hardware
  2016-01-11 12:32 ` Chris Wilson
  2016-01-11 12:37   ` Chris Wilson
@ 2016-01-20 10:24   ` Goel, Akash
  2016-01-22 15:37     ` [PATCH v2] " akash.goel
  1 sibling, 1 reply; 30+ messages in thread
From: Goel, Akash @ 2016-01-20 10:24 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: akash.goel



On 1/11/2016 6:02 PM, Chris Wilson wrote:
> On Sat, Jan 09, 2016 at 05:01:30PM +0530, akash.goel@intel.com wrote:
>> +static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
>> +{
>> +	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
>> +	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
>
> read-only mapping, but set to the cpu write domain? Seems inconsistent.
Yes, write is not required; will change.
>
>> +	return ptr;
>> +}
>
>> +/* gem_store_data_svm
>> + * populate batch buffer with MI_STORE_DWORD_IMM command
>> + * @fd: drm file descriptor
>> + * @cmd_buf: batch buffer
>> + * @dw_offset: write offset in batch buffer
>> + * @vaddr: destination Virtual address
>> + * @data: data to be stored at destination
>> + * @end: whether to end batch buffer or not
>> +*/
>> +static int gem_store_data_svm(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
>> +			uint64_t vaddr, uint32_t data, bool end)
>
> Urm, what?
>
> Just call this what it is,
> emit_store_dword(cs, offset, vaddr, value);
>
> Don't pass in bool end, since it is going to used exactly once and just
> confuses all the other callers.
>
Fine, will clean this up.

>> +{
>> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
>> +	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
>> +	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
>> +
>> +	cmd_buf[dw_offset++] = data;
>
> Interesting use of whitespace.
>
Sorry, will remove.

>> +	if (end) {
>> +		cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
>> +		cmd_buf[dw_offset++] = 0;
>
>> +	}
>> +
>> +	return dw_offset;
>> +}
>> +
>> +/* setup_execbuffer
>> + * helper for buffer execution
>> + * @execbuf - pointer to execbuffer
>> + * @exec_object - pointer to exec object2 struct
>> + * @ring - ring to be used
>> + * @buffer_count - how many buffers to submit
>> + * @batch_length - length of batch buffer
>> +*/
>> +static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
>> +			     struct drm_i915_gem_exec_object2 *exec_object,
>> +			     int ring, int buffer_count, int batch_length)
>> +{
>
> How about memset(execbuf, 0, sizeof(*execbuf));
>
Will use memset.
>> +	execbuf->buffers_ptr = (unsigned long)exec_object;
>> +	execbuf->buffer_count = buffer_count;
>> +	execbuf->batch_start_offset = 0;
>> +	execbuf->batch_len = batch_length;
>> +	execbuf->cliprects_ptr = 0;
>> +	execbuf->num_cliprects = 0;
>> +	execbuf->DR1 = 0;
>> +	execbuf->DR4 = 0;
>> +	execbuf->flags = ring;
>> +	i915_execbuffer2_set_context_id(*execbuf, 0);
>> +	execbuf->rsvd2 = 0;
>> +}
>> +
>> +/* submit_and_sync
>> + * Helper function for exec and sync functions
>> + * @fd - drm fd
>> + * @execbuf - pointer to execbuffer
>> + * @batch_buf_handle - batch buffer handle
>> +*/
>> +static void submit_and_sync(int fd, struct drm_i915_gem_execbuffer2 *execbuf,
>> +			    uint32_t batch_buf_handle)
>> +{
>> +	gem_execbuf(fd, execbuf);
>> +	gem_sync(fd, batch_buf_handle);
>
> The only caller of this also does its own sync. This seems irrelevant
> and serves as a bad example.
>
OK, there is no real need to do an explicit sync, since a sync will happen
implicitly on mmap of the BOs (moving them to the CPU domain) to verify the
store result.

>> +}
>> +
>> +struct local_i915_gem_context_trtt_param {
>> +	uint64_t l3_table_address;
>> +	uint32_t invd_tile_val;
>> +	uint32_t null_tile_val;
>> +};
>> +
>> +/* send_trtt_params
>> + * Helper function to request KMD to enable TRTT
>> + * @fd - drm fd
>> + * @ctx_id - id of the context for which TRTT is to be enabled
>> + * @l3_table_address - GFX address of the L3 table
>> +*/
>> +static void send_trtt_params(int fd, uint32_t ctx_id, uint64_t l3_table_address)
>
> It is not a socket, pipe, or other transport medium. Just setup_trtt().
>
Fine, will rename it.

>> +#define TABLE_SIZE 0x1000
>> +#define TILE_SIZE 0x10000
>> +
>> +#define FIRST_TILE_ADDRESS 0xF00000000000
>> +#define LAST_TILE_ADDRESS  0xFFFFFFFF0000
>> +
>> +#define BO_ALLOC_AND_SETUP(fd, bo_size, bo_handle, bo_offset, idx) \
>> +	bo_handle = gem_create(fd, bo_size); \
>> +	bo_offset = current_ppgtt_offset; \
>> +	setup_exec_obj(&exec_object2[idx], bo_handle, EXEC_OBJECT_PINNED, bo_offset); \
>> +	current_ppgtt_offset += bo_size;
>
> Function!
>

Is it affecting readability?
Actually, in this case it was more convenient for me to use a macro.
With a function, I would have to pass the addresses of multiple variables.

>> +
>> +/* basic test
>> + * This test will add a series of MI_STORE_DWORD_IMM commands, first to update
>> + * the TR-TT table entries and then to update the data buffers using the
>> + * TR-TT VA, exercising the table programming done previously
>> +*/
>> +static void gem_basic_trtt_use(void)
>> +{
>> +	int fd;
>> +	int ring, len = 0;
>> +	uint32_t *ptr;
>> +	struct drm_i915_gem_execbuffer2 execbuf;
>> +	struct drm_i915_gem_exec_object2 exec_object2[8];
>> +	uint32_t batch_buffer[BO_SIZE];
>> +
>> +	uint32_t l3_tbl_handle, l2_tbl1_handle, l2_tbl2_handle;
>> +	uint32_t l1_tbl1_handle, l1_tbl2_handle, batch_buf_handle;
>> +	uint32_t buffer1_handle, buffer2_handle;
>> +
>> +	uint64_t l3_tbl_offset, l2_tbl1_offset, l2_tbl2_offset;
>> +	uint64_t l1_tbl1_offset, l1_tbl2_offset;
>> +	uint64_t buffer1_offset, buffer2_offset;
>> +
>> +	uint32_t data;
>> +	uint64_t address, current_ppgtt_offset = 0x10000;
>> +
>> +	fd = drm_open_driver(DRIVER_INTEL);
>> +	igt_require(uses_full_ppgtt(fd, FULL_48_BIT_PPGTT));
>> +	igt_require(has_softpin_support(fd));
>> +	igt_require(has_trtt_support(fd));
>> +
>> +	/* Allocate a L3 table BO */
>> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l3_tbl_handle, l3_tbl_offset, 0);
>> +
>> +	/* Allocate two L2 table BOs */
>> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l2_tbl1_handle, l2_tbl1_offset, 1);
>> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l2_tbl2_handle, l2_tbl2_offset, 2);
>> +
>> +	/* Allocate two L1 table BOs */
>> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l1_tbl1_handle, l1_tbl1_offset, 3);
>> +	BO_ALLOC_AND_SETUP(fd, TABLE_SIZE, l1_tbl2_handle, l1_tbl2_offset, 4);
>> +
>> +	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
>> +	current_ppgtt_offset = ALIGN(current_ppgtt_offset, TILE_SIZE);
>> +
>> +	/* Allocate two Data buffer BOs */
>> +	BO_ALLOC_AND_SETUP(fd, TILE_SIZE, buffer1_handle, buffer1_offset, 5);
>> +	BO_ALLOC_AND_SETUP(fd, TILE_SIZE, buffer2_handle, buffer2_offset, 6);
>> +
>> +	/* Finally allocate Batch buffer BO */
>> +	batch_buf_handle = gem_create(fd, BO_SIZE);
>> +	setup_exec_obj(&exec_object2[7], batch_buf_handle, 0, 0);
>
> Scary jump from idx to 7.
> Why not just pin this as well to reduce the code complexity? Afterwards
> setup_exec_obj() can allocate an object all by itself.

I did not pin the BB as well, as that could have given the impression that
soft pinning is required for all the BOs when using TR-TT. Also this will let
us test that the KMD doesn't place the BB in the segment reserved for TR-TT.

>
>> +
>> +	/* Add commands to update the two L3 table entries to point them to the L2 tables*/
>> +	address = l3_tbl_offset;
>> +	data = l2_tbl1_offset;
>> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
>> +
>> +	address = l3_tbl_offset + 511*sizeof(uint64_t);
>> +	data = l2_tbl2_offset;
>> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
>> +
>> +	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables*/
>> +	address = l2_tbl1_offset;
>> +	data = l1_tbl1_offset;
>> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
>> +
>> +	address = l2_tbl2_offset + 511*sizeof(uint64_t);
>> +	data = l1_tbl2_offset;
>> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
>> +
>> +	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers*/
>> +	address = l1_tbl1_offset;
>> +	data = buffer1_offset >> 16;
>> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
>> +
>> +	address = l1_tbl2_offset + 1023*sizeof(uint32_t);
>> +	data = buffer2_offset >> 16;
>> +	len = gem_store_data_svm(fd, batch_buffer, len, address, data, false);
>> +
>> +	/* Add commands to update the 2 data buffers, using their TRTT VA */
>> +	data = 0x12345678;
>> +	len = gem_store_data_svm(fd, batch_buffer, len, FIRST_TILE_ADDRESS, data, false);
>> +	len = gem_store_data_svm(fd, batch_buffer, len, LAST_TILE_ADDRESS, data, true);
>> +
>> +	gem_write(fd, batch_buf_handle, 0, batch_buffer, len*4);
>
> Or for even shorter code: batch_buffer =
> gem_mmap__cpu(exec_object[batch].handle);
>
Actually it's easier to trace the explicit update through pwrite.

>> +
>> +	/* Request KMD to setup the TR-TT */
>> +	send_trtt_params(fd, 0, l3_tbl_offset);
>> +
>> +	ring = I915_EXEC_RENDER;
>> +	setup_execbuffer(&execbuf, exec_object2, ring, 8, len*4);
>> +
>> +	/* submit command buffer */
>> +	submit_and_sync(fd, &execbuf, batch_buf_handle);
>> +
>> +	/* read the 2 data buffers to check for the value written by the GPU */
>> +	ptr = mmap_bo(fd, buffer1_handle, TILE_SIZE);
>> +	igt_fail_on_f(ptr[0] != data,
>> +		"\nCPU read does not match GPU write,\
>> +		expected: 0x%x, got: 0x%x\n",
>> +		data, ptr[0]);
>> +
>> +	ptr = mmap_bo(fd, buffer2_handle, TILE_SIZE);
>> +	igt_fail_on_f(ptr[0] != data,
>> +		"\nCPU read does not match GPU write,\
>> +		expected: 0x%x, got: 0x%x\n",
>> +		data, ptr[0]);
>> +
>> +	gem_close(fd, l3_tbl_handle);
>> +	gem_close(fd, l2_tbl1_handle);
>> +	gem_close(fd, l2_tbl2_handle);
>> +	gem_close(fd, l1_tbl1_handle);
>> +	gem_close(fd, l1_tbl2_handle);
>> +	gem_close(fd, buffer1_handle);
>> +	gem_close(fd, buffer2_handle);
>> +	gem_close(fd, batch_buf_handle);
>> +	close(fd);
>> +}
>> +
>> +int main(int argc, char* argv[])
>> +{
>> +	igt_subtest_init(argc, argv);
>
> Together these are igt_main, and then you can also drop igt_exit.
>
>> +	igt_skip_on_simulation();
>
> I think you want this on simulation as well, at least "basic".
>

Thanks, will use igt_main.

>> +
>> +	/* test needs 48 PPGTT & Soft Pin support */
>> +	igt_subtest("basic") {
>> +		gem_basic_trtt_use();
>> +	}
>> +
>> +	igt_exit();
>> +}
>> +
>> --
>> 1.9.2
>>
>

* [PATCH v2] igt/gem_trtt: Exercise the TRTT hardware
  2016-01-20 10:24   ` Goel, Akash
@ 2016-01-22 15:37     ` akash.goel
  2016-01-22 20:41       ` Chris Wilson
  0 siblings, 1 reply; 30+ messages in thread
From: akash.goel @ 2016-01-22 15:37 UTC (permalink / raw)
  To: intel-gfx; +Cc: Akash Goel

From: Akash Goel <akash.goel@intel.com>

This patch provides a testcase to exercise the TRTT hardware.

Some platforms have additional address translation hardware support in the
form of a Tiled Resource Translation Table (TR-TT), which provides an extra
level of abstraction over PPGTT.
This is useful for mapping Sparse/Tiled texture resources.

TR-TT is tightly coupled with PPGTT: a new TR-TT instance is required for
each new PPGTT instance, but TR-TT need not be enabled for every context.
1/16th of the 48-bit PPGTT space is earmarked for translation by TR-TT;
which chunk to use is conveyed to HW through a register.
Any GFX address which lies in that reserved 44-bit range will be translated
through TR-TT first and then through PPGTT to get the actual physical address.

TR-TT is constructed as a 3-level tile table. Each tile is 64KB in size,
which leaves behind 44-16=28 address bits. The 28 bits are partitioned as
9+9+10, and each level is contained within a 4KB page, hence L3 and L2 are
composed of 512 64-bit entries and L1 is composed of 1024 32-bit entries.

There is a provision to keep the TR-TT tables in virtual space, where the
pages of the TR-TT tables are mapped into the PPGTT. This is the adopted
mode, as in this mode the UMD has full control over TR-TT management, with
bare minimum support from the KMD.
So the entries of the L3 table will contain the PPGTT offset of the L2 table
pages, similarly the entries of the L2 table will contain the PPGTT offset
of the L1 table pages. The entries of the L1 table will contain the PPGTT
offset of the BOs actually backing the Sparse resources.

The I915_GEM_CONTEXT_SETPARAM ioctl is used to request the KMD to enable
TRTT for a given context; a new I915_CONTEXT_PARAM_ENABLE_TRTT param has
been added to the CONTEXT_SETPARAM ioctl for that purpose.

v2:
 - Add a new wrapper function __gem_context_require_param and use it to
   detect TR-TT support
 - Use the igt_main macro, rename certain functions, remove extra whitespace,
   clean up the code (Chris)
 - Enhance the basic subtest to exercise all possible TR-TT segment start
   locations (i.e. 16 of them), creating a new context for every iteration.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Akash Goel <akash.goel@intel.com>
---
 lib/ioctl_wrappers.c   |  25 ++-
 lib/ioctl_wrappers.h   |   2 +
 tests/Makefile.sources |   1 +
 tests/gem_trtt.c       | 446 +++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 466 insertions(+), 8 deletions(-)
 create mode 100644 tests/gem_trtt.c

diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index e348f26..46e1db2 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -833,6 +833,22 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
 	do_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_SETPARAM, p);
 }
 
+int __gem_context_require_param(int fd, uint64_t param)
+{
+	struct local_i915_gem_context_param p;
+	int ret;
+
+	p.context = 0;
+	p.param = param;
+	p.value = 0;
+	p.size = 0;
+
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
+	if (ret)
+		return -errno;
+	return 0;
+}
+
 /**
  * gem_context_require_param:
  * @fd: open i915 drm file descriptor
@@ -843,14 +859,7 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
  */
 void gem_context_require_param(int fd, uint64_t param)
 {
-	struct local_i915_gem_context_param p;
-
-	p.context = 0;
-	p.param = param;
-	p.value = 0;
-	p.size = 0;
-
-	igt_require(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
+	igt_require(__gem_context_require_param(fd, param) == 0);
 }
 
 void gem_context_require_ban_period(int fd)
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index fe2f687..258c612 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -105,9 +105,11 @@ struct local_i915_gem_context_param {
 #define LOCAL_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define LOCAL_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define LOCAL_CONTEXT_PARAM_GTT_SIZE	0x3
+#define LOCAL_CONTEXT_PARAM_TRTT	0x4
 	uint64_t value;
 };
 void gem_context_require_ban_period(int fd);
+int __gem_context_require_param(int fd, uint64_t param);
 void gem_context_require_param(int fd, uint64_t param);
 void gem_context_get_param(int fd, struct local_i915_gem_context_param *p);
 void gem_context_set_param(int fd, struct local_i915_gem_context_param *p);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index d594038..068a44e 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -64,6 +64,7 @@ TESTS_progs_M = \
 	gem_streaming_writes \
 	gem_tiled_blits \
 	gem_tiled_partial_pwrite_pread \
+	gem_trtt \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
 	kms_addfb_basic \
diff --git a/tests/gem_trtt.c b/tests/gem_trtt.c
new file mode 100644
index 0000000..5076e18
--- /dev/null
+++ b/tests/gem_trtt.c
@@ -0,0 +1,446 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Akash Goel <akash.goel@intel.com>
+ *
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <malloc.h>
+#include "drm.h"
+#include "ioctl_wrappers.h"
+#include "drmtest.h"
+#include "intel_chipset.h"
+#include "intel_io.h"
+#include "i915_drm.h"
+#include <assert.h>
+#include <sys/wait.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include "igt_kms.h"
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#define BO_SIZE 4096
+#define EXEC_OBJECT_PINNED	(1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+
+#define NO_PPGTT 0
+#define ALIASING_PPGTT 1
+#define FULL_32_BIT_PPGTT 2
+#define FULL_48_BIT_PPGTT 3
+/* uses_full_ppgtt
+ * Finds supported PPGTT details.
+ * @fd DRM fd
+ * @min can be
+ * 0 - No PPGTT
+ * 1 - Aliasing PPGTT
+ * 2 - Full PPGTT (32b)
+ * 3 - Full PPGTT (48b)
+ * RETURNS true/false if min support is present
+*/
+static bool uses_full_ppgtt(int fd, int min)
+{
+	struct drm_i915_getparam gp;
+	int val = 0;
+
+	memset(&gp, 0, sizeof(gp));
+	gp.param = 18; /* HAS_ALIASING_PPGTT */
+	gp.value = &val;
+
+	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+		return 0;
+
+	errno = 0;
+	return val >= min;
+}
+
+/* has_softpin_support
+ * Finds if softpin feature is supported
+ * @fd DRM fd
+*/
+static bool has_softpin_support(int fd)
+{
+	struct drm_i915_getparam gp;
+	int val = 0;
+
+	memset(&gp, 0, sizeof(gp));
+	gp.param = 37; /* I915_PARAM_HAS_EXEC_SOFTPIN */
+	gp.value = &val;
+
+	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+		return 0;
+
+	errno = 0;
+	return (val == 1);
+}
+
+/* has_trtt_support
+ * Finds if trtt hw is present
+ * @fd DRM fd
+*/
+static bool has_trtt_support(int fd)
+{
+	int ret = __gem_context_require_param(fd, LOCAL_CONTEXT_PARAM_TRTT);
+
+	errno = 0;
+	return (ret == 0);
+}
+
+/* mmap_bo
+ * helper for creating a CPU mmapping of the buffer
+ * @fd - drm fd
+ * @handle - handle of the buffer to mmap
+ * @size: size of the buffer
+*/
+static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
+{
+	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
+	return ptr;
+}
+
+/* emit_store_dword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u32 data to be stored at destination
+*/
+static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint32_t data)
+{
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
+	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
+	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
+	cmd_buf[dw_offset++] = data;
+
+	return dw_offset;
+}
+
+/* emit_store_qword
+ * populate batch buffer with a qword-sized MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u64 data to be stored at destination
+*/
+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint64_t data)
+{
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
+	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
+	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
+	cmd_buf[dw_offset++] = data;
+	cmd_buf[dw_offset++] = data >> 32;
+
+	return dw_offset;
+}
+
+/* emit_bb_end
+ * populate batch buffer with MI_BATCH_BUFFER_END command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+*/
+static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
+{
+	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
+	cmd_buf[dw_offset++] = 0;
+
+	return dw_offset;
+}
+
+/* setup_execbuffer
+ * helper for buffer execution
+ * @execbuf - pointer to execbuffer
+ * @exec_object - pointer to exec object2 struct
+ * @ring - ring to be used
+ * @buffer_count - how many buffers to submit
+ * @batch_length - length of batch buffer
+*/
+static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
+			     struct drm_i915_gem_exec_object2 *exec_object,
+			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
+{
+	memset(execbuf, 0, sizeof(*execbuf));
+
+	execbuf->buffers_ptr = (unsigned long)exec_object;
+	execbuf->buffer_count = buffer_count;
+	execbuf->batch_len = batch_length;
+	execbuf->flags = ring;
+	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
+}
+
+#define TABLE_SIZE 0x1000
+#define TILE_SIZE 0x10000
+
+#define TRTT_SEGMENT_SIZE (1ULL << 44)
+#define PPGTT_SIZE (1ULL << 48)
+
+#define NULL_TILE_PATTERN    0xFFFFFFFF
+#define INVALID_TILE_PATTERN 0xFFFFFFFE
+
+struct local_i915_gem_context_trtt_param {
+	uint64_t segment_base_addr;
+	uint64_t l3_table_address;
+	uint32_t invd_tile_val;
+	uint32_t null_tile_val;
+};
+
+/* setup_trtt
+ * Helper function to request KMD to enable TRTT
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static void
+setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	memset(&ctx_param, 0, sizeof(ctx_param));
+
+	trtt_param.null_tile_val = NULL_TILE_PATTERN;
+	trtt_param.invd_tile_val = INVALID_TILE_PATTERN;
+	trtt_param.l3_table_address = l3_table_address;
+	trtt_param.segment_base_addr = segment_base_addr;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	gem_context_set_param(fd, &ctx_param);
+}
+
+/* bo_alloc_setup
+ * allocate bo and populate exec object
+ * @exec_object2 - pointer to exec object
+ * @bo_size - buffer size
+ * @flags - exec flags
+ * @bo_offset - pointer to the current PPGTT offset
+ */
+static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
+			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
+{
+	memset(exec_object2, 0, sizeof(*exec_object2));
+	exec_object2->handle = gem_create(fd, bo_size);
+	exec_object2->flags = flags;
+
+	if (bo_offset)
+	{
+		exec_object2->offset = *bo_offset;
+		*bo_offset += bo_size;
+	}
+}
+
+/* basic test
+ * This test will create a context, allocate an L3 table page, 2 pages apiece
+ * for the L2/L1 tables and a couple of data buffers of 64KB in size, matching
+ * the tile size. The 2 data buffers will be mapped to the 2 ends of the TRTT
+ * virtual space. A series of MI_STORE_DWORD_IMM commands will be added to the
+ * batch buffer, first to update the TR-TT table entries and then to update
+ * the data buffers using their TR-TT VA, exercising the table programming
+ * done previously.
+ * Invoke the CONTEXT_SETPARAM ioctl to request the KMD to enable TRTT.
+ * Invoke execbuffer to submit the batch buffer.
+ * Verify the value of the first DWORD in the 2 data buffers matches the data
+ * the GPU was asked to write.
+ */
+static void submit_trtt_context(int fd, uint64_t segment_base_addr)
+{
+	enum {
+		L3_TBL,
+		L2_TBL1,
+		L2_TBL2,
+		L1_TBL1,
+		L1_TBL2,
+		DATA1,
+		DATA2,
+		BATCH,
+		NUM_BUFFERS,
+	};
+
+	int ring, len = 0;
+	uint32_t *ptr;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
+	uint32_t batch_buffer[BO_SIZE];
+	uint32_t ctx_id, data32;
+	uint64_t address, data64, cur_ppgtt_off, exec_flags;
+	uint64_t first_tile_addr, last_tile_addr;
+
+	first_tile_addr = segment_base_addr;
+	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
+
+	if (segment_base_addr == 0) {
+		/* Use the default context for first iteration */
+		ctx_id = 0;
+		cur_ppgtt_off = TRTT_SEGMENT_SIZE;
+		exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+	} else {
+		ctx_id = gem_context_create(fd);
+		cur_ppgtt_off = 0;
+		exec_flags = 0;
+	}
+
+	/* first allocate Batch buffer BO */
+	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
+
+	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
+	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
+
+	/* Allocate a L3 table BO */
+	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L2 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L1 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
+	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
+
+	/* Allocate two Data buffer BOs */
+	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Add commands to update the two L3 table entries to point them to the L2 tables*/
+	address = exec_object2[L3_TBL].offset;
+	data64 = exec_object2[L2_TBL1].offset;
+	len = emit_store_qword(fd, batch_buffer, len, address, data64);
+
+	address = exec_object2[L3_TBL].offset + 511*sizeof(uint64_t);
+	data64 = exec_object2[L2_TBL2].offset;
+	len = emit_store_qword(fd, batch_buffer, len, address, data64);
+
+	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables*/
+	address = exec_object2[L2_TBL1].offset;
+	data64 = exec_object2[L1_TBL1].offset;
+	len = emit_store_qword(fd, batch_buffer, len, address, data64);
+
+	address = exec_object2[L2_TBL2].offset + 511*sizeof(uint64_t);
+	data64 = exec_object2[L1_TBL2].offset;
+	len = emit_store_qword(fd, batch_buffer, len, address, data64);
+
+	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers*/
+	address = exec_object2[L1_TBL1].offset;
+	data32 = exec_object2[DATA1].offset >> 16;
+	len = emit_store_dword(fd, batch_buffer, len, address, data32);
+
+	address = exec_object2[L1_TBL2].offset + 1023*sizeof(uint32_t);
+	data32 = exec_object2[DATA2].offset >> 16;
+	len = emit_store_dword(fd, batch_buffer, len, address, data32);
+
+	/* Add commands to update the 2 data buffers, using their TRTT VA */
+	data32 = 0x12345678;
+	len = emit_store_dword(fd, batch_buffer, len, first_tile_addr, data32);
+	len = emit_store_dword(fd, batch_buffer, len, last_tile_addr, data32);
+
+	len = emit_bb_end(fd, batch_buffer, len);
+	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
+
+	/* Request KMD to setup the TR-TT */
+	setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
+
+	ring = I915_EXEC_RENDER;
+	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
+
+	/* submit command buffer */
+	gem_execbuf(fd, &execbuf);
+
+	/* read the 2 data buffers to check for the value written by the GPU */
+	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
+	igt_fail_on_f(ptr[0] != data32,
+		"\nCPU read does not match GPU write,\
+		expected: 0x%x, got: 0x%x\n",
+		data32, ptr[0]);
+
+	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
+	igt_fail_on_f(ptr[0] != data32,
+		"\nCPU read does not match GPU write,\
+		expected: 0x%x, got: 0x%x\n",
+		data32, ptr[0]);
+
+	gem_close(fd, exec_object2[L3_TBL].handle);
+	gem_close(fd, exec_object2[L2_TBL1].handle);
+	gem_close(fd, exec_object2[L2_TBL2].handle);
+	gem_close(fd, exec_object2[L1_TBL1].handle);
+	gem_close(fd, exec_object2[L1_TBL2].handle);
+	gem_close(fd, exec_object2[DATA1].handle);
+	gem_close(fd, exec_object2[DATA2].handle);
+	gem_close(fd, exec_object2[BATCH].handle);
+
+	if (ctx_id)
+		gem_context_destroy(fd, ctx_id);
+}
+
+static void gem_basic_trtt_use(void)
+{
+	int fd;
+	uint64_t segment_base_addr;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	igt_require(uses_full_ppgtt(fd, FULL_48_BIT_PPGTT));
+	igt_require(has_softpin_support(fd));
+	igt_require(has_trtt_support(fd));
+
+	for (segment_base_addr = 0;
+	     segment_base_addr < PPGTT_SIZE;
+	     segment_base_addr += TRTT_SEGMENT_SIZE)
+	{
+		submit_trtt_context(fd, segment_base_addr);
+	}
+
+	close(fd);
+}
+
+igt_main
+{
+
+	/* test needs 48b PPGTT & soft pin support */
+	igt_subtest("basic") {
+		gem_basic_trtt_use();
+	}
+}
+
-- 
1.9.2


* Re: [PATCH v2] igt/gem_trtt: Exercise the TRTT hardware
  2016-01-22 15:37     ` [PATCH v2] " akash.goel
@ 2016-01-22 20:41       ` Chris Wilson
  2016-03-03  4:55         ` [PATCH v3] " akash.goel
  0 siblings, 1 reply; 30+ messages in thread
From: Chris Wilson @ 2016-01-22 20:41 UTC (permalink / raw)
  To: akash.goel; +Cc: intel-gfx

On Fri, Jan 22, 2016 at 09:07:47PM +0530, akash.goel@intel.com wrote:
> +static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
> +			    uint64_t vaddr, uint32_t data)
> +{
> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
> +	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
> +	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */

To violate this would be a severe bug in the caller. To mask it here
does something that the caller does not expect, which is very dangerous
when playing with PT. Either way it does not serve well as a demonstration.

assert((vaddr & (-(1ull<<48)|3)) == 0); or hex equivalent.
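
For reference, the hex equivalent would be (a sketch; -(1ull << 48) is
0xFFFF000000000000, OR-ed with the low alignment bits):

	assert((vaddr & 0xFFFF000000000003ull) == 0);
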
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* [PATCH v3] igt/gem_trtt: Exercise the TRTT hardware
  2016-01-22 20:41       ` Chris Wilson
@ 2016-03-03  4:55         ` akash.goel
  2016-03-03 10:04           ` Chris Wilson
  0 siblings, 1 reply; 30+ messages in thread
From: akash.goel @ 2016-03-03  4:55 UTC (permalink / raw)
  To: intel-gfx; +Cc: Akash Goel

From: Akash Goel <akash.goel@intel.com>

This patch provides the testcase to exercise the TRTT hardware.

Some platforms have an additional address translation hardware support in
form of Tiled Resource Translation Table (TR-TT) which provides an extra level
of abstraction over PPGTT.
This is useful for mapping Sparse/Tiled texture resources.

TR-TT is tightly coupled with PPGTT; a new instance of TR-TT will be required
for each new PPGTT instance, but TR-TT need not be enabled for every context.
1/16th of the 48bit PPGTT space is earmarked for the translation by TR-TT, and
which chunk to use is conveyed to HW through a register.
Any GFX address which lies in that reserved 44 bit range will be translated
through TR-TT first and then through PPGTT to get the actual physical address.
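
(Illustrative arithmetic, not part of the patch: 2^48 / 16 = 2^44, so a
TR-TT segment spans 16TB and there are only 16 possible segment_base_addr
values, namely i * (1ULL << 44) for i = 0..15; these are exactly the
locations the basic subtest iterates over.)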

TR-TT is constructed as a 3 level tile table. Each tile is 64KB in size, which
leaves 44-16=28 address bits. The 28 bits are partitioned as 9+9+10, and
each level is contained within a 4KB page, hence L3 and L2 are composed of
512 64b entries and L1 is composed of 1024 32b entries.
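
For illustration (not part of the patch), an offset within the 44 bit
TR-TT segment decomposes into the per-level table indices like this:

	/* bits [15:0]  - offset within the 64KB tile
	 * bits [25:16] - L1 index (10 bits -> 1024 entries)
	 * bits [34:26] - L2 index (9 bits  -> 512 entries)
	 * bits [43:35] - L3 index (9 bits  -> 512 entries)
	 */
	static inline uint32_t trtt_l1_index(uint64_t offset)
	{
		return (offset >> 16) & 0x3FF;
	}

	static inline uint32_t trtt_l2_index(uint64_t offset)
	{
		return (offset >> 26) & 0x1FF;
	}

	static inline uint32_t trtt_l3_index(uint64_t offset)
	{
		return (offset >> 35) & 0x1FF;
	}

So the last tile in the segment (offset (1ULL << 44) - 0x10000) maps to
L3 entry 511, L2 entry 511 and L1 entry 1023, which is exactly what the
testcase programs below.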

There is a provision to keep TR-TT tables in virtual space, where the pages of
TR-TT tables will be mapped to PPGTT. This is the adopted mode, as in this mode
the UMD will have full control of TR-TT management, with bare minimum support
from KMD.
So the entries of L3 table will contain the PPGTT offset of L2 Table pages,
similarly entries of L2 table will contain the PPGTT offset of L1 Table pages.
The entries of L1 table will contain the PPGTT offset of BOs actually backing
the Sparse resources.

I915_GEM_CONTEXT_SETPARAM ioctl is used to request KMD to enable TRTT for a
certain context, a new I915_CONTEXT_PARAM_ENABLE_TRTT param has been
added to the CONTEXT_SETPARAM ioctl for that purpose.

v2:
 - Add new wrapper function __gem_context_require_param and used that
   to detect the TR-TT support
 - Use igt_main macro, rename certain function, remove extra white space,
   cleanup the code (Chris)
 - Enhance the basic subtest to exercise all possible TR-TT segment start
   locations (i.e. 16 of them) & for every iteration create a new context.

v3:
 - Get rid of some superfluous local variables (Chris)
 - Add asserts to validate that the GFX address used in the MI_STORE_DATA_IMM
   command is in canonical form & correctly aligned (Chris)
 - Remove clearing of errno in has_trtt_support function (Chris)
 - Use the 48B_ADDRESS flag for batch buffer BO also (Chris)
 - Rebased.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Akash Goel <akash.goel@intel.com>
---
 lib/ioctl_wrappers.c   |  25 ++-
 lib/ioctl_wrappers.h   |   2 +
 tests/Makefile.sources |   1 +
 tests/gem_trtt.c       | 483 +++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 503 insertions(+), 8 deletions(-)
 create mode 100644 tests/gem_trtt.c

diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 4071260..f6ba8e1 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -882,6 +882,22 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
 	igt_assert(__gem_context_set_param(fd, p) == 0);
 }
 
+int __gem_context_require_param(int fd, uint64_t param)
+{
+	struct local_i915_gem_context_param p;
+	int ret;
+
+	p.context = 0;
+	p.param = param;
+	p.value = 0;
+	p.size = 0;
+
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
+	if (ret)
+		return -errno;
+	return 0;
+}
+
 /**
  * gem_context_require_param:
  * @fd: open i915 drm file descriptor
@@ -892,14 +908,7 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
  */
 void gem_context_require_param(int fd, uint64_t param)
 {
-	struct local_i915_gem_context_param p;
-
-	p.context = 0;
-	p.param = param;
-	p.value = 0;
-	p.size = 0;
-
-	igt_require(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
+	igt_require(__gem_context_require_param(fd, param) == 0);
 }
 
 void gem_context_require_ban_period(int fd)
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index dc0827a..bcabe30 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -107,9 +107,11 @@ struct local_i915_gem_context_param {
 #define LOCAL_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define LOCAL_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define LOCAL_CONTEXT_PARAM_GTT_SIZE	0x3
+#define LOCAL_CONTEXT_PARAM_TRTT	0x4
 	uint64_t value;
 };
 void gem_context_require_ban_period(int fd);
+int __gem_context_require_param(int fd, uint64_t param);
 void gem_context_require_param(int fd, uint64_t param);
 void gem_context_get_param(int fd, struct local_i915_gem_context_param *p);
 void gem_context_set_param(int fd, struct local_i915_gem_context_param *p);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index f8b18b0..e6081f6 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -73,6 +73,7 @@ TESTS_progs_M = \
 	gem_streaming_writes \
 	gem_tiled_blits \
 	gem_tiled_partial_pwrite_pread \
+	gem_trtt \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
 	kms_addfb_basic \
diff --git a/tests/gem_trtt.c b/tests/gem_trtt.c
new file mode 100644
index 0000000..f8a9c3f
--- /dev/null
+++ b/tests/gem_trtt.c
@@ -0,0 +1,483 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Akash Goel <akash.goel@intel.com>
+ *
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <malloc.h>
+#include "drm.h"
+#include "ioctl_wrappers.h"
+#include "drmtest.h"
+#include "intel_chipset.h"
+#include "intel_io.h"
+#include "i915_drm.h"
+#include <assert.h>
+#include <sys/wait.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include "igt_kms.h"
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#define BO_SIZE 4096
+#define EXEC_OBJECT_PINNED	(1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+
+/* gen8_canonical_addr
+ * Used to convert any address into canonical form, i.e. [63:48] == [47].
+ * Based on kernel's sign_extend64 implementation.
+ * @address - a virtual address
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static uint64_t gen8_canonical_addr(uint64_t address)
+{
+	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
+	return (__s64)(address << shift) >> shift;
+}
+
+#define NO_PPGTT 0
+#define ALIASING_PPGTT 1
+#define FULL_32_BIT_PPGTT 2
+#define FULL_48_BIT_PPGTT 3
+/* uses_full_ppgtt
+ * Finds supported PPGTT details.
+ * @fd DRM fd
+ * @min can be
+ * 0 - No PPGTT
+ * 1 - Aliasing PPGTT
+ * 2 - Full PPGTT (32b)
+ * 3 - Full PPGTT (48b)
+ * Returns: true if at least @min level of PPGTT support is present
+ */
+static bool uses_full_ppgtt(int fd, int min)
+{
+	struct drm_i915_getparam gp;
+	int val = 0;
+
+	memset(&gp, 0, sizeof(gp));
+	gp.param = 18; /* HAS_ALIASING_PPGTT */
+	gp.value = &val;
+
+	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+		return 0;
+
+	errno = 0;
+	return val >= min;
+}
+
+/* has_softpin_support
+ * Finds if softpin feature is supported
+ * @fd DRM fd
+ */
+static bool has_softpin_support(int fd)
+{
+	struct drm_i915_getparam gp;
+	int val = 0;
+
+	memset(&gp, 0, sizeof(gp));
+	gp.param = 37; /* I915_PARAM_HAS_EXEC_SOFTPIN */
+	gp.value = &val;
+
+	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
+		return 0;
+
+	errno = 0;
+	return (val == 1);
+}
+
+/* has_trtt_support
+ * Finds if trtt hw is present
+ * @fd DRM fd
+ */
+static bool has_trtt_support(int fd)
+{
+	int ret = __gem_context_require_param(fd, LOCAL_CONTEXT_PARAM_TRTT);
+
+	return (ret == 0);
+}
+
+/* mmap_bo
+ * helper for creating a CPU mmapping of the buffer
+ * @fd - drm fd
+ * @handle - handle of the buffer to mmap
+ * @size: size of the buffer
+ */
+static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
+{
+	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
+	return ptr;
+}
+
+/* emit_store_dword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u32 data to be stored at destination
+ */
+static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint32_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	assert(vaddr == gen8_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
+	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
+	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
+	cmd_buf[dw_offset++] = data;
+
+	return dw_offset;
+}
+
+/* emit_store_qword
+ * populate batch buffer with a qword MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u64 data to be stored at destination
+ */
+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint64_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	assert(vaddr == gen8_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
+	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
+	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
+	cmd_buf[dw_offset++] = data;
+	cmd_buf[dw_offset++] = data >> 32;
+
+	return dw_offset;
+}
+
+/* emit_bb_end
+ * populate batch buffer with MI_BATCH_BUFFER_END command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ */
+static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
+{
+	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
+	cmd_buf[dw_offset++] = 0;
+
+	return dw_offset;
+}
+
+/* setup_execbuffer
+ * helper for buffer execution
+ * @execbuf - pointer to execbuffer
+ * @exec_object - pointer to exec object2 struct
+ * @ring - ring to be used
+ * @buffer_count - how many buffers to submit
+ * @batch_length - length of batch buffer
+ */
+static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
+			     struct drm_i915_gem_exec_object2 *exec_object,
+			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
+{
+	memset(execbuf, 0, sizeof(*execbuf));
+
+	execbuf->buffers_ptr = (unsigned long)exec_object;
+	execbuf->buffer_count = buffer_count;
+	execbuf->batch_len = batch_length;
+	execbuf->flags = ring;
+	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
+}
+
+#define TABLE_SIZE 0x1000
+#define TILE_SIZE 0x10000
+
+#define TRTT_SEGMENT_SIZE (1ULL << 44)
+#define PPGTT_SIZE (1ULL << 48)
+
+#define NULL_TILE_PATTERN    0xFFFFFFFF
+#define INVALID_TILE_PATTERN 0xFFFFFFFE
+
+struct local_i915_gem_context_trtt_param {
+	uint64_t segment_base_addr;
+	uint64_t l3_table_address;
+	uint32_t invd_tile_val;
+	uint32_t null_tile_val;
+};
+
+/* setup_trtt
+ * Helper function to request KMD to enable TRTT
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static void
+setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	memset(&ctx_param, 0, sizeof(ctx_param));
+
+	trtt_param.null_tile_val = NULL_TILE_PATTERN;
+	trtt_param.invd_tile_val = INVALID_TILE_PATTERN;
+	trtt_param.l3_table_address = l3_table_address;
+	trtt_param.segment_base_addr = segment_base_addr;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	gem_context_set_param(fd, &ctx_param);
+}
+
+/* bo_alloc_setup
+ * allocate bo and populate exec object
+ * @exec_object2 - pointer to exec object
+ * @bo_size - buffer size
+ * @flags - exec flags
+ * @bo_offset - pointer to the current PPGTT offset
+ */
+static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
+			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
+{
+	memset(exec_object2, 0, sizeof(*exec_object2));
+	exec_object2->handle = gem_create(fd, bo_size);
+	exec_object2->flags = flags;
+
+	if (bo_offset)
+	{
+		exec_object2->offset = *bo_offset;
+		*bo_offset += bo_size;
+	}
+}
+
+/* submit_trtt_context
+ * This helper function will create a new context if the TR-TT segment
+ * base address is not zero, allocate an L3 table page, 2 pages apiece
+ * for L2/L1 tables and a couple of data buffers of 64KB in size, matching the
+ * tile size. The 2 data buffers will be mapped to the 2 ends of the TR-TT
+ * virtual space. A series of MI_STORE_DWORD_IMM commands will be added in the
+ * batch buffer, first to update the TR-TT table entries and then to update the
+ * data buffers using their TR-TT VA, exercising the table programming done
+ * previously.
+ * Invoke the CONTEXT_SETPARAM ioctl to request KMD to enable TR-TT.
+ * Invoke execbuffer to submit the batch buffer.
+ * Verify that the value of the first DWORD in the 2 data buffers matches the
+ * data the GPU was asked to write.
+ */
+static void submit_trtt_context(int fd, uint64_t segment_base_addr)
+{
+	enum {
+		L3_TBL,
+		L2_TBL1,
+		L2_TBL2,
+		L1_TBL1,
+		L1_TBL2,
+		DATA1,
+		DATA2,
+		BATCH,
+		NUM_BUFFERS,
+	};
+
+	int ring, len = 0;
+	uint32_t *ptr;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
+	uint32_t batch_buffer[BO_SIZE];
+	uint32_t ctx_id, data, last_entry_offset;
+	uint64_t cur_ppgtt_off, exec_flags;
+	uint64_t first_tile_addr, last_tile_addr;
+
+	first_tile_addr = segment_base_addr;
+	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
+
+	if (segment_base_addr == 0) {
+		/* Use the default context for first iteration */
+		ctx_id = 0;
+		/* To avoid conflict with the TR-TT segment */
+		cur_ppgtt_off = TRTT_SEGMENT_SIZE;
+	} else {
+		/* Create a new context to have different TRTT settings
+		 * on every iteration.
+		 */
+		ctx_id = gem_context_create(fd);
+		cur_ppgtt_off = 0;
+	}
+
+	exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+	/* first allocate Batch buffer BO */
+	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
+
+	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
+	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
+
+	/* Allocate an L3 table BO */
+	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L2 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L1 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
+	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
+
+	/* Allocate two Data buffer BOs */
+	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Add commands to update the two L3 table entries to point them to the L2 tables */
+	last_entry_offset = 511*sizeof(uint64_t);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset,
+			       exec_object2[L2_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset + last_entry_offset,
+			       exec_object2[L2_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables */
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL1].offset,
+			       exec_object2[L1_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL2].offset + last_entry_offset,
+			       exec_object2[L1_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers */
+	last_entry_offset = 1023*sizeof(uint32_t);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL1].offset,
+			       exec_object2[DATA1].offset >> 16);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL2].offset + last_entry_offset,
+			       exec_object2[DATA2].offset >> 16);
+
+	/* Add commands to update the 2 data buffers, using their TRTT VA */
+	data = 0x12345678;
+	len = emit_store_dword(fd, batch_buffer, len, first_tile_addr, data);
+	len = emit_store_dword(fd, batch_buffer, len, last_tile_addr, data);
+
+	len = emit_bb_end(fd, batch_buffer, len);
+	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
+
+	/* Request KMD to setup the TR-TT */
+	setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
+
+	ring = I915_EXEC_RENDER;
+	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
+
+	/* submit command buffer */
+	gem_execbuf(fd, &execbuf);
+
+	/* read the 2 data buffers to check for the value written by the GPU */
+	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
+	igt_fail_on_f(ptr[0] != data,
+		"\nCPU read does not match GPU write,\
+		expected: 0x%x, got: 0x%x\n",
+		data, ptr[0]);
+
+	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
+	igt_fail_on_f(ptr[0] != data,
+		"\nCPU read does not match GPU write,\
+		expected: 0x%x, got: 0x%x\n",
+		data, ptr[0]);
+
+	gem_close(fd, exec_object2[L3_TBL].handle);
+	gem_close(fd, exec_object2[L2_TBL1].handle);
+	gem_close(fd, exec_object2[L2_TBL2].handle);
+	gem_close(fd, exec_object2[L1_TBL1].handle);
+	gem_close(fd, exec_object2[L1_TBL2].handle);
+	gem_close(fd, exec_object2[DATA1].handle);
+	gem_close(fd, exec_object2[DATA2].handle);
+	gem_close(fd, exec_object2[BATCH].handle);
+
+	if (ctx_id)
+		gem_context_destroy(fd, ctx_id);
+}
+
+/* basic trtt test
+ * This will test the basic TR-TT functionality by doing a couple of store
+ * operations through it. It will also exercise all possible TR-TT segment
+ * start locations (i.e. 16 of them).
+ */
+static void gem_basic_trtt_use(void)
+{
+	int fd;
+	uint64_t segment_base_addr;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	igt_require(uses_full_ppgtt(fd, FULL_48_BIT_PPGTT));
+	igt_require(has_softpin_support(fd));
+	igt_require(has_trtt_support(fd));
+
+	for (segment_base_addr = 0;
+	     segment_base_addr < PPGTT_SIZE;
+	     segment_base_addr += TRTT_SEGMENT_SIZE)
+	{
+		submit_trtt_context(fd, segment_base_addr);
+	}
+
+	close(fd);
+}
+
+igt_main
+{
+
+	/* test needs 48b PPGTT & soft pin support */
+	igt_subtest("basic") {
+		gem_basic_trtt_use();
+	}
+}
+
-- 
1.9.2


* Re: [PATCH v3] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-03  4:55         ` [PATCH v3] " akash.goel
@ 2016-03-03 10:04           ` Chris Wilson
  2016-03-03 15:38             ` Goel, Akash
  0 siblings, 1 reply; 30+ messages in thread
From: Chris Wilson @ 2016-03-03 10:04 UTC (permalink / raw)
  To: akash.goel; +Cc: intel-gfx

On Thu, Mar 03, 2016 at 10:25:59AM +0530, akash.goel@intel.com wrote:
> +static bool uses_full_ppgtt(int fd, int min)
gem_gtt_type()

> +static bool has_softpin_support(int fd)

gem_has_softpin()

> +static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
> +			    uint64_t vaddr, uint32_t data)
> +{
> +	/* Check that softpin addresses are in the correct form */
> +	assert(vaddr == gen8_canonical_addr(vaddr));
> +
> +	/* SDI cannot write to unaligned addresses */
> +	assert((vaddr & 3) == 0);
> +
> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
> +	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
> +	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
> +	cmd_buf[dw_offset++] = data;
> +
> +	return dw_offset;
> +}
> +
> +/* emit_store_qword
> + * populate batch buffer with MI_STORE_DWORD_IMM command
> + * @fd: drm file descriptor
> + * @cmd_buf: batch buffer
> + * @dw_offset: write offset in batch buffer
> + * @vaddr: destination Virtual address
> + * @data: u64 data to be stored at destination
> + */
> +static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
> +			    uint64_t vaddr, uint64_t data)
> +{
> +	/* Check that softpin addresses are in the correct form */
> +	assert(vaddr == gen8_canonical_addr(vaddr));
> +
> +	/* SDI cannot write to unaligned addresses */
> +	assert((vaddr & 3) == 0);
> +
> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
> +	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;

You just asserted above that the low bits aren't set.

> +	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */

This conflicts with the value that would be set by the kernel (if it had
to do the relocation). Who is correct? If not required, please don't raise
the spectre of devastating bugs.

> +	cmd_buf[dw_offset++] = data;
> +	cmd_buf[dw_offset++] = data >> 32;
> +
> +	return dw_offset;
> +}
> +
> +/* emit_bb_end
> + * populate batch buffer with MI_BATCH_BUFFER_END command
> + * @fd: drm file descriptor
> + * @cmd_buf: batch buffer
> + * @dw_offset: write offset in batch buffer
> + */
> +static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
> +{
> +	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
> +	cmd_buf[dw_offset++] = 0;

Why? execbuf.batch_len must be aligned, but the CS parser doesn't care,
and there is no guarantee that you are aligned coming into
emit_bb_end().

> +
> +	return dw_offset;
> +}
> +
> +/* setup_execbuffer
> + * helper for buffer execution
> + * @execbuf - pointer to execbuffer
> + * @exec_object - pointer to exec object2 struct
> + * @ring - ring to be used
> + * @buffer_count - how many buffers to submit
> + * @batch_length - length of batch buffer
> + */
> +static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
> +			     struct drm_i915_gem_exec_object2 *exec_object,
> +			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
> +{
> +	memset(execbuf, 0, sizeof(*execbuf));
> +
> +	execbuf->buffers_ptr = (unsigned long)exec_object;
> +	execbuf->buffer_count = buffer_count;
> +	execbuf->batch_len = batch_length;
> +	execbuf->flags = ring;
> +	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
> +}
> +
> +#define TABLE_SIZE 0x1000
> +#define TILE_SIZE 0x10000
> +
> +#define TRTT_SEGMENT_SIZE (1ULL << 44)
> +#define PPGTT_SIZE (1ULL << 48)
> +
> +#define NULL_TILE_PATTERN    0xFFFFFFFF
> +#define INVALID_TILE_PATTERN 0xFFFFFFFE
> +
> +struct local_i915_gem_context_trtt_param {
> +	uint64_t segment_base_addr;
> +	uint64_t l3_table_address;
> +	uint32_t invd_tile_val;
> +	uint32_t null_tile_val;
> +};
> +
> +/* setup_trtt
> + * Helper function to request KMD to enable TRTT
> + * @fd - drm fd
> + * @ctx_id - id of the context for which TRTT is to be enabled
> + * @l3_table_address - GFX address of the L3 table
> + * @segment_base_addr - offset of the TRTT segment in PPGTT space
> + */
> +static void
> +setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
> +	   uint64_t segment_base_addr)
> +{
> +	struct local_i915_gem_context_param ctx_param;
> +	struct local_i915_gem_context_trtt_param trtt_param;
> +
> +	memset(&ctx_param, 0, sizeof(ctx_param));
> +
> +	trtt_param.null_tile_val = NULL_TILE_PATTERN;
> +	trtt_param.invd_tile_val = INVALID_TILE_PATTERN;
> +	trtt_param.l3_table_address = l3_table_address;
> +	trtt_param.segment_base_addr = segment_base_addr;
> +
> +	ctx_param.context = ctx_id;
> +	ctx_param.size = sizeof(trtt_param);
> +	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
> +	ctx_param.value = (uint64_t)&trtt_param;
> +
> +	gem_context_set_param(fd, &ctx_param);
> +}
> +
> +/* bo_alloc_setup
> + * allocate bo and populate exec object
> + * @exec_object2 - pointer to exec object
> + * @bo_size - buffer size
> + * @flags - exec flags
> + * @bo_offset - pointer to the current PPGTT offset
> + */
> +static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
> +			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
> +{
> +	memset(exec_object2, 0, sizeof(*exec_object2));
> +	exec_object2->handle = gem_create(fd, bo_size);
> +	exec_object2->flags = flags;
> +
> +	if (bo_offset)
> +	{
> +		exec_object2->offset = *bo_offset;
> +		*bo_offset += bo_size;
> +	}
> +}
> +
> +/* submit_trtt_context
> + * This helper function will create a new context if the TR-TT segment
> + * base address is not zero, allocate an L3 table page, 2 pages apiece
> + * for L2/L1 tables and a couple of data buffers of 64KB in size, matching the
> + * Tile size. The 2 data buffers will be mapped to the 2 ends of TRTT virtual
> + * space. Series of MI_STORE_DWORD_IMM commands will be added in the batch
> + * buffer to first update the TR-TT table entries and then to update the data
> + * buffers using their TR-TT VA, exercising the table programming done
> + * previously.
> + * Invoke CONTEXT_SETPARAM ioctl to request KMD to enable TRTT.
> + * Invoke execbuffer to submit the batch buffer.
> + * Verify value of first DWORD in the 2 data buffer matches the data asked
> + * to be written by the GPU.
> + */
> +static void submit_trtt_context(int fd, uint64_t segment_base_addr)
> +{
> +	enum {
> +		L3_TBL,
> +		L2_TBL1,
> +		L2_TBL2,
> +		L1_TBL1,
> +		L1_TBL2,
> +		DATA1,
> +		DATA2,
> +		BATCH,
> +		NUM_BUFFERS,
> +	};
> +
> +	int ring, len = 0;
> +	uint32_t *ptr;
> +	struct drm_i915_gem_execbuffer2 execbuf;
> +	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
> +	uint32_t batch_buffer[BO_SIZE];
> +	uint32_t ctx_id, data, last_entry_offset;
> +	uint64_t cur_ppgtt_off, exec_flags;
> +	uint64_t first_tile_addr, last_tile_addr;
> +
> +	first_tile_addr = segment_base_addr;
> +	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
> +
> +	if (segment_base_addr == 0) {
> +		/* Use the default context for first iteration */
> +		ctx_id = 0;

Seems like a variable the callee wants to control. (Otherwise you are
missing a significant test for non-default contexts). It also implies
that we don't test what happens when we call set-trtt-context twice on a
context.

> +		/* To avoid conflict with the TR-TT segment */
> +		cur_ppgtt_off = TRTT_SEGMENT_SIZE;
> +	} else {
> +		/* Create a new context to have different TRTT settings
> +		 * on every iteration.
> +		 */
> +		ctx_id = gem_context_create(fd);
> +		cur_ppgtt_off = 0;
> +	}
> +
> +	exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
> +
> +	/* first allocate Batch buffer BO */
> +	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
> +
> +	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
> +	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
> +
> +	/* Allocate a L3 table BO */
> +	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
> +
> +	/* Allocate two L2 table BOs */
> +	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
> +	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
> +
> +	/* Allocate two L1 table BOs */
> +	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
> +	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
> +
> +	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
> +	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
> +
> +	/* Allocate two Data buffer BOs */
> +	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
> +	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
> +
> +	/* Add commands to update the two L3 table entries to point them to the L2 tables*/
> +	last_entry_offset = 511*sizeof(uint64_t);
> +
> +	len = emit_store_qword(fd, batch_buffer, len,
> +			       exec_object2[L3_TBL].offset,
> +			       exec_object2[L2_TBL1].offset);
> +
> +	len = emit_store_qword(fd, batch_buffer, len,
> +			       exec_object2[L3_TBL].offset + last_entry_offset,
> +			       exec_object2[L2_TBL2].offset);
> +
> +	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables*/
> +	len = emit_store_qword(fd, batch_buffer, len,
> +			       exec_object2[L2_TBL1].offset,
> +			       exec_object2[L1_TBL1].offset);
> +
> +	len = emit_store_qword(fd, batch_buffer, len,
> +			       exec_object2[L2_TBL2].offset + last_entry_offset,
> +			       exec_object2[L1_TBL2].offset);
> +
> +	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers*/
> +	last_entry_offset = 1023*sizeof(uint32_t);
> +
> +	len = emit_store_dword(fd, batch_buffer, len,
> +			       exec_object2[L1_TBL1].offset,
> +			       exec_object2[DATA1].offset >> 16);
> +
> +	len = emit_store_dword(fd, batch_buffer, len,
> +			       exec_object2[L1_TBL2].offset + last_entry_offset,
> +			       exec_object2[DATA2].offset >> 16);
> +
> +	/* Add commands to update the 2 data buffers, using their TRTT VA */
> +	data = 0x12345678;
> +	len = emit_store_dword(fd, batch_buffer, len, first_tile_addr, data);
> +	len = emit_store_dword(fd, batch_buffer, len, last_tile_addr, data);
> +
> +	len = emit_bb_end(fd, batch_buffer, len);
> +	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
> +
> +	/* Request KMD to setup the TR-TT */
> +	setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
> +
> +	ring = I915_EXEC_RENDER;
> +	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
> +
> +	/* submit command buffer */
> +	gem_execbuf(fd, &execbuf);
> +
> +	/* read the 2 data buffers to check for the value written by the GPU */
> +	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
> +	igt_fail_on_f(ptr[0] != data,
> +		"\nCPU read does not match GPU write,\
> +		expected: 0x%x, got: 0x%x\n",
> +		data, ptr[0]);

Just igt_assert_eq_u32(ptr[0], expected);

> +
> +	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
> +	igt_fail_on_f(ptr[0] != data,
> +		"\nCPU read does not match GPU write,\
> +		expected: 0x%x, got: 0x%x\n",
> +		data, ptr[0]);
> +
> +	gem_close(fd, exec_object2[L3_TBL].handle);
> +	gem_close(fd, exec_object2[L2_TBL1].handle);
> +	gem_close(fd, exec_object2[L2_TBL2].handle);
> +	gem_close(fd, exec_object2[L1_TBL1].handle);
> +	gem_close(fd, exec_object2[L1_TBL2].handle);
> +	gem_close(fd, exec_object2[DATA1].handle);
> +	gem_close(fd, exec_object2[DATA2].handle);
> +	gem_close(fd, exec_object2[BATCH].handle);

Before we destroy the context (or exit), how about a query_trtt()?
We should also query after create to ensure that the defaults are set.
Just thinking that it is better doing the query after several steps (i.e.
the execbuf) rather than immediately after the set in order to give time
for something to go wrong. We should also ensure that everything remains
set after a GPU hang and suspend/resume.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* Re: [PATCH v3] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-03 10:04           ` Chris Wilson
@ 2016-03-03 15:38             ` Goel, Akash
  2016-03-03 15:46               ` Chris Wilson
  0 siblings, 1 reply; 30+ messages in thread
From: Goel, Akash @ 2016-03-03 15:38 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: akash.goel


Thanks for the review.

On 3/3/2016 3:34 PM, Chris Wilson wrote:
> On Thu, Mar 03, 2016 at 10:25:59AM +0530, akash.goel@intel.com wrote:
>> +static bool uses_full_ppgtt(int fd, int min)
> gem_gtt_type()
Fine, will change like this:
	gem_gtt_type(fd) > 2

>
>> +static bool has_softpin_support(int fd)
>
> gem_has_softpin()

Fine, will use gem_has_softpin().
>
>> +static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
>> +			    uint64_t vaddr, uint32_t data)
>> +{
>> +	/* Check that softpin addresses are in the correct form */
>> +	assert(vaddr == gen8_canonical_addr(vaddr));
>> +
>> +	/* SDI cannot write to unaligned addresses */
>> +	assert((vaddr & 3) == 0);
>> +
>> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
>> +	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
>> +	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
>> +	cmd_buf[dw_offset++] = data;
>> +
>> +	return dw_offset;
>> +}
>> +
>> +/* emit_store_qword
>> + * populate batch buffer with MI_STORE_DWORD_IMM command
>> + * @fd: drm file descriptor
>> + * @cmd_buf: batch buffer
>> + * @dw_offset: write offset in batch buffer
>> + * @vaddr: destination Virtual address
>> + * @data: u64 data to be stored at destination
>> + */
>> +static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
>> +			    uint64_t vaddr, uint64_t data)
>> +{
>> +	/* Check that softpin addresses are in the correct form */
>> +	assert(vaddr == gen8_canonical_addr(vaddr));
>> +
>> +	/* SDI cannot write to unaligned addresses */
>> +	assert((vaddr & 3) == 0);
>> +
>> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
>> +	cmd_buf[dw_offset++] = vaddr & 0xFFFFFFFC;
>
> You just asserted above that the low bits aren't set.
>
>> +	cmd_buf[dw_offset++] = (vaddr >> 32) & 0xFFFF; /* bits 32:47 */
>
> This conflicts with the value that would be set by the kernel (if it had
> to the relocation). Who is correct? If not required, please don't raise
> the spectre of devastating bugs.
>
Sorry, will modify like this:
	cmd_buf[dw_offset++] = (uint32_t)vaddr;
	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);

>> +	cmd_buf[dw_offset++] = data;
>> +	cmd_buf[dw_offset++] = data >> 32;
>> +
>> +	return dw_offset;
>> +}
>> +
>> +/* emit_bb_end
>> + * populate batch buffer with MI_BATCH_BUFFER_END command
>> + * @fd: drm file descriptor
>> + * @cmd_buf: batch buffer
>> + * @dw_offset: write offset in batch buffer
>> + */
>> +static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
>> +{
>> +	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
>> +	cmd_buf[dw_offset++] = 0;
>
> Why? execbuf.batch_len must be aligned, but the CS parser doesn't care,
> and there is no guarantee that you are aligned coming into
> emit_bb_end().
>
Sorry, will change like this:
	dw_offset = ALIGN(dw_offset, 2);
	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
	dw_offset++;

>> +
>> +	return dw_offset;
>> +}
>> +
>> +/* setup_execbuffer
>> + * helper for buffer execution
>> + * @execbuf - pointer to execbuffer
>> + * @exec_object - pointer to exec object2 struct
>> + * @ring - ring to be used
>> + * @buffer_count - how many buffers to submit
>> + * @batch_length - length of batch buffer
>> + */
>> +static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
>> +			     struct drm_i915_gem_exec_object2 *exec_object,
>> +			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
>> +{
>> +	memset(execbuf, 0, sizeof(*execbuf));
>> +
>> +	execbuf->buffers_ptr = (unsigned long)exec_object;
>> +	execbuf->buffer_count = buffer_count;
>> +	execbuf->batch_len = batch_length;
>> +	execbuf->flags = ring;
>> +	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
>> +}
>> +
>> +#define TABLE_SIZE 0x1000
>> +#define TILE_SIZE 0x10000
>> +
>> +#define TRTT_SEGMENT_SIZE (1ULL << 44)
>> +#define PPGTT_SIZE (1ULL << 48)
>> +
>> +#define NULL_TILE_PATTERN    0xFFFFFFFF
>> +#define INVALID_TILE_PATTERN 0xFFFFFFFE
>> +
>> +struct local_i915_gem_context_trtt_param {
>> +	uint64_t segment_base_addr;
>> +	uint64_t l3_table_address;
>> +	uint32_t invd_tile_val;
>> +	uint32_t null_tile_val;
>> +};
>> +
>> +/* setup_trtt
>> + * Helper function to request KMD to enable TRTT
>> + * @fd - drm fd
>> + * @ctx_id - id of the context for which TRTT is to be enabled
>> + * @l3_table_address - GFX address of the L3 table
>> + * @segment_base_addr - offset of the TRTT segment in PPGTT space
>> + */
>> +static void
>> +setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
>> +	   uint64_t segment_base_addr)
>> +{
>> +	struct local_i915_gem_context_param ctx_param;
>> +	struct local_i915_gem_context_trtt_param trtt_param;
>> +
>> +	memset(&ctx_param, 0, sizeof(ctx_param));
>> +
>> +	trtt_param.null_tile_val = NULL_TILE_PATTERN;
>> +	trtt_param.invd_tile_val = INVALID_TILE_PATTERN;
>> +	trtt_param.l3_table_address = l3_table_address;
>> +	trtt_param.segment_base_addr = segment_base_addr;
>> +
>> +	ctx_param.context = ctx_id;
>> +	ctx_param.size = sizeof(trtt_param);
>> +	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
>> +	ctx_param.value = (uint64_t)&trtt_param;
>> +
>> +	gem_context_set_param(fd, &ctx_param);
>> +}
>> +
>> +/* bo_alloc_setup
>> + * allocate bo and populate exec object
>> + * @exec_object2 - pointer to exec object
>> + * @bo_size - buffer size
>> + * @flags - exec flags
>> + * @bo_offset - pointer to the current PPGTT offset
>> + */
>> +static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
>> +			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
>> +{
>> +	memset(exec_object2, 0, sizeof(*exec_object2));
>> +	exec_object2->handle = gem_create(fd, bo_size);
>> +	exec_object2->flags = flags;
>> +
>> +	if (bo_offset)
>> +	{
>> +		exec_object2->offset = *bo_offset;
>> +		*bo_offset += bo_size;
>> +	}
>> +}
>> +
>> +/* submit_trtt_context
>> + * This helper function will create a new context if the TR-TT segment
>> + * base address is not zero, allocate an L3 table page, 2 pages apiece
>> + * for L2/L1 tables and a couple of data buffers of 64KB in size, matching the
>> + * Tile size. The 2 data buffers will be mapped to the 2 ends of TRTT virtual
>> + * space. Series of MI_STORE_DWORD_IMM commands will be added in the batch
>> + * buffer to first update the TR-TT table entries and then to update the data
>> + * buffers using their TR-TT VA, exercising the table programming done
>> + * previously.
>> + * Invoke CONTEXT_SETPARAM ioctl to request KMD to enable TRTT.
>> + * Invoke execbuffer to submit the batch buffer.
>> + * Verify value of first DWORD in the 2 data buffer matches the data asked
>> + * to be written by the GPU.
>> + */
>> +static void submit_trtt_context(int fd, uint64_t segment_base_addr)
>> +{
>> +	enum {
>> +		L3_TBL,
>> +		L2_TBL1,
>> +		L2_TBL2,
>> +		L1_TBL1,
>> +		L1_TBL2,
>> +		DATA1,
>> +		DATA2,
>> +		BATCH,
>> +		NUM_BUFFERS,
>> +	};
>> +
>> +	int ring, len = 0;
>> +	uint32_t *ptr;
>> +	struct drm_i915_gem_execbuffer2 execbuf;
>> +	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
>> +	uint32_t batch_buffer[BO_SIZE];
>> +	uint32_t ctx_id, data, last_entry_offset;
>> +	uint64_t cur_ppgtt_off, exec_flags;
>> +	uint64_t first_tile_addr, last_tile_addr;
>> +
>> +	first_tile_addr = segment_base_addr;
>> +	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
>> +
>> +	if (segment_base_addr == 0) {
>> +		/* Use the default context for first iteration */
>> +		ctx_id = 0;
>
> Seems like a variable the callee wants to control. (Otherwise you are
> missing a significant test for non-default contexts). It also implies
> that we don't test what happens when we call set-trtt-context twice on a
> context.
>

As per the current logic, the callee will use a non-default context for
the non-zero TR-TT segment start locations.

Should a new subtest be added to check the case when set-trtt-context is
called twice, which is expected to fail?
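
If so, something along these lines perhaps; an untested sketch reusing
the local_i915_gem_context_* structs and helpers from the patch (the
exact errno the kernel should return is an open question):

	static void test_set_trtt_twice(int fd)
	{
		struct local_i915_gem_context_trtt_param trtt_param;
		struct local_i915_gem_context_param ctx_param;
		uint32_t ctx_id = gem_context_create(fd);

		memset(&trtt_param, 0, sizeof(trtt_param));
		trtt_param.null_tile_val = NULL_TILE_PATTERN;
		trtt_param.invd_tile_val = INVALID_TILE_PATTERN;
		/* any L3 table address outside the segment would do here */
		trtt_param.l3_table_address = TRTT_SEGMENT_SIZE;
		trtt_param.segment_base_addr = 0;

		memset(&ctx_param, 0, sizeof(ctx_param));
		ctx_param.context = ctx_id;
		ctx_param.size = sizeof(trtt_param);
		ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
		ctx_param.value = (uint64_t)&trtt_param;

		/* the first enabling should succeed ... */
		igt_assert(__gem_context_set_param(fd, &ctx_param) == 0);
		/* ... but enabling TR-TT twice on the same context should not */
		igt_assert(__gem_context_set_param(fd, &ctx_param) != 0);

		gem_context_destroy(fd, ctx_id);
	}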

>> +		/* To avoid conflict with the TR-TT segment */
>> +		cur_ppgtt_off = TRTT_SEGMENT_SIZE;
>> +	} else {
>> +		/* Create a new context to have different TRTT settings
>> +		 * on every iteration.
>> +		 */
>> +		ctx_id = gem_context_create(fd);
>> +		cur_ppgtt_off = 0;
>> +	}
>> +
>> +	exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
>> +
>> +	/* first allocate Batch buffer BO */
>> +	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
>> +
>> +	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
>> +	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
>> +
>> +	/* Allocate a L3 table BO */
>> +	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
>> +
>> +	/* Allocate two L2 table BOs */
>> +	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
>> +	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
>> +
>> +	/* Allocate two L1 table BOs */
>> +	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
>> +	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
>> +
>> +	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
>> +	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
>> +
>> +	/* Allocate two Data buffer BOs */
>> +	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
>> +	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
>> +
>> +	/* Add commands to update the two L3 table entries to point them to the L2 tables*/
>> +	last_entry_offset = 511*sizeof(uint64_t);
>> +
>> +	len = emit_store_qword(fd, batch_buffer, len,
>> +			       exec_object2[L3_TBL].offset,
>> +			       exec_object2[L2_TBL1].offset);
>> +
>> +	len = emit_store_qword(fd, batch_buffer, len,
>> +			       exec_object2[L3_TBL].offset + last_entry_offset,
>> +			       exec_object2[L2_TBL2].offset);
>> +
>> +	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables*/
>> +	len = emit_store_qword(fd, batch_buffer, len,
>> +			       exec_object2[L2_TBL1].offset,
>> +			       exec_object2[L1_TBL1].offset);
>> +
>> +	len = emit_store_qword(fd, batch_buffer, len,
>> +			       exec_object2[L2_TBL2].offset + last_entry_offset,
>> +			       exec_object2[L1_TBL2].offset);
>> +
>> +	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers*/
>> +	last_entry_offset = 1023*sizeof(uint32_t);
>> +
>> +	len = emit_store_dword(fd, batch_buffer, len,
>> +			       exec_object2[L1_TBL1].offset,
>> +			       exec_object2[DATA1].offset >> 16);
>> +
>> +	len = emit_store_dword(fd, batch_buffer, len,
>> +			       exec_object2[L1_TBL2].offset + last_entry_offset,
>> +			       exec_object2[DATA2].offset >> 16);
>> +
>> +	/* Add commands to update the 2 data buffers, using their TRTT VA */
>> +	data = 0x12345678;
>> +	len = emit_store_dword(fd, batch_buffer, len, first_tile_addr, data);
>> +	len = emit_store_dword(fd, batch_buffer, len, last_tile_addr, data);
>> +
>> +	len = emit_bb_end(fd, batch_buffer, len);
>> +	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
>> +
>> +	/* Request KMD to setup the TR-TT */
>> +	setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
>> +
>> +	ring = I915_EXEC_RENDER;
>> +	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
>> +
>> +	/* submit command buffer */
>> +	gem_execbuf(fd, &execbuf);
>> +
>> +	/* read the 2 data buffers to check for the value written by the GPU */
>> +	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
>> +	igt_fail_on_f(ptr[0] != data,
>> +		"\nCPU read does not match GPU write,\
>> +		expected: 0x%x, got: 0x%x\n",
>> +		data, ptr[0]);
>
> Just igt_assert_eq_u32(ptr[0], expected);

Fine will change.
>
>> +
>> +	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
>> +	igt_fail_on_f(ptr[0] != data,
>> +		"\nCPU read does not match GPU write,\
>> +		expected: 0x%x, got: 0x%x\n",
>> +		data, ptr[0]);
>> +
>> +	gem_close(fd, exec_object2[L3_TBL].handle);
>> +	gem_close(fd, exec_object2[L2_TBL1].handle);
>> +	gem_close(fd, exec_object2[L2_TBL2].handle);
>> +	gem_close(fd, exec_object2[L1_TBL1].handle);
>> +	gem_close(fd, exec_object2[L1_TBL2].handle);
>> +	gem_close(fd, exec_object2[DATA1].handle);
>> +	gem_close(fd, exec_object2[DATA2].handle);
>> +	gem_close(fd, exec_object2[BATCH].handle);
>
> Before we destroy the context (or exit), how about a query_trtt().
> We should also query after create to ensure that the defaults are set.
> Just thinking that is better doing the query after several steps (i.e.
> the execbuf) rather than immediately after the set in order to give time
> for something to go wrong. We should also ensure that everything remains
> set after a GPU hang and suspend/resume.

Nice suggestion. Currently get-trtt-context will just retrieve the TR-TT
params stored with the driver for a given context.
Should the context image itself be read to ensure that the settings are
intact or not?

Best regards
Akash
> -Chris
>

* Re: [PATCH v3] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-03 15:38             ` Goel, Akash
@ 2016-03-03 15:46               ` Chris Wilson
  2016-03-03 16:47                 ` Goel, Akash
  0 siblings, 1 reply; 30+ messages in thread
From: Chris Wilson @ 2016-03-03 15:46 UTC (permalink / raw)
  To: Goel, Akash; +Cc: intel-gfx

On Thu, Mar 03, 2016 at 09:08:25PM +0530, Goel, Akash wrote:
> >Before we destroy the context (or exit), how about a query_trtt().
> >We should also query after create to ensure that the defaults are set.
> >Just thinking that is better doing the query after several steps (i.e.
> >the execbuf) rather than immediately after the set in order to give time
> >for something to go wrong. We should also ensure that everything remains
> >set after a GPU hang and suspend/resume.
> 
> Nice suggestion, currently get-trtt-context will just retrieve the
> TR-TT params stored with a Driver for a given context.
> Should the Context image itself be read to ensure that settings are
> intact or not ?

I'm happy with reading back the driver state (as opposed to the context
image). We're verifying that the hw is setup via the execbuf, we just
want to double check that the kernel reports the correct setup as well.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre

* Re: [PATCH v3] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-03 15:46               ` Chris Wilson
@ 2016-03-03 16:47                 ` Goel, Akash
  2016-03-09 11:31                   ` [PATCH v4] " akash.goel
  0 siblings, 1 reply; 30+ messages in thread
From: Goel, Akash @ 2016-03-03 16:47 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: akash.goel



On 3/3/2016 9:16 PM, Chris Wilson wrote:
> On Thu, Mar 03, 2016 at 09:08:25PM +0530, Goel, Akash wrote:
>>> Before we destroy the context (or exit), how about a query_trtt().
>>> We should also query after create to ensure that the defaults are set.
>>> Just thinking that is better doing the query after several steps (i.e.
>>> the execbuf) rather than immediately after the set in order to give time
>>> for something to go wrong. We should also ensure that everything remains
>>> set after a GPU hang and suspend/resume.
>>
>> Nice suggestion, currently get-trtt-context will just retrieve the
>> TR-TT params stored with a Driver for a given context.
>> Should the Context image itself be read to ensure that settings are
>> intact or not ?
>
> I'm happy with reading back the driver state (as opposed to the context
> image). We're verifying that the hw is setup via the execbuf, we just
> want to double check that the kernel reports the correct setup as well.

Fine, will use get-trtt-context only. The TR-TT params stored with the
driver should be invariant.
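
For reference, a minimal sketch of such a helper (assuming, as in the
patch, that GETPARAM fills in the trtt_param struct pointed to by
ctx_param.value):

	static void query_trtt(int fd, uint32_t ctx_id,
			       uint64_t l3_table_address,
			       uint64_t segment_base_addr)
	{
		struct local_i915_gem_context_param ctx_param;
		struct local_i915_gem_context_trtt_param trtt_param;

		memset(&trtt_param, 0, sizeof(trtt_param));
		memset(&ctx_param, 0, sizeof(ctx_param));

		ctx_param.context = ctx_id;
		ctx_param.size = sizeof(trtt_param);
		ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
		ctx_param.value = (uint64_t)&trtt_param;

		gem_context_get_param(fd, &ctx_param);

		igt_assert(trtt_param.l3_table_address == l3_table_address);
		igt_assert(trtt_param.segment_base_addr == segment_base_addr);
	}

Calling this after the execbuf, and again around a suspend/resume cycle,
would cover the checks suggested above.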

Best regards
Akash

> -Chris
>

* [PATCH v4] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-03 16:47                 ` Goel, Akash
@ 2016-03-09 11:31                   ` akash.goel
  2016-03-10 14:26                     ` Michel Thierry
  0 siblings, 1 reply; 30+ messages in thread
From: akash.goel @ 2016-03-09 11:31 UTC (permalink / raw)
  To: intel-gfx; +Cc: Akash Goel

From: Akash Goel <akash.goel@intel.com>

This patch provides the testcase to exercise the TRTT hardware.

Some platforms have an additional address translation hardware support in
form of Tiled Resource Translation Table (TR-TT) which provides an extra level
of abstraction over PPGTT.
This is useful for mapping Sparse/Tiled texture resources.

TR-TT is tightly coupled with PPGTT; a new instance of TR-TT will be required
for each new PPGTT instance, but TR-TT need not be enabled for every context.
1/16th of the 48bit PPGTT space is earmarked for the translation by TR-TT, and
which chunk to use is conveyed to HW through a register.
Any GFX address which lies in that reserved 44 bit range will be translated
through TR-TT first and then through PPGTT to get the actual physical address.

TR-TT is constructed as a 3 level tile table. Each tile is 64KB in size, which
leaves 44-16=28 address bits. The 28 bits are partitioned as 9+9+10, and
each level is contained within a 4KB page, hence L3 and L2 are composed of
512 64b entries and L1 is composed of 1024 32b entries.

There is a provision to keep TR-TT tables in virtual space, where the pages of
TR-TT tables will be mapped to PPGTT. This is the adopted mode, as in this mode
the UMD will have full control of TR-TT management, with bare minimum support
from KMD.
So the entries of L3 table will contain the PPGTT offset of L2 Table pages,
similarly entries of L2 table will contain the PPGTT offset of L1 Table pages.
The entries of L1 table will contain the PPGTT offset of BOs actually backing
the Sparse resources.

I915_GEM_CONTEXT_SETPARAM ioctl is used to request KMD to enable TRTT for a
certain context, a new I915_CONTEXT_PARAM_ENABLE_TRTT param has been
added to the CONTEXT_SETPARAM ioctl for that purpose.

v2:
 - Add new wrapper function __gem_context_require_param and used that
   to detect the TR-TT support
 - Use igt_main macro, rename certain function, remove extra white space,
   cleanup the code (Chris)
 - Enhance the basic subtest to exercise all possible TR-TT segment start
   locations (i.e. 16 of them) & for every iteration create a new context.

v3:
 - Get rid of some superfluous local variables (Chris)
 - Add asserts to validate that the GFX address used in the MI_STORE_DATA_IMM
   command is in canonical form & correctly aligned (Chris)
 - Remove clearing of errno in has_trtt_support function (Chris)
 - Use the 48B_ADDRESS flag for batch buffer BO also (Chris)
 - Rebased.

v4:
 - Add new subtest for invalid settings.
 - Add new local function query_trtt to check the Driver state (Chris)
 - Add new helper function gem_uses_64b_ppgtt to detect 64bit PPGTT support
 - Remove local functions uses_full_ppgtt & has_softpin_support, instead use
   existing wrappers gem_has_softpin & gem_uses_64b_ppgtt (Chris).
 - Remove redundant bit masking in emit_store_xxx functions (Chris).

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Akash Goel <akash.goel@intel.com>
---
 lib/ioctl_wrappers.c   |  39 +++-
 lib/ioctl_wrappers.h   |   3 +
 tests/Makefile.sources |   1 +
 tests/gem_trtt.c       | 498 +++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 533 insertions(+), 8 deletions(-)
 create mode 100644 tests/gem_trtt.c

diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 4071260..fb5de07 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -882,6 +882,22 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
 	igt_assert(__gem_context_set_param(fd, p) == 0);
 }
 
+int __gem_context_require_param(int fd, uint64_t param)
+{
+	struct local_i915_gem_context_param p;
+	int ret;
+
+	p.context = 0;
+	p.param = param;
+	p.value = 0;
+	p.size = 0;
+
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
+	if (ret)
+		return -errno;
+	return 0;
+}
+
 /**
  * gem_context_require_param:
  * @fd: open i915 drm file descriptor
@@ -892,14 +908,7 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
  */
 void gem_context_require_param(int fd, uint64_t param)
 {
-	struct local_i915_gem_context_param p;
-
-	p.context = 0;
-	p.param = param;
-	p.value = 0;
-	p.size = 0;
-
-	igt_require(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
+	igt_require(__gem_context_require_param(fd, param) == 0);
 }
 
 void gem_context_require_ban_period(int fd)
@@ -1063,6 +1072,20 @@ bool gem_uses_full_ppgtt(int fd)
 }
 
 /**
+ * gem_uses_64b_ppgtt:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to check whether the kernel internally uses full
+ * 64b per-process gtt to execute batches.
+ *
+ * Returns: Whether batches are run through full 64b ppgtt.
+ */
+bool gem_uses_64b_ppgtt(int fd)
+{
+	return gem_gtt_type(fd) > 2;
+}
+
+/**
  * gem_available_fences:
  * @fd: open i915 drm file descriptor
  *
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index dc0827a..d23fa96 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -107,9 +107,11 @@ struct local_i915_gem_context_param {
 #define LOCAL_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define LOCAL_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define LOCAL_CONTEXT_PARAM_GTT_SIZE	0x3
+#define LOCAL_CONTEXT_PARAM_TRTT	0x4
 	uint64_t value;
 };
 void gem_context_require_ban_period(int fd);
+int __gem_context_require_param(int fd, uint64_t param);
 void gem_context_require_param(int fd, uint64_t param);
 void gem_context_get_param(int fd, struct local_i915_gem_context_param *p);
 void gem_context_set_param(int fd, struct local_i915_gem_context_param *p);
@@ -143,6 +145,7 @@ bool gem_has_bsd2(int fd);
 int gem_gtt_type(int fd);
 bool gem_uses_ppgtt(int fd);
 bool gem_uses_full_ppgtt(int fd);
+bool gem_uses_64b_ppgtt(int fd);
 int gem_available_fences(int fd);
 uint64_t gem_available_aperture_size(int fd);
 uint64_t gem_aperture_size(int fd);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index f8b18b0..e6081f6 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -73,6 +73,7 @@ TESTS_progs_M = \
 	gem_streaming_writes \
 	gem_tiled_blits \
 	gem_tiled_partial_pwrite_pread \
+	gem_trtt \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
 	kms_addfb_basic \
diff --git a/tests/gem_trtt.c b/tests/gem_trtt.c
new file mode 100644
index 0000000..19b803d
--- /dev/null
+++ b/tests/gem_trtt.c
@@ -0,0 +1,498 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Akash Goel <akash.goel@intel.com>
+ *
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <malloc.h>
+#include "drm.h"
+#include "ioctl_wrappers.h"
+#include "drmtest.h"
+#include "intel_chipset.h"
+#include "intel_io.h"
+#include "i915_drm.h"
+#include <assert.h>
+#include <sys/wait.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+#include "igt_kms.h"
+#include <sys/types.h>
+
+#define BO_SIZE 4096
+#define EXEC_OBJECT_PINNED	(1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+
+/* gen8_canonical_addr
+ * Used to convert any address into canonical form, i.e. [63:48] == [47].
+ * Based on kernel's sign_extend64 implementation.
+ * @address - a virtual address
+ */
+#define GEN8_HIGH_ADDRESS_BIT 47
+static uint64_t gen8_canonical_addr(uint64_t address)
+{
+	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
+	return (__s64)(address << shift) >> shift;
+}
+
+/* has_trtt_support
+ * Finds if trtt hw is present
+ * @fd DRM fd
+ */
+static bool has_trtt_support(int fd)
+{
+	int ret = __gem_context_require_param(fd, LOCAL_CONTEXT_PARAM_TRTT);
+
+	return (ret == 0);
+}
+
+/* mmap_bo
+ * helper for creating a CPU mmapping of the buffer
+ * @fd - drm fd
+ * @handle - handle of the buffer to mmap
+ * @size: size of the buffer
+ */
+static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
+{
+	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
+	return ptr;
+}
+
+/* emit_store_dword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u32 data to be stored at destination
+ */
+static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint32_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, gen8_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+
+	return dw_offset;
+}
+
+/* emit_store_qword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u64 data to be stored at destination
+ */
+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint64_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, gen8_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+	cmd_buf[dw_offset++] = data >> 32;
+
+	return dw_offset;
+}
+
+/* emit_bb_end
+ * populate batch buffer with MI_BATCH_BUFFER_END command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ */
+static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
+{
+	dw_offset = ALIGN(dw_offset, 2);
+	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
+	dw_offset++;
+
+	return dw_offset;
+}
+
+/* setup_execbuffer
+ * helper for buffer execution
+ * @execbuf - pointer to execbuffer
+ * @exec_object - pointer to exec object2 struct
+ * @ctx_id - id of the context in which to execute
+ * @ring - ring to be used
+ * @buffer_count - how many buffers to submit
+ * @batch_length - length of batch buffer
+ */
+static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
+			     struct drm_i915_gem_exec_object2 *exec_object,
+			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
+{
+	memset(execbuf, 0, sizeof(*execbuf));
+
+	execbuf->buffers_ptr = (unsigned long)exec_object;
+	execbuf->buffer_count = buffer_count;
+	execbuf->batch_len = batch_length;
+	execbuf->flags = ring;
+	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
+}
+
+#define TABLE_SIZE 0x1000
+#define TILE_SIZE 0x10000
+
+#define TRTT_SEGMENT_SIZE (1ULL << 44)
+#define PPGTT_SIZE (1ULL << 48)
+
+#define NULL_TILE_PATTERN    0xFFFFFFFF
+#define INVALID_TILE_PATTERN 0xFFFFFFFE
+
+struct local_i915_gem_context_trtt_param {
+	uint64_t segment_base_addr;
+	uint64_t l3_table_address;
+	uint32_t invd_tile_val;
+	uint32_t null_tile_val;
+};
+
+/* query_trtt
+ * Helper function to check if the TR-TT settings stored with the KMD,
+ * for a context, have the expected values (set previously).
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static void
+query_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	gem_context_get_param(fd, &ctx_param);
+
+	igt_assert_eq_u64(trtt_param.l3_table_address, l3_table_address);
+	igt_assert_eq_u64(trtt_param.segment_base_addr, segment_base_addr);
+	igt_assert_eq_u32(trtt_param.invd_tile_val, INVALID_TILE_PATTERN);
+	igt_assert_eq_u32(trtt_param.null_tile_val, NULL_TILE_PATTERN);
+}
+
+/* setup_trtt
+ * Helper function to request KMD to enable TRTT
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static int
+setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	trtt_param.null_tile_val = NULL_TILE_PATTERN;
+	trtt_param.invd_tile_val = INVALID_TILE_PATTERN;
+	trtt_param.l3_table_address = l3_table_address;
+	trtt_param.segment_base_addr = segment_base_addr;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	return __gem_context_set_param(fd, &ctx_param);
+}
+
+/* bo_alloc_setup
+ * allocate bo and populate exec object
+ * @exec_object2 - pointer to exec object
+ * @bo_size - buffer size
+ * @flags - exec flags
+ * @bo_offset - pointer to the current PPGTT offset
+ */
+static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
+			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
+{
+	memset(exec_object2, 0, sizeof(*exec_object2));
+	exec_object2->handle = gem_create(fd, bo_size);
+	exec_object2->flags = flags;
+
+	if (bo_offset)
+	{
+		exec_object2->offset = *bo_offset;
+		*bo_offset += bo_size;
+	}
+}
+
+/* submit_trtt_context
+ * This helper function will create a new context if the TR-TT segment
+ * base address is not zero, allocate an L3 table page, 2 pages apiece
+ * for the L2/L1 tables and a couple of data buffers of 64KB in size,
+ * matching the Tile size. The 2 data buffers will be mapped to the 2 ends
+ * of the TRTT virtual space. A series of MI_STORE_DWORD_IMM commands will
+ * be added to the batch buffer, first to update the TR-TT table entries
+ * and then to update the data buffers through their TR-TT VA, exercising
+ * the table programming done previously.
+ * Invoke the CONTEXT_SETPARAM ioctl to request the KMD to enable TRTT.
+ * Invoke execbuffer to submit the batch buffer.
+ * Verify that the value of the first DWORD in the 2 data buffers matches
+ * the data the GPU was asked to write.
+ */
+static void submit_trtt_context(int fd, uint64_t segment_base_addr)
+{
+	enum {
+		L3_TBL,
+		L2_TBL1,
+		L2_TBL2,
+		L1_TBL1,
+		L1_TBL2,
+		DATA1,
+		DATA2,
+		BATCH,
+		NUM_BUFFERS,
+	};
+
+	int ring, len = 0;
+	uint32_t *ptr;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
+	uint32_t batch_buffer[BO_SIZE];
+	uint32_t ctx_id, data, last_entry_offset;
+	uint64_t cur_ppgtt_off, exec_flags;
+	uint64_t first_tile_addr, last_tile_addr;
+
+	first_tile_addr = segment_base_addr;
+	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
+
+	if (segment_base_addr == 0) {
+		/* Use the default context for this case */
+		ctx_id = 0;
+		/* To avoid conflict with the TR-TT segment */
+		cur_ppgtt_off = TRTT_SEGMENT_SIZE;
+	} else {
+		/* Create a new context to have different TRTT settings */
+		ctx_id = gem_context_create(fd);
+		cur_ppgtt_off = 0;
+	}
+
+	exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+	/* first allocate Batch buffer BO */
+	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
+
+	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
+	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
+
+	/* Allocate a L3 table BO */
+	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L2 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L1 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
+	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
+
+	/* Allocate two Data buffer BOs */
+	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Add commands to update the two L3 table entries to point them to the L2 tables*/
+	last_entry_offset = 511*sizeof(uint64_t);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset,
+			       exec_object2[L2_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset + last_entry_offset,
+			       exec_object2[L2_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables*/
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL1].offset,
+			       exec_object2[L1_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL2].offset + last_entry_offset,
+			       exec_object2[L1_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers*/
+	last_entry_offset = 1023*sizeof(uint32_t);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL1].offset,
+			       exec_object2[DATA1].offset >> 16);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL2].offset + last_entry_offset,
+			       exec_object2[DATA2].offset >> 16);
+
+	/* Add commands to update the 2 data buffers, using their TRTT VA */
+	data = 0x12345678;
+	len = emit_store_dword(fd, batch_buffer, len,
+			       gen8_canonical_addr(first_tile_addr),
+			       data);
+	len = emit_store_dword(fd, batch_buffer, len,
+			       gen8_canonical_addr(last_tile_addr),
+			       data);
+
+	len = emit_bb_end(fd, batch_buffer, len);
+	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
+
+	/* Request KMD to setup the TR-TT */
+	igt_assert(setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr) == 0);
+
+	ring = I915_EXEC_RENDER;
+	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
+
+	/* submit command buffer */
+	gem_execbuf(fd, &execbuf);
+
+	/* read the 2 data buffers to check for the value written by the GPU */
+	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	gem_close(fd, exec_object2[L3_TBL].handle);
+	gem_close(fd, exec_object2[L2_TBL1].handle);
+	gem_close(fd, exec_object2[L2_TBL2].handle);
+	gem_close(fd, exec_object2[L1_TBL1].handle);
+	gem_close(fd, exec_object2[L1_TBL2].handle);
+	gem_close(fd, exec_object2[DATA1].handle);
+	gem_close(fd, exec_object2[DATA2].handle);
+	gem_close(fd, exec_object2[BATCH].handle);
+
+	/* Check if the TRTT params stored with the Driver are intact or not */
+	query_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
+
+	if (ctx_id)
+		gem_context_destroy(fd, ctx_id);
+}
+
+/* basic trtt test
+ * This will test the basic TR-TT functionality by doing a couple of store
+ * operations through it. It will also exercise all possible TR-TT segment
+ * start locations (i.e. 16 of them).
+ */
+static void test_basic_trtt_use(int fd)
+{
+	uint64_t segment_base_addr;
+
+	for (segment_base_addr = 0;
+	     segment_base_addr < PPGTT_SIZE;
+	     segment_base_addr += TRTT_SEGMENT_SIZE)
+	{
+		submit_trtt_context(fd, segment_base_addr);
+	}
+}
+
+static void test_invalid(int fd)
+{
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	ctx_id = gem_context_create(fd);
+
+	/* Check for an incorrectly aligned base location for TR-TT segment */
+	segment_base_addr = TRTT_SEGMENT_SIZE + 0x1000;
+	l3_offset = TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Check for the same/conflicting value for L3 table and TR-TT segment location */
+	segment_base_addr = TRTT_SEGMENT_SIZE;
+	l3_offset = segment_base_addr;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Check for an incorrectly aligned location for L3 table */
+	l3_offset = TILE_SIZE + 0x1000;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Use the correct settings now */
+	l3_offset = TILE_SIZE;
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+	/* Check the overriding of TR-TT settings for the same context */
+	segment_base_addr += TRTT_SEGMENT_SIZE;
+	l3_offset += TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EEXIST);
+
+	gem_context_destroy(fd, ctx_id);
+}
+
+igt_main
+{
+	int fd = -1;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		/* test needs 48b PPGTT & Soft Pin support */
+		igt_require(gem_has_softpin(fd));
+		igt_require(gem_uses_64b_ppgtt(fd));
+		igt_require(has_trtt_support(fd));
+	}
+
+	igt_subtest("invalid")
+		test_invalid(fd);
+
+	igt_subtest("basic")
+		test_basic_trtt_use(fd);
+
+	igt_fixture
+		close(fd);
+}
+
-- 
1.9.2
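
Once built, the subtests can be listed and run individually via the standard
IGT runner options (a usage note, assuming the usual igt_core command line
handling):

	./gem_trtt --list-subtests
	./gem_trtt --run-subtest basic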


* Re: [PATCH v4] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-09 11:31                   ` [PATCH v4] " akash.goel
@ 2016-03-10 14:26                     ` Michel Thierry
  2016-03-11  5:59                       ` Goel, Akash
  0 siblings, 1 reply; 30+ messages in thread
From: Michel Thierry @ 2016-03-10 14:26 UTC (permalink / raw)
  To: akash.goel, intel-gfx

On 3/9/2016 11:31 AM, akash.goel@intel.com wrote:
> From: Akash Goel <akash.goel@intel.com>
>
> This patch provides the testcase to exercise the TRTT hardware.
>
...
> --- /dev/null
> +++ b/tests/gem_trtt.c
> @@ -0,0 +1,498 @@
...
> +
> +/* gen8_canonical_addr
> + * Used to convert any address into canonical form, i.e. [63:48] == [47].
> + * Based on kernel's sign_extend64 implementation.
> + * @address - a virtual address
> + */
> +#define GEN8_HIGH_ADDRESS_BIT 47
> +static uint64_t gen8_canonical_addr(uint64_t address)
> +{
> +	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
> +	return (__s64)(address << shift) >> shift;
> +}

^^^ These came from gem_softpin, didn't they?
[https://cgit.freedesktop.org/xorg/app/intel-gpu-tools/commit/?id=7cb35109645e6495f67981b9930587c1ddfe4f90]

Would you consider moving them to lib/?
(I think igt_aux is a good place).




* Re: [PATCH v4] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-10 14:26                     ` Michel Thierry
@ 2016-03-11  5:59                       ` Goel, Akash
  2016-03-11 11:48                         ` [PATCH v5] " akash.goel
  0 siblings, 1 reply; 30+ messages in thread
From: Goel, Akash @ 2016-03-11  5:59 UTC (permalink / raw)
  To: Michel Thierry, intel-gfx; +Cc: akash.goel



On 3/10/2016 7:56 PM, Michel Thierry wrote:
> On 3/9/2016 11:31 AM, akash.goel@intel.com wrote:
>> From: Akash Goel <akash.goel@intel.com>
>>
>> This patch provides the testcase to exercise the TRTT hardware.
>>
> ...
>> --- /dev/null
>> +++ b/tests/gem_trtt.c
>> @@ -0,0 +1,498 @@
> ...
>> +
>> +/* gen8_canonical_addr
>> + * Used to convert any address into canonical form, i.e. [63:48] ==
>> [47].
>> + * Based on kernel's sign_extend64 implementation.
>> + * @address - a virtual address
>> + */
>> +#define GEN8_HIGH_ADDRESS_BIT 47
>> +static uint64_t gen8_canonical_addr(uint64_t address)
>> +{
>> +    __u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
>> +    return (__s64)(address << shift) >> shift;
>> +}
>
> ^^^ These came from gem_softpin, didnt they?
> [https://cgit.freedesktop.org/xorg/app/intel-gpu-tools/commit/?id=7cb35109645e6495f67981b9930587c1ddfe4f90]

Yes, copied as-is from gem_softpin.

> Would you consider moving them to lib/?
> (I think igt_aux is a good place).

Can I move it to igt_aux.h then?

Best regards
Akash


* [PATCH v5] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-11  5:59                       ` Goel, Akash
@ 2016-03-11 11:48                         ` akash.goel
  2016-03-17 10:14                           ` Michel Thierry
  0 siblings, 1 reply; 30+ messages in thread
From: akash.goel @ 2016-03-11 11:48 UTC (permalink / raw)
  To: intel-gfx; +Cc: Akash Goel

From: Akash Goel <akash.goel@intel.com>

This patch provides the testcase to exercise the TRTT hardware.

Some platforms have an additional address translation hardware support in
form of Tiled Resource Translation Table (TR-TT) which provides an extra level
of abstraction over PPGTT.
This is useful for mapping Sparse/Tiled texture resources.

TR-TT is tightly coupled with PPGTT: a new instance of TR-TT will be required
for a new PPGTT instance, but TR-TT may not be enabled for every context.
1/16th of the 48bit PPGTT space is earmarked for the translation by TR-TT;
which chunk to use is conveyed to HW through a register.
Any GFX address, which lies in that reserved 44 bit range will be translated
through TR-TT first and then through PPGTT to get the actual physical address.

TRTT is constructed as a 3 level tile table. Each tile is 64KB in size, which
leaves behind 44-16=28 address bits. The 28 bits are partitioned as 9+9+10, and
each level is contained within a 4KB page, hence L3 and L2 are composed of
512 64b entries and L1 is composed of 1024 32b entries.
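
As an illustration (a sketch, not part of the test), a TR-TT virtual address
splits into its table indices as follows:

	#include <stdint.h>
	#include <stdio.h>

	/* decompose a TR-TT VA per the 9+9+10(+16) partitioning above */
	static void trtt_decompose(uint64_t addr, uint64_t segment_base)
	{
		uint64_t off = addr - segment_base;   /* 44b offset into the segment */
		unsigned l3   = (off >> 35) & 0x1ff;  /* one of 512 64b L3 entries */
		unsigned l2   = (off >> 26) & 0x1ff;  /* one of 512 64b L2 entries */
		unsigned l1   = (off >> 16) & 0x3ff;  /* one of 1024 32b L1 entries */
		unsigned tile = off & 0xffff;         /* byte offset inside the 64KB tile */

		printf("L3 %u, L2 %u, L1 %u, +0x%04x\n", l3, l2, l1, tile);
	}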

There is a provision to keep the TR-TT tables in virtual space, where the pages
of the TRTT tables will be mapped to PPGTT. This is the adopted mode, as in this
mode the UMD will have full control over TR-TT management, with bare minimum
support from the KMD.
So the entries of the L3 table will contain the PPGTT offset of the L2 table
pages, similarly the entries of the L2 table will contain the PPGTT offset of
the L1 table pages. The entries of the L1 table will contain the PPGTT offset
of the BOs actually backing the Sparse resources.

The I915_GEM_CONTEXT_SETPARAM ioctl is used to request the KMD to enable TRTT
for a certain context; a new I915_CONTEXT_PARAM_ENABLE_TRTT param has been
added to the CONTEXT_SETPARAM ioctl for that purpose.

v2:
 - Add a new wrapper function __gem_context_require_param and use it
   to detect the TR-TT support
 - Use the igt_main macro, rename certain functions, remove extra white space,
   clean up the code (Chris)
 - Enhance the basic subtest to exercise all possible TR-TT segment start
   locations (i.e. 16 of them) & for every iteration create a new context.

v3:
 - Get rid of some superfluous local variables (Chris)
 - Add asserts to validate whether the GFX address used in MI_STORE_DATA_IMM
   command is in canonical form & is correctly aligned or not (Chris)
 - Remove clearing of errno in has_trtt_support function (Chris)
 - Use the 48B_ADDRESS flag for batch buffer BO also (Chris)
 - Rebased.

v4:
 - Add new subtest for invalid settings.
 - Add new local function query_trtt to check the Driver state (Chris)
 - Add new helper function gem_uses_64b_ppgtt to detect 64bit PPGTT support
 - Remove local functions uses_full_ppgtt & has_softpin_support, instead use
   existing wrappers gem_has_softpin & gem_uses_64b_ppgtt (Chris).
 - Remove redundant bit masking in emit_store_xxx functions (Chris).

v5:
 - Add 2 new subtests checking the forceful eviction of active/hanging
   objects overlapping with the TR-TT segment (Chris).
 - Move gen8_canonical_addr to igt_aux as it's needed by other tests
   that do soft pinning, and not just gem_softpin (Michel)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Akash Goel <akash.goel@intel.com>
---
 lib/igt_aux.c          |  11 +
 lib/igt_aux.h          |   1 +
 lib/igt_gt.h           |   2 +
 lib/ioctl_wrappers.c   |  39 +++-
 lib/ioctl_wrappers.h   |   3 +
 tests/Makefile.sources |   1 +
 tests/gem_softpin.c    |  18 +-
 tests/gem_trtt.c       | 601 +++++++++++++++++++++++++++++++++++++++++++++++++
 8 files changed, 653 insertions(+), 23 deletions(-)
 create mode 100644 tests/gem_trtt.c

diff --git a/lib/igt_aux.c b/lib/igt_aux.c
index 7d35666..c5fedb5 100644
--- a/lib/igt_aux.c
+++ b/lib/igt_aux.c
@@ -206,6 +206,17 @@ void igt_exchange_int(void *array, unsigned i, unsigned j)
 	int_arr[j] = tmp;
 }
 
+/* igt_canonical_addr
+ * Used to convert any address into canonical form, i.e. [63:48] == [47].
+ * Based on kernel's sign_extend64 implementation.
+ * @address - a virtual address
+*/
+uint64_t igt_canonical_addr(uint64_t address)
+{
+	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
+	return (__s64)(address << shift) >> shift;
+}
+
 static uint32_t
 hars_petruska_f54_1_random_unsafe(void)
 {
diff --git a/lib/igt_aux.h b/lib/igt_aux.h
index 427719e..85fc64b 100644
--- a/lib/igt_aux.h
+++ b/lib/igt_aux.h
@@ -48,6 +48,7 @@ void igt_permute_array(void *array, unsigned size,
 void igt_progress(const char *header, uint64_t i, uint64_t total);
 void igt_print_activity(void);
 bool igt_check_boolean_env_var(const char *env_var, bool default_value);
+uint64_t igt_canonical_addr(uint64_t address);
 
 bool igt_aub_dump_enabled(void);
 
diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index b7c5c4a..d4e9dbb 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -98,4 +98,6 @@ extern const struct intel_execution_engine {
 	unsigned flags;
 } intel_execution_engines[];
 
+#define GEN8_HIGH_ADDRESS_BIT 47
+
 #endif /* IGT_GT_H */
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 4071260..fb5de07 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -882,6 +882,22 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
 	igt_assert(__gem_context_set_param(fd, p) == 0);
 }
 
+int __gem_context_require_param(int fd, uint64_t param)
+{
+	struct local_i915_gem_context_param p;
+	int ret;
+
+	p.context = 0;
+	p.param = param;
+	p.value = 0;
+	p.size = 0;
+
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
+	if (ret)
+		return -errno;
+	return 0;
+}
+
 /**
  * gem_context_require_param:
  * @fd: open i915 drm file descriptor
@@ -892,14 +908,7 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
  */
 void gem_context_require_param(int fd, uint64_t param)
 {
-	struct local_i915_gem_context_param p;
-
-	p.context = 0;
-	p.param = param;
-	p.value = 0;
-	p.size = 0;
-
-	igt_require(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
+	igt_require(__gem_context_require_param(fd, param) == 0);
 }
 
 void gem_context_require_ban_period(int fd)
@@ -1063,6 +1072,20 @@ bool gem_uses_full_ppgtt(int fd)
 }
 
 /**
+ * gem_uses_64b_ppgtt:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to check whether the kernel internally uses full
+ * 64b per-process gtt to execute batches.
+ *
+ * Returns: Whether batches are run through full 64b ppgtt.
+ */
+bool gem_uses_64b_ppgtt(int fd)
+{
+	return gem_gtt_type(fd) > 2;
+}
+
+/**
  * gem_available_fences:
  * @fd: open i915 drm file descriptor
  *
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index dc0827a..d23fa96 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -107,9 +107,11 @@ struct local_i915_gem_context_param {
 #define LOCAL_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define LOCAL_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define LOCAL_CONTEXT_PARAM_GTT_SIZE	0x3
+#define LOCAL_CONTEXT_PARAM_TRTT	0x4
 	uint64_t value;
 };
 void gem_context_require_ban_period(int fd);
+int __gem_context_require_param(int fd, uint64_t param);
 void gem_context_require_param(int fd, uint64_t param);
 void gem_context_get_param(int fd, struct local_i915_gem_context_param *p);
 void gem_context_set_param(int fd, struct local_i915_gem_context_param *p);
@@ -143,6 +145,7 @@ bool gem_has_bsd2(int fd);
 int gem_gtt_type(int fd);
 bool gem_uses_ppgtt(int fd);
 bool gem_uses_full_ppgtt(int fd);
+bool gem_uses_64b_ppgtt(int fd);
 int gem_available_fences(int fd);
 uint64_t gem_available_aperture_size(int fd);
 uint64_t gem_aperture_size(int fd);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index f8b18b0..e6081f6 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -73,6 +73,7 @@ TESTS_progs_M = \
 	gem_streaming_writes \
 	gem_tiled_blits \
 	gem_tiled_partial_pwrite_pread \
+	gem_trtt \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
 	kms_addfb_basic \
diff --git a/tests/gem_softpin.c b/tests/gem_softpin.c
index 1b3d9d3..3eee08f 100644
--- a/tests/gem_softpin.c
+++ b/tests/gem_softpin.c
@@ -31,18 +31,6 @@
 #define EXEC_OBJECT_PINNED	(1<<4)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
 
-/* gen8_canonical_addr
- * Used to convert any address into canonical form, i.e. [63:48] == [47].
- * Based on kernel's sign_extend64 implementation.
- * @address - a virtual address
-*/
-#define GEN8_HIGH_ADDRESS_BIT 47
-static uint64_t gen8_canonical_addr(uint64_t address)
-{
-	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
-	return (__s64)(address << shift) >> shift;
-}
-
 static void test_invalid(int fd)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -70,7 +58,7 @@ static void test_invalid(int fd)
 
 	/* Check beyond bounds of aperture */
 	object.offset = gem_aperture_size(fd) - 4096;
-	object.offset = gen8_canonical_addr(object.offset);
+	object.offset = igt_canonical_addr(object.offset);
 	igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
 	/* Check gen8 canonical addressing */
@@ -78,7 +66,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull << GEN8_HIGH_ADDRESS_BIT;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
 
@@ -88,7 +76,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull<<32;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		object.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
diff --git a/tests/gem_trtt.c b/tests/gem_trtt.c
new file mode 100644
index 0000000..8dd4892
--- /dev/null
+++ b/tests/gem_trtt.c
@@ -0,0 +1,601 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Akash Goel <akash.goel@intel.com>
+ *
+ */
+
+#include "igt.h"
+
+#define BO_SIZE 4096
+#define EXEC_OBJECT_PINNED	(1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+
+/* has_trtt_support
+ * Finds if trtt hw is present
+ * @fd DRM fd
+ */
+static bool has_trtt_support(int fd)
+{
+	int ret = __gem_context_require_param(fd, LOCAL_CONTEXT_PARAM_TRTT);
+
+	return (ret == 0);
+}
+
+/* mmap_bo
+ * helper for creating a CPU mmapping of the buffer
+ * @fd - drm fd
+ * @handle - handle of the buffer to mmap
+ * @size: size of the buffer
+ */
+static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
+{
+	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
+	return ptr;
+}
+
+/* emit_store_dword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u32 data to be stored at destination
+ */
+static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint32_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+
+	return dw_offset;
+}
+
+/* emit_store_qword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u64 data to be stored at destination
+ */
+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint64_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+	cmd_buf[dw_offset++] = data >> 32;
+
+	return dw_offset;
+}
+
+/* emit_bb_end
+ * populate batch buffer with MI_BATCH_BUFFER_END command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ */
+static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
+{
+	dw_offset = ALIGN(dw_offset, 2);
+	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
+	dw_offset++;
+
+	return dw_offset;
+}
+
+/* setup_execbuffer
+ * helper for buffer execution
+ * @execbuf - pointer to execbuffer
+ * @exec_object - pointer to exec object2 struct
+ * @ctx_id - id of the context in which to execute
+ * @ring - ring to be used
+ * @buffer_count - how many buffers to submit
+ * @batch_length - length of batch buffer
+ */
+static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
+			     struct drm_i915_gem_exec_object2 *exec_object,
+			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
+{
+	memset(execbuf, 0, sizeof(*execbuf));
+
+	execbuf->buffers_ptr = (unsigned long)exec_object;
+	execbuf->buffer_count = buffer_count;
+	execbuf->batch_len = batch_length;
+	execbuf->flags = ring;
+	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
+}
+
+#define TABLE_SIZE 0x1000
+#define TILE_SIZE 0x10000
+
+#define TRTT_SEGMENT_SIZE (1ULL << 44)
+#define PPGTT_SIZE (1ULL << 48)
+
+#define NULL_TILE_PATTERN    0xFFFFFFFF
+#define INVALID_TILE_PATTERN 0xFFFFFFFE
+
+struct local_i915_gem_context_trtt_param {
+	uint64_t segment_base_addr;
+	uint64_t l3_table_address;
+	uint32_t invd_tile_val;
+	uint32_t null_tile_val;
+};
+
+/* query_trtt
+ * Helper function to check if the TR-TT settings stored with the KMD,
+ * for a context, have the expected values (set previously).
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static void
+query_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	gem_context_get_param(fd, &ctx_param);
+
+	igt_assert_eq_u64(trtt_param.l3_table_address, l3_table_address);
+	igt_assert_eq_u64(trtt_param.segment_base_addr, segment_base_addr);
+	igt_assert_eq_u32(trtt_param.invd_tile_val, INVALID_TILE_PATTERN);
+	igt_assert_eq_u32(trtt_param.null_tile_val, NULL_TILE_PATTERN);
+}
+
+/* setup_trtt
+ * Helper function to request KMD to enable TRTT
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static int
+setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	trtt_param.null_tile_val = NULL_TILE_PATTERN;
+	trtt_param.invd_tile_val = INVALID_TILE_PATTERN;
+	trtt_param.l3_table_address = l3_table_address;
+	trtt_param.segment_base_addr = segment_base_addr;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	return __gem_context_set_param(fd, &ctx_param);
+}
+
+/* bo_alloc_setup
+ * allocate bo and populate exec object
+ * @exec_object2 - pointer to exec object
+ * @bo_size - buffer size
+ * @flags - exec flags
+ * @bo_offset - pointer to the current PPGTT offset
+ */
+static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
+			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
+{
+	memset(exec_object2, 0, sizeof(*exec_object2));
+	exec_object2->handle = gem_create(fd, bo_size);
+	exec_object2->flags = flags;
+
+	if (bo_offset)
+	{
+		exec_object2->offset = *bo_offset;
+		*bo_offset += bo_size;
+	}
+}
+
+/* busy_batch
+ * This helper function will prepare & submit a batch on the BCS ring,
+ * which will keep the ring busy for some time, long enough to submit
+ * some other work which can trigger the eviction of that batch object
+ * while it is still getting executed on the ring.
+ */
+static uint64_t busy_batch(int fd, uint32_t ctx_id)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	const int has_64bit_reloc = gen >= 8;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 object[2];
+	uint32_t *map;
+	int factor = 10;
+	int i = 0;
+
+	/* Until the kernel ABI is fixed, only default contexts can be used
+	 * on !RCS rings */
+	igt_require(ctx_id == 0);
+
+	memset(object, 0, sizeof(object));
+	object[0].handle = gem_create(fd, 1024*1024);
+	object[1].handle = gem_create(fd, 4096);
+	map = gem_mmap__cpu(fd, object[1].handle, 0, 4096, PROT_WRITE);
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	setup_execbuffer(&execbuf, object, ctx_id, I915_EXEC_BLT, 2,
+			 emit_bb_end(fd, map, 0)*4);
+	gem_execbuf(fd, &execbuf);
+
+	igt_debug("Active offsets = [%08llx, %08llx]\n",
+		  object[0].offset, object[1].offset);
+
+#define COPY_BLT_CMD		(2<<29|0x53<<22|0x6)
+#define BLT_WRITE_ALPHA		(1<<21)
+#define BLT_WRITE_RGB		(1<<20)
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	while (factor--) {
+		/* XY_SRC_COPY */
+		map[i++] = COPY_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB;
+		if (has_64bit_reloc)
+			map[i-1] += 2;
+		map[i++] = 0xcc << 16 | 1 << 25 | 1 << 24 | (4*1024);
+		map[i++] = 0;
+		map[i++] = 256 << 16 | 1024;
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+		map[i++] = 0;
+		map[i++] = 4096;
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+	}
+	i = emit_bb_end(fd, map, i);
+	munmap(map, 4096);
+
+	object[0].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE;
+	object[1].flags = EXEC_OBJECT_PINNED;
+	execbuf.batch_len = i*4;
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, object[0].handle);
+	gem_close(fd, object[1].handle);
+
+	return object[1].offset;
+}
+
+/* active object eviction test
+ * This test will force the eviction of an active object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_active(int fd)
+{
+	uint64_t expected;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	/* Create another file instance, as currently this test uses the BCS
+	 * ring, which works only with the default context, and the TR-TT would
+	 * already have been enabled by the earlier subtests for the default
+	 * context associated with the original file instance.
+	 */
+	fd = drm_open_driver(DRIVER_INTEL);
+
+	expected = busy_batch(fd, 0);
+
+	/* Determine the segment_base_addr according to the offset of the
+	 * active buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, 0, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, 0, l3_offset, segment_base_addr);
+	close(fd);
+}
+
+/* hanging object eviction test
+ * This test will force the eviction of a hanging object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_hang(int fd)
+{
+	uint64_t expected;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	ctx_id = gem_context_create(fd);
+
+	igt_hang_ctx(fd, ctx_id, I915_EXEC_RENDER, 0, (uint64_t *)&expected);
+
+	/* Determine the segment_base_addr according to the offset of the
+	 * hanging buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, ctx_id, l3_offset, segment_base_addr);
+	gem_context_destroy(fd, ctx_id);
+}
+
+/* submit_trtt_context
+ * This helper function will create a new context, except when exercising
+ * the last TR-TT segment location, for which the default context is used.
+ * It will allocate an L3 table page, 2 pages apiece for the L2/L1 tables
+ * and a couple of data buffers of 64KB in size, matching the Tile size.
+ * The 2 data buffers will be mapped to the 2 ends of the TRTT virtual
+ * space. A series of MI_STORE_DWORD_IMM commands will be added to the
+ * batch buffer, first to update the TR-TT table entries and then to
+ * update the data buffers through their TR-TT VA, exercising the table
+ * programming done previously.
+ * Invoke the CONTEXT_SETPARAM ioctl to request the KMD to enable TRTT.
+ * Invoke execbuffer to submit the batch buffer.
+ * Verify that the value of the first DWORD in the 2 data buffers matches
+ * the data the GPU was asked to write.
+ */
+static void submit_trtt_context(int fd, uint64_t segment_base_addr)
+{
+	enum {
+		L3_TBL,
+		L2_TBL1,
+		L2_TBL2,
+		L1_TBL1,
+		L1_TBL2,
+		DATA1,
+		DATA2,
+		BATCH,
+		NUM_BUFFERS,
+	};
+
+	int ring, len = 0;
+	uint32_t *ptr;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
+	uint32_t batch_buffer[BO_SIZE];
+	uint32_t ctx_id, data, last_entry_offset;
+	uint64_t cur_ppgtt_off, exec_flags;
+	uint64_t first_tile_addr, last_tile_addr;
+
+	first_tile_addr = segment_base_addr;
+	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
+
+	/* To avoid conflict with the TR-TT segment */
+	cur_ppgtt_off = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	/* Create a new context to get different TRTT settings, but exercise
+	 * the default context as well. Don't use the default context for a
+	 * segment_base_addr value of 0, as that would break all the implicit
+	 * tests, which use only the default context without the 48B_ADDRESS
+	 * flag.
+	 */
+	if (segment_base_addr + TRTT_SEGMENT_SIZE == PPGTT_SIZE)
+		ctx_id = 0;
+	else
+		ctx_id = gem_context_create(fd);
+
+	exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+	/* first allocate Batch buffer BO */
+	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
+
+	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
+	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
+
+	/* Allocate a L3 table BO */
+	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L2 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L1 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
+	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
+
+	/* Allocate two Data buffer BOs */
+	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Add commands to update the two L3 table entries to point them to the L2 tables*/
+	last_entry_offset = 511*sizeof(uint64_t);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset,
+			       exec_object2[L2_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset + last_entry_offset,
+			       exec_object2[L2_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables*/
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL1].offset,
+			       exec_object2[L1_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL2].offset + last_entry_offset,
+			       exec_object2[L1_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers*/
+	last_entry_offset = 1023*sizeof(uint32_t);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL1].offset,
+			       exec_object2[DATA1].offset >> 16);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL2].offset + last_entry_offset,
+			       exec_object2[DATA2].offset >> 16);
+
+	/* Add commands to update the 2 data buffers, using their TRTT VA */
+	data = 0x12345678;
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(first_tile_addr),
+			       data);
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(last_tile_addr),
+			       data);
+
+	len = emit_bb_end(fd, batch_buffer, len);
+	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
+
+	/* Request KMD to setup the TR-TT */
+	igt_assert(setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr) == 0);
+
+	ring = I915_EXEC_RENDER;
+	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
+
+	/* submit command buffer */
+	gem_execbuf(fd, &execbuf);
+
+	/* read the 2 data buffers to check for the value written by the GPU */
+	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	gem_close(fd, exec_object2[L3_TBL].handle);
+	gem_close(fd, exec_object2[L2_TBL1].handle);
+	gem_close(fd, exec_object2[L2_TBL2].handle);
+	gem_close(fd, exec_object2[L1_TBL1].handle);
+	gem_close(fd, exec_object2[L1_TBL2].handle);
+	gem_close(fd, exec_object2[DATA1].handle);
+	gem_close(fd, exec_object2[DATA2].handle);
+	gem_close(fd, exec_object2[BATCH].handle);
+
+	/* Check if the TRTT params stored with the Driver are intact or not */
+	query_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
+
+	if (ctx_id)
+		gem_context_destroy(fd, ctx_id);
+}
+
+/* basic trtt test
+ * This will test the basic TR-TT functionality by doing a couple of store
+ * operations through it. It will also exercise all possible TR-TT segment
+ * start locations (i.e. 16 of them).
+ */
+static void test_basic_trtt_use(int fd)
+{
+	uint64_t segment_base_addr;
+
+	for (segment_base_addr = 0;
+	     segment_base_addr < PPGTT_SIZE;
+	     segment_base_addr += TRTT_SEGMENT_SIZE)
+	{
+		submit_trtt_context(fd, segment_base_addr);
+	}
+}
+
+static void test_invalid(int fd)
+{
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	ctx_id = gem_context_create(fd);
+
+	/* Check for an incorrectly aligned base location for TR-TT segment */
+	segment_base_addr = TRTT_SEGMENT_SIZE + 0x1000;
+	l3_offset = TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Check for the same/conflicting value for L3 table and TR-TT segment location */
+	segment_base_addr = TRTT_SEGMENT_SIZE;
+	l3_offset = segment_base_addr;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Check for an incorrectly aligned location for L3 table */
+	l3_offset = TILE_SIZE + 0x1000;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Use the correct settings now */
+	l3_offset = TILE_SIZE;
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+	/* Check the overriding of TR-TT settings for the same context */
+	segment_base_addr += TRTT_SEGMENT_SIZE;
+	l3_offset += TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EEXIST);
+
+	gem_context_destroy(fd, ctx_id);
+}
+
+igt_main
+{
+	int fd = -1;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		/* test needs 48b PPGTT & Soft Pin support */
+		igt_require(gem_has_softpin(fd));
+		igt_require(gem_uses_64b_ppgtt(fd));
+		igt_require(has_trtt_support(fd));
+	}
+
+	igt_subtest("invalid")
+		test_invalid(fd);
+
+	igt_subtest("basic")
+		test_basic_trtt_use(fd);
+
+	igt_subtest("evict_active")
+		test_evict_active(fd);
+
+	igt_subtest("evict_hang")
+		test_evict_hang(fd);
+
+	igt_fixture
+		close(fd);
+}
+
-- 
1.9.2


* Re: [PATCH v5] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-11 11:48                         ` [PATCH v5] " akash.goel
@ 2016-03-17 10:14                           ` Michel Thierry
  2016-03-18  8:37                             ` [PATCH v6] " akash.goel
  0 siblings, 1 reply; 30+ messages in thread
From: Michel Thierry @ 2016-03-17 10:14 UTC (permalink / raw)
  To: akash.goel, intel-gfx

On 3/11/2016 11:48 AM, akash.goel@intel.com wrote:
> From: Akash Goel <akash.goel@intel.com>
>
> This patch provides the testcase to exercise the TRTT hardware.
>
> Some platforms have an additional address translation hardware support in
> form of Tiled Resource Translation Table (TR-TT) which provides an extra level
> of abstraction over PPGTT.
> This is useful for mapping Sparse/Tiled texture resources.
>
> TR-TT is tightly coupled with PPGTT: a new instance of TR-TT will be required
> for a new PPGTT instance, but TR-TT may not be enabled for every context.
> 1/16th of the 48bit PPGTT space is earmarked for the translation by TR-TT;
> which chunk to use is conveyed to HW through a register.
> Any GFX address, which lies in that reserved 44 bit range will be translated
> through TR-TT first and then through PPGTT to get the actual physical address.
>
> TRTT is constructed as a 3 level tile table. Each tile is 64KB in size, which
> leaves behind 44-16=28 address bits. The 28 bits are partitioned as 9+9+10, and
> each level is contained within a 4KB page, hence L3 and L2 are composed of
> 512 64b entries and L1 is composed of 1024 32b entries.
>
> There is a provision to keep the TR-TT tables in virtual space, where the pages
> of the TRTT tables will be mapped to PPGTT. This is the adopted mode, as in this
> mode the UMD will have full control over TR-TT management, with bare minimum
> support from the KMD.
> So the entries of the L3 table will contain the PPGTT offset of the L2 table
> pages, similarly the entries of the L2 table will contain the PPGTT offset of
> the L1 table pages. The entries of the L1 table will contain the PPGTT offset
> of the BOs actually backing the Sparse resources.
>
> The I915_GEM_CONTEXT_SETPARAM ioctl is used to request the KMD to enable TRTT
> for a certain context; a new I915_CONTEXT_PARAM_ENABLE_TRTT param has been
> added to the CONTEXT_SETPARAM ioctl for that purpose.
>
> v2:
>   - Add a new wrapper function __gem_context_require_param and use it
>     to detect the TR-TT support
>   - Use the igt_main macro, rename certain functions, remove extra white space,
>     clean up the code (Chris)
>   - Enhance the basic subtest to exercise all possible TR-TT segment start
>     locations (i.e. 16 of them) & for every iteration create a new context.
>
> v3:
>   - Get rid of some superfluous local variables (Chris)
>   - Add asserts to validate whether the GFX address used in MI_STORE_DATA_IMM
>     command is in canonical form & is correctly aligned or not (Chris)
>   - Remove clearing of errno in has_trtt_support function (Chris)
>   - Use the 48B_ADDRESS flag for batch buffer BO also (Chris)
>   - Rebased.
>
> v4:
>   - Add new subtest for invalid settings.
>   - Add new local function query_trtt to check the Driver state (Chris)
>   - Add new helper function gem_uses_64b_ppgtt to detect 64bit PPGTT support
>   - Remove local functions uses_full_ppgtt & has_softpin_support, instead use
>     existing wrappers gem_has_softpin & gem_uses_64b_ppgtt (Chris).
>   - Remove redundant bit masking in emit_store_xxx functions (Chris).
>
> v5:
>   - Add 2 new subtests checking the forceful eviction of active/hanging
>     objects overlapping with the TR-TT segment (Chris).
>   - Move gen8_canonical_addr to igt_aux as it's needed by other tests
>     that do soft pinning, and not just gem_softpin (Michel)
>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Michel Thierry <michel.thierry@intel.com>
> Signed-off-by: Akash Goel <akash.goel@intel.com>

Test looks good to me.

Reviewed-by: Michel Thierry <michel.thierry@intel.com>



* Re: [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-18  8:37                             ` [PATCH v6] " akash.goel
@ 2016-03-18  8:36                               ` Chris Wilson
  2016-03-18  9:01                                 ` Goel, Akash
  0 siblings, 1 reply; 30+ messages in thread
From: Chris Wilson @ 2016-03-18  8:36 UTC (permalink / raw)
  To: akash.goel; +Cc: intel-gfx

On Fri, Mar 18, 2016 at 02:07:40PM +0530, akash.goel@intel.com wrote:
> +/* emit_store_qword
> + * populate batch buffer with MI_STORE_DWORD_IMM command
> + * @fd: drm file descriptor
> + * @cmd_buf: batch buffer
> + * @dw_offset: write offset in batch buffer
> + * @vaddr: destination Virtual address
> + * @data: u64 data to be stored at destination
> + */
> +static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
> +			    uint64_t vaddr, uint64_t data)
> +{
> +	/* Check that softpin addresses are in the correct form */
> +	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
> +
> +	/* SDI cannot write to unaligned addresses */
> +	igt_assert((vaddr & 3) == 0);

If I remember correctly, a qword write from SDI must be 8-byte aligned.
Right?
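
If so, the stricter check would simply be (a sketch; only the mask changes):

	/* an SDI qword write presumably needs 8-byte alignment */
	igt_assert((vaddr & 7) == 0);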

> +
> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
> +	cmd_buf[dw_offset++] = (uint32_t)vaddr;
> +	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
> +	cmd_buf[dw_offset++] = data;
> +	cmd_buf[dw_offset++] = data >> 32;
> +
> +	return dw_offset;
> +}

Hopefully final comments!

Missed EINTR handling during evict, If you repeat the busy/hang tests
within the igt_fork_signal_helper(); igt_stop_signal_helper() that
should cover catching an inopportune signal.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-17 10:14                           ` Michel Thierry
@ 2016-03-18  8:37                             ` akash.goel
  2016-03-18  8:36                               ` Chris Wilson
  0 siblings, 1 reply; 30+ messages in thread
From: akash.goel @ 2016-03-18  8:37 UTC (permalink / raw)
  To: intel-gfx; +Cc: Akash Goel

From: Akash Goel <akash.goel@intel.com>

This patch provides the testcase to exercise the TRTT hardware.

Some platforms have additional address translation hardware support in the
form of a Tiled Resource Translation Table (TR-TT), which provides an extra
level of abstraction over PPGTT.
This is useful for mapping Sparse/Tiled texture resources.

TR-TT is tightly coupled with PPGTT: a new instance of TR-TT is required
for each new PPGTT instance, but TR-TT need not be enabled for every context.
1/16th of the 48-bit PPGTT space is earmarked for translation by TR-TT;
which chunk to use is conveyed to HW through a register.
Any GFX address that lies in that reserved 44-bit range will be translated
through TR-TT first and then through PPGTT to get the actual physical address.
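
(Illustration only, not part of the patch: with a 2^44 segment carved out
of the 2^48 PPGTT space there are 16 possible segment start locations, and
a GFX address can be tested for membership in the reserved range along
these lines, reusing the TRTT_SEGMENT_SIZE value the test below defines.)

	#include <stdbool.h>
	#include <stdint.h>

	#define TRTT_SEGMENT_SIZE (1ULL << 44)
	#define PPGTT_SIZE        (1ULL << 48)

	/* candidate segment_base_addr values: i * TRTT_SEGMENT_SIZE, i = 0..15 */
	static inline bool in_trtt_segment(uint64_t addr, uint64_t segment_base)
	{
		return addr >= segment_base &&
		       addr < segment_base + TRTT_SEGMENT_SIZE;
	}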

TR-TT is constructed as a 3-level tile table. Each tile is 64KB in size, which
leaves behind 44-16=28 address bits. The 28 bits are partitioned as 9+9+10, and
each level is contained within a 4KB page, hence L3 and L2 are each composed of
512 64b entries and L1 is composed of 1024 32b entries.
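
(Again for illustration only: a hypothetical helper, not part of the patch,
showing how an offset within the 44-bit segment would decompose into the
9+9+10 table indices described above.)

	#include <stdint.h>

	/* 64KB tiles: the low 16 bits address within a tile, the upper 28 pick it */
	static inline void trtt_decompose(uint64_t segment_offset,
					  unsigned *l3, unsigned *l2, unsigned *l1)
	{
		uint64_t tile = segment_offset >> 16;	/* 28 tile-index bits */

		*l1 = tile & 0x3ff;		/* low 10 bits: 1024 L1 entries */
		*l2 = (tile >> 10) & 0x1ff;	/* next 9 bits: 512 L2 entries */
		*l3 = (tile >> 19) & 0x1ff;	/* top 9 bits: 512 L3 entries */
	}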

There is a provision to keep the TR-TT tables in virtual space, where the pages
of the TR-TT tables are mapped to PPGTT. This is the adopted mode, as in this
mode the UMD has full control of TR-TT management, with bare minimum support
from the KMD.
So the entries of the L3 table will contain the PPGTT offset of the L2 table
pages, similarly the entries of an L2 table will contain the PPGTT offset of
the L1 table pages. The entries of an L1 table will contain the PPGTT offset
of the BOs actually backing the Sparse resources.

The I915_GEM_CONTEXT_SETPARAM ioctl is used to request the KMD to enable TRTT
for a certain context; a new I915_CONTEXT_PARAM_ENABLE_TRTT param has been
added to the CONTEXT_SETPARAM ioctl for that purpose.
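
(For reference, a minimal sketch of what enabling TR-TT looks like from
userspace, using the local wrapper definitions this patch adds below; fd,
ctx_id and l3_offset are assumed to be set up already.)

	struct local_i915_gem_context_trtt_param trtt_param = {
		.segment_base_addr = 0,			/* first 2^44 chunk */
		.l3_table_address  = l3_offset,		/* PPGTT address of the L3 page */
		.null_tile_val     = 0xFFFFFFFF,
		.invd_tile_val     = 0xFFFFFFFE,
	};
	struct local_i915_gem_context_param ctx_param = {
		.context = ctx_id,
		.size    = sizeof(trtt_param),
		.param   = LOCAL_CONTEXT_PARAM_TRTT,
		.value   = (uint64_t)&trtt_param,
	};

	gem_context_set_param(fd, &ctx_param);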

v2:
 - Add a new wrapper function __gem_context_require_param and use it
   to detect TR-TT support
 - Use the igt_main macro, rename certain functions, remove extra white space,
   cleanup the code (Chris)
 - Enhance the basic subtest to exercise all possible TR-TT segment start
   locations (i.e. 16 of them) & for every iteration create a new context.

v3:
 - Get rid of some superfluous local variables (Chris)
 - Add asserts to validate that the GFX address used in the MI_STORE_DATA_IMM
   command is in canonical form & correctly aligned (Chris)
 - Remove clearing of errno in has_trtt_support function (Chris)
 - Use the 48B_ADDRESS flag for batch buffer BO also (Chris)
 - Rebased.

v4:
 - Add new subtest for invalid settings.
 - Add new local function query_trtt to check the Driver state (Chris)
 - Add new helper function gem_uses_64b_ppgtt to detect 64bit PPGTT support
 - Remove local functions uses_full_ppgtt & has_softpin_support, instead use
   existing wrappers gem_has_softpin & gem_uses_64b_ppgtt (Chris).
 - Remove redundant bit masking in emit_store_xxx functions (Chris).

v5:
 - Add 2 new subtests checking the forceful eviction of active/hanging
   objects overlapping with the TR-TT segment (Chris).
 - Move gen8_canonical_addr to igt_aux as it's needed by other tests
   that do soft pinning, not just gem_softpin (Michel)

v6:
 - Allow each subtest to have its own private drm file instance.
 - Update the basic subtest to check each segment location for both default
   and User created contexts (Chris).
 - Reorder igt_require(softpin/64b_ppgtt/trtt) to have trtt first (Chris).
 - Update the invalid subtest to check for improper values of the Null &
   Invalid tile patterns.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Akash Goel <akash.goel@intel.com>
---
 lib/igt_aux.c          |  11 +
 lib/igt_aux.h          |   1 +
 lib/igt_gt.h           |   2 +
 lib/ioctl_wrappers.c   |  39 ++-
 lib/ioctl_wrappers.h   |   3 +
 tests/Makefile.sources |   1 +
 tests/gem_softpin.c    |  18 +-
 tests/gem_trtt.c       | 627 +++++++++++++++++++++++++++++++++++++++++++++++++
 8 files changed, 679 insertions(+), 23 deletions(-)
 create mode 100644 tests/gem_trtt.c

diff --git a/lib/igt_aux.c b/lib/igt_aux.c
index 7d35666..c5fedb5 100644
--- a/lib/igt_aux.c
+++ b/lib/igt_aux.c
@@ -206,6 +206,17 @@ void igt_exchange_int(void *array, unsigned i, unsigned j)
 	int_arr[j] = tmp;
 }
 
+/* igt_canonical_addr
+ * Used to convert any address into canonical form, i.e. [63:48] == [47].
+ * Based on kernel's sign_extend64 implementation.
+ * @address - a virtual address
+*/
+uint64_t igt_canonical_addr(uint64_t address)
+{
+	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
+	return (__s64)(address << shift) >> shift;
+}
+
 static uint32_t
 hars_petruska_f54_1_random_unsafe(void)
 {
diff --git a/lib/igt_aux.h b/lib/igt_aux.h
index 427719e..85fc64b 100644
--- a/lib/igt_aux.h
+++ b/lib/igt_aux.h
@@ -48,6 +48,7 @@ void igt_permute_array(void *array, unsigned size,
 void igt_progress(const char *header, uint64_t i, uint64_t total);
 void igt_print_activity(void);
 bool igt_check_boolean_env_var(const char *env_var, bool default_value);
+uint64_t igt_canonical_addr(uint64_t address);
 
 bool igt_aub_dump_enabled(void);
 
diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index b7c5c4a..d4e9dbb 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -98,4 +98,6 @@ extern const struct intel_execution_engine {
 	unsigned flags;
 } intel_execution_engines[];
 
+#define GEN8_HIGH_ADDRESS_BIT 47
+
 #endif /* IGT_GT_H */
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 4071260..fb5de07 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -882,6 +882,22 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
 	igt_assert(__gem_context_set_param(fd, p) == 0);
 }
 
+int __gem_context_require_param(int fd, uint64_t param)
+{
+	struct local_i915_gem_context_param p;
+	int ret;
+
+	p.context = 0;
+	p.param = param;
+	p.value = 0;
+	p.size = 0;
+
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
+	if (ret)
+		return -errno;
+	return 0;
+}
+
 /**
  * gem_context_require_param:
  * @fd: open i915 drm file descriptor
@@ -892,14 +908,7 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
  */
 void gem_context_require_param(int fd, uint64_t param)
 {
-	struct local_i915_gem_context_param p;
-
-	p.context = 0;
-	p.param = param;
-	p.value = 0;
-	p.size = 0;
-
-	igt_require(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
+	igt_require(__gem_context_require_param(fd, param) == 0);
 }
 
 void gem_context_require_ban_period(int fd)
@@ -1063,6 +1072,20 @@ bool gem_uses_full_ppgtt(int fd)
 }
 
 /**
+ * gem_uses_64b_ppgtt:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to check whether the kernel internally uses full
+ * 64b per-process gtt to execute batches.
+ *
+ * Returns: Whether batches are run through full 64b ppgtt.
+ */
+bool gem_uses_64b_ppgtt(int fd)
+{
+	return gem_gtt_type(fd) > 2;
+}
+
+/**
  * gem_available_fences:
  * @fd: open i915 drm file descriptor
  *
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index dc0827a..d23fa96 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -107,9 +107,11 @@ struct local_i915_gem_context_param {
 #define LOCAL_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define LOCAL_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define LOCAL_CONTEXT_PARAM_GTT_SIZE	0x3
+#define LOCAL_CONTEXT_PARAM_TRTT	0x4
 	uint64_t value;
 };
 void gem_context_require_ban_period(int fd);
+int __gem_context_require_param(int fd, uint64_t param);
 void gem_context_require_param(int fd, uint64_t param);
 void gem_context_get_param(int fd, struct local_i915_gem_context_param *p);
 void gem_context_set_param(int fd, struct local_i915_gem_context_param *p);
@@ -143,6 +145,7 @@ bool gem_has_bsd2(int fd);
 int gem_gtt_type(int fd);
 bool gem_uses_ppgtt(int fd);
 bool gem_uses_full_ppgtt(int fd);
+bool gem_uses_64b_ppgtt(int fd);
 int gem_available_fences(int fd);
 uint64_t gem_available_aperture_size(int fd);
 uint64_t gem_aperture_size(int fd);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index f8b18b0..e6081f6 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -73,6 +73,7 @@ TESTS_progs_M = \
 	gem_streaming_writes \
 	gem_tiled_blits \
 	gem_tiled_partial_pwrite_pread \
+	gem_trtt \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
 	kms_addfb_basic \
diff --git a/tests/gem_softpin.c b/tests/gem_softpin.c
index 1b3d9d3..3eee08f 100644
--- a/tests/gem_softpin.c
+++ b/tests/gem_softpin.c
@@ -31,18 +31,6 @@
 #define EXEC_OBJECT_PINNED	(1<<4)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
 
-/* gen8_canonical_addr
- * Used to convert any address into canonical form, i.e. [63:48] == [47].
- * Based on kernel's sign_extend64 implementation.
- * @address - a virtual address
-*/
-#define GEN8_HIGH_ADDRESS_BIT 47
-static uint64_t gen8_canonical_addr(uint64_t address)
-{
-	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
-	return (__s64)(address << shift) >> shift;
-}
-
 static void test_invalid(int fd)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -70,7 +58,7 @@ static void test_invalid(int fd)
 
 	/* Check beyond bounds of aperture */
 	object.offset = gem_aperture_size(fd) - 4096;
-	object.offset = gen8_canonical_addr(object.offset);
+	object.offset = igt_canonical_addr(object.offset);
 	igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
 	/* Check gen8 canonical addressing */
@@ -78,7 +66,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull << GEN8_HIGH_ADDRESS_BIT;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
 
@@ -88,7 +76,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull<<32;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		object.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
diff --git a/tests/gem_trtt.c b/tests/gem_trtt.c
new file mode 100644
index 0000000..46ed399
--- /dev/null
+++ b/tests/gem_trtt.c
@@ -0,0 +1,627 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Akash Goel <akash.goel@intel.com>
+ *
+ */
+
+#include "igt.h"
+
+#define BO_SIZE 4096
+#define EXEC_OBJECT_PINNED	(1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+
+/* has_trtt_support
+ * Finds if trtt hw is present
+ * @fd DRM fd
+ */
+static bool has_trtt_support(int fd)
+{
+	int ret = __gem_context_require_param(fd, LOCAL_CONTEXT_PARAM_TRTT);
+
+	return (ret == 0);
+}
+
+/* mmap_bo
+ * helper for creating a CPU mmapping of the buffer
+ * @fd - drm fd
+ * @handle - handle of the buffer to mmap
+ * @size: size of the buffer
+ */
+static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
+{
+	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
+	return ptr;
+}
+
+/* emit_store_dword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u32 data to be stored at destination
+ */
+static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint32_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+
+	return dw_offset;
+}
+
+/* emit_store_qword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u64 data to be stored at destination
+ */
+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint64_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+	cmd_buf[dw_offset++] = data >> 32;
+
+	return dw_offset;
+}
+
+/* emit_bb_end
+ * populate batch buffer with MI_BATCH_BUFFER_END command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ */
+static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
+{
+	dw_offset = ALIGN(dw_offset, 2);
+	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
+	dw_offset++;
+
+	return dw_offset;
+}
+
+/* setup_execbuffer
+ * helper for buffer execution
+ * @execbuf - pointer to execbuffer
+ * @exec_object - pointer to exec object2 struct
+ * @ring - ring to be used
+ * @buffer_count - how many buffers to submit
+ * @batch_length - length of batch buffer
+ */
+static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
+			     struct drm_i915_gem_exec_object2 *exec_object,
+			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
+{
+	memset(execbuf, 0, sizeof(*execbuf));
+
+	execbuf->buffers_ptr = (unsigned long)exec_object;
+	execbuf->buffer_count = buffer_count;
+	execbuf->batch_len = batch_length;
+	execbuf->flags = ring;
+	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
+}
+
+#define TABLE_SIZE 0x1000
+#define TILE_SIZE 0x10000
+
+#define TRTT_SEGMENT_SIZE (1ULL << 44)
+#define PPGTT_SIZE (1ULL << 48)
+
+#define NULL_TILE_PATTERN    0xFFFFFFFF
+#define INVALID_TILE_PATTERN 0xFFFFFFFE
+
+struct local_i915_gem_context_trtt_param {
+	uint64_t segment_base_addr;
+	uint64_t l3_table_address;
+	uint32_t invd_tile_val;
+	uint32_t null_tile_val;
+};
+
+/* query_trtt
+ * Helper function to check if the TR-TT settings stored with the KMD,
+ * for a context, have the expected values (set previously).
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static void
+query_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	gem_context_get_param(fd, &ctx_param);
+
+	igt_assert_eq_u64(trtt_param.l3_table_address, l3_table_address);
+	igt_assert_eq_u64(trtt_param.segment_base_addr, segment_base_addr);
+	igt_assert_eq_u32(trtt_param.invd_tile_val, INVALID_TILE_PATTERN);
+	igt_assert_eq_u32(trtt_param.null_tile_val, NULL_TILE_PATTERN);
+}
+
+static int
+__setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	     uint64_t segment_base_addr, uint32_t null_tile_val,
+	     uint32_t invd_tile_val)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	trtt_param.null_tile_val = null_tile_val;
+	trtt_param.invd_tile_val = invd_tile_val;
+	trtt_param.l3_table_address = l3_table_address;
+	trtt_param.segment_base_addr = segment_base_addr;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	return __gem_context_set_param(fd, &ctx_param);
+}
+
+/* setup_trtt
+ * Helper function to request KMD to enable TRTT
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static int
+setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	return __setup_trtt(fd, ctx_id, l3_table_address, segment_base_addr,
+			NULL_TILE_PATTERN, INVALID_TILE_PATTERN);
+}
+
+/* bo_alloc_setup
+ * allocate bo and populate exec object
+ * @exec_object2 - pointer to exec object
+ * @bo_size - buffer size
+ * @flags - exec flags
+ * @bo_offset - pointer to the current PPGTT offset
+ */
+static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
+			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
+{
+	memset(exec_object2, 0, sizeof(*exec_object2));
+	exec_object2->handle = gem_create(fd, bo_size);
+	exec_object2->flags = flags;
+
+	if (bo_offset)
+	{
+		exec_object2->offset = *bo_offset;
+		*bo_offset += bo_size;
+	}
+}
+
+/* busy_batch
+ * This helper function will prepare & submit a batch on the BCS ring,
+ * which will keep the ring busy for some time, long enough to submit
+ * some other work which can trigger the eviction of that batch object
+ * while it is still getting executed on the ring.
+ */
+static uint64_t busy_batch(int fd, uint32_t ctx_id)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	const int has_64bit_reloc = gen >= 8;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 object[2];
+	uint32_t *map;
+	int factor = 10;
+	int i = 0;
+
+	/* Until the kernel ABI is fixed, only default contexts can be used
+	 * on !RCS rings */
+	igt_require(ctx_id == 0);
+
+	memset(object, 0, sizeof(object));
+	object[0].handle = gem_create(fd, 1024*1024);
+	object[1].handle = gem_create(fd, 4096);
+	map = gem_mmap__cpu(fd, object[1].handle, 0, 4096, PROT_WRITE);
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	setup_execbuffer(&execbuf, object, ctx_id, I915_EXEC_BLT, 2,
+			 emit_bb_end(fd, map, 0)*4);
+	gem_execbuf(fd, &execbuf);
+
+	igt_debug("Active offsets = [%08llx, %08llx]\n",
+		  object[0].offset, object[1].offset);
+
+#define COPY_BLT_CMD		(2<<29|0x53<<22|0x6)
+#define BLT_WRITE_ALPHA		(1<<21)
+#define BLT_WRITE_RGB		(1<<20)
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	while (factor--) {
+		/* XY_SRC_COPY */
+		map[i++] = COPY_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB;
+		if (has_64bit_reloc)
+			map[i-1] += 2;
+		map[i++] = 0xcc << 16 | 1 << 25 | 1 << 24 | (4*1024);
+		map[i++] = 0;
+		map[i++] = 256 << 16 | 1024;
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+		map[i++] = 0;
+		map[i++] = 4096;
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+	}
+	i = emit_bb_end(fd, map, i);
+	munmap(map, 4096);
+
+	object[0].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE;
+	object[1].flags = EXEC_OBJECT_PINNED;
+	execbuf.batch_len = i*4;
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, object[0].handle);
+	gem_close(fd, object[1].handle);
+
+	return object[1].offset;
+}
+
+/* active object eviction test
+ * This test will force the eviction of an active object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_active(void)
+{
+	int fd;
+	uint64_t expected;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+
+	expected = busy_batch(fd, 0);
+
+	/* Determine the segment_base_addr according to the offset of active
+	 * buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, 0, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, 0, l3_offset, segment_base_addr);
+	close(fd);
+}
+
+/* hanging object eviction test
+ * This test will force the eviction of a hanging object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_hang(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+	uint64_t expected;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	ctx_id = gem_context_create(fd);
+
+	igt_hang_ctx(fd, ctx_id, I915_EXEC_RENDER, 0, (uint64_t *)&expected);
+
+	/* Determine the segment_base_addr according to the offset of hanging
+	 * buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, ctx_id, l3_offset, segment_base_addr);
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+}
+
+/* submit_trtt_context
+ * This helper function will create a new context if the TR-TT segment
+ * base address is not zero, allocate an L3 table page, 2 pages apiece
+ * for the L2/L1 tables, and a couple of 64KB data buffers, matching the
+ * tile size. The 2 data buffers will be mapped to the 2 ends of the TRTT
+ * virtual space. A series of MI_STORE_DWORD_IMM commands will be added to the batch
+ * buffer to first update the TR-TT table entries and then to update the data
+ * buffers using their TR-TT VA, exercising the table programming done
+ * previously.
+ * Invoke CONTEXT_SETPARAM ioctl to request KMD to enable TRTT.
+ * Invoke execbuffer to submit the batch buffer.
+ * Verify that the value of the first DWORD in the 2 data buffers matches
+ * the data the GPU was asked to write.
+ */
+static void submit_trtt_context(int fd, uint64_t segment_base_addr, uint32_t ctx_id)
+{
+	enum {
+		L3_TBL,
+		L2_TBL1,
+		L2_TBL2,
+		L1_TBL1,
+		L1_TBL2,
+		DATA1,
+		DATA2,
+		BATCH,
+		NUM_BUFFERS,
+	};
+
+	int ring, len = 0;
+	uint32_t *ptr;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
+	uint32_t batch_buffer[BO_SIZE];
+	uint32_t data, last_entry_offset;
+	uint64_t cur_ppgtt_off, exec_flags;
+	uint64_t first_tile_addr, last_tile_addr;
+
+	first_tile_addr = segment_base_addr;
+	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
+
+	/* To avoid conflict with the TR-TT segment */
+	cur_ppgtt_off = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+	/* first allocate Batch buffer BO */
+	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
+
+	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
+	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
+
+	/* Allocate a L3 table BO */
+	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L2 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L1 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
+	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
+
+	/* Allocate two Data buffer BOs */
+	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Add commands to update the two L3 table entries to point them to the L2 tables*/
+	last_entry_offset = 511*sizeof(uint64_t);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset,
+			       exec_object2[L2_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset + last_entry_offset,
+			       exec_object2[L2_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables*/
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL1].offset,
+			       exec_object2[L1_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL2].offset + last_entry_offset,
+			       exec_object2[L1_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers*/
+	last_entry_offset = 1023*sizeof(uint32_t);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL1].offset,
+			       exec_object2[DATA1].offset >> 16);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL2].offset + last_entry_offset,
+			       exec_object2[DATA2].offset >> 16);
+
+	/* Add commands to update the 2 data buffers, using their TRTT VA */
+	data = 0x12345678;
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(first_tile_addr),
+			       data);
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(last_tile_addr),
+			       data);
+
+	len = emit_bb_end(fd, batch_buffer, len);
+	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
+
+	/* Request KMD to setup the TR-TT */
+	igt_assert(setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr) == 0);
+
+	ring = I915_EXEC_RENDER;
+	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
+
+	/* submit command buffer */
+	gem_execbuf(fd, &execbuf);
+
+	/* read the 2 data buffers to check for the value written by the GPU */
+	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	gem_close(fd, exec_object2[L3_TBL].handle);
+	gem_close(fd, exec_object2[L2_TBL1].handle);
+	gem_close(fd, exec_object2[L2_TBL2].handle);
+	gem_close(fd, exec_object2[L1_TBL1].handle);
+	gem_close(fd, exec_object2[L1_TBL2].handle);
+	gem_close(fd, exec_object2[DATA1].handle);
+	gem_close(fd, exec_object2[DATA2].handle);
+	gem_close(fd, exec_object2[BATCH].handle);
+
+	/* Check if the TRTT params stored with the Driver are intact or not */
+	query_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
+}
+
+/* basic trtt test
+ * This will test the basic TR-TT functionality by doing a couple of store
+ * operations through it. Also it will exercise all possible TR-TT segment
+ * start locations (i.e. 16 of them) for both default & User created contexts.
+ */
+static void test_basic_trtt_use(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+
+	for (segment_base_addr = 0;
+	     segment_base_addr < PPGTT_SIZE;
+	     segment_base_addr += TRTT_SEGMENT_SIZE)
+	{
+		/* In order to test the default context for all segment start
+		 * locations, need to open a new file instance on every iteration
+		 * as TRTT settings are immutable once set for a context.
+		 */
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		submit_trtt_context(fd, segment_base_addr, 0);
+
+		ctx_id = gem_context_create(fd);
+		submit_trtt_context(fd, segment_base_addr, ctx_id);
+		gem_context_destroy(fd, ctx_id);
+
+		close(fd);
+	}
+}
+
+static void test_invalid(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	ctx_id = gem_context_create(fd);
+
+	/* Check for an incorrectly aligned base location for TR-TT segment */
+	segment_base_addr = TRTT_SEGMENT_SIZE + 0x1000;
+	l3_offset = TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Correct the segment_base_addr value */
+	segment_base_addr = TRTT_SEGMENT_SIZE;
+
+	/* Check for the same/conflicting value for L3 table and TR-TT segment location */
+	l3_offset = segment_base_addr;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Check for an incorrectly aligned location for L3 table */
+	l3_offset = TILE_SIZE + 0x1000;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Correct the l3_offset value */
+	l3_offset = TILE_SIZE;
+
+	/* Check for the same value for Null & Invalid tile patterns */
+	igt_assert_eq(__setup_trtt(fd, ctx_id, l3_offset, segment_base_addr,
+				   NULL_TILE_PATTERN, NULL_TILE_PATTERN), -EINVAL);
+
+	/* Use the correct settings now */
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+	/* Check the overriding of TR-TT settings for the same context */
+	segment_base_addr += TRTT_SEGMENT_SIZE;
+	l3_offset += TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EEXIST);
+
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+}
+
+igt_main
+{
+	int fd = -1;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		igt_require(has_trtt_support(fd));
+		/* test also needs 48b PPGTT & soft pin support */
+		igt_require(gem_has_softpin(fd));
+		igt_require(gem_uses_64b_ppgtt(fd));
+	}
+
+	/* Each subtest will open its own private file instance to avoid
+	 * any interference. Otherwise once TRTT is enabled for the default
+	 * context with a segment_base_addr value of 0, the other tests that
+	 * run implicitly, such as quiescent_gpu, will break, as they only
+	 * use the default context and do not use the 48B_ADDRESS flag for it.
+	 */
+
+	igt_subtest("invalid")
+		test_invalid();
+
+	igt_subtest("basic")
+		test_basic_trtt_use();
+
+	igt_subtest("evict_active")
+		test_evict_active();
+
+	igt_subtest("evict_hang")
+		test_evict_hang();
+
+	igt_fixture
+		close(fd);
+}
+
-- 
1.9.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* Re: [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-18  8:36                               ` Chris Wilson
@ 2016-03-18  9:01                                 ` Goel, Akash
  2016-03-18  9:22                                   ` Chris Wilson
  0 siblings, 1 reply; 30+ messages in thread
From: Goel, Akash @ 2016-03-18  9:01 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: akash.goel



On 3/18/2016 2:06 PM, Chris Wilson wrote:
> On Fri, Mar 18, 2016 at 02:07:40PM +0530, akash.goel@intel.com wrote:
>> +/* emit_store_qword
>> + * populate batch buffer with MI_STORE_DWORD_IMM command
>> + * @fd: drm file descriptor
>> + * @cmd_buf: batch buffer
>> + * @dw_offset: write offset in batch buffer
>> + * @vaddr: destination Virtual address
>> + * @data: u64 data to be stored at destination
>> + */
>> +static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
>> +			    uint64_t vaddr, uint64_t data)
>> +{
>> +	/* Check that softpin addresses are in the correct form */
>> +	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
>> +
>> +	/* SDI cannot write to unaligned addresses */
>> +	igt_assert((vaddr & 3) == 0);
>
> If I remember correctly a qword write from SDI must be 8 byte aligned.
> Right?

Yes, right. Sorry, my bad.
>
>> +
>> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
>> +	cmd_buf[dw_offset++] = (uint32_t)vaddr;
>> +	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
>> +	cmd_buf[dw_offset++] = data;
>> +	cmd_buf[dw_offset++] = data >> 32;
>> +
>> +	return dw_offset;
>> +}
>
> Hopefully final comments!
>
> Missed EINTR handling during evict, If you repeat the busy/hang tests
> within the igt_fork_signal_helper(); igt_stop_signal_helper() that
> should cover catching an inopportune signal.

Fine, will add, thanks for suggesting this.
So the signal will interrupt the driver, which would be waiting, in the
eviction path, for the vma unbind to complete.

Best regards
Akash

> -Chris
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-18  9:01                                 ` Goel, Akash
@ 2016-03-18  9:22                                   ` Chris Wilson
  2016-03-18  9:52                                     ` Goel, Akash
  0 siblings, 1 reply; 30+ messages in thread
From: Chris Wilson @ 2016-03-18  9:22 UTC (permalink / raw)
  To: Goel, Akash; +Cc: intel-gfx

On Fri, Mar 18, 2016 at 02:31:23PM +0530, Goel, Akash wrote:
> 
> 
> On 3/18/2016 2:06 PM, Chris Wilson wrote:
> >On Fri, Mar 18, 2016 at 02:07:40PM +0530, akash.goel@intel.com wrote:
> >>+/* emit_store_qword
> >>+ * populate batch buffer with MI_STORE_DWORD_IMM command
> >>+ * @fd: drm file descriptor
> >>+ * @cmd_buf: batch buffer
> >>+ * @dw_offset: write offset in batch buffer
> >>+ * @vaddr: destination Virtual address
> >>+ * @data: u64 data to be stored at destination
> >>+ */
> >>+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
> >>+			    uint64_t vaddr, uint64_t data)
> >>+{
> >>+	/* Check that softpin addresses are in the correct form */
> >>+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
> >>+
> >>+	/* SDI cannot write to unaligned addresses */
> >>+	igt_assert((vaddr & 3) == 0);
> >
> >If I remember correctly a qword write from SDI must be 8 byte aligned.
> >Right?
> 
> Yes right. Sorry, my bad..
> >
> >>+
> >>+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
> >>+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
> >>+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
> >>+	cmd_buf[dw_offset++] = data;
> >>+	cmd_buf[dw_offset++] = data >> 32;
> >>+
> >>+	return dw_offset;
> >>+}
> >
> >Hopefully final comments!
> >
> >Missed EINTR handling during evict, If you repeat the busy/hang tests
> >within the igt_fork_signal_helper(); igt_stop_signal_helper() that
> >should cover catching an inopportune signal.
> 
> Fine will add, thanks for suggesting this
> So the signal will interrupt the Driver, which would be waiting for
> the vma unbind to complete, from the eviction path.

Right, and we will report the error back to userspace as EINTR and
userspace will restart the syscall and we expect it to succeed
(eventually). Just useful for flushing out the error handling.

Having just remembered how useful this might be, I just extended
gem_softpin for similar reasons:
+       igt_subtest("evict-active-interruptible") {
+               struct timespec start = {};
+               while (igt_seconds_elapsed(&start) < 20)
+                       test_evict_active(fd);
+       }
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-18  9:22                                   ` Chris Wilson
@ 2016-03-18  9:52                                     ` Goel, Akash
  2016-03-18 10:25                                       ` [PATCH v7] " akash.goel
  2016-03-18 10:32                                       ` [PATCH v6] " Chris Wilson
  0 siblings, 2 replies; 30+ messages in thread
From: Goel, Akash @ 2016-03-18  9:52 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: akash.goel



On 3/18/2016 2:52 PM, Chris Wilson wrote:
> On Fri, Mar 18, 2016 at 02:31:23PM +0530, Goel, Akash wrote:
>>
>>
>> On 3/18/2016 2:06 PM, Chris Wilson wrote:
>>> On Fri, Mar 18, 2016 at 02:07:40PM +0530, akash.goel@intel.com wrote:
>>>> +/* emit_store_qword
>>>> + * populate batch buffer with MI_STORE_DWORD_IMM command
>>>> + * @fd: drm file descriptor
>>>> + * @cmd_buf: batch buffer
>>>> + * @dw_offset: write offset in batch buffer
>>>> + * @vaddr: destination Virtual address
>>>> + * @data: u64 data to be stored at destination
>>>> + */
>>>> +static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
>>>> +			    uint64_t vaddr, uint64_t data)
>>>> +{
>>>> +	/* Check that softpin addresses are in the correct form */
>>>> +	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
>>>> +
>>>> +	/* SDI cannot write to unaligned addresses */
>>>> +	igt_assert((vaddr & 3) == 0);
>>>
>>> If I remember correctly a qword write from SDI must be 8 byte aligned.
>>> Right?
>>
>> Yes right. Sorry, my bad..
>>>
>>>> +
>>>> +	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
>>>> +	cmd_buf[dw_offset++] = (uint32_t)vaddr;
>>>> +	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
>>>> +	cmd_buf[dw_offset++] = data;
>>>> +	cmd_buf[dw_offset++] = data >> 32;
>>>> +
>>>> +	return dw_offset;
>>>> +}
>>>
>>> Hopefully final comments!
>>>
>>> Missed EINTR handling during evict, If you repeat the busy/hang tests
>>> within the igt_fork_signal_helper(); igt_stop_signal_helper() that
>>> should cover catching an inopportune signal.
>>
>> Fine will add, thanks for suggesting this
>> So the signal will interrupt the Driver, which would be waiting for
>> the vma unbind to complete, from the eviction path.
>
> Right, and we will report the error back to userspace as EINTR and
> userspace will restart the syscall and we expect it to succeed
> (eventually). Just useful for flushing out the error handling.
>
> Having just remembered how useful this might be, I just extended
> gem_softpin for similar reasons:
> +       igt_subtest("evict-active-interruptible") {
> +               struct timespec start = {};
> +               while (igt_seconds_elapsed(&start) < 20)
> +                       test_evict_active(fd);
> +       }
Thanks, I just tested the interruptible versions like this:

+	igt_fork_signal_helper();
+	igt_subtest("evict_active-interruptible")
+		 test_evict_active();
+
+	igt_subtest("evict_hang-interruptible")
+		test_evict_hang();
+	igt_stop_signal_helper();

Actually the hanging object test implicitly exercises the interruption
case (otherwise the test won't pass): error recovery, as part of GPU
reset, wakes up/interrupts the waiters.

Best regards
Akash


> -Chris
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* [PATCH v7] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-18  9:52                                     ` Goel, Akash
@ 2016-03-18 10:25                                       ` akash.goel
  2016-03-18 10:32                                       ` [PATCH v6] " Chris Wilson
  1 sibling, 0 replies; 30+ messages in thread
From: akash.goel @ 2016-03-18 10:25 UTC (permalink / raw)
  To: intel-gfx; +Cc: Akash Goel

From: Akash Goel <akash.goel@intel.com>

This patch provides the testcase to exercise the TRTT hardware.

Some platforms have additional address translation hardware support in the
form of a Tiled Resource Translation Table (TR-TT), which provides an extra
level of abstraction over PPGTT.
This is useful for mapping Sparse/Tiled texture resources.

TR-TT is tightly coupled with PPGTT: a new instance of TR-TT is required
for each new PPGTT instance, but TR-TT need not be enabled for every context.
1/16th of the 48-bit PPGTT space is earmarked for translation by TR-TT;
which chunk to use is conveyed to HW through a register.
Any GFX address that lies in that reserved 44-bit range will be translated
through TR-TT first and then through PPGTT to get the actual physical address.

TR-TT is constructed as a 3-level tile table. Each tile is 64KB in size, which
leaves behind 44-16=28 address bits. The 28 bits are partitioned as 9+9+10, and
each level is contained within a 4KB page, hence L3 and L2 are each composed of
512 64b entries and L1 is composed of 1024 32b entries.

There is a provision to keep the TR-TT tables in virtual space, where the pages
of the TR-TT tables are mapped to PPGTT. This is the adopted mode, as in this
mode the UMD has full control of TR-TT management, with bare minimum support
from the KMD.
So the entries of the L3 table will contain the PPGTT offset of the L2 table
pages, similarly the entries of an L2 table will contain the PPGTT offset of
the L1 table pages. The entries of an L1 table will contain the PPGTT offset
of the BOs actually backing the Sparse resources.

The I915_GEM_CONTEXT_SETPARAM ioctl is used to request the KMD to enable TRTT
for a certain context; a new I915_CONTEXT_PARAM_ENABLE_TRTT param has been
added to the CONTEXT_SETPARAM ioctl for that purpose.

v2:
 - Add a new wrapper function __gem_context_require_param and use it
   to detect TR-TT support
 - Use the igt_main macro, rename certain functions, remove extra white space,
   cleanup the code (Chris)
 - Enhance the basic subtest to exercise all possible TR-TT segment start
   locations (i.e. 16 of them) & for every iteration create a new context.

v3:
 - Get rid of some superfluous local variables (Chris)
 - Add asserts to validate that the GFX address used in the MI_STORE_DATA_IMM
   command is in canonical form & correctly aligned (Chris)
 - Remove clearing of errno in has_trtt_support function (Chris)
 - Use the 48B_ADDRESS flag for batch buffer BO also (Chris)
 - Rebased.

v4:
 - Add new subtest for invalid settings.
 - Add new local function query_trtt to check the Driver state (Chris)
 - Add new helper function gem_uses_64b_ppgtt to detect 64bit PPGTT support
 - Remove local functions uses_full_ppgtt & has_softpin_support, instead use
   existing wrappers gem_has_softpin & gem_uses_64b_ppgtt (Chris).
 - Remove redundant bit masking in emit_store_xxx functions (Chris).

v5:
 - Add 2 new subtests checking the forceful eviction of active/hanging
   objects overlapping with the TR-TT segment (Chris).
 - Move gen8_canonical_addr to igt_aux as it's needed by other tests
   that do soft pinning, not just gem_softpin (Michel)

v6:
 - Allow each subtest to have its own private drm file instance.
 - Update the basic subtest to check each segment location for both default
   and User created contexts (Chris).
 - Reorder igt_require(softpin/64b_ppgtt/trtt) to have trtt first (Chris).
 - Update the invalid subtest to check for improper values of the Null &
   Invalid tile patterns.

v7:
 - Add interruptible versions of the 2 eviction subtests (Chris)
 - Correct the alignment check for the MI Store qword command (Chris).

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Akash Goel <akash.goel@intel.com>
---
 lib/igt_aux.c          |  11 +
 lib/igt_aux.h          |   1 +
 lib/igt_gt.h           |   2 +
 lib/ioctl_wrappers.c   |  39 ++-
 lib/ioctl_wrappers.h   |   3 +
 tests/Makefile.sources |   1 +
 tests/gem_softpin.c    |  18 +-
 tests/gem_trtt.c       | 635 +++++++++++++++++++++++++++++++++++++++++++++++++
 8 files changed, 687 insertions(+), 23 deletions(-)
 create mode 100644 tests/gem_trtt.c

diff --git a/lib/igt_aux.c b/lib/igt_aux.c
index 7d35666..c5fedb5 100644
--- a/lib/igt_aux.c
+++ b/lib/igt_aux.c
@@ -206,6 +206,17 @@ void igt_exchange_int(void *array, unsigned i, unsigned j)
 	int_arr[j] = tmp;
 }
 
+/* igt_canonical_addr
+ * Used to convert any address into canonical form, i.e. [63:48] == [47].
+ * Based on kernel's sign_extend64 implementation.
+ * @address - a virtual address
+*/
+uint64_t igt_canonical_addr(uint64_t address)
+{
+	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
+	return (__s64)(address << shift) >> shift;
+}
+
 static uint32_t
 hars_petruska_f54_1_random_unsafe(void)
 {
diff --git a/lib/igt_aux.h b/lib/igt_aux.h
index 427719e..85fc64b 100644
--- a/lib/igt_aux.h
+++ b/lib/igt_aux.h
@@ -48,6 +48,7 @@ void igt_permute_array(void *array, unsigned size,
 void igt_progress(const char *header, uint64_t i, uint64_t total);
 void igt_print_activity(void);
 bool igt_check_boolean_env_var(const char *env_var, bool default_value);
+uint64_t igt_canonical_addr(uint64_t address);
 
 bool igt_aub_dump_enabled(void);
 
diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index b7c5c4a..d4e9dbb 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -98,4 +98,6 @@ extern const struct intel_execution_engine {
 	unsigned flags;
 } intel_execution_engines[];
 
+#define GEN8_HIGH_ADDRESS_BIT 47
+
 #endif /* IGT_GT_H */
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 4071260..fb5de07 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -882,6 +882,22 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
 	igt_assert(__gem_context_set_param(fd, p) == 0);
 }
 
+int __gem_context_require_param(int fd, uint64_t param)
+{
+	struct local_i915_gem_context_param p;
+	int ret;
+
+	p.context = 0;
+	p.param = param;
+	p.value = 0;
+	p.size = 0;
+
+	ret = drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
+	if (ret)
+		return -errno;
+	return 0;
+}
+
 /**
  * gem_context_require_param:
  * @fd: open i915 drm file descriptor
@@ -892,14 +908,7 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
  */
 void gem_context_require_param(int fd, uint64_t param)
 {
-	struct local_i915_gem_context_param p;
-
-	p.context = 0;
-	p.param = param;
-	p.value = 0;
-	p.size = 0;
-
-	igt_require(drmIoctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
+	igt_require(__gem_context_require_param(fd, param) == 0);
 }
 
 void gem_context_require_ban_period(int fd)
@@ -1063,6 +1072,20 @@ bool gem_uses_full_ppgtt(int fd)
 }
 
 /**
+ * gem_uses_64b_ppgtt:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to check whether the kernel internally uses full
+ * 64b per-process gtt to execute batches.
+ *
+ * Returns: Whether batches are run through full 64b ppgtt.
+ */
+bool gem_uses_64b_ppgtt(int fd)
+{
+	return gem_gtt_type(fd) > 2;
+}
+
+/**
  * gem_available_fences:
  * @fd: open i915 drm file descriptor
  *
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index dc0827a..d23fa96 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -107,9 +107,11 @@ struct local_i915_gem_context_param {
 #define LOCAL_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define LOCAL_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define LOCAL_CONTEXT_PARAM_GTT_SIZE	0x3
+#define LOCAL_CONTEXT_PARAM_TRTT	0x4
 	uint64_t value;
 };
 void gem_context_require_ban_period(int fd);
+int __gem_context_require_param(int fd, uint64_t param);
 void gem_context_require_param(int fd, uint64_t param);
 void gem_context_get_param(int fd, struct local_i915_gem_context_param *p);
 void gem_context_set_param(int fd, struct local_i915_gem_context_param *p);
@@ -143,6 +145,7 @@ bool gem_has_bsd2(int fd);
 int gem_gtt_type(int fd);
 bool gem_uses_ppgtt(int fd);
 bool gem_uses_full_ppgtt(int fd);
+bool gem_uses_64b_ppgtt(int fd);
 int gem_available_fences(int fd);
 uint64_t gem_available_aperture_size(int fd);
 uint64_t gem_aperture_size(int fd);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index f8b18b0..e6081f6 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -73,6 +73,7 @@ TESTS_progs_M = \
 	gem_streaming_writes \
 	gem_tiled_blits \
 	gem_tiled_partial_pwrite_pread \
+	gem_trtt \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
 	kms_addfb_basic \
diff --git a/tests/gem_softpin.c b/tests/gem_softpin.c
index 1b3d9d3..3eee08f 100644
--- a/tests/gem_softpin.c
+++ b/tests/gem_softpin.c
@@ -31,18 +31,6 @@
 #define EXEC_OBJECT_PINNED	(1<<4)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
 
-/* gen8_canonical_addr
- * Used to convert any address into canonical form, i.e. [63:48] == [47].
- * Based on kernel's sign_extend64 implementation.
- * @address - a virtual address
-*/
-#define GEN8_HIGH_ADDRESS_BIT 47
-static uint64_t gen8_canonical_addr(uint64_t address)
-{
-	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
-	return (__s64)(address << shift) >> shift;
-}
-
 static void test_invalid(int fd)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -70,7 +58,7 @@ static void test_invalid(int fd)
 
 	/* Check beyond bounds of aperture */
 	object.offset = gem_aperture_size(fd) - 4096;
-	object.offset = gen8_canonical_addr(object.offset);
+	object.offset = igt_canonical_addr(object.offset);
 	igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
 	/* Check gen8 canonical addressing */
@@ -78,7 +66,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull << GEN8_HIGH_ADDRESS_BIT;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
 
@@ -88,7 +76,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull<<32;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		object.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
diff --git a/tests/gem_trtt.c b/tests/gem_trtt.c
new file mode 100644
index 0000000..e184dc8
--- /dev/null
+++ b/tests/gem_trtt.c
@@ -0,0 +1,635 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Akash Goel <akash.goel@intel.com>
+ *
+ */
+
+#include "igt.h"
+
+#define BO_SIZE 4096
+#define EXEC_OBJECT_PINNED	(1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+
+/* has_trtt_support
+ * Finds if trtt hw is present
+ * @fd DRM fd
+ */
+static bool has_trtt_support(int fd)
+{
+	int ret = __gem_context_require_param(fd, LOCAL_CONTEXT_PARAM_TRTT);
+
+	return (ret == 0);
+}
+
+/* mmap_bo
+ * helper for creating a CPU mmapping of the buffer
+ * @fd - drm fd
+ * @handle - handle of the buffer to mmap
+ * @size: size of the buffer
+ */
+static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
+{
+	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
+	return ptr;
+}
+
+/* emit_store_dword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u32 data to be stored at destination
+ */
+static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint32_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+
+	return dw_offset;
+}
+
+/* emit_store_qword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u64 data to be stored at destination
+ */
+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint64_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 7) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+	cmd_buf[dw_offset++] = data >> 32;
+
+	return dw_offset;
+}
+
+/* emit_bb_end
+ * populate batch buffer with MI_BATCH_BUFFER_END command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ */
+static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
+{
+	dw_offset = ALIGN(dw_offset, 2);
+	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
+	dw_offset++;
+
+	return dw_offset;
+}
+
+/* setup_execbuffer
+ * helper for buffer execution
+ * @execbuf - pointer to execbuffer
+ * @exec_object - pointer to exec object2 struct
+ * @ring - ring to be used
+ * @buffer_count - how many buffers to submit
+ * @batch_length - length of batch buffer
+ */
+static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
+			     struct drm_i915_gem_exec_object2 *exec_object,
+			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
+{
+	memset(execbuf, 0, sizeof(*execbuf));
+
+	execbuf->buffers_ptr = (unsigned long)exec_object;
+	execbuf->buffer_count = buffer_count;
+	execbuf->batch_len = batch_length;
+	execbuf->flags = ring;
+	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
+}
+
+#define TABLE_SIZE 0x1000
+#define TILE_SIZE 0x10000
+
+#define TRTT_SEGMENT_SIZE (1ULL << 44)
+#define PPGTT_SIZE (1ULL << 48)
+
+#define NULL_TILE_PATTERN    0xFFFFFFFF
+#define INVALID_TILE_PATTERN 0xFFFFFFFE
+
+struct local_i915_gem_context_trtt_param {
+	uint64_t segment_base_addr;
+	uint64_t l3_table_address;
+	uint32_t invd_tile_val;
+	uint32_t null_tile_val;
+};
+
+/* query_trtt
+ * Helper function to check if the TR-TT settings stored with the KMD,
+ * for a context, have the expected values (set previously).
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static void
+query_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	gem_context_get_param(fd, &ctx_param);
+
+	igt_assert_eq_u64(trtt_param.l3_table_address, l3_table_address);
+	igt_assert_eq_u64(trtt_param.segment_base_addr, segment_base_addr);
+	igt_assert_eq_u32(trtt_param.invd_tile_val, INVALID_TILE_PATTERN);
+	igt_assert_eq_u32(trtt_param.null_tile_val, NULL_TILE_PATTERN);
+}
+
+static int
+__setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	     uint64_t segment_base_addr, uint32_t null_tile_val,
+	     uint32_t invd_tile_val)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	trtt_param.null_tile_val = null_tile_val;
+	trtt_param.invd_tile_val = invd_tile_val;
+	trtt_param.l3_table_address = l3_table_address;
+	trtt_param.segment_base_addr = segment_base_addr;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	return __gem_context_set_param(fd, &ctx_param);
+}
+
+/* setup_trtt
+ * Helper function to request KMD to enable TRTT
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static int
+setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	return __setup_trtt(fd, ctx_id, l3_table_address, segment_base_addr,
+			NULL_TILE_PATTERN, INVALID_TILE_PATTERN);
+}
+
+/* bo_alloc_setup
+ * allocate bo and populate exec object
+ * @exec_object2 - pointer to exec object
+ * @bo_size - buffer size
+ * @flags - exec flags
+ * @bo_offset - pointer to the current PPGTT offset
+ */
+static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
+			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
+{
+	memset(exec_object2, 0, sizeof(*exec_object2));
+	exec_object2->handle = gem_create(fd, bo_size);
+	exec_object2->flags = flags;
+
+	if (bo_offset) {
+		exec_object2->offset = *bo_offset;
+		*bo_offset += bo_size;
+	}
+}
+
+/* busy_batch
+ * This helper function will prepare & submit a batch on the BCS ring,
+ * which will keep the ring busy for some time, long enough to submit
+ * some other work which can trigger the eviction of that batch object
+ * while it is still getting executed on the ring.
+ */
+static uint64_t busy_batch(int fd, uint32_t ctx_id)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	const int has_64bit_reloc = gen >= 8;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 object[2];
+	uint32_t *map;
+	int factor = 10;
+	int i = 0;
+
+	/* Until the kernel ABI is fixed, only default contexts can be used
+	 * on !RCS rings */
+	igt_require(ctx_id == 0);
+
+	memset(object, 0, sizeof(object));
+	object[0].handle = gem_create(fd, 1024*1024);
+	object[1].handle = gem_create(fd, 4096);
+	map = gem_mmap__cpu(fd, object[1].handle, 0, 4096, PROT_WRITE);
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	setup_execbuffer(&execbuf, object, ctx_id, I915_EXEC_BLT, 2,
+			 emit_bb_end(fd, map, 0)*4);
+	gem_execbuf(fd, &execbuf);
+
+	igt_debug("Active offsets = [%08llx, %08llx]\n",
+		  object[0].offset, object[1].offset);
+
+#define COPY_BLT_CMD		(2<<29|0x53<<22|0x6)
+#define BLT_WRITE_ALPHA		(1<<21)
+#define BLT_WRITE_RGB		(1<<20)
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	while (factor--) {
+		/* XY_SRC_COPY */
+		map[i++] = COPY_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB;
+		if (has_64bit_reloc)
+			map[i-1] += 2;
+		map[i++] = 0xcc << 16 | 1 << 25 | 1 << 24 | (4*1024);
+		map[i++] = 0;
+		map[i++] = 256 << 16 | 1024;
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+		map[i++] = 0;
+		map[i++] = 4096;
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+	}
+	i = emit_bb_end(fd, map, i);
+	munmap(map, 4096);
+
+	object[0].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE;
+	object[1].flags = EXEC_OBJECT_PINNED;
+	execbuf.batch_len = i*4;
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, object[0].handle);
+	gem_close(fd, object[1].handle);
+
+	return object[1].offset;
+}
+
+/* active object eviction test
+ * This test will force the eviction of an active object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_active(void)
+{
+	int fd;
+	uint64_t expected;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+
+	expected = busy_batch(fd, 0);
+
+	/* Determine the segment_base_addr according to the offset of active
+	 * buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, 0, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, 0, l3_offset, segment_base_addr);
+	close(fd);
+}
+
+/* hanging object eviction test
+ * This test will force the eviction of a hanging object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_hang(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+	uint64_t expected;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	ctx_id = gem_context_create(fd);
+
+	igt_hang_ctx(fd, ctx_id, I915_EXEC_RENDER, 0, (uint64_t *)&expected);
+
+	/* Determine the segment_base_addr according to the offset of hanging
+	 * buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, ctx_id, l3_offset, segment_base_addr);
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+}
+
+/* submit_trtt_context
+ * This helper function will allocate an L3 table page, 2 pages apiece
+ * for the L2/L1 tables and a couple of data buffers of 64KB in size,
+ * matching the Tile size. The 2 data buffers will be mapped to the 2 ends
+ * of the TRTT virtual space. A series of MI_STORE_DWORD_IMM commands will
+ * be added in the batch buffer to first update the TR-TT table entries and
+ * then to update the data buffers using their TR-TT VA, exercising the
+ * table programming done previously.
+ * Invoke the CONTEXT_SETPARAM ioctl to request KMD to enable TRTT.
+ * Invoke execbuffer to submit the batch buffer.
+ * Verify the value of the first DWORD in the 2 data buffers matches the
+ * data asked to be written by the GPU.
+ */
+static void submit_trtt_context(int fd, uint64_t segment_base_addr, uint32_t ctx_id)
+{
+	enum {
+		L3_TBL,
+		L2_TBL1,
+		L2_TBL2,
+		L1_TBL1,
+		L1_TBL2,
+		DATA1,
+		DATA2,
+		BATCH,
+		NUM_BUFFERS,
+	};
+
+	int ring, len = 0;
+	uint32_t *ptr;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
+	uint32_t batch_buffer[BO_SIZE];
+	uint32_t data, last_entry_offset;
+	uint64_t cur_ppgtt_off, exec_flags;
+	uint64_t first_tile_addr, last_tile_addr;
+
+	first_tile_addr = segment_base_addr;
+	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
+
+	/* To avoid conflict with the TR-TT segment */
+	cur_ppgtt_off = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+	/* first allocate Batch buffer BO */
+	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
+
+	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
+	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
+
+	/* Allocate a L3 table BO */
+	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L2 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L1 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
+	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
+
+	/* Allocate two Data buffer BOs */
+	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Add commands to update the two L3 table entries to point them to the L2 tables */
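+	/* each 4KB L3/L2 page holds 512 64b entries, so 511 indexes the last one */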
+	last_entry_offset = 511*sizeof(uint64_t);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset,
+			       exec_object2[L2_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset + last_entry_offset,
+			       exec_object2[L2_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables */
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL1].offset,
+			       exec_object2[L1_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL2].offset + last_entry_offset,
+			       exec_object2[L1_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers */
+	last_entry_offset = 1023*sizeof(uint32_t);
+
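+	/* L1 entries are 32b tile addresses in 64KB units, hence the >> 16 */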
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL1].offset,
+			       exec_object2[DATA1].offset >> 16);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL2].offset + last_entry_offset,
+			       exec_object2[DATA2].offset >> 16);
+
+	/* Add commands to update the 2 data buffers, using their TRTT VA */
+	data = 0x12345678;
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(first_tile_addr),
+			       data);
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(last_tile_addr),
+			       data);
+
+	len = emit_bb_end(fd, batch_buffer, len);
+	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
+
+	/* Request KMD to setup the TR-TT */
+	igt_assert(setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr) == 0);
+
+	ring = I915_EXEC_RENDER;
+	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
+
+	/* submit command buffer */
+	gem_execbuf(fd, &execbuf);
+
+	/* read the 2 data buffers to check for the value written by the GPU */
+	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	gem_close(fd, exec_object2[L3_TBL].handle);
+	gem_close(fd, exec_object2[L2_TBL1].handle);
+	gem_close(fd, exec_object2[L2_TBL2].handle);
+	gem_close(fd, exec_object2[L1_TBL1].handle);
+	gem_close(fd, exec_object2[L1_TBL2].handle);
+	gem_close(fd, exec_object2[DATA1].handle);
+	gem_close(fd, exec_object2[DATA2].handle);
+	gem_close(fd, exec_object2[BATCH].handle);
+
+	/* Check if the TRTT params stored with the Driver are intact or not */
+	query_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
+}
+
+/* basic trtt test
+ * This will test the basic TR-TT functionality by doing a couple of store
+ * operations through it. Also it will exercise all possible TR-TT segment
+ * start locations (i.e. 16 of them) for both default & User created contexts.
+ */
+static void test_basic_trtt_use(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+
+	for (segment_base_addr = 0;
+	     segment_base_addr < PPGTT_SIZE;
+	     segment_base_addr += TRTT_SEGMENT_SIZE) {
+		/* In order to test the default context for all segment start
+		 * locations, need to open a new file instance on every iteration
+		 * as TRTT settings are immutable once set for a context.
+		 */
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		submit_trtt_context(fd, segment_base_addr, 0);
+
+		ctx_id = gem_context_create(fd);
+		submit_trtt_context(fd, segment_base_addr, ctx_id);
+		gem_context_destroy(fd, ctx_id);
+
+		close(fd);
+	}
+}
+
+static void test_invalid(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	ctx_id = gem_context_create(fd);
+
+	/* Check for an incorrectly aligned base location for TR-TT segment */
+	segment_base_addr = TRTT_SEGMENT_SIZE + 0x1000;
+	l3_offset = TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Correct the segment_base_addr value */
+	segment_base_addr = TRTT_SEGMENT_SIZE;
+
+	/* Check for the same/conflicting value for L3 table and TR-TT segment location */
+	l3_offset = segment_base_addr;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Check for an incorrectly aligned location for L3 table */
+	l3_offset = TILE_SIZE + 0x1000;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Correct the l3_offset value */
+	l3_offset = TILE_SIZE;
+
+	/* Check for the same value for Null & Invalid tile patterns */
+	igt_assert_eq(__setup_trtt(fd, ctx_id, l3_offset, segment_base_addr,
+				   NULL_TILE_PATTERN, NULL_TILE_PATTERN), -EINVAL);
+
+	/* Use the correct settings now */
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+	/* Check the overriding of TR-TT settings for the same context */
+	segment_base_addr += TRTT_SEGMENT_SIZE;
+	l3_offset += TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EEXIST);
+
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+}
+
+igt_main
+{
+	int fd = -1;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		igt_require(has_trtt_support(fd));
+		/* the test also needs 48b PPGTT & Soft Pin support */
+		igt_require(gem_has_softpin(fd));
+		igt_require(gem_uses_64b_ppgtt(fd));
+	}
+
+	/* Each subtest will open its own private file instance to avoid
+	 * any interference. Otherwise once TRTT is enabled for the default
+	 * context with segment_base_addr value of 0, all the other tests which
+	 * are implicitly done, such as quiescent_gpu, will break as they only
+	 * use the default context and do not use the 48B_ADDRESS flag for it.
+	 */
+
+	igt_subtest("invalid")
+		test_invalid();
+
+	igt_subtest("basic")
+		test_basic_trtt_use();
+
+	igt_subtest("evict_active")
+		test_evict_active();
+
+	igt_subtest("evict_hang")
+		test_evict_hang();
+
+	igt_fork_signal_helper();
+	igt_subtest("evict_active-interruptible")
+		test_evict_active();
+
+	igt_subtest("evict_hang-interruptible")
+		test_evict_hang();
+	igt_stop_signal_helper();
+
+	igt_fixture
+		close(fd);
+}
+
-- 
1.9.2

_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply related	[flat|nested] 30+ messages in thread

* Re: [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-18  9:52                                     ` Goel, Akash
  2016-03-18 10:25                                       ` [PATCH v7] " akash.goel
@ 2016-03-18 10:32                                       ` Chris Wilson
  2016-03-18 15:49                                         ` Goel, Akash
  1 sibling, 1 reply; 30+ messages in thread
From: Chris Wilson @ 2016-03-18 10:32 UTC (permalink / raw)
  To: Goel, Akash; +Cc: intel-gfx

On Fri, Mar 18, 2016 at 03:22:51PM +0530, Goel, Akash wrote:
> 
> 
> On 3/18/2016 2:52 PM, Chris Wilson wrote:
> >On Fri, Mar 18, 2016 at 02:31:23PM +0530, Goel, Akash wrote:
> >>
> >>
> >>On 3/18/2016 2:06 PM, Chris Wilson wrote:
> >>>On Fri, Mar 18, 2016 at 02:07:40PM +0530, akash.goel@intel.com wrote:
> >>>>+/* emit_store_qword
> >>>>+ * populate batch buffer with MI_STORE_DWORD_IMM command
> >>>>+ * @fd: drm file descriptor
> >>>>+ * @cmd_buf: batch buffer
> >>>>+ * @dw_offset: write offset in batch buffer
> >>>>+ * @vaddr: destination Virtual address
> >>>>+ * @data: u64 data to be stored at destination
> >>>>+ */
> >>>>+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
> >>>>+			    uint64_t vaddr, uint64_t data)
> >>>>+{
> >>>>+	/* Check that softpin addresses are in the correct form */
> >>>>+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
> >>>>+
> >>>>+	/* SDI cannot write to unaligned addresses */
> >>>>+	igt_assert((vaddr & 3) == 0);
> >>>
> >>>If I remember correctly a qword write from SDI must be 8 byte aligned.
> >>>Right?
> >>
> >>Yes right. Sorry, my bad..
> >>>
> >>>>+
> >>>>+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
> >>>>+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
> >>>>+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
> >>>>+	cmd_buf[dw_offset++] = data;
> >>>>+	cmd_buf[dw_offset++] = data >> 32;
> >>>>+
> >>>>+	return dw_offset;
> >>>>+}
> >>>
> >>>Hopefully final comments!
> >>>
> >>>Missed EINTR handling during evict, If you repeat the busy/hang tests
> >>>within the igt_fork_signal_helper(); igt_stop_signal_helper() that
> >>>should cover catching an inopportune signal.
> >>
> >>Fine will add, thanks for suggesting this
> >>So the signal will interrupt the Driver, which would be waiting for
> >>the vma unbind to complete, from the eviction path.
> >
> >Right, and we will report the error back to userspace as EINTR and
> >userspace will restart the syscall and we expect it to succeed
> >(eventually). Just useful for flushing out the error handling.
> >
> >Having just remembered how useful this might be, I just extended
> >gem_softpin for similar reasons:
> >+       igt_subtest("evict-active-interruptible") {
> >+               struct timespec start = {};
> >+               while (igt_seconds_elapsed(&start) < 20)
> >+                       test_evict_active(fd);
> >+       }
> Thanks I just tested the interruptible versions like this :-
> 
> +	igt_fork_signal_helper();
> +	igt_subtest("evict_active-interruptible")
> +		 test_evict_active();

The point about looping is to try and ensure that every possible code
path is interrupted (since we only interrupt every 2us and the code paths
tend to be shorter than that!). So we repeat the test in the vain hope
of hitting something else.

> +	igt_subtest("evict_hang-interruptible")
> +		test_evict_hang();
> +	igt_stop_signal_helper();
> 
> Actually the hanging object test implicitly exercises the
> interruption case (otherwise the test won't pass), error recovery as
> a part of GPU reset wakes up/interrupts the waiters.

But only in one spot :)
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-18 10:32                                       ` [PATCH v6] " Chris Wilson
@ 2016-03-18 15:49                                         ` Goel, Akash
  2016-03-18 16:01                                           ` Chris Wilson
  0 siblings, 1 reply; 30+ messages in thread
From: Goel, Akash @ 2016-03-18 15:49 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: akash.goel



On 3/18/2016 4:02 PM, Chris Wilson wrote:
> On Fri, Mar 18, 2016 at 03:22:51PM +0530, Goel, Akash wrote:
>>
>>
>> On 3/18/2016 2:52 PM, Chris Wilson wrote:
>>> On Fri, Mar 18, 2016 at 02:31:23PM +0530, Goel, Akash wrote:
>>>>
>>>>> Hopefully final comments!
>>>>>
>>>>> Missed EINTR handling during evict, If you repeat the busy/hang tests
>>>>> within the igt_fork_signal_helper(); igt_stop_signal_helper() that
>>>>> should cover catching an inopportune signal.
>>>>
>>>> Fine will add, thanks for suggesting this
>>>> So the signal will interrupt the Driver, which would be waiting for
>>>> the vma unbind to complete, from the eviction path.
>>>
>>> Right, and we will report the error back to userspace as EINTR and
>>> userspace will restart the syscall and we expect it to succeed
>>> (eventually). Just useful for flushing out the error handling.
>>>
>>> Having just remembered how useful this might be, I just extended
>>> gem_softpin for similar reasons:
>>> +       igt_subtest("evict-active-interruptible") {
>>> +               struct timespec start = {};
>>> +               while (igt_seconds_elapsed(&start) < 20)
>>> +                       test_evict_active(fd);
>>> +       }
>> Thanks I just tested the interruptible versions like this :-
>>
>> +	igt_fork_signal_helper();
>> +	igt_subtest("evict_active-interruptible")
>> +		 test_evict_active();
>
> The point about looping is to try and ensure that every possible code
> path is interrupted (since we only interrupt every 2us and the code paths
> tend to be shorter than that!).

Thanks, will follow the gem_softpin.c example.

I hope you meant 2ms here & not 2us, since the signal_helper_process is 
sending signals at the ~500 Hz rate.

Best regards
Akash

> So we repeat the test in the vain hope of hitting something else.
>

>> +	igt_subtest("evict_hang-interruptible")
>> +		test_evict_hang();
>> +	igt_stop_signal_helper();
>>
>> Actually the hanging object test implicitly exercises the
>> interruption case (otherwise the test won't pass), error recovery as
>> a part of GPU reset wakes up/interrupts the waiters.
>
> But only in one spot :)
> -Chris
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-18 15:49                                         ` Goel, Akash
@ 2016-03-18 16:01                                           ` Chris Wilson
  2016-03-21 10:00                                             ` Goel, Akash
  0 siblings, 1 reply; 30+ messages in thread
From: Chris Wilson @ 2016-03-18 16:01 UTC (permalink / raw)
  To: Goel, Akash; +Cc: intel-gfx

On Fri, Mar 18, 2016 at 09:19:35PM +0530, Goel, Akash wrote:
> On 3/18/2016 4:02 PM, Chris Wilson wrote:
> >The point about looping is to try and ensure that every possible code
> >path is interrupted (since we only interrupt every 2us and the code paths
> >tend to be shorter than that!).
> 
> Thanks, will follow the gem_softpin.c example.
> 
> I hope you meant 2ms here & not 2us, since the signal_helper_process
> is sending signals at the ~500 Hz rate.

Yeah, failed hopelessly. Though I thought it was a 5000Hz rate. Anyway,
the point is that the chance of a signal interrupting a critical path
anywhere other than at a wait is small, and so we want to repeat the
test a few times to increase our chances.
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-18 16:01                                           ` Chris Wilson
@ 2016-03-21 10:00                                             ` Goel, Akash
  2016-03-21 10:11                                               ` Chris Wilson
  0 siblings, 1 reply; 30+ messages in thread
From: Goel, Akash @ 2016-03-21 10:00 UTC (permalink / raw)
  To: Chris Wilson, intel-gfx; +Cc: akash.goel



On 3/18/2016 9:31 PM, Chris Wilson wrote:
> On Fri, Mar 18, 2016 at 09:19:35PM +0530, Goel, Akash wrote:
>> On 3/18/2016 4:02 PM, Chris Wilson wrote:
>>> The point about looping is to try and ensure that every possible code
>>> path is interrupted (since we only interrupt every 2us and the code paths
>>> tend to be shorter than that!).
>>
>> Thanks, will follow the gem_softpin.c example.
>>
>> I hope you meant 2ms here & not 2us, since the signal_helper_process
>> is sending signals at the ~500 Hz rate.
>
> Yeah, failed hopelessly. Though I thought it was a 5000Hz rate. Anyway,
> the point is that the chance of a signal interrupting a critical path
> anywhere other than at a wait is small, and so we want to repeat the
> test a few times to increase our chances.

Just rebased. Should I use the 'igt_interruptible' now for eviction 
tests in place of

+       igt_subtest("evict-active-interruptible") {
+               struct timespec start = {};
+               while (igt_seconds_elapsed(&start) < 20)
+                       test_evict_active(fd);
+       }


> -Chris
>
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* Re: [PATCH v6] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-21 10:00                                             ` Goel, Akash
@ 2016-03-21 10:11                                               ` Chris Wilson
  2016-03-22  8:37                                                 ` [PATCH v8] " akash.goel
  0 siblings, 1 reply; 30+ messages in thread
From: Chris Wilson @ 2016-03-21 10:11 UTC (permalink / raw)
  To: Goel, Akash; +Cc: intel-gfx

On Mon, Mar 21, 2016 at 03:30:33PM +0530, Goel, Akash wrote:
> 
> 
> On 3/18/2016 9:31 PM, Chris Wilson wrote:
> >On Fri, Mar 18, 2016 at 09:19:35PM +0530, Goel, Akash wrote:
> >>On 3/18/2016 4:02 PM, Chris Wilson wrote:
> >>>The point about looping is to try and ensure that every possible code
> >>>path is interrupted (since we only interrupt every 2us and the code paths
> >>>tend to be shorter than that!).
> >>
> >>Thanks, will follow the gem_softpin.c example.
> >>
> >>I hope you meant 2ms here & not 2us, since the signal_helper_process
> >>is sending signals at the ~500 Hz rate.
> >
> >Yeah, failed hopelessly. Though I thought it was a 5000Hz rate. Anyway,
> >the point is that the chance of a signal interrupting a critical path
> >anywhere other than at a wait is small, and so we want to repeat the
> >test a few times to increase our chances.
> 
> Just rebased. Should I use the 'igt_interruptible' now for eviction
> tests in place of
> 
> +       igt_subtest("evict-active-interruptible") {
> +               struct timespec start = {};
> +               while (igt_seconds_elapsed(&start) < 20)
> +                       test_evict_active(fd);
> +       }

Please give it a whirl. It was the outcome of our discussion, trying to
be more accurate as to when repeating an ioctl due to signal interruption
is actually worthwhile. Improvements welcome!
-Chris

-- 
Chris Wilson, Intel Open Source Technology Centre
_______________________________________________
Intel-gfx mailing list
Intel-gfx@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/intel-gfx

^ permalink raw reply	[flat|nested] 30+ messages in thread

* [PATCH v8] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-21 10:11                                               ` Chris Wilson
@ 2016-03-22  8:37                                                 ` akash.goel
  2016-10-27 15:35                                                   ` [PATCH v9] " akash.goel
  0 siblings, 1 reply; 30+ messages in thread
From: akash.goel @ 2016-03-22  8:37 UTC (permalink / raw)
  To: intel-gfx; +Cc: Akash Goel

From: Akash Goel <akash.goel@intel.com>

This patch provides the testcase to exercise the TRTT hardware.

Some platforms have an additional address translation hardware support in
form of Tiled Resource Translation Table (TR-TT) which provides an extra level
of abstraction over PPGTT.
This is useful for mapping Sparse/Tiled texture resources.

TR-TT is tightly coupled with PPGTT; a new instance of TR-TT will be required
for a new PPGTT instance, but TR-TT may not be enabled for every context.
1/16th of the 48bit PPGTT space is earmarked for the translation by TR-TT,
and which chunk to use is conveyed to HW through a register.
Any GFX address, which lies in that reserved 44 bit range will be translated
through TR-TT first and then through PPGTT to get the actual physical address.

TRTT is constructed as a 3 level tile table. Each tile is 64KB in size, which
leaves behind 44-16=28 address bits. The 28 bits are partitioned as 9+9+10, and
each level is contained within a 4KB page, hence L3 and L2 are composed of
512 64b entries and L1 is composed of 1024 32b entries.
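
For illustration (a hypothetical helper, not part of the patch), an offset
within the 44 bit segment would decompose along these lines:

	#include <stdint.h>

	/* hypothetical helper, for illustration only */
	static inline void trtt_decompose(uint64_t offset, uint32_t *l3,
					  uint32_t *l2, uint32_t *l1,
					  uint32_t *tile_off)
	{
		*l3 = (offset >> 35) & 0x1ff;	/* 9 bits -> 512 L3 entries */
		*l2 = (offset >> 26) & 0x1ff;	/* 9 bits -> 512 L2 entries */
		*l1 = (offset >> 16) & 0x3ff;	/* 10 bits -> 1024 L1 entries */
		*tile_off = offset & 0xffff;	/* low 16 bits, inside the 64KB tile */
	}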

There is a provision to keep TR-TT Tables in virtual space, where the pages of
TRTT tables will be mapped to PPGTT. This is the adopted mode, as in this mode
UMD will have a full control on TR-TT management, with bare minimum support
from KMD.
So the entries of L3 table will contain the PPGTT offset of L2 Table pages,
similarly entries of L2 table will contain the PPGTT offset of L1 Table pages.
The entries of L1 table will contain the PPGTT offset of BOs actually backing
the Sparse resources.

I915_GEM_CONTEXT_SETPARAM ioctl is used to request KMD to enable TRTT for a
certain context, a new I915_CONTEXT_PARAM_ENABLE_TRTT param has been
added to the CONTEXT_SETPARAM ioctl for that purpose.

v2:
 - Add new wrapper function __gem_context_require_param and used that
   to detect the TR-TT support
 - Use igt_main macro, rename certain function, remove extra white space,
   cleanup the code (Chris)
 - Enhance the basic subtest to exercise all possible TR-TT segment start
   locations (i.e. 16 of them) & for every iteration create a new context.

v3:
 - Get rid of some superfluous local variables (Chris)
 - Add asserts to validate whether the GFX address used in MI_STORE_DATA_IMM
   command is in canonical form & is correctly aligned or not (Chris)
 - Remove clearing of errno in has_trtt_support function (Chris)
 - Use the 48B_ADDRESS flag for batch buffer BO also (Chris)
 - Rebased.

v4:
 - Add new subtest for invalid settings.
 - Add new local function query_trtt to check the Driver state (Chris)
 - Add new helper function gem_uses_64b_ppgtt to detect 64bit PPGTT support
 - Remove local functions uses_full_ppgtt & has_softpin_support, instead use
   existing wrappers gem_has_softpin & gem_uses_64b_ppgtt (Chris).
 - Remove redundant bit masking in emit_store_xxx functions (Chris).

v5:
 - Add 2 new subtests checking the forceful eviction of active/hanging
   objects overlapping with the TR-TT segment (Chris).
 - Move gen8_canonical_addr to igt_aux as its needed by other tests also,
   which does soft pinning, and not just gem_softpin (Michel)

v6:
 - Allow each subtest to have its own private drm file instance.
 - Update the basic subtest to check each segment location for both default
   and User created contexts (Chris).
 - Reorder igt_require(softpin/64b_ppgtt/trtt) to have trtt first (Chris).
 - Update the invalid subtest to check for the improper value of Null &
   Invalid tiles.

v7:
 - Add interruptible version of the 2 eviction subtests (Chris)
 - Correct the alignment check for the MI Store qword command (Chris).

v8:
 - Use new igt_interruptible macro for the interruptible subtests, as it's
   more effective & precise.
 - Remove interruptible version of evict_hang subtest, as it doesn't provide
   any additional coverage (Chris)
 - Rebased.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Akash Goel <akash.goel@intel.com>
---
 lib/igt_aux.c          |  11 +
 lib/igt_aux.h          |   1 +
 lib/igt_gt.h           |   1 +
 lib/ioctl_wrappers.c   |  39 ++-
 lib/ioctl_wrappers.h   |   3 +
 tests/Makefile.sources |   1 +
 tests/gem_softpin.c    |  18 +-
 tests/gem_trtt.c       | 630 +++++++++++++++++++++++++++++++++++++++++++++++++
 8 files changed, 681 insertions(+), 23 deletions(-)
 create mode 100644 tests/gem_trtt.c

diff --git a/lib/igt_aux.c b/lib/igt_aux.c
index da21f10..958cc53 100644
--- a/lib/igt_aux.c
+++ b/lib/igt_aux.c
@@ -399,6 +399,17 @@ void igt_exchange_int(void *array, unsigned i, unsigned j)
 	int_arr[j] = tmp;
 }
 
+/* igt_canonical_addr
+ * Used to convert any address into canonical form, i.e. [63:48] == [47].
+ * Based on kernel's sign_extend64 implementation.
+ * @address - a virtual address
+*/
+uint64_t igt_canonical_addr(uint64_t address)
+{
+	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
+	return (__s64)(address << shift) >> shift;
+}
+
 static uint32_t
 hars_petruska_f54_1_random_unsafe(void)
 {
diff --git a/lib/igt_aux.h b/lib/igt_aux.h
index 101fad1..d1154f0 100644
--- a/lib/igt_aux.h
+++ b/lib/igt_aux.h
@@ -56,6 +56,7 @@ void igt_permute_array(void *array, unsigned size,
 void igt_progress(const char *header, uint64_t i, uint64_t total);
 void igt_print_activity(void);
 bool igt_check_boolean_env_var(const char *env_var, bool default_value);
+uint64_t igt_canonical_addr(uint64_t address);
 
 bool igt_aub_dump_enabled(void);
 
diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index dfe1cfb..28f2e2a 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -108,5 +108,6 @@ extern const struct intel_execution_engine {
 	     e__++) \
 		for_if (gem_has_ring(fd, flags__ = e__->exec_id | e__->flags))
 
+#define GEN8_HIGH_ADDRESS_BIT 47
 
 #endif /* IGT_GT_H */
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 076bce8..b7c38f9 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -892,6 +892,22 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
 	igt_assert(__gem_context_set_param(fd, p) == 0);
 }
 
+int __gem_context_require_param(int fd, uint64_t param)
+{
+	struct local_i915_gem_context_param p;
+	int ret;
+
+	p.context = 0;
+	p.param = param;
+	p.value = 0;
+	p.size = 0;
+
+	ret = igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
+	if (ret)
+		return -errno;
+	return 0;
+}
+
 /**
  * gem_context_require_param:
  * @fd: open i915 drm file descriptor
@@ -902,14 +918,7 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
  */
 void gem_context_require_param(int fd, uint64_t param)
 {
-	struct local_i915_gem_context_param p;
-
-	p.context = 0;
-	p.param = param;
-	p.value = 0;
-	p.size = 0;
-
-	igt_require(igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
+	igt_require(__gem_context_require_param(fd, param) == 0);
 }
 
 void gem_context_require_ban_period(int fd)
@@ -1073,6 +1082,20 @@ bool gem_uses_full_ppgtt(int fd)
 }
 
 /**
+ * gem_uses_64b_ppgtt:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to check whether the kernel internally uses full
+ * 64b per-process gtt to execute batches.
+ *
+ * Returns: Whether batches are run through full 64b ppgtt.
+ */
+bool gem_uses_64b_ppgtt(int fd)
+{
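+	/* gem_gtt_type() reports 3 for a full 48b ppgtt, hence the > 2 check */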
+	return gem_gtt_type(fd) > 2;
+}
+
+/**
  * gem_available_fences:
  * @fd: open i915 drm file descriptor
  *
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index d986f61..c7a2348 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -109,9 +109,11 @@ struct local_i915_gem_context_param {
 #define LOCAL_CONTEXT_PARAM_BAN_PERIOD	0x1
 #define LOCAL_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define LOCAL_CONTEXT_PARAM_GTT_SIZE	0x3
+#define LOCAL_CONTEXT_PARAM_TRTT	0x4
 	uint64_t value;
 };
 void gem_context_require_ban_period(int fd);
+int __gem_context_require_param(int fd, uint64_t param);
 void gem_context_require_param(int fd, uint64_t param);
 void gem_context_get_param(int fd, struct local_i915_gem_context_param *p);
 void gem_context_set_param(int fd, struct local_i915_gem_context_param *p);
@@ -143,6 +145,7 @@ bool gem_has_bsd2(int fd);
 int gem_gtt_type(int fd);
 bool gem_uses_ppgtt(int fd);
 bool gem_uses_full_ppgtt(int fd);
+bool gem_uses_64b_ppgtt(int fd);
 int gem_available_fences(int fd);
 uint64_t gem_available_aperture_size(int fd);
 uint64_t gem_aperture_size(int fd);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 5b119af..10e780c 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -75,6 +75,7 @@ TESTS_progs_M = \
 	gem_streaming_writes \
 	gem_tiled_blits \
 	gem_tiled_partial_pwrite_pread \
+	gem_trtt \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
 	kms_addfb_basic \
diff --git a/tests/gem_softpin.c b/tests/gem_softpin.c
index 1a9ef02..e7a796a 100644
--- a/tests/gem_softpin.c
+++ b/tests/gem_softpin.c
@@ -31,18 +31,6 @@
 #define EXEC_OBJECT_PINNED	(1<<4)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
 
-/* gen8_canonical_addr
- * Used to convert any address into canonical form, i.e. [63:48] == [47].
- * Based on kernel's sign_extend64 implementation.
- * @address - a virtual address
-*/
-#define GEN8_HIGH_ADDRESS_BIT 47
-static uint64_t gen8_canonical_addr(uint64_t address)
-{
-	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
-	return (__s64)(address << shift) >> shift;
-}
-
 static void test_invalid(int fd)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -70,7 +58,7 @@ static void test_invalid(int fd)
 
 	/* Check beyond bounds of aperture */
 	object.offset = gem_aperture_size(fd) - 4096;
-	object.offset = gen8_canonical_addr(object.offset);
+	object.offset = igt_canonical_addr(object.offset);
 	igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
 	/* Check gen8 canonical addressing */
@@ -78,7 +66,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull << GEN8_HIGH_ADDRESS_BIT;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
 
@@ -88,7 +76,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull<<32;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		object.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
diff --git a/tests/gem_trtt.c b/tests/gem_trtt.c
new file mode 100644
index 0000000..468e069
--- /dev/null
+++ b/tests/gem_trtt.c
@@ -0,0 +1,630 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Akash Goel <akash.goel@intel.com>
+ *
+ */
+
+#include "igt.h"
+
+#define BO_SIZE 4096
+#define EXEC_OBJECT_PINNED	(1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+
+/* has_trtt_support
+ * Finds if trtt hw is present
+ * @fd DRM fd
+ */
+static bool has_trtt_support(int fd)
+{
+	int ret = __gem_context_require_param(fd, LOCAL_CONTEXT_PARAM_TRTT);
+
+	return (ret == 0);
+}
+
+/* mmap_bo
+ * helper for creating a CPU mmapping of the buffer
+ * @fd - drm fd
+ * @handle - handle of the buffer to mmap
+ * @size: size of the buffer
+ */
+static void *mmap_bo(int fd, uint32_t handle, uint64_t size)
+{
+	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
+	return ptr;
+}
+
+/* emit_store_dword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u32 data to be stored at destination
+ */
+static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint32_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+
+	return dw_offset;
+}
+
+/* emit_store_qword
+ * populate batch buffer with the qword variant of the MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u64 data to be stored at destination
+ */
+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint64_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 7) == 0);
+
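+	/* ORing in 0x3 grows the DW length field: a qword write is 5 dwords, not 4 */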
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+	cmd_buf[dw_offset++] = data >> 32;
+
+	return dw_offset;
+}
+
+/* emit_bb_end
+ * populate batch buffer with MI_BATCH_BUFFER_END command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ */
+static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
+{
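+	/* align and pad so the batch occupies an even number of dwords */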
+	dw_offset = ALIGN(dw_offset, 2);
+	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
+	dw_offset++;
+
+	return dw_offset;
+}
+
+/* setup_execbuffer
+ * helper for buffer execution
+ * @execbuf - pointer to execbuffer
+ * @exec_object - pointer to exec object2 struct
+ * @ring - ring to be used
+ * @buffer_count - how many buffers to submit
+ * @batch_length - length of batch buffer
+ */
+static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
+			     struct drm_i915_gem_exec_object2 *exec_object,
+			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
+{
+	memset(execbuf, 0, sizeof(*execbuf));
+
+	execbuf->buffers_ptr = (unsigned long)exec_object;
+	execbuf->buffer_count = buffer_count;
+	execbuf->batch_len = batch_length;
+	execbuf->flags = ring;
+	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
+}
+
+#define TABLE_SIZE 0x1000
+#define TILE_SIZE 0x10000
+
+#define TRTT_SEGMENT_SIZE (1ULL << 44)
+#define PPGTT_SIZE (1ULL << 48)
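+
+/*
+ * A TR-TT segment is 1/16th of the 48b PPGTT space. With 64KB tiles,
+ * 44 - 16 = 28 address bits remain for the table walk, split as
+ * 9 (L3) + 9 (L2) + 10 (L1), so each table level fits in a 4KB page.
+ */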
+
+#define NULL_TILE_PATTERN    0xFFFFFFFF
+#define INVALID_TILE_PATTERN 0xFFFFFFFE
+
+struct local_i915_gem_context_trtt_param {
+	uint64_t segment_base_addr;
+	uint64_t l3_table_address;
+	uint32_t invd_tile_val;
+	uint32_t null_tile_val;
+};
+
+/* query_trtt
+ * Helper function to check if the TR-TT settings stored with the KMD,
+ * for a context, have the expected values (set previously).
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static void
+query_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	gem_context_get_param(fd, &ctx_param);
+
+	igt_assert_eq_u64(trtt_param.l3_table_address, l3_table_address);
+	igt_assert_eq_u64(trtt_param.segment_base_addr, segment_base_addr);
+	igt_assert_eq_u32(trtt_param.invd_tile_val, INVALID_TILE_PATTERN);
+	igt_assert_eq_u32(trtt_param.null_tile_val, NULL_TILE_PATTERN);
+}
+
+static int
+__setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	     uint64_t segment_base_addr, uint32_t null_tile_val,
+	     uint32_t invd_tile_val)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	trtt_param.null_tile_val = null_tile_val;
+	trtt_param.invd_tile_val = invd_tile_val;
+	trtt_param.l3_table_address = l3_table_address;
+	trtt_param.segment_base_addr = segment_base_addr;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	return __gem_context_set_param(fd, &ctx_param);
+}
+
+/* setup_trtt
+ * Helper function to request KMD to enable TRTT
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static int
+setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	return __setup_trtt(fd, ctx_id, l3_table_address, segment_base_addr,
+			NULL_TILE_PATTERN, INVALID_TILE_PATTERN);
+}
+
+/* bo_alloc_setup
+ * allocate bo and populate exec object
+ * @exec_object2 - pointer to exec object
+ * @bo_size - buffer size
+ * @flags - exec flags
+ * @bo_offset - pointer to the current PPGTT offset
+ */
+static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
+			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
+{
+	memset(exec_object2, 0, sizeof(*exec_object2));
+	exec_object2->handle = gem_create(fd, bo_size);
+	exec_object2->flags = flags;
+
+	if (bo_offset) {
+		exec_object2->offset = *bo_offset;
+		*bo_offset += bo_size;
+	}
+}
+
+/* busy_batch
+ * This helper function will prepare & submit a batch on the BCS ring,
+ * which will keep the ring busy for some time, long enough to submit
+ * some other work which can trigger the eviction of that batch object
+ * while it is still getting executed on the ring.
+ */
+static uint64_t busy_batch(int fd, uint32_t ctx_id)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	const int has_64bit_reloc = gen >= 8;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 object[2];
+	uint32_t *map;
+	int factor = 10;
+	int i = 0;
+
+	/* Until the kernel ABI is fixed, only default contexts can be used
+	 * on !RCS rings */
+	igt_require(ctx_id == 0);
+
+	memset(object, 0, sizeof(object));
+	object[0].handle = gem_create(fd, 1024*1024);
+	object[1].handle = gem_create(fd, 4096);
+	map = gem_mmap__cpu(fd, object[1].handle, 0, 4096, PROT_WRITE);
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	setup_execbuffer(&execbuf, object, ctx_id, I915_EXEC_BLT, 2,
+			 emit_bb_end(fd, map, 0)*4);
+	gem_execbuf(fd, &execbuf);
+
+	igt_debug("Active offsets = [%08llx, %08llx]\n",
+		  object[0].offset, object[1].offset);
+
+#define COPY_BLT_CMD		(2<<29|0x53<<22|0x6)
+#define BLT_WRITE_ALPHA		(1<<21)
+#define BLT_WRITE_RGB		(1<<20)
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	while (factor--) {
+		/* XY_SRC_COPY */
+		map[i++] = COPY_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB;
+		if (has_64bit_reloc)
+			map[i-1] += 2;
+		map[i++] = 0xcc << 16 | 1 << 25 | 1 << 24 | (4*1024);
+		map[i++] = 0;
+		map[i++] = 256 << 16 | 1024;
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+		map[i++] = 0;
+		map[i++] = 4096;
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+	}
+	i = emit_bb_end(fd, map, i);
+	munmap(map, 4096);
+
+	object[0].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE;
+	object[1].flags = EXEC_OBJECT_PINNED;
+	execbuf.batch_len = i*4;
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, object[0].handle);
+	gem_close(fd, object[1].handle);
+
+	return object[1].offset;
+}
+
+/* active object eviction test
+ * This test will force the eviction of an active object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_active(void)
+{
+	int fd;
+	uint64_t expected;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+
+	expected = busy_batch(fd, 0);
+
+	/* Determine the segment_base_addr according to the offset of active
+	 * buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, 0, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, 0, l3_offset, segment_base_addr);
+	close(fd);
+}
+
+/* hanging object eviction test
+ * This test will force the eviction of a hanging object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_hang(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+	uint64_t expected;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	ctx_id = gem_context_create(fd);
+
+	igt_hang_ctx(fd, ctx_id, I915_EXEC_RENDER, 0, (uint64_t *)&expected);
+
+	/* Determine the segment_base_addr according to the offset of hanging
+	 * buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, ctx_id, l3_offset, segment_base_addr);
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+}
+
+/* submit_trtt_context
+ * This helper function will allocate an L3 table page, 2 pages apiece
+ * for the L2/L1 tables and a couple of data buffers of 64KB in size,
+ * matching the Tile size. The 2 data buffers will be mapped to the 2 ends
+ * of the TRTT virtual space. A series of MI_STORE_DWORD_IMM commands will
+ * be added in the batch buffer to first update the TR-TT table entries and
+ * then to update the data buffers using their TR-TT VA, exercising the
+ * table programming done previously.
+ * Invoke the CONTEXT_SETPARAM ioctl to request KMD to enable TRTT.
+ * Invoke execbuffer to submit the batch buffer.
+ * Verify the value of the first DWORD in the 2 data buffers matches the
+ * data asked to be written by the GPU.
+ */
+static void submit_trtt_context(int fd, uint64_t segment_base_addr, uint32_t ctx_id)
+{
+	enum {
+		L3_TBL,
+		L2_TBL1,
+		L2_TBL2,
+		L1_TBL1,
+		L1_TBL2,
+		DATA1,
+		DATA2,
+		BATCH,
+		NUM_BUFFERS,
+	};
+
+	int ring, len = 0;
+	uint32_t *ptr;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
+	uint32_t batch_buffer[BO_SIZE];
+	uint32_t data, last_entry_offset;
+	uint64_t cur_ppgtt_off, exec_flags;
+	uint64_t first_tile_addr, last_tile_addr;
+
+	first_tile_addr = segment_base_addr;
+	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
+
+	/* To avoid conflict with the TR-TT segment */
+	cur_ppgtt_off = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+	/* first allocate Batch buffer BO */
+	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
+
+	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
+	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
+
+	/* Allocate a L3 table BO */
+	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L2 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L1 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
+	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
+
+	/* Allocate two Data buffer BOs */
+	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Add commands to update the two L3 table entries to point them to the L2 tables */
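+	/* each 4KB L3/L2 page holds 512 64b entries, so 511 indexes the last one */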
+	last_entry_offset = 511*sizeof(uint64_t);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset,
+			       exec_object2[L2_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset + last_entry_offset,
+			       exec_object2[L2_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables */
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL1].offset,
+			       exec_object2[L1_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL2].offset + last_entry_offset,
+			       exec_object2[L1_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L1 tables to point them to the data buffers */
+	last_entry_offset = 1023*sizeof(uint32_t);
+
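+	/* L1 entries are 32b tile addresses in 64KB units, hence the >> 16 */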
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL1].offset,
+			       exec_object2[DATA1].offset >> 16);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL2].offset + last_entry_offset,
+			       exec_object2[DATA2].offset >> 16);
+
+	/* Add commands to update the 2 data buffers, using their TRTT VA */
+	data = 0x12345678;
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(first_tile_addr),
+			       data);
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(last_tile_addr),
+			       data);
+
+	len = emit_bb_end(fd, batch_buffer, len);
+	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
+
+	/* Request KMD to setup the TR-TT */
+	igt_assert(setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr) == 0);
+
+	ring = I915_EXEC_RENDER;
+	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
+
+	/* submit command buffer */
+	gem_execbuf(fd, &execbuf);
+
+	/* read the 2 data buffers to check for the value written by the GPU */
+	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	gem_close(fd, exec_object2[L3_TBL].handle);
+	gem_close(fd, exec_object2[L2_TBL1].handle);
+	gem_close(fd, exec_object2[L2_TBL2].handle);
+	gem_close(fd, exec_object2[L1_TBL1].handle);
+	gem_close(fd, exec_object2[L1_TBL2].handle);
+	gem_close(fd, exec_object2[DATA1].handle);
+	gem_close(fd, exec_object2[DATA2].handle);
+	gem_close(fd, exec_object2[BATCH].handle);
+
+	/* Check if the TRTT params stored with the Driver are intact or not */
+	query_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
+}
+
+/* basic trtt test
+ * This will test the basic TR-TT functionality by doing a couple of store
+ * operations through it. Also it will exercise all possible TR-TT segment
+ * start locations (i.e. 16 of them) for both default & User created contexts.
+ */
+static void test_basic_trtt_use(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+
+	for (segment_base_addr = 0;
+	     segment_base_addr < PPGTT_SIZE;
+	     segment_base_addr += TRTT_SEGMENT_SIZE) {
+		/* In order to test the default context for all segment start
+		 * locations, need to open a new file instance on every iteration
+		 * as TRTT settings are immutable once set for a context.
+		 */
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		submit_trtt_context(fd, segment_base_addr, 0);
+
+		ctx_id = gem_context_create(fd);
+		submit_trtt_context(fd, segment_base_addr, ctx_id);
+		gem_context_destroy(fd, ctx_id);
+
+		close(fd);
+	}
+}
+
+static void test_invalid(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	ctx_id = gem_context_create(fd);
+
+	/* Check for an incorrectly aligned base location for TR-TT segment */
+	segment_base_addr = TRTT_SEGMENT_SIZE + 0x1000;
+	l3_offset = TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Correct the segment_base_addr value */
+	segment_base_addr = TRTT_SEGMENT_SIZE;
+
+	/* Check for the same/conflicting value for L3 table and TR-TT segment location */
+	l3_offset = segment_base_addr;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Check for an incorrectly aligned location for L3 table */
+	l3_offset = TILE_SIZE + 0x1000;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Correct the l3_offset value */
+	l3_offset = TILE_SIZE;
+
+	/* Check for the same value for Null & Invalid tile patterns */
+	igt_assert_eq(__setup_trtt(fd, ctx_id, l3_offset, segment_base_addr,
+				   NULL_TILE_PATTERN, NULL_TILE_PATTERN), -EINVAL);
+
+	/* Use the correct settings now */
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+	/* Check the overriding of TR-TT settings for the same context */
+	segment_base_addr += TRTT_SEGMENT_SIZE;
+	l3_offset += TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EEXIST);
+
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+}
+
+igt_main
+{
+	int fd = -1;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		igt_require(has_trtt_support(fd));
+		/* test also needs 48b PPGTT & Soft Pin support */
+		igt_require(gem_has_softpin(fd));
+		igt_require(gem_uses_64b_ppgtt(fd));
+	}
+
+	/* Each subtest will open its own private file instance to avoid
+	 * any interference. Otherwise, once TRTT is enabled for the default
+	 * context with a segment_base_addr value of 0, all the operations done
+	 * implicitly with the default context, such as quiescent_gpu, would
+	 * break, as they do not use the 48B_ADDRESS flag for their objects.
+	 */
+
+	igt_subtest("invalid")
+		test_invalid();
+
+	igt_subtest("basic")
+		test_basic_trtt_use();
+
+	igt_subtest("evict_active")
+		test_evict_active();
+
+	igt_subtest("evict_hang")
+		test_evict_hang();
+
+	igt_subtest("evict_active-interruptible")
+		igt_interruptible(true) test_evict_active();
+
+	igt_fixture
+		close(fd);
+}
+
-- 
1.9.2



* [PATCH v9] igt/gem_trtt: Exercise the TRTT hardware
  2016-03-22  8:37                                                 ` [PATCH v8] " akash.goel
@ 2016-10-27 15:35                                                   ` akash.goel
  0 siblings, 0 replies; 30+ messages in thread
From: akash.goel @ 2016-10-27 15:35 UTC (permalink / raw)
  To: intel-gfx; +Cc: Akash Goel

From: Akash Goel <akash.goel@intel.com>

This patch provides the testcase to exercise the TRTT hardware.

Some platforms have additional address translation hardware support in the
form of a Tiled Resource Translation Table (TR-TT), which provides an extra
level of abstraction over PPGTT.
This is useful for mapping Sparse/Tiled texture resources.

TR-TT is tightly coupled with PPGTT; a new instance of TR-TT will be required
for a new PPGTT instance, but TR-TT may not be enabled for every context.
1/16th of the 48bit PPGTT space is earmarked for translation by TR-TT;
which chunk to use is conveyed to HW through a register.
Any GFX address which lies in that reserved 44 bit range will be translated
through TR-TT first and then through PPGTT to get the actual physical address.

TRTT is constructed as a 3 level tile table. Each tile is 64KB in size, which
leaves 44-16=28 address bits. The 28 bits are partitioned as 9+9+10, and
each level is contained within a 4KB page; hence L3 and L2 are each composed
of 512 64b entries and L1 is composed of 1024 32b entries.
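
For illustration, an address inside the 44 bit TR-TT segment would decompose
as below (a sketch assuming the 9+9+10 split described above; these index
macros are illustrative only and not part of this patch):

  /* offset = GFX address - segment base address */
  #define TRTT_L3_INDEX(offset)  (((offset) >> 35) & 0x1ff) /* 9 bits  */
  #define TRTT_L2_INDEX(offset)  (((offset) >> 26) & 0x1ff) /* 9 bits  */
  #define TRTT_L1_INDEX(offset)  (((offset) >> 16) & 0x3ff) /* 10 bits */
  /* the low 16 bits locate the byte within the 64KB tile */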

There is a provision to keep TR-TT tables in virtual space, where the pages of
TRTT tables will be mapped to PPGTT. This is the adopted mode, as in this mode
UMD will have full control over TR-TT management, with bare minimum support
from KMD.
So the entries of L3 table will contain the PPGTT offset of L2 Table pages,
similarly entries of L2 table will contain the PPGTT offset of L1 Table pages.
The entries of L1 table will contain the PPGTT offset of BOs actually backing
the Sparse resources.

The I915_GEM_CONTEXT_SETPARAM ioctl is used to request KMD to enable TRTT for
a certain context; a new I915_CONTEXT_PARAM_ENABLE_TRTT param has been
added to the CONTEXT_SETPARAM ioctl for that purpose.
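
From userspace, enabling TR-TT for a context then boils down to something
like the following (a sketch reusing the local wrapper definitions this
patch adds to the test below):

  struct local_i915_gem_context_trtt_param trtt_param = {
  	.segment_base_addr = segment_base_addr,
  	.l3_table_address  = l3_table_address,
  	.invd_tile_val     = INVALID_TILE_PATTERN,
  	.null_tile_val     = NULL_TILE_PATTERN,
  };
  struct local_i915_gem_context_param ctx_param = {
  	.context = ctx_id,
  	.size    = sizeof(trtt_param),
  	.param   = LOCAL_CONTEXT_PARAM_TRTT,
  	.value   = (uint64_t)&trtt_param,
  };
  gem_context_set_param(fd, &ctx_param);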

v2:
 - Add new wrapper function __gem_context_require_param and used that
   to detect the TR-TT support
 - Use igt_main macro, rename certain functions, remove extra white space,
   cleanup the code (Chris)
 - Enhance the basic subtest to exercise all possible TR-TT segment start
   locations (i.e. 16 of them) & for every iteration create a new context.

v3:
 - Get rid of some superfluous local variables (Chris)
 - Add asserts to validate whether the GFX address used in MI_STORE_DATA_IMM
   command is in canonical form & is correctly aligned or not (Chris)
 - Remove clearing of errno in has_trtt_support function (Chris)
 - Use the 48B_ADDRESS flag for batch buffer BO also (Chris)
 - Rebased.

v4:
 - Add new subtest for invalid settings.
 - Add new local function query_trtt to check the Driver state (Chris)
 - Add new helper function gem_uses_64b_ppgtt to detect 64bit PPGTT support
 - Remove local functions uses_full_ppgtt & has_softpin_support, instead use
   existing wrappers gem_has_softpin & gem_uses_64b_ppgtt (Chris).
 - Remove redundant bit masking in emit_store_xxx functions (Chris).

v5:
 - Add 2 new subtests checking the forceful eviction of active/hanging
   objects overlapping with the TR-TT segment (Chris).
 - Move gen8_canonical_addr to igt_aux as it's needed by other tests that
   do soft pinning, and not just gem_softpin (Michel)

v6:
 - Allow each subtest to have their own private drm file instance.
 - Update the basic subtest to check each segment location for both default
   and User created contexts (Chris).
 - Reorder igt_require(softpin/64b_ppgtt/trtt) to have trtt first (Chris).
 - Update the invalid subtest to check for the improper value of Null &
   Invalid tiles.

v7:
 - Add interruptible version of the 2 eviction subtests (Chris)
 - Correct the alignment check for the MI Store qword command (Chris).

v8:
 - Use new igt_interruptible macro for the interruptible subtests, as it's
   more effective & precise.
 - Remove interruptible version of evict_hang subtest, as it doesn't provide
   any additional coverage (Chris)
 - Rebased.

v9:
- Rebased.

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Signed-off-by: Akash Goel <akash.goel@intel.com>
Reviewed-by: Michel Thierry <michel.thierry@intel.com> (v5)
---
 lib/igt_aux.c          |  11 +
 lib/igt_aux.h          |   1 +
 lib/igt_gt.h           |   1 +
 lib/ioctl_wrappers.c   |  39 ++-
 lib/ioctl_wrappers.h   |   3 +
 tests/Makefile.sources |   1 +
 tests/gem_softpin.c    |  18 +-
 tests/gem_trtt.c       | 630 +++++++++++++++++++++++++++++++++++++++++++++++++
 8 files changed, 681 insertions(+), 23 deletions(-)
 create mode 100644 tests/gem_trtt.c

diff --git a/lib/igt_aux.c b/lib/igt_aux.c
index 421f6d4..9cb4666 100644
--- a/lib/igt_aux.c
+++ b/lib/igt_aux.c
@@ -487,6 +487,17 @@ void igt_exchange_int(void *array, unsigned i, unsigned j)
 	int_arr[j] = tmp;
 }
 
+/* igt_canonical_addr
+ * Used to convert any address into canonical form, i.e. [63:48] == [47].
+ * Based on kernel's sign_extend64 implementation.
+ * @address - a virtual address
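+ * e.g. igt_canonical_addr(1ULL << 47) == 0xffff800000000000ULL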
+*/
+uint64_t igt_canonical_addr(uint64_t address)
+{
+	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
+	return (__s64)(address << shift) >> shift;
+}
+
 /**
  * igt_permute_array:
  * @array: pointer to array
diff --git a/lib/igt_aux.h b/lib/igt_aux.h
index d30196b..3c55152 100644
--- a/lib/igt_aux.h
+++ b/lib/igt_aux.h
@@ -106,6 +106,7 @@ void igt_permute_array(void *array, unsigned size,
 void igt_progress(const char *header, uint64_t i, uint64_t total);
 void igt_print_activity(void);
 bool igt_check_boolean_env_var(const char *env_var, bool default_value);
+uint64_t igt_canonical_addr(uint64_t address);
 
 bool igt_aub_dump_enabled(void);
 
diff --git a/lib/igt_gt.h b/lib/igt_gt.h
index 8d6c573..45f0f18 100644
--- a/lib/igt_gt.h
+++ b/lib/igt_gt.h
@@ -78,5 +78,6 @@ extern const struct intel_execution_engine {
 	     e__++) \
 		for_if (gem_has_ring(fd, flags__ = e__->exec_id | e__->flags))
 
+#define GEN8_HIGH_ADDRESS_BIT 47
 
 #endif /* IGT_GT_H */
diff --git a/lib/ioctl_wrappers.c b/lib/ioctl_wrappers.c
index 95bc5e2..763eb4d 100644
--- a/lib/ioctl_wrappers.c
+++ b/lib/ioctl_wrappers.c
@@ -938,6 +938,22 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
 	igt_assert(__gem_context_set_param(fd, p) == 0);
 }
 
+int __gem_context_require_param(int fd, uint64_t param)
+{
+	struct local_i915_gem_context_param p;
+	int ret;
+
+	p.context = 0;
+	p.param = param;
+	p.value = 0;
+	p.size = 0;
+
+	ret = igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p);
+	if (ret)
+		return -errno;
+	return 0;
+}
+
 /**
  * gem_context_require_param:
  * @fd: open i915 drm file descriptor
@@ -948,14 +964,7 @@ void gem_context_set_param(int fd, struct local_i915_gem_context_param *p)
  */
 void gem_context_require_param(int fd, uint64_t param)
 {
-	struct local_i915_gem_context_param p;
-
-	p.context = 0;
-	p.param = param;
-	p.value = 0;
-	p.size = 0;
-
-	igt_require(igt_ioctl(fd, LOCAL_IOCTL_I915_GEM_CONTEXT_GETPARAM, &p) == 0);
+	igt_require(__gem_context_require_param(fd, param) == 0);
 }
 
 void gem_context_require_ban_period(int fd)
@@ -1112,6 +1121,20 @@ bool gem_uses_full_ppgtt(int fd)
 }
 
 /**
+ * gem_uses_64b_ppgtt:
+ * @fd: open i915 drm file descriptor
+ *
+ * Feature test macro to check whether the kernel internally uses full
+ * 64b per-process gtt to execute batches.
+ *
+ * Returns: Whether batches are run through full 64b ppgtt.
+ */
+bool gem_uses_64b_ppgtt(int fd)
+{
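+	/* assumed gem_gtt_type() mapping: 0 none, 1 aliasing, 2 full, 3 full 48b */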
+	return gem_gtt_type(fd) > 2;
+}
+
+/**
  * gem_available_fences:
  * @fd: open i915 drm file descriptor
  *
diff --git a/lib/ioctl_wrappers.h b/lib/ioctl_wrappers.h
index 465f760..775c098 100644
--- a/lib/ioctl_wrappers.h
+++ b/lib/ioctl_wrappers.h
@@ -120,9 +120,11 @@ struct local_i915_gem_context_param {
 #define LOCAL_CONTEXT_PARAM_NO_ZEROMAP	0x2
 #define LOCAL_CONTEXT_PARAM_GTT_SIZE	0x3
 #define LOCAL_CONTEXT_PARAM_NO_ERROR_CAPTURE	0x4
+#define LOCAL_CONTEXT_PARAM_TRTT	0x5
 	uint64_t value;
 };
 void gem_context_require_ban_period(int fd);
+int __gem_context_require_param(int fd, uint64_t param);
 void gem_context_require_param(int fd, uint64_t param);
 void gem_context_get_param(int fd, struct local_i915_gem_context_param *p);
 void gem_context_set_param(int fd, struct local_i915_gem_context_param *p);
@@ -155,6 +157,7 @@ bool gem_has_bsd2(int fd);
 int gem_gtt_type(int fd);
 bool gem_uses_ppgtt(int fd);
 bool gem_uses_full_ppgtt(int fd);
+bool gem_uses_64b_ppgtt(int fd);
 int gem_available_fences(int fd);
 uint64_t gem_total_mappable_size(int fd);
 uint64_t gem_total_stolen_size(int fd);
diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index 6d081c3..211fb2e 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -81,6 +81,7 @@ TESTS_progs_M = \
 	gem_streaming_writes \
 	gem_tiled_blits \
 	gem_tiled_partial_pwrite_pread \
+	gem_trtt \
 	gem_userptr_blits \
 	gem_write_read_ring_switch \
 	gvt_basic \
diff --git a/tests/gem_softpin.c b/tests/gem_softpin.c
index ea162c8..bd3631f 100644
--- a/tests/gem_softpin.c
+++ b/tests/gem_softpin.c
@@ -31,18 +31,6 @@
 #define EXEC_OBJECT_PINNED	(1<<4)
 #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
 
-/* gen8_canonical_addr
- * Used to convert any address into canonical form, i.e. [63:48] == [47].
- * Based on kernel's sign_extend64 implementation.
- * @address - a virtual address
-*/
-#define GEN8_HIGH_ADDRESS_BIT 47
-static uint64_t gen8_canonical_addr(uint64_t address)
-{
-	__u8 shift = 63 - GEN8_HIGH_ADDRESS_BIT;
-	return (__s64)(address << shift) >> shift;
-}
-
 static void test_invalid(int fd)
 {
 	const uint32_t bbe = MI_BATCH_BUFFER_END;
@@ -70,7 +58,7 @@ static void test_invalid(int fd)
 
 	/* Check beyond bounds of aperture */
 	object.offset = gem_aperture_size(fd) - 4096;
-	object.offset = gen8_canonical_addr(object.offset);
+	object.offset = igt_canonical_addr(object.offset);
 	igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
 	/* Check gen8 canonical addressing */
@@ -78,7 +66,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull << GEN8_HIGH_ADDRESS_BIT;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
 
@@ -88,7 +76,7 @@ static void test_invalid(int fd)
 		object.offset = 1ull<<32;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), -EINVAL);
 
-		object.offset = gen8_canonical_addr(object.offset);
+		object.offset = igt_canonical_addr(object.offset);
 		object.flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
 		igt_assert_eq(__gem_execbuf(fd, &execbuf), 0);
 	}
diff --git a/tests/gem_trtt.c b/tests/gem_trtt.c
new file mode 100644
index 0000000..81816a1
--- /dev/null
+++ b/tests/gem_trtt.c
@@ -0,0 +1,630 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Akash Goel <akash.goel@intel.com>
+ *
+ */
+
+#include "igt.h"
+
+#define BO_SIZE 4096
+#define EXEC_OBJECT_PINNED	(1<<4)
+#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
+
+/* has_trtt_support
+ * Finds if trtt hw is present
+ * @fd DRM fd
+ */
+static bool has_trtt_support(int fd)
+{
+	int ret = __gem_context_require_param(fd, LOCAL_CONTEXT_PARAM_TRTT);
+
+	return (ret == 0);
+}
+
+/* mmap_bo
+ * helper for creating a CPU mmapping of the buffer
+ * @fd - drm fd
+ * @handle - handle of the buffer to mmap
+ * @size: size of the buffer
+ */
+static void* mmap_bo(int fd, uint32_t handle, uint64_t size)
+{
+	uint32_t *ptr = gem_mmap__cpu(fd, handle, 0, size, PROT_READ);
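+	/* wait for the GPU and pull the BO into the CPU read domain, so any writes done by the GPU are visible */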
+	gem_set_domain(fd, handle, I915_GEM_DOMAIN_CPU, 0);
+	return ptr;
+}
+
+/* emit_store_dword
+ * populate batch buffer with MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u32 data to be stored at destination
+ */
+static int emit_store_dword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint32_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 3) == 0);
+
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+
+	return dw_offset;
+}
+
+/* emit_store_qword
+ * populate batch buffer with the qword variant of the MI_STORE_DWORD_IMM command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ * @vaddr: destination Virtual address
+ * @data: u64 data to be stored at destination
+ */
+static int emit_store_qword(int fd, uint32_t *cmd_buf, uint32_t dw_offset,
+			    uint64_t vaddr, uint64_t data)
+{
+	/* Check that softpin addresses are in the correct form */
+	igt_assert_eq_u64(vaddr, igt_canonical_addr(vaddr));
+
+	/* SDI cannot write to unaligned addresses */
+	igt_assert((vaddr & 7) == 0);
+
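+	/* the extra length bits make the command carry two immediate data dwords, i.e. a qword */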
+	cmd_buf[dw_offset++] = MI_STORE_DWORD_IMM | 0x3;
+	cmd_buf[dw_offset++] = (uint32_t)vaddr;
+	cmd_buf[dw_offset++] = (uint32_t)(vaddr >> 32);
+	cmd_buf[dw_offset++] = data;
+	cmd_buf[dw_offset++] = data >> 32;
+
+	return dw_offset;
+}
+
+/* emit_bb_end
+ * populate batch buffer with MI_BATCH_BUFFER_END command
+ * @fd: drm file descriptor
+ * @cmd_buf: batch buffer
+ * @dw_offset: write offset in batch buffer
+ */
+static int emit_bb_end(int fd, uint32_t *cmd_buf, uint32_t dw_offset)
+{
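+	/* pad so that the batch, including MI_BATCH_BUFFER_END, stays qword aligned */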
+	dw_offset = ALIGN(dw_offset, 2);
+	cmd_buf[dw_offset++] = MI_BATCH_BUFFER_END;
+	dw_offset++;
+
+	return dw_offset;
+}
+
+/* setup_execbuffer
+ * helper for buffer execution
+ * @execbuf - pointer to execbuffer
+ * @exec_object - pointer to exec object2 struct
+ * @ctx_id - id of the context to submit with
+ * @ring - ring to be used
+ * @buffer_count - how many buffers to submit
+ * @batch_length - length of batch buffer
+ */
+static void setup_execbuffer(struct drm_i915_gem_execbuffer2 *execbuf,
+			     struct drm_i915_gem_exec_object2 *exec_object,
+			     uint32_t ctx_id, int ring, int buffer_count, int batch_length)
+{
+	memset(execbuf, 0, sizeof(*execbuf));
+
+	execbuf->buffers_ptr = (unsigned long)exec_object;
+	execbuf->buffer_count = buffer_count;
+	execbuf->batch_len = batch_length;
+	execbuf->flags = ring;
+	i915_execbuffer2_set_context_id(*execbuf, ctx_id);
+}
+
+#define TABLE_SIZE 0x1000
+#define TILE_SIZE 0x10000
+
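+/* A TR-TT segment covers 2^44 bytes, i.e. 1/16th of the 2^48 PPGTT space */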
+#define TRTT_SEGMENT_SIZE (1ULL << 44)
+#define PPGTT_SIZE (1ULL << 48)
+
+#define NULL_TILE_PATTERN    0xFFFFFFFF
+#define INVALID_TILE_PATTERN 0xFFFFFFFE
+
+struct local_i915_gem_context_trtt_param {
+	uint64_t segment_base_addr;
+	uint64_t l3_table_address;
+	uint32_t invd_tile_val;
+	uint32_t null_tile_val;
+};
+
+/* query_trtt
+ * Helper function to check if the TR-TT settings stored with the KMD,
+ * for a context, have the expected values (set previously).
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static void
+query_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	gem_context_get_param(fd, &ctx_param);
+
+	igt_assert_eq_u64(trtt_param.l3_table_address, l3_table_address);
+	igt_assert_eq_u64(trtt_param.segment_base_addr, segment_base_addr);
+	igt_assert_eq_u32(trtt_param.invd_tile_val, INVALID_TILE_PATTERN);
+	igt_assert_eq_u32(trtt_param.null_tile_val, NULL_TILE_PATTERN);
+}
+
+static int
+__setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	     uint64_t segment_base_addr, uint32_t null_tile_val,
+	     uint32_t invd_tile_val)
+{
+	struct local_i915_gem_context_param ctx_param;
+	struct local_i915_gem_context_trtt_param trtt_param;
+
+	trtt_param.null_tile_val = null_tile_val;
+	trtt_param.invd_tile_val = invd_tile_val;
+	trtt_param.l3_table_address = l3_table_address;
+	trtt_param.segment_base_addr = segment_base_addr;
+
+	ctx_param.context = ctx_id;
+	ctx_param.size = sizeof(trtt_param);
+	ctx_param.param = LOCAL_CONTEXT_PARAM_TRTT;
+	ctx_param.value = (uint64_t)&trtt_param;
+
+	return __gem_context_set_param(fd, &ctx_param);
+}
+
+/* setup_trtt
+ * Helper function to request KMD to enable TRTT
+ * @fd - drm fd
+ * @ctx_id - id of the context for which TRTT is to be enabled
+ * @l3_table_address - GFX address of the L3 table
+ * @segment_base_addr - offset of the TRTT segment in PPGTT space
+ */
+static int
+setup_trtt(int fd, uint32_t ctx_id, uint64_t l3_table_address,
+	   uint64_t segment_base_addr)
+{
+	return __setup_trtt(fd, ctx_id, l3_table_address, segment_base_addr,
+			NULL_TILE_PATTERN, INVALID_TILE_PATTERN);
+}
+
+/* bo_alloc_setup
+ * allocate bo and populate exec object
+ * @exec_object2 - pointer to exec object
+ * @bo_size - buffer size
+ * @flags - exec flags
+ * @bo_offset - pointer to the current PPGTT offset
+ */
+static void bo_alloc_setup(int fd, struct drm_i915_gem_exec_object2 *exec_object2,
+			   uint64_t bo_size, uint64_t flags, uint64_t *bo_offset)
+{
+	memset(exec_object2, 0, sizeof(*exec_object2));
+	exec_object2->handle = gem_create(fd, bo_size);
+	exec_object2->flags = flags;
+
+	if (bo_offset)
+	{
+		exec_object2->offset = *bo_offset;
+		*bo_offset += bo_size;
+	}
+}
+
+/* busy_batch
+ * This helper function will prepare & submit a batch on the BCS ring,
+ * which will keep the ring busy for sometime, long enough to submit
+ * some other work which can trigger the eviction of that batch object
+ * while it is still getting executed on the ring.
+ */
+static uint64_t busy_batch(int fd, uint32_t ctx_id)
+{
+	const int gen = intel_gen(intel_get_drm_devid(fd));
+	const int has_64bit_reloc = gen >= 8;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 object[2];
+	uint32_t *map;
+	int factor = 10;
+	int i = 0;
+
+	/* Until the kernel ABI is fixed, only default contexts can be used
+	 * on !RCS rings */
+	igt_require(ctx_id == 0);
+
+	memset(object, 0, sizeof(object));
+	object[0].handle = gem_create(fd, 1024*1024);
+	object[1].handle = gem_create(fd, 4096);
+	map = gem_mmap__cpu(fd, object[1].handle, 0, 4096, PROT_WRITE);
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+
+	setup_execbuffer(&execbuf, object, ctx_id, I915_EXEC_BLT, 2,
+			 emit_bb_end(fd, map, 0)*4);
+	gem_execbuf(fd, &execbuf);
+
+	igt_debug("Active offsets = [%08llx, %08llx]\n",
+		  object[0].offset, object[1].offset);
+
+#define COPY_BLT_CMD		(2<<29|0x53<<22|0x6)
+#define BLT_WRITE_ALPHA		(1<<21)
+#define BLT_WRITE_RGB		(1<<20)
+	gem_set_domain(fd, object[1].handle,
+		       I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	while (factor--) {
+		/* XY_SRC_COPY, blitting the object onto itself */
+		map[i++] = COPY_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB;
+		if (has_64bit_reloc)
+			map[i-1] += 2; /* 2 extra dwords for the 64bit addresses */
+		/* BR13: 32bpp, ROP 0xcc (src copy), dst pitch of 4096 bytes */
+		map[i++] = 0xcc << 16 | 1 << 25 | 1 << 24 | (4*1024);
+		map[i++] = 0; /* dst (x1, y1) */
+		map[i++] = 256 << 16 | 1024; /* dst (x2, y2): 1024x256 pixels */
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+		map[i++] = 0; /* src (x1, y1) */
+		map[i++] = 4096; /* src pitch */
+		map[i++] = object[0].offset;
+		if (has_64bit_reloc)
+			map[i++] = object[0].offset >> 32;
+	}
+	i = emit_bb_end(fd, map, i);
+	munmap(map, 4096);
+
+	object[0].flags = EXEC_OBJECT_PINNED | EXEC_OBJECT_WRITE;
+	object[1].flags = EXEC_OBJECT_PINNED;
+	execbuf.batch_len = i*4;
+	gem_execbuf(fd, &execbuf);
+	gem_close(fd, object[0].handle);
+	gem_close(fd, object[1].handle);
+
+	return object[1].offset;
+}
+
+/* active object eviction test
+ * This test will force the eviction of an active object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_active(void)
+{
+	int fd;
+	uint64_t expected;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+
+	expected = busy_batch(fd, 0);
+
+	/* Determine the segment_base_addr according to the offset of active
+	 * buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, 0, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, 0, l3_offset, segment_base_addr);
+	close(fd);
+}
+
+/* hanging object eviction test
+ * This test will force the eviction of a hanging object, by choosing the
+ * TR-TT segment location which will overlap with the object's location.
+ */
+static void test_evict_hang(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+	uint64_t expected;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	ctx_id = gem_context_create(fd);
+
+	igt_hang_ctx(fd, ctx_id, I915_EXEC_RENDER, 0, (uint64_t *)&expected);
+
+	/* Determine the segment_base_addr according to the offset of hanging
+	 * buffer, forcing its eviction
+	 */
+	segment_base_addr = expected & (~(TRTT_SEGMENT_SIZE - 1));
+
+	/* Keep the l3 table outside the segment to avoid the conflict */
+	l3_offset = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+
+	query_trtt(fd, ctx_id, l3_offset, segment_base_addr);
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+}
+
+/* submit_trtt_context
+ * This helper function will allocate an L3 table page, 2 pages apiece
+ * for L2/L1 tables and a couple of data buffers of 64KB in size, matching
+ * the tile size. The 2 data buffers will be mapped to the 2 ends of TRTT
+ * virtual space. A series of MI_STORE_DWORD_IMM commands will be added in
+ * the batch buffer, first to update the TR-TT table entries and then to
+ * update the data buffers using their TR-TT VA, exercising the table
+ * programming done previously.
+ * Invoke CONTEXT_SETPARAM ioctl to request KMD to enable TRTT.
+ * Invoke execbuffer to submit the batch buffer.
+ * Verify that the value of the first DWORD in the 2 data buffers matches
+ * the data asked to be written by the GPU.
+ */
+static void submit_trtt_context(int fd, uint64_t segment_base_addr, uint32_t ctx_id)
+{
+	enum {
+		L3_TBL,
+		L2_TBL1,
+		L2_TBL2,
+		L1_TBL1,
+		L1_TBL2,
+		DATA1,
+		DATA2,
+		BATCH,
+		NUM_BUFFERS,
+	};
+
+	int ring, len = 0;
+	uint32_t *ptr;
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 exec_object2[NUM_BUFFERS];
+	uint32_t batch_buffer[BO_SIZE];
+	uint32_t data, last_entry_offset;
+	uint64_t cur_ppgtt_off, exec_flags;
+	uint64_t first_tile_addr, last_tile_addr;
+
+	first_tile_addr = segment_base_addr;
+	last_tile_addr  = first_tile_addr + TRTT_SEGMENT_SIZE - TILE_SIZE;
+
+	/* To avoid conflict with the TR-TT segment */
+	cur_ppgtt_off = segment_base_addr ? 0 : TRTT_SEGMENT_SIZE;
+
+	exec_flags = EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
+
+	/* first allocate Batch buffer BO */
+	bo_alloc_setup(fd, &exec_object2[BATCH], BO_SIZE, exec_flags, NULL);
+
+	/* table BOs and data buffer BOs are written by GPU and are soft pinned */
+	exec_flags |= (EXEC_OBJECT_WRITE | EXEC_OBJECT_PINNED);
+
+	/* Allocate a L3 table BO */
+	bo_alloc_setup(fd, &exec_object2[L3_TBL], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L2 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L2_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L2_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Allocate two L1 table BOs */
+	bo_alloc_setup(fd, &exec_object2[L1_TBL1], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[L1_TBL2], TABLE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Align the PPGTT offsets for the 2 data buffers to next 64 KB boundary */
+	cur_ppgtt_off = ALIGN(cur_ppgtt_off, TILE_SIZE);
+
+	/* Allocate two Data buffer BOs */
+	bo_alloc_setup(fd, &exec_object2[DATA1], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+	bo_alloc_setup(fd, &exec_object2[DATA2], TILE_SIZE, exec_flags, &cur_ppgtt_off);
+
+	/* Add commands to update the two L3 table entries to point them to the L2 tables */
+	last_entry_offset = 511*sizeof(uint64_t);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset,
+			       exec_object2[L2_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L3_TBL].offset + last_entry_offset,
+			       exec_object2[L2_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L2 tables to point them to the L1 tables */
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL1].offset,
+			       exec_object2[L1_TBL1].offset);
+
+	len = emit_store_qword(fd, batch_buffer, len,
+			       exec_object2[L2_TBL2].offset + last_entry_offset,
+			       exec_object2[L1_TBL2].offset);
+
+	/* Add commands to update an entry of 2 L1 tables to point them to the
+	 * data buffers. L1 entries hold the tile address in 64KB units, hence
+	 * the >> 16 on the data buffers' PPGTT offsets below.
+	 */
+	last_entry_offset = 1023*sizeof(uint32_t);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL1].offset,
+			       exec_object2[DATA1].offset >> 16);
+
+	len = emit_store_dword(fd, batch_buffer, len,
+			       exec_object2[L1_TBL2].offset + last_entry_offset,
+			       exec_object2[DATA2].offset >> 16);
+
+	/* Add commands to update the 2 data buffers, using their TRTT VA */
+	data = 0x12345678;
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(first_tile_addr),
+			       data);
+	len = emit_store_dword(fd, batch_buffer, len,
+			       igt_canonical_addr(last_tile_addr),
+			       data);
+
+	len = emit_bb_end(fd, batch_buffer, len);
+	gem_write(fd, exec_object2[BATCH].handle, 0, batch_buffer, len*4);
+
+	/* Request KMD to setup the TR-TT */
+	igt_assert(setup_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr) == 0);
+
+	ring = I915_EXEC_RENDER;
+	setup_execbuffer(&execbuf, exec_object2, ctx_id, ring, NUM_BUFFERS, len*4);
+
+	/* submit command buffer */
+	gem_execbuf(fd, &execbuf);
+
+	/* read the 2 data buffers to check for the value written by the GPU */
+	ptr = mmap_bo(fd, exec_object2[DATA1].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	ptr = mmap_bo(fd, exec_object2[DATA2].handle, TILE_SIZE);
+	igt_assert_eq_u32(ptr[0], data);
+
+	gem_close(fd, exec_object2[L3_TBL].handle);
+	gem_close(fd, exec_object2[L2_TBL1].handle);
+	gem_close(fd, exec_object2[L2_TBL2].handle);
+	gem_close(fd, exec_object2[L1_TBL1].handle);
+	gem_close(fd, exec_object2[L1_TBL2].handle);
+	gem_close(fd, exec_object2[DATA1].handle);
+	gem_close(fd, exec_object2[DATA2].handle);
+	gem_close(fd, exec_object2[BATCH].handle);
+
+	/* Check if the TRTT params stored with the Driver are intact or not */
+	query_trtt(fd, ctx_id, exec_object2[L3_TBL].offset, first_tile_addr);
+}
+
+/* basic trtt test
+ * This will test the basic TR-TT functionality by doing a couple of store
+ * operations through it. It will also exercise all possible TR-TT segment
+ * start locations (i.e. 16 of them) for both default & User created contexts.
+ */
+static void test_basic_trtt_use(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+
+	for (segment_base_addr = 0;
+	     segment_base_addr < PPGTT_SIZE;
+	     segment_base_addr += TRTT_SEGMENT_SIZE)
+	{
+		/* In order to test the default context for all segment start
+		 * locations, we need to open a new file instance on every
+		 * iteration, as TRTT settings are immutable once set for a context.
+		 */
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		submit_trtt_context(fd, segment_base_addr, 0);
+
+		ctx_id = gem_context_create(fd);
+		submit_trtt_context(fd, segment_base_addr, ctx_id);
+		gem_context_destroy(fd, ctx_id);
+
+		close(fd);
+	}
+}
+
+static void test_invalid(void)
+{
+	int fd;
+	uint32_t ctx_id;
+	uint64_t segment_base_addr;
+	uint64_t l3_offset;
+
+	fd = drm_open_driver(DRIVER_INTEL);
+	ctx_id = gem_context_create(fd);
+
+	/* Check for an incorrectly aligned base location for TR-TT segment */
+	segment_base_addr = TRTT_SEGMENT_SIZE + 0x1000;
+	l3_offset = TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Correct the segment_base_addr value */
+	segment_base_addr = TRTT_SEGMENT_SIZE;
+
+	/* Check for the same/conflicting value for L3 table and TR-TT segment location */
+	l3_offset = segment_base_addr;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Check for an incorrectly aligned location for L3 table */
+	l3_offset = TILE_SIZE + 0x1000;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EINVAL);
+
+	/* Correct the l3_offset value */
+	l3_offset = TILE_SIZE;
+
+	/* Check for the same value for Null & Invalid tile patterns */
+	igt_assert_eq(__setup_trtt(fd, ctx_id, l3_offset, segment_base_addr,
+				   NULL_TILE_PATTERN, NULL_TILE_PATTERN), -EINVAL);
+
+	/* Use the correct settings now */
+	igt_assert(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr) == 0);
+	/* Check the overriding of TR-TT settings for the same context */
+	segment_base_addr += TRTT_SEGMENT_SIZE;
+	l3_offset += TILE_SIZE;
+	igt_assert_eq(setup_trtt(fd, ctx_id, l3_offset, segment_base_addr), -EEXIST);
+
+	gem_context_destroy(fd, ctx_id);
+	close(fd);
+}
+
+igt_main
+{
+	int fd = -1;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+
+		igt_require(has_trtt_support(fd));
+		/* test also needs 48b PPGTT & Soft Pin support */
+		igt_require(gem_has_softpin(fd));
+		igt_require(gem_uses_64b_ppgtt(fd));
+	}
+
+	/* Each subtest will open its own private file instance to avoid
+	 * any interference. Otherwise, once TRTT is enabled for the default
+	 * context with a segment_base_addr value of 0, all the operations done
+	 * implicitly with the default context, such as quiescent_gpu, would
+	 * break, as they do not use the 48B_ADDRESS flag for their objects.
+	 */
+
+	igt_subtest("invalid")
+		test_invalid();
+
+	igt_subtest("basic")
+		test_basic_trtt_use();
+
+	igt_subtest("evict_active")
+		test_evict_active();
+
+	igt_subtest("evict_hang")
+		test_evict_hang();
+
+	igt_subtest("evict_active-interruptible")
+		igt_while_interruptible(true) test_evict_active();
+
+	igt_fixture
+		close(fd);
+}
+
-- 
1.9.2



