All of lore.kernel.org
 help / color / mirror / Atom feed
* [igt-dev] [PATCH i-g-t v6 0/2] intel: per context powergating tests & benchmark
@ 2018-05-25 15:26 Lionel Landwerlin
  2018-05-25 15:26 ` [igt-dev] [PATCH i-g-t v6 1/2] headers: bump Lionel Landwerlin
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Lionel Landwerlin @ 2018-05-25 15:26 UTC (permalink / raw)
  To: igt-dev

Hi,

Updating following Chris' request to drop the drm_intel APIs. Also
dropping a silly test that Kelvin pointed out.

Thanks all for your reviews,

Lionel Landwerlin (2):
  headers: bump
  tests: add slice power programming test

 include/drm-uapi/amdgpu_drm.h  |  23 +
 include/drm-uapi/drm.h         |   7 +
 include/drm-uapi/drm_mode.h    |  22 +-
 include/drm-uapi/etnaviv_drm.h |   6 +
 include/drm-uapi/exynos_drm.h  | 240 +++++++++
 include/drm-uapi/i915_drm.h    |  43 ++
 include/drm-uapi/msm_drm.h     |   2 +
 include/drm-uapi/tegra_drm.h   | 492 +++++++++++++++++-
 include/drm-uapi/vc4_drm.h     |  13 +-
 include/drm-uapi/virtgpu_drm.h |   1 +
 tests/Makefile.sources         |   1 +
 tests/gem_ctx_param.c          |   4 +-
 tests/gem_ctx_sseu.c           | 881 +++++++++++++++++++++++++++++++++
 tests/meson.build              |   1 +
 14 files changed, 1719 insertions(+), 17 deletions(-)
 create mode 100644 tests/gem_ctx_sseu.c

--
2.17.0
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [igt-dev] [PATCH i-g-t v6 1/2] headers: bump
  2018-05-25 15:26 [igt-dev] [PATCH i-g-t v6 0/2] intel: per context powergating tests & benchmark Lionel Landwerlin
@ 2018-05-25 15:26 ` Lionel Landwerlin
  2018-05-25 15:26 ` [igt-dev] [PATCH i-g-t v6 2/2] tests: add slice power programming test Lionel Landwerlin
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 7+ messages in thread
From: Lionel Landwerlin @ 2018-05-25 15:26 UTC (permalink / raw)
  To: igt-dev

---
 include/drm-uapi/amdgpu_drm.h  |  23 ++
 include/drm-uapi/drm.h         |   7 +
 include/drm-uapi/drm_mode.h    |  22 +-
 include/drm-uapi/etnaviv_drm.h |   6 +
 include/drm-uapi/exynos_drm.h  | 240 ++++++++++++++++
 include/drm-uapi/i915_drm.h    |  43 +++
 include/drm-uapi/msm_drm.h     |   2 +
 include/drm-uapi/tegra_drm.h   | 492 ++++++++++++++++++++++++++++++++-
 include/drm-uapi/vc4_drm.h     |  13 +-
 include/drm-uapi/virtgpu_drm.h |   1 +
 10 files changed, 833 insertions(+), 16 deletions(-)

diff --git a/include/drm-uapi/amdgpu_drm.h b/include/drm-uapi/amdgpu_drm.h
index 1816bd82..78b4dd89 100644
--- a/include/drm-uapi/amdgpu_drm.h
+++ b/include/drm-uapi/amdgpu_drm.h
@@ -78,6 +78,12 @@ extern "C" {
 #define AMDGPU_GEM_DOMAIN_GDS		0x8
 #define AMDGPU_GEM_DOMAIN_GWS		0x10
 #define AMDGPU_GEM_DOMAIN_OA		0x20
+#define AMDGPU_GEM_DOMAIN_MASK		(AMDGPU_GEM_DOMAIN_CPU | \
+					 AMDGPU_GEM_DOMAIN_GTT | \
+					 AMDGPU_GEM_DOMAIN_VRAM | \
+					 AMDGPU_GEM_DOMAIN_GDS | \
+					 AMDGPU_GEM_DOMAIN_GWS | \
+					 AMDGPU_GEM_DOMAIN_OA)
 
 /* Flag that CPU access will be required for the case of VRAM domain */
 #define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED	(1 << 0)
@@ -95,6 +101,10 @@ extern "C" {
 #define AMDGPU_GEM_CREATE_VM_ALWAYS_VALID	(1 << 6)
 /* Flag that BO sharing will be explicitly synchronized */
 #define AMDGPU_GEM_CREATE_EXPLICIT_SYNC		(1 << 7)
+/* Flag that indicates allocating MQD gart on GFX9, where the mtype
+ * for the second page onward should be set to NC.
+ */
+#define AMDGPU_GEM_CREATE_MQD_GFX9		(1 << 8)
 
 struct drm_amdgpu_gem_create_in  {
 	/** the requested memory size */
@@ -520,6 +530,10 @@ union drm_amdgpu_cs {
 /* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
 #define AMDGPU_IB_FLAG_PREEMPT (1<<2)
 
+/* The IB fence should do the L2 writeback but not invalidate any shader
+ * caches (L2/vL1/sL1/I$). */
+#define AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE (1 << 3)
+
 struct drm_amdgpu_cs_chunk_ib {
 	__u32 _pad;
 	/** AMDGPU_IB_FLAG_* */
@@ -618,6 +632,14 @@ struct drm_amdgpu_cs_chunk_data {
 	#define AMDGPU_INFO_FW_SOS		0x0c
 	/* Subquery id: Query PSP ASD firmware version */
 	#define AMDGPU_INFO_FW_ASD		0x0d
+	/* Subquery id: Query VCN firmware version */
+	#define AMDGPU_INFO_FW_VCN		0x0e
+	/* Subquery id: Query GFX RLC SRLC firmware version */
+	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_CNTL 0x0f
+	/* Subquery id: Query GFX RLC SRLG firmware version */
+	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_GPM_MEM 0x10
+	/* Subquery id: Query GFX RLC SRLS firmware version */
+	#define AMDGPU_INFO_FW_GFX_RLC_RESTORE_LIST_SRM_MEM 0x11
 /* number of bytes moved for TTM migration */
 #define AMDGPU_INFO_NUM_BYTES_MOVED		0x0f
 /* the used VRAM size */
@@ -806,6 +828,7 @@ struct drm_amdgpu_info_firmware {
 #define AMDGPU_VRAM_TYPE_GDDR5 5
 #define AMDGPU_VRAM_TYPE_HBM   6
 #define AMDGPU_VRAM_TYPE_DDR3  7
+#define AMDGPU_VRAM_TYPE_DDR4  8
 
 struct drm_amdgpu_info_device {
 	/** PCI Device ID */
diff --git a/include/drm-uapi/drm.h b/include/drm-uapi/drm.h
index f0bd91de..778a97fc 100644
--- a/include/drm-uapi/drm.h
+++ b/include/drm-uapi/drm.h
@@ -674,6 +674,13 @@ struct drm_get_cap {
  */
 #define DRM_CLIENT_CAP_ATOMIC	3
 
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO    4
+
 /** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
 struct drm_set_client_cap {
 	__u64 capability;
diff --git a/include/drm-uapi/drm_mode.h b/include/drm-uapi/drm_mode.h
index 2c575794..971c016b 100644
--- a/include/drm-uapi/drm_mode.h
+++ b/include/drm-uapi/drm_mode.h
@@ -93,6 +93,15 @@ extern "C" {
 #define DRM_MODE_PICTURE_ASPECT_NONE		0
 #define DRM_MODE_PICTURE_ASPECT_4_3		1
 #define DRM_MODE_PICTURE_ASPECT_16_9		2
+#define DRM_MODE_PICTURE_ASPECT_64_27		3
+#define DRM_MODE_PICTURE_ASPECT_256_135		4
+
+/* Content type options */
+#define DRM_MODE_CONTENT_TYPE_NO_DATA		0
+#define DRM_MODE_CONTENT_TYPE_GRAPHICS		1
+#define DRM_MODE_CONTENT_TYPE_PHOTO		2
+#define DRM_MODE_CONTENT_TYPE_CINEMA		3
+#define DRM_MODE_CONTENT_TYPE_GAME		4
 
 /* Aspect ratio flag bitmask (4 bits 22:19) */
 #define DRM_MODE_FLAG_PIC_AR_MASK		(0x0F<<19)
@@ -102,6 +111,10 @@ extern "C" {
 			(DRM_MODE_PICTURE_ASPECT_4_3<<19)
 #define  DRM_MODE_FLAG_PIC_AR_16_9 \
 			(DRM_MODE_PICTURE_ASPECT_16_9<<19)
+#define  DRM_MODE_FLAG_PIC_AR_64_27 \
+			(DRM_MODE_PICTURE_ASPECT_64_27<<19)
+#define  DRM_MODE_FLAG_PIC_AR_256_135 \
+			(DRM_MODE_PICTURE_ASPECT_256_135<<19)
 
 #define  DRM_MODE_FLAG_ALL	(DRM_MODE_FLAG_PHSYNC |		\
 				 DRM_MODE_FLAG_NHSYNC |		\
@@ -363,7 +376,7 @@ struct drm_mode_get_connector {
 	__u32 pad;
 };
 
-#define DRM_MODE_PROP_PENDING	(1<<0)
+#define DRM_MODE_PROP_PENDING	(1<<0) /* deprecated, do not use */
 #define DRM_MODE_PROP_RANGE	(1<<1)
 #define DRM_MODE_PROP_IMMUTABLE	(1<<2)
 #define DRM_MODE_PROP_ENUM	(1<<3) /* enumerated type with text strings */
@@ -598,8 +611,11 @@ struct drm_mode_crtc_lut {
 };
 
 struct drm_color_ctm {
-	/* Conversion matrix in S31.32 format. */
-	__s64 matrix[9];
+	/*
+	 * Conversion matrix in S31.32 sign-magnitude
+	 * (not two's complement!) format.
+	 */
+	__u64 matrix[9];
 };
 
 struct drm_color_lut {
diff --git a/include/drm-uapi/etnaviv_drm.h b/include/drm-uapi/etnaviv_drm.h
index e9b997a0..0d5c49dc 100644
--- a/include/drm-uapi/etnaviv_drm.h
+++ b/include/drm-uapi/etnaviv_drm.h
@@ -55,6 +55,12 @@ struct drm_etnaviv_timespec {
 #define ETNAVIV_PARAM_GPU_FEATURES_4                0x07
 #define ETNAVIV_PARAM_GPU_FEATURES_5                0x08
 #define ETNAVIV_PARAM_GPU_FEATURES_6                0x09
+#define ETNAVIV_PARAM_GPU_FEATURES_7                0x0a
+#define ETNAVIV_PARAM_GPU_FEATURES_8                0x0b
+#define ETNAVIV_PARAM_GPU_FEATURES_9                0x0c
+#define ETNAVIV_PARAM_GPU_FEATURES_10               0x0d
+#define ETNAVIV_PARAM_GPU_FEATURES_11               0x0e
+#define ETNAVIV_PARAM_GPU_FEATURES_12               0x0f
 
 #define ETNAVIV_PARAM_GPU_STREAM_COUNT              0x10
 #define ETNAVIV_PARAM_GPU_REGISTER_MAX              0x11
diff --git a/include/drm-uapi/exynos_drm.h b/include/drm-uapi/exynos_drm.h
index a00116b5..7414cfd7 100644
--- a/include/drm-uapi/exynos_drm.h
+++ b/include/drm-uapi/exynos_drm.h
@@ -135,6 +135,219 @@ struct drm_exynos_g2d_exec {
 	__u64					async;
 };
 
+/* Exynos DRM IPP v2 API */
+
+/**
+ * Enumerate available IPP hardware modules.
+ *
+ * @count_ipps: size of ipp_id array / number of ipp modules (set by driver)
+ * @reserved: padding
+ * @ipp_id_ptr: pointer to ipp_id array or NULL
+ */
+struct drm_exynos_ioctl_ipp_get_res {
+	__u32 count_ipps;
+	__u32 reserved;
+	__u64 ipp_id_ptr;
+};
+
+enum drm_exynos_ipp_format_type {
+	DRM_EXYNOS_IPP_FORMAT_SOURCE		= 0x01,
+	DRM_EXYNOS_IPP_FORMAT_DESTINATION	= 0x02,
+};
+
+struct drm_exynos_ipp_format {
+	__u32 fourcc;
+	__u32 type;
+	__u64 modifier;
+};
+
+enum drm_exynos_ipp_capability {
+	DRM_EXYNOS_IPP_CAP_CROP		= 0x01,
+	DRM_EXYNOS_IPP_CAP_ROTATE	= 0x02,
+	DRM_EXYNOS_IPP_CAP_SCALE	= 0x04,
+	DRM_EXYNOS_IPP_CAP_CONVERT	= 0x08,
+};
+
+/**
+ * Get IPP hardware capabilities and supported image formats.
+ *
+ * @ipp_id: id of IPP module to query
+ * @capabilities: bitmask of drm_exynos_ipp_capability (set by driver)
+ * @reserved: padding
+ * @formats_count: size of formats array (in entries) / number of filled
+ *		   formats (set by driver)
+ * @formats_ptr: pointer to formats array or NULL
+ */
+struct drm_exynos_ioctl_ipp_get_caps {
+	__u32 ipp_id;
+	__u32 capabilities;
+	__u32 reserved;
+	__u32 formats_count;
+	__u64 formats_ptr;
+};
+
+enum drm_exynos_ipp_limit_type {
+	/* size (horizontal/vertical) limits, in pixels (min, max, alignment) */
+	DRM_EXYNOS_IPP_LIMIT_TYPE_SIZE		= 0x0001,
+	/* scale ratio (horizontal/vertical), 16.16 fixed point (min, max) */
+	DRM_EXYNOS_IPP_LIMIT_TYPE_SCALE		= 0x0002,
+
+	/* image buffer area */
+	DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER	= 0x0001 << 16,
+	/* src/dst rectangle area */
+	DRM_EXYNOS_IPP_LIMIT_SIZE_AREA		= 0x0002 << 16,
+	/* src/dst rectangle area when rotation enabled */
+	DRM_EXYNOS_IPP_LIMIT_SIZE_ROTATED	= 0x0003 << 16,
+
+	DRM_EXYNOS_IPP_LIMIT_TYPE_MASK		= 0x000f,
+	DRM_EXYNOS_IPP_LIMIT_SIZE_MASK		= 0x000f << 16,
+};
+
+struct drm_exynos_ipp_limit_val {
+	__u32 min;
+	__u32 max;
+	__u32 align;
+	__u32 reserved;
+};
+
+/**
+ * IPP module limitation.
+ *
+ * @type: limit type (see drm_exynos_ipp_limit_type enum)
+ * @reserved: padding
+ * @h: horizontal limits
+ * @v: vertical limits
+ */
+struct drm_exynos_ipp_limit {
+	__u32 type;
+	__u32 reserved;
+	struct drm_exynos_ipp_limit_val h;
+	struct drm_exynos_ipp_limit_val v;
+};
+
+/**
+ * Get IPP limits for given image format.
+ *
+ * @ipp_id: id of IPP module to query
+ * @fourcc: image format code (see DRM_FORMAT_* in drm_fourcc.h)
+ * @modifier: image format modifier (see DRM_FORMAT_MOD_* in drm_fourcc.h)
+ * @type: source/destination identifier (drm_exynos_ipp_format_type enum)
+ * @limits_count: size of limits array (in entries) / number of filled entries
+ *		 (set by driver)
+ * @limits_ptr: pointer to limits array or NULL
+ */
+struct drm_exynos_ioctl_ipp_get_limits {
+	__u32 ipp_id;
+	__u32 fourcc;
+	__u64 modifier;
+	__u32 type;
+	__u32 limits_count;
+	__u64 limits_ptr;
+};
+
+enum drm_exynos_ipp_task_id {
+	/* buffer described by struct drm_exynos_ipp_task_buffer */
+	DRM_EXYNOS_IPP_TASK_BUFFER		= 0x0001,
+	/* rectangle described by struct drm_exynos_ipp_task_rect */
+	DRM_EXYNOS_IPP_TASK_RECTANGLE		= 0x0002,
+	/* transformation described by struct drm_exynos_ipp_task_transform */
+	DRM_EXYNOS_IPP_TASK_TRANSFORM		= 0x0003,
+	/* alpha configuration described by struct drm_exynos_ipp_task_alpha */
+	DRM_EXYNOS_IPP_TASK_ALPHA		= 0x0004,
+
+	/* source image data (for buffer and rectangle chunks) */
+	DRM_EXYNOS_IPP_TASK_TYPE_SOURCE		= 0x0001 << 16,
+	/* destination image data (for buffer and rectangle chunks) */
+	DRM_EXYNOS_IPP_TASK_TYPE_DESTINATION	= 0x0002 << 16,
+};
+
+/**
+ * Memory buffer with image data.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_BUFFER
+ * other parameters are same as for AddFB2 generic DRM ioctl
+ */
+struct drm_exynos_ipp_task_buffer {
+	__u32	id;
+	__u32	fourcc;
+	__u32	width, height;
+	__u32	gem_id[4];
+	__u32	offset[4];
+	__u32	pitch[4];
+	__u64	modifier;
+};
+
+/**
+ * Rectangle for processing.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_RECTANGLE
+ * @reserved: padding
+ * @x,@y: left corner in pixels
+ * @w,@h: width/height in pixels
+ */
+struct drm_exynos_ipp_task_rect {
+	__u32	id;
+	__u32	reserved;
+	__u32	x;
+	__u32	y;
+	__u32	w;
+	__u32	h;
+};
+
+/**
+ * Image transformation description.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_TRANSFORM
+ * @rotation: DRM_MODE_ROTATE_* and DRM_MODE_REFLECT_* values
+ */
+struct drm_exynos_ipp_task_transform {
+	__u32	id;
+	__u32	rotation;
+};
+
+/**
+ * Image global alpha configuration for formats without alpha values.
+ *
+ * @id: must be DRM_EXYNOS_IPP_TASK_ALPHA
+ * @value: global alpha value (0-255)
+ */
+struct drm_exynos_ipp_task_alpha {
+	__u32	id;
+	__u32	value;
+};
+
+enum drm_exynos_ipp_flag {
+	/* generate DRM event after processing */
+	DRM_EXYNOS_IPP_FLAG_EVENT	= 0x01,
+	/* dry run, only check task parameters */
+	DRM_EXYNOS_IPP_FLAG_TEST_ONLY	= 0x02,
+	/* non-blocking processing */
+	DRM_EXYNOS_IPP_FLAG_NONBLOCK	= 0x04,
+};
+
+#define DRM_EXYNOS_IPP_FLAGS (DRM_EXYNOS_IPP_FLAG_EVENT |\
+		DRM_EXYNOS_IPP_FLAG_TEST_ONLY | DRM_EXYNOS_IPP_FLAG_NONBLOCK)
+
+/**
+ * Perform image processing described by array of drm_exynos_ipp_task_*
+ * structures (parameters array).
+ *
+ * @ipp_id: id of IPP module to run the task
+ * @flags: bitmask of drm_exynos_ipp_flag values
+ * @reserved: padding
+ * @params_size: size of parameters array (in bytes)
+ * @params_ptr: pointer to parameters array or NULL
+ * @user_data: (optional) data for drm event
+ */
+struct drm_exynos_ioctl_ipp_commit {
+	__u32 ipp_id;
+	__u32 flags;
+	__u32 reserved;
+	__u32 params_size;
+	__u64 params_ptr;
+	__u64 user_data;
+};
+
 #define DRM_EXYNOS_GEM_CREATE		0x00
 #define DRM_EXYNOS_GEM_MAP		0x01
 /* Reserved 0x03 ~ 0x05 for exynos specific gem ioctl */
@@ -147,6 +360,11 @@ struct drm_exynos_g2d_exec {
 #define DRM_EXYNOS_G2D_EXEC		0x22
 
 /* Reserved 0x30 ~ 0x33 for obsolete Exynos IPP ioctls */
+/* IPP - Image Post Processing */
+#define DRM_EXYNOS_IPP_GET_RESOURCES	0x40
+#define DRM_EXYNOS_IPP_GET_CAPS		0x41
+#define DRM_EXYNOS_IPP_GET_LIMITS	0x42
+#define DRM_EXYNOS_IPP_COMMIT		0x43
 
 #define DRM_IOCTL_EXYNOS_GEM_CREATE		DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_GEM_CREATE, struct drm_exynos_gem_create)
@@ -165,8 +383,20 @@ struct drm_exynos_g2d_exec {
 #define DRM_IOCTL_EXYNOS_G2D_EXEC		DRM_IOWR(DRM_COMMAND_BASE + \
 		DRM_EXYNOS_G2D_EXEC, struct drm_exynos_g2d_exec)
 
+#define DRM_IOCTL_EXYNOS_IPP_GET_RESOURCES	DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_GET_RESOURCES, \
+		struct drm_exynos_ioctl_ipp_get_res)
+#define DRM_IOCTL_EXYNOS_IPP_GET_CAPS		DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_GET_CAPS, struct drm_exynos_ioctl_ipp_get_caps)
+#define DRM_IOCTL_EXYNOS_IPP_GET_LIMITS		DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_GET_LIMITS, \
+		struct drm_exynos_ioctl_ipp_get_limits)
+#define DRM_IOCTL_EXYNOS_IPP_COMMIT		DRM_IOWR(DRM_COMMAND_BASE + \
+		DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
+
 /* EXYNOS specific events */
 #define DRM_EXYNOS_G2D_EVENT		0x80000000
+#define DRM_EXYNOS_IPP_EVENT		0x80000002
 
 struct drm_exynos_g2d_event {
 	struct drm_event	base;
@@ -177,6 +407,16 @@ struct drm_exynos_g2d_event {
 	__u32			reserved;
 };
 
+struct drm_exynos_ipp_event {
+	struct drm_event	base;
+	__u64			user_data;
+	__u32			tv_sec;
+	__u32			tv_usec;
+	__u32			ipp_id;
+	__u32			sequence;
+	__u64			reserved;
+};
+
 #if defined(__cplusplus)
 }
 #endif
diff --git a/include/drm-uapi/i915_drm.h b/include/drm-uapi/i915_drm.h
index 16e452aa..ab80759a 100644
--- a/include/drm-uapi/i915_drm.h
+++ b/include/drm-uapi/i915_drm.h
@@ -1456,9 +1456,52 @@ struct drm_i915_gem_context_param {
 #define   I915_CONTEXT_MAX_USER_PRIORITY	1023 /* inclusive */
 #define   I915_CONTEXT_DEFAULT_PRIORITY		0
 #define   I915_CONTEXT_MIN_USER_PRIORITY	-1023 /* inclusive */
+	/*
+	 * When using the following param, value should be a pointer to
+	 * drm_i915_gem_context_param_sseu.
+	 */
+#define I915_CONTEXT_PARAM_SSEU		0x7
 	__u64 value;
 };
 
+struct drm_i915_gem_context_param_sseu {
+	/*
+	 * Engine class & instance to be configured or queried.
+	 */
+	__u16 class;
+	__u16 instance;
+
+	/*
+	 * Unused for now. Must be cleared to zero.
+	 */
+	__u32 rsvd1;
+
+	/*
+	 * Mask of slices to enable for the context. Valid values are a subset
+	 * of the bitmask value returned for I915_PARAM_SLICE_MASK.
+	 */
+	__u64 slice_mask;
+
+	/*
+	 * Mask of subslices to enable for the context. Valid values are a
+	 * subset of the bitmask value return by I915_PARAM_SUBSLICE_MASK.
+	 */
+	__u64 subslice_mask;
+
+	/*
+	 * Minimum/Maximum number of EUs to enable per subslice for the
+	 * context. min_eus_per_subslice must be less than or equal to
+	 * max_eus_per_subslice.
+	 */
+	__u16 min_eus_per_subslice;
+	__u16 max_eus_per_subslice;
+
+	/*
+	 * Unused for now. Must be cleared to zero.
+	 */
+	__u32 rsvd2;
+};
+
 enum drm_i915_oa_format {
 	I915_OA_FORMAT_A13 = 1,	    /* HSW only */
 	I915_OA_FORMAT_A29,	    /* HSW only */
diff --git a/include/drm-uapi/msm_drm.h b/include/drm-uapi/msm_drm.h
index bbbaffad..c06d0a5b 100644
--- a/include/drm-uapi/msm_drm.h
+++ b/include/drm-uapi/msm_drm.h
@@ -201,10 +201,12 @@ struct drm_msm_gem_submit_bo {
 #define MSM_SUBMIT_NO_IMPLICIT   0x80000000 /* disable implicit sync */
 #define MSM_SUBMIT_FENCE_FD_IN   0x40000000 /* enable input fence_fd */
 #define MSM_SUBMIT_FENCE_FD_OUT  0x20000000 /* enable output fence_fd */
+#define MSM_SUBMIT_SUDO          0x10000000 /* run submitted cmds from RB */
 #define MSM_SUBMIT_FLAGS                ( \
 		MSM_SUBMIT_NO_IMPLICIT   | \
 		MSM_SUBMIT_FENCE_FD_IN   | \
 		MSM_SUBMIT_FENCE_FD_OUT  | \
+		MSM_SUBMIT_SUDO          | \
 		0)
 
 /* Each cmdstream submit consists of a table of buffers involved, and
diff --git a/include/drm-uapi/tegra_drm.h b/include/drm-uapi/tegra_drm.h
index 12f9bf84..6c07919c 100644
--- a/include/drm-uapi/tegra_drm.h
+++ b/include/drm-uapi/tegra_drm.h
@@ -32,143 +32,615 @@ extern "C" {
 #define DRM_TEGRA_GEM_CREATE_TILED     (1 << 0)
 #define DRM_TEGRA_GEM_CREATE_BOTTOM_UP (1 << 1)
 
+/**
+ * struct drm_tegra_gem_create - parameters for the GEM object creation IOCTL
+ */
 struct drm_tegra_gem_create {
+	/**
+	 * @size:
+	 *
+	 * The size, in bytes, of the buffer object to be created.
+	 */
 	__u64 size;
+
+	/**
+	 * @flags:
+	 *
+	 * A bitmask of flags that influence the creation of GEM objects:
+	 *
+	 * DRM_TEGRA_GEM_CREATE_TILED
+	 *   Use the 16x16 tiling format for this buffer.
+	 *
+	 * DRM_TEGRA_GEM_CREATE_BOTTOM_UP
+	 *   The buffer has a bottom-up layout.
+	 */
 	__u32 flags;
+
+	/**
+	 * @handle:
+	 *
+	 * The handle of the created GEM object. Set by the kernel upon
+	 * successful completion of the IOCTL.
+	 */
 	__u32 handle;
 };
 
+/**
+ * struct drm_tegra_gem_mmap - parameters for the GEM mmap IOCTL
+ */
 struct drm_tegra_gem_mmap {
+	/**
+	 * @handle:
+	 *
+	 * Handle of the GEM object to obtain an mmap offset for.
+	 */
 	__u32 handle;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
+
+	/**
+	 * @offset:
+	 *
+	 * The mmap offset for the given GEM object. Set by the kernel upon
+	 * successful completion of the IOCTL.
+	 */
 	__u64 offset;
 };
 
+/**
+ * struct drm_tegra_syncpt_read - parameters for the read syncpoint IOCTL
+ */
 struct drm_tegra_syncpt_read {
+	/**
+	 * @id:
+	 *
+	 * ID of the syncpoint to read the current value from.
+	 */
 	__u32 id;
+
+	/**
+	 * @value:
+	 *
+	 * The current syncpoint value. Set by the kernel upon successful
+	 * completion of the IOCTL.
+	 */
 	__u32 value;
 };
 
+/**
+ * struct drm_tegra_syncpt_incr - parameters for the increment syncpoint IOCTL
+ */
 struct drm_tegra_syncpt_incr {
+	/**
+	 * @id:
+	 *
+	 * ID of the syncpoint to increment.
+	 */
 	__u32 id;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
+/**
+ * struct drm_tegra_syncpt_wait - parameters for the wait syncpoint IOCTL
+ */
 struct drm_tegra_syncpt_wait {
+	/**
+	 * @id:
+	 *
+	 * ID of the syncpoint to wait on.
+	 */
 	__u32 id;
+
+	/**
+	 * @thresh:
+	 *
+	 * Threshold value for which to wait.
+	 */
 	__u32 thresh;
+
+	/**
+	 * @timeout:
+	 *
+	 * Timeout, in milliseconds, to wait.
+	 */
 	__u32 timeout;
+
+	/**
+	 * @value:
+	 *
+	 * The new syncpoint value after the wait. Set by the kernel upon
+	 * successful completion of the IOCTL.
+	 */
 	__u32 value;
 };
 
 #define DRM_TEGRA_NO_TIMEOUT	(0xffffffff)
 
+/**
+ * struct drm_tegra_open_channel - parameters for the open channel IOCTL
+ */
 struct drm_tegra_open_channel {
+	/**
+	 * @client:
+	 *
+	 * The client ID for this channel.
+	 */
 	__u32 client;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
+
+	/**
+	 * @context:
+	 *
+	 * The application context of this channel. Set by the kernel upon
+	 * successful completion of the IOCTL. This context needs to be passed
+	 * to the DRM_TEGRA_CHANNEL_CLOSE or the DRM_TEGRA_SUBMIT IOCTLs.
+	 */
 	__u64 context;
 };
 
+/**
+ * struct drm_tegra_close_channel - parameters for the close channel IOCTL
+ */
 struct drm_tegra_close_channel {
+	/**
+	 * @context:
+	 *
+	 * The application context of this channel. This is obtained from the
+	 * DRM_TEGRA_OPEN_CHANNEL IOCTL.
+	 */
 	__u64 context;
 };
 
+/**
+ * struct drm_tegra_get_syncpt - parameters for the get syncpoint IOCTL
+ */
 struct drm_tegra_get_syncpt {
+	/**
+	 * @context:
+	 *
+	 * The application context identifying the channel for which to obtain
+	 * the syncpoint ID.
+	 */
 	__u64 context;
+
+	/**
+	 * @index:
+	 *
+	 * Index of the client syncpoint for which to obtain the ID.
+	 */
 	__u32 index;
+
+	/**
+	 * @id:
+	 *
+	 * The ID of the given syncpoint. Set by the kernel upon successful
+	 * completion of the IOCTL.
+	 */
 	__u32 id;
 };
 
+/**
+ * struct drm_tegra_get_syncpt_base - parameters for the get wait base IOCTL
+ */
 struct drm_tegra_get_syncpt_base {
+	/**
+	 * @context:
+	 *
+	 * The application context identifying for which channel to obtain the
+	 * wait base.
+	 */
 	__u64 context;
+
+	/**
+	 * @syncpt:
+	 *
+	 * ID of the syncpoint for which to obtain the wait base.
+	 */
 	__u32 syncpt;
+
+	/**
+	 * @id:
+	 *
+	 * The ID of the wait base corresponding to the client syncpoint. Set
+	 * by the kernel upon successful completion of the IOCTL.
+	 */
 	__u32 id;
 };
 
+/**
+ * struct drm_tegra_syncpt - syncpoint increment operation
+ */
 struct drm_tegra_syncpt {
+	/**
+	 * @id:
+	 *
+	 * ID of the syncpoint to operate on.
+	 */
 	__u32 id;
+
+	/**
+	 * @incrs:
+	 *
+	 * Number of increments to perform for the syncpoint.
+	 */
 	__u32 incrs;
 };
 
+/**
+ * struct drm_tegra_cmdbuf - structure describing a command buffer
+ */
 struct drm_tegra_cmdbuf {
+	/**
+	 * @handle:
+	 *
+	 * Handle to a GEM object containing the command buffer.
+	 */
 	__u32 handle;
+
+	/**
+	 * @offset:
+	 *
+	 * Offset, in bytes, into the GEM object identified by @handle at
+	 * which the command buffer starts.
+	 */
 	__u32 offset;
+
+	/**
+	 * @words:
+	 *
+	 * Number of 32-bit words in this command buffer.
+	 */
 	__u32 words;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
+/**
+ * struct drm_tegra_reloc - GEM object relocation structure
+ */
 struct drm_tegra_reloc {
 	struct {
+		/**
+		 * @cmdbuf.handle:
+		 *
+		 * Handle to the GEM object containing the command buffer for
+		 * which to perform this GEM object relocation.
+		 */
 		__u32 handle;
+
+		/**
+		 * @cmdbuf.offset:
+		 *
+		 * Offset, in bytes, into the command buffer at which to
+		 * insert the relocated address.
+		 */
 		__u32 offset;
 	} cmdbuf;
 	struct {
+		/**
+		 * @target.handle:
+		 *
+		 * Handle to the GEM object to be relocated.
+		 */
 		__u32 handle;
+
+		/**
+		 * @target.offset:
+		 *
+		 * Offset, in bytes, into the target GEM object at which the
+		 * relocated data starts.
+		 */
 		__u32 offset;
 	} target;
+
+	/**
+	 * @shift:
+	 *
+	 * The number of bits by which to shift relocated addresses.
+	 */
 	__u32 shift;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
+/**
+ * struct drm_tegra_waitchk - wait check structure
+ */
 struct drm_tegra_waitchk {
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object containing a command stream on which to
+	 * perform the wait check.
+	 */
 	__u32 handle;
+
+	/**
+	 * @offset:
+	 *
+	 * Offset, in bytes, of the location in the command stream to perform
+	 * the wait check on.
+	 */
 	__u32 offset;
+
+	/**
+	 * @syncpt:
+	 *
+	 * ID of the syncpoint to wait check.
+	 */
 	__u32 syncpt;
+
+	/**
+	 * @thresh:
+	 *
+	 * Threshold value for which to check.
+	 */
 	__u32 thresh;
 };
 
+/**
+ * struct drm_tegra_submit - job submission structure
+ */
 struct drm_tegra_submit {
+	/**
+	 * @context:
+	 *
+	 * The application context identifying the channel to use for the
+	 * execution of this job.
+	 */
 	__u64 context;
+
+	/**
+	 * @num_syncpts:
+	 *
+	 * The number of syncpoints operated on by this job. This defines the
+	 * length of the array pointed to by @syncpts.
+	 */
 	__u32 num_syncpts;
+
+	/**
+	 * @num_cmdbufs:
+	 *
+	 * The number of command buffers to execute as part of this job. This
+	 * defines the length of the array pointed to by @cmdbufs.
+	 */
 	__u32 num_cmdbufs;
+
+	/**
+	 * @num_relocs:
+	 *
+	 * The number of relocations to perform before executing this job.
+	 * This defines the length of the array pointed to by @relocs.
+	 */
 	__u32 num_relocs;
+
+	/**
+	 * @num_waitchks:
+	 *
+	 * The number of wait checks to perform as part of this job. This
+	 * defines the length of the array pointed to by @waitchks.
+	 */
 	__u32 num_waitchks;
+
+	/**
+	 * @waitchk_mask:
+	 *
+	 * Bitmask of valid wait checks.
+	 */
 	__u32 waitchk_mask;
+
+	/**
+	 * @timeout:
+	 *
+	 * Timeout, in milliseconds, before this job is cancelled.
+	 */
 	__u32 timeout;
+
+	/**
+	 * @syncpts:
+	 *
+	 * A pointer to an array of &struct drm_tegra_syncpt structures that
+	 * specify the syncpoint operations performed as part of this job.
+	 * The number of elements in the array must be equal to the value
+	 * given by @num_syncpts.
+	 */
 	__u64 syncpts;
+
+	/**
+	 * @cmdbufs:
+	 *
+	 * A pointer to an array of &struct drm_tegra_cmdbuf structures that
+	 * define the command buffers to execute as part of this job. The
+	 * number of elements in the array must be equal to the value given
+	 * by @num_cmdbufs.
+	 */
 	__u64 cmdbufs;
+
+	/**
+	 * @relocs:
+	 *
+	 * A pointer to an array of &struct drm_tegra_reloc structures that
+	 * specify the relocations that need to be performed before executing
+	 * this job. The number of elements in the array must be equal to the
+	 * value given by @num_relocs.
+	 */
 	__u64 relocs;
+
+	/**
+	 * @waitchks:
+	 *
+	 * A pointer to an array of &struct drm_tegra_waitchk structures that
+	 * specify the wait checks to be performed while executing this job.
+	 * The number of elements in the array must be equal to the value
+	 * given by @num_waitchks.
+	 */
 	__u64 waitchks;
-	__u32 fence;		/* Return value */
 
-	__u32 reserved[5];	/* future expansion */
+	/**
+	 * @fence:
+	 *
+	 * The threshold of the syncpoint associated with this job after it
+	 * has been completed. Set by the kernel upon successful completion of
+	 * the IOCTL. This can be used with the DRM_TEGRA_SYNCPT_WAIT IOCTL to
+	 * wait for this job to be finished.
+	 */
+	__u32 fence;
+
+	/**
+	 * @reserved:
+	 *
+	 * This field is reserved for future use. Must be 0.
+	 */
+	__u32 reserved[5];
 };
 
 #define DRM_TEGRA_GEM_TILING_MODE_PITCH 0
 #define DRM_TEGRA_GEM_TILING_MODE_TILED 1
 #define DRM_TEGRA_GEM_TILING_MODE_BLOCK 2
 
+/**
+ * struct drm_tegra_gem_set_tiling - parameters for the set tiling IOCTL
+ */
 struct drm_tegra_gem_set_tiling {
-	/* input */
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object for which to set the tiling parameters.
+	 */
 	__u32 handle;
+
+	/**
+	 * @mode:
+	 *
+	 * The tiling mode to set. Must be one of:
+	 *
+	 * DRM_TEGRA_GEM_TILING_MODE_PITCH
+	 *   pitch linear format
+	 *
+	 * DRM_TEGRA_GEM_TILING_MODE_TILED
+	 *   16x16 tiling format
+	 *
+	 * DRM_TEGRA_GEM_TILING_MODE_BLOCK
+	 *   16Bx2 tiling format
+	 */
 	__u32 mode;
+
+	/**
+	 * @value:
+	 *
+	 * The value to set for the tiling mode parameter.
+	 */
 	__u32 value;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
+/**
+ * struct drm_tegra_gem_get_tiling - parameters for the get tiling IOCTL
+ */
 struct drm_tegra_gem_get_tiling {
-	/* input */
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object for which to query the tiling parameters.
+	 */
 	__u32 handle;
-	/* output */
+
+	/**
+	 * @mode:
+	 *
+	 * The tiling mode currently associated with the GEM object. Set by
+	 * the kernel upon successful completion of the IOCTL.
+	 */
 	__u32 mode;
+
+	/**
+	 * @value:
+	 *
+	 * The tiling mode parameter currently associated with the GEM object.
+	 * Set by the kernel upon successful completion of the IOCTL.
+	 */
 	__u32 value;
+
+	/**
+	 * @pad:
+	 *
+	 * Structure padding that may be used in the future. Must be 0.
+	 */
 	__u32 pad;
 };
 
 #define DRM_TEGRA_GEM_BOTTOM_UP		(1 << 0)
 #define DRM_TEGRA_GEM_FLAGS		(DRM_TEGRA_GEM_BOTTOM_UP)
 
+/**
+ * struct drm_tegra_gem_set_flags - parameters for the set flags IOCTL
+ */
 struct drm_tegra_gem_set_flags {
-	/* input */
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object for which to set the flags.
+	 */
 	__u32 handle;
-	/* output */
+
+	/**
+	 * @flags:
+	 *
+	 * The flags to set for the GEM object.
+	 */
 	__u32 flags;
 };
 
+/**
+ * struct drm_tegra_gem_get_flags - parameters for the get flags IOCTL
+ */
 struct drm_tegra_gem_get_flags {
-	/* input */
+	/**
+	 * @handle:
+	 *
+	 * Handle to the GEM object for which to query the flags.
+	 */
 	__u32 handle;
-	/* output */
+
+	/**
+	 * @flags:
+	 *
+	 * The flags currently associated with the GEM object. Set by the
+	 * kernel upon successful completion of the IOCTL.
+	 */
 	__u32 flags;
 };
 
@@ -193,7 +665,7 @@ struct drm_tegra_gem_get_flags {
 #define DRM_IOCTL_TEGRA_SYNCPT_INCR DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_INCR, struct drm_tegra_syncpt_incr)
 #define DRM_IOCTL_TEGRA_SYNCPT_WAIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SYNCPT_WAIT, struct drm_tegra_syncpt_wait)
 #define DRM_IOCTL_TEGRA_OPEN_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_OPEN_CHANNEL, struct drm_tegra_open_channel)
-#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_open_channel)
+#define DRM_IOCTL_TEGRA_CLOSE_CHANNEL DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_CLOSE_CHANNEL, struct drm_tegra_close_channel)
 #define DRM_IOCTL_TEGRA_GET_SYNCPT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT, struct drm_tegra_get_syncpt)
 #define DRM_IOCTL_TEGRA_SUBMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_SUBMIT, struct drm_tegra_submit)
 #define DRM_IOCTL_TEGRA_GET_SYNCPT_BASE DRM_IOWR(DRM_COMMAND_BASE + DRM_TEGRA_GET_SYNCPT_BASE, struct drm_tegra_get_syncpt_base)
diff --git a/include/drm-uapi/vc4_drm.h b/include/drm-uapi/vc4_drm.h
index 4117117b..31f50de3 100644
--- a/include/drm-uapi/vc4_drm.h
+++ b/include/drm-uapi/vc4_drm.h
@@ -183,10 +183,17 @@ struct drm_vc4_submit_cl {
 	/* ID of the perfmon to attach to this job. 0 means no perfmon. */
 	__u32 perfmonid;
 
-	/* Unused field to align this struct on 64 bits. Must be set to 0.
-	 * If one ever needs to add an u32 field to this struct, this field
-	 * can be used.
+	/* Syncobj handle to wait on. If set, processing of this render job
+	 * will not start until the syncobj is signaled. 0 means ignore.
 	 */
+	__u32 in_sync;
+
+	/* Syncobj handle to export fence to. If set, the fence in the syncobj
+	 * will be replaced with a fence that signals upon completion of this
+	 * render job. 0 means ignore.
+	 */
+	__u32 out_sync;
+
 	__u32 pad2;
 };
 
diff --git a/include/drm-uapi/virtgpu_drm.h b/include/drm-uapi/virtgpu_drm.h
index 91a31ffe..9a781f06 100644
--- a/include/drm-uapi/virtgpu_drm.h
+++ b/include/drm-uapi/virtgpu_drm.h
@@ -63,6 +63,7 @@ struct drm_virtgpu_execbuffer {
 };
 
 #define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
+#define VIRTGPU_PARAM_CAPSET_QUERY_FIX 2 /* do we have the capset fix */
 
 struct drm_virtgpu_getparam {
 	__u64 param;
-- 
2.17.0

_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* [igt-dev] [PATCH i-g-t v6 2/2] tests: add slice power programming test
  2018-05-25 15:26 [igt-dev] [PATCH i-g-t v6 0/2] intel: per context powergating tests & benchmark Lionel Landwerlin
  2018-05-25 15:26 ` [igt-dev] [PATCH i-g-t v6 1/2] headers: bump Lionel Landwerlin
@ 2018-05-25 15:26 ` Lionel Landwerlin
  2018-05-25 15:41   ` Chris Wilson
  2018-05-25 17:01 ` [igt-dev] ✓ Fi.CI.BAT: success for intel: per context powergating tests & benchmark (rev6) Patchwork
  2018-05-26  2:18 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
  3 siblings, 1 reply; 7+ messages in thread
From: Lionel Landwerlin @ 2018-05-25 15:26 UTC (permalink / raw)
  To: igt-dev

Verifies that the kernel programs slices correctly by reading the
value of the PWR_CLK_STATE register, or using MI_SET_PREDICATE on
platforms before Cannonlake.

v2: Add subslice tests (Lionel)
    Use MI_SET_PREDICATE for further verification when available (Lionel)

v3: Rename to gem_ctx_rpcs (Lionel)

v4: Update kernel API (Lionel)
    Add 0 value test (Lionel)
    Exercise invalid values (Lionel)

v5: Add perf tests (Lionel)

v6: Add new sysfs entry tests (Lionel)

v7: Test rsvd fields
    Update for kernel series changes

v8: Drop test_no_sseu_support() test (Kelvin)
    Drop drm_intel_*() apis (Chris)

Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
---
 tests/Makefile.sources |   1 +
 tests/gem_ctx_param.c  |   4 +-
 tests/gem_ctx_sseu.c   | 881 +++++++++++++++++++++++++++++++++++++++++
 tests/meson.build      |   1 +
 4 files changed, 886 insertions(+), 1 deletion(-)
 create mode 100644 tests/gem_ctx_sseu.c

diff --git a/tests/Makefile.sources b/tests/Makefile.sources
index ad62611f..fd44b720 100644
--- a/tests/Makefile.sources
+++ b/tests/Makefile.sources
@@ -60,6 +60,7 @@ TESTS_progs = \
 	gem_ctx_exec \
 	gem_ctx_isolation \
 	gem_ctx_param \
+	gem_ctx_sseu \
 	gem_ctx_switch \
 	gem_ctx_thrash \
 	gem_double_irq_loop \
diff --git a/tests/gem_ctx_param.c b/tests/gem_ctx_param.c
index c46fd709..af1afeaa 100644
--- a/tests/gem_ctx_param.c
+++ b/tests/gem_ctx_param.c
@@ -294,11 +294,13 @@ igt_main
 			set_priority(fd);
 	}
 
+	/* I915_CONTEXT_PARAM_SSEU tests are located in gem_ctx_sseu.c */
+
 	/* NOTE: This testcase intentionally tests for the next free parameter
 	 * to catch ABI extensions. Don't "fix" this testcase without adding all
 	 * the tests for the new param first.
 	 */
-	arg.param = I915_CONTEXT_PARAM_PRIORITY + 1;
+	arg.param = I915_CONTEXT_PARAM_SSEU + 1;
 
 	igt_subtest("invalid-param-get") {
 		arg.ctx_id = ctx;
diff --git a/tests/gem_ctx_sseu.c b/tests/gem_ctx_sseu.c
new file mode 100644
index 00000000..75095a6b
--- /dev/null
+++ b/tests/gem_ctx_sseu.c
@@ -0,0 +1,881 @@
+/*
+ * Copyright © 2017 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Lionel Landwerlin <lionel.g.landwerlin@intel.com>
+ *
+ */
+
+#include "igt.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <errno.h>
+#include <time.h>
+#include <sys/wait.h>
+
+#include "igt_sysfs.h"
+#include "ioctl_wrappers.h"
+
+IGT_TEST_DESCRIPTION("Test context render powergating programming.");
+
+#define MI_STORE_REGISTER_MEM (0x24 << 23)
+
+#define MI_SET_PREDICATE      (0x1 << 23)
+#define  MI_SET_PREDICATE_NOOP_NEVER (0)
+#define  MI_SET_PREDICATE_1_SLICES   (5)
+#define  MI_SET_PREDICATE_2_SLICES   (6)
+#define  MI_SET_PREDICATE_3_SLICES   (7)
+
+#define GEN8_R_PWR_CLK_STATE		0x20C8
+#define   GEN8_RPCS_ENABLE		(1 << 31)
+#define   GEN8_RPCS_S_CNT_ENABLE	(1 << 18)
+#define   GEN8_RPCS_S_CNT_SHIFT		15
+#define   GEN8_RPCS_S_CNT_MASK		(0x7 << GEN8_RPCS_S_CNT_SHIFT)
+#define   GEN8_RPCS_SS_CNT_ENABLE	(1 << 11)
+#define   GEN8_RPCS_SS_CNT_SHIFT	8
+#define   GEN8_RPCS_SS_CNT_MASK		(0x7 << GEN8_RPCS_SS_CNT_SHIFT)
+#define   GEN8_RPCS_EU_MAX_SHIFT	4
+#define   GEN8_RPCS_EU_MAX_MASK		(0xf << GEN8_RPCS_EU_MAX_SHIFT)
+#define   GEN8_RPCS_EU_MIN_SHIFT	0
+#define   GEN8_RPCS_EU_MIN_MASK		(0xf << GEN8_RPCS_EU_MIN_SHIFT)
+
+#define RCS_TIMESTAMP (0x2000 + 0x358)
+
+static int drm_fd;
+static int devid;
+static uint64_t device_slice_mask = 0;
+static uint64_t device_subslice_mask = 0;
+static uint32_t device_slice_count = 0;
+static uint32_t device_subslice_count = 0;
+
+static uint64_t mask_minus_one(uint64_t mask)
+{
+	int i;
+
+	for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
+		if ((1UL << i) & mask) {
+			return mask & ~(1UL << i);
+		}
+	}
+
+	igt_assert(!"reached");
+	return 0;
+}
+
+static uint64_t mask_plus_one(uint64_t mask)
+{
+	int i;
+
+	for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
+		if (((1UL << i) & mask) == 0) {
+			return mask | (1UL << i);
+		}
+	}
+
+	igt_assert(!"reached");
+	return 0;
+}
+
+static uint64_t mask_minus(uint64_t mask, int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		mask = mask_minus_one(mask);
+
+	return mask;
+}
+
+static uint64_t mask_plus(uint64_t mask, int n)
+{
+	int i;
+
+	for (i = 0; i < n; i++)
+		mask = mask_plus_one(mask);
+
+	return mask;
+}
+
+static uint32_t *
+fill_relocation(uint32_t *batch,
+		struct drm_i915_gem_relocation_entry *reloc,
+		uint32_t gem_handle, uint32_t delta, /* in bytes */
+		uint32_t offset, /* in dwords */
+		uint32_t read_domains, uint32_t write_domains)
+{
+	reloc->target_handle = gem_handle;
+	reloc->delta = delta;
+	reloc->offset = offset * sizeof(uint32_t);
+	reloc->presumed_offset = 0;
+	reloc->read_domains = read_domains;
+	reloc->write_domain = write_domains;
+
+	*batch++ = delta;
+	*batch++ = 0;
+
+	return batch;
+}
+
+
+static uint32_t
+read_rpcs_reg(uint32_t context,
+	      uint32_t expected_slices)
+{
+	struct drm_i915_gem_execbuffer2 execbuf;
+	struct drm_i915_gem_exec_object2 obj[2];
+	struct drm_i915_gem_relocation_entry relocs[2];
+	uint32_t *batch, *b, *data;
+	uint32_t rpcs;
+	int n_relocs = 0;
+
+	memset(obj, 0, sizeof(obj));
+	obj[0].handle = gem_create(drm_fd, 4096);
+	obj[1].handle = gem_create(drm_fd, 4096);
+
+	batch = b = gem_mmap__cpu(drm_fd, obj[1].handle, 0, 4096,
+				  PROT_READ | PROT_WRITE);
+
+	if (expected_slices != 0 && intel_gen(devid) < 10) {
+		*b++ = MI_SET_PREDICATE | (1 - 1) |
+			(MI_SET_PREDICATE_1_SLICES + expected_slices - 1);
+	}
+
+	*b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+	*b++ = RCS_TIMESTAMP;
+	b = fill_relocation(b, &relocs[n_relocs++], obj[0].handle,
+			    0, b - batch,
+			    I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+	*b++ = MI_STORE_REGISTER_MEM | (4 - 2);
+	*b++ = GEN8_R_PWR_CLK_STATE;
+	b = fill_relocation(b, &relocs[n_relocs++], obj[0].handle,
+			    4, b - batch,
+			    I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
+
+	if (expected_slices != 0 && intel_gen(devid) < 10)
+		*b++ = MI_SET_PREDICATE | (1 - 1) | MI_SET_PREDICATE_NOOP_NEVER;
+
+	*b++ = MI_BATCH_BUFFER_END;
+
+	gem_munmap(batch, 4096);
+
+	obj[1].relocation_count = n_relocs;
+	obj[1].relocs_ptr = to_user_pointer(relocs);
+
+	memset(&execbuf, 0, sizeof(execbuf));
+	execbuf.buffers_ptr = to_user_pointer(obj);
+	execbuf.buffer_count = ARRAY_SIZE(obj);
+	i915_execbuffer2_set_context_id(execbuf, context);
+
+	data = gem_mmap__cpu(drm_fd, obj[0].handle, 0, 4096, PROT_READ | PROT_WRITE);
+	memset(data, 0, 4096);
+	gem_munmap(data, 4096);
+
+	gem_execbuf(drm_fd, &execbuf);
+	gem_sync(drm_fd, obj[0].handle);
+
+	data = gem_mmap__cpu(drm_fd, obj[0].handle, 0, 4096, PROT_READ);
+
+	rpcs = data[1];
+
+	igt_debug("rcs_timestamp=0x%x rpcs=0x%x/0x%x\n", data[0], data[1], ((data[1] & GEN8_RPCS_S_CNT_MASK) >> GEN8_RPCS_S_CNT_SHIFT));
+
+	gem_munmap(data, 4096);
+
+	gem_close(drm_fd, obj[0].handle);
+	gem_close(drm_fd, obj[1].handle);
+
+	return rpcs;
+}
+
+static uint32_t
+read_slice_count(uint32_t context,
+		 uint32_t expected_slice_count)
+{
+	return (read_rpcs_reg(context, expected_slice_count) & GEN8_RPCS_S_CNT_MASK)
+		>> GEN8_RPCS_S_CNT_SHIFT;
+}
+
+static uint32_t
+read_subslice_count(uint32_t context)
+{
+	return (read_rpcs_reg(context, 0) & GEN8_RPCS_SS_CNT_MASK)
+		>> GEN8_RPCS_SS_CNT_SHIFT;
+}
+
+static bool
+kernel_has_per_context_sseu_support(void)
+{
+	struct drm_i915_gem_context_param arg;
+	struct drm_i915_gem_context_param_sseu sseu;
+
+	memset(&sseu, 0, sizeof(sseu));
+	sseu.class = 0; /* rcs */
+	sseu.instance = 0;
+
+	memset(&arg, 0, sizeof(arg));
+	arg.ctx_id = 0; /* default context */
+	arg.param = I915_CONTEXT_PARAM_SSEU;
+	arg.value = (uintptr_t) &sseu;
+
+	if (igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg))
+		return false;
+
+	return true;
+}
+
+static bool
+platform_has_per_context_sseu_support(void)
+{
+	struct drm_i915_gem_context_param arg;
+	struct drm_i915_gem_context_param_sseu sseu;
+	int ret;
+
+	memset(&sseu, 0, sizeof(sseu));
+	sseu.class = 0; /* rcs */
+	sseu.instance = 0;
+
+	memset(&arg, 0, sizeof(arg));
+	arg.ctx_id = 0; /* default context */
+	arg.param = I915_CONTEXT_PARAM_SSEU;
+	arg.value = (uintptr_t) &sseu;
+
+	ret = igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
+	igt_assert(ret == 0 || errno == EINVAL);
+	if (ret)
+		return false;
+
+	ret = igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
+	igt_assert(ret == 0 || errno == ENODEV);
+	if (ret)
+		return false;
+
+	return true;
+}
+
+static void
+context_get_sseu_masks(uint32_t context,
+		       uint32_t *slice_mask,
+		       uint32_t *subslice_mask)
+{
+	struct drm_i915_gem_context_param arg;
+	struct drm_i915_gem_context_param_sseu sseu;
+
+	memset(&sseu, 0, sizeof(sseu));
+	sseu.class = 0; /* rcs */
+	sseu.instance = 0;
+
+	memset(&arg, 0, sizeof(arg));
+	arg.ctx_id = context;
+	arg.param = I915_CONTEXT_PARAM_SSEU;
+	arg.value = (uintptr_t) &sseu;
+
+	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
+
+	if (slice_mask)
+		*slice_mask = sseu.slice_mask;
+	if (subslice_mask)
+		*subslice_mask = sseu.subslice_mask;
+}
+
+static void
+context_set_slice_mask(uint32_t context, uint32_t slice_mask)
+{
+	struct drm_i915_gem_context_param arg;
+	struct drm_i915_gem_context_param_sseu sseu;
+
+	memset(&sseu, 0, sizeof(sseu));
+	sseu.class = 0; /* rcs */
+	sseu.instance = 0;
+
+	memset(&arg, 0, sizeof(arg));
+	arg.ctx_id = context;
+	arg.param = I915_CONTEXT_PARAM_SSEU;
+	arg.value = (uintptr_t) &sseu;
+
+	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
+
+	sseu.slice_mask = slice_mask;
+
+	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
+}
+
+static void
+context_set_subslice_mask(uint32_t context, uint32_t subslice_mask)
+{
+	struct drm_i915_gem_context_param arg;
+	struct drm_i915_gem_context_param_sseu sseu;
+
+	memset(&sseu, 0, sizeof(sseu));
+	sseu.class = 0; /* rcs */
+	sseu.instance = 0;
+
+	memset(&arg, 0, sizeof(arg));
+	arg.ctx_id = context;
+	arg.param = I915_CONTEXT_PARAM_SSEU;
+	arg.value = (uintptr_t) &sseu;
+
+	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
+
+	sseu.subslice_mask = subslice_mask;
+
+	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
+}
+
+/*
+ * Verify that we can program the slice count.
+ */
+static void
+test_sseu_slice_program_gt(uint32_t pg_slice_count)
+{
+	uint32_t pg_contexts[2], df_contexts[2];
+	uint32_t pg_slice_mask = mask_minus(device_slice_mask, pg_slice_count);
+	uint32_t slice_count = __builtin_popcount(pg_slice_mask);
+	uint32_t slice_mask;
+	int i;
+
+	igt_debug("Running with %i slices powergated\n", pg_slice_count);
+
+	for (i = 0; i < ARRAY_SIZE(pg_contexts); i++) {
+		pg_contexts[i] = gem_context_create(drm_fd);
+		df_contexts[i] = gem_context_create(drm_fd);
+
+		context_set_slice_mask(pg_contexts[i], pg_slice_mask);
+		context_set_slice_mask(df_contexts[i], device_slice_mask);
+	}
+
+	for (int i = 0; i < ARRAY_SIZE(pg_contexts); i++) {
+		context_get_sseu_masks(pg_contexts[i], &slice_mask, NULL);
+		igt_assert_eq(pg_slice_mask, slice_mask);
+	}
+
+	for (int i = 0; i < ARRAY_SIZE(df_contexts); i++) {
+		context_get_sseu_masks(df_contexts[i], &slice_mask, NULL);
+		igt_assert_eq(device_slice_mask, slice_mask);
+	}
+
+	/*
+	 * Test false positives with predicates (only available on
+	 * before Gen10).
+	 */
+	if (intel_gen(devid) < 10) {
+		igt_assert_eq(0, read_slice_count(pg_contexts[0],
+						  device_slice_count));
+	}
+
+	igt_debug("pg_contexts:\n");
+	igt_assert_eq(slice_count, read_slice_count(pg_contexts[0],
+						    slice_count));
+	igt_assert_eq(slice_count, read_slice_count(pg_contexts[1],
+						    slice_count));
+	igt_assert_eq(slice_count, read_slice_count(pg_contexts[0],
+						    slice_count));
+	igt_assert_eq(slice_count, read_slice_count(pg_contexts[0],
+						    slice_count));
+
+	igt_debug("df_contexts:\n");
+	igt_assert_eq(device_slice_count, read_slice_count(df_contexts[0],
+							   device_slice_count));
+	igt_assert_eq(device_slice_count, read_slice_count(df_contexts[1],
+							   device_slice_count));
+	igt_assert_eq(device_slice_count, read_slice_count(df_contexts[0],
+							   device_slice_count));
+	igt_assert_eq(device_slice_count, read_slice_count(df_contexts[0],
+							   device_slice_count));
+
+	igt_debug("mixed:\n");
+	igt_assert_eq(slice_count, read_slice_count(pg_contexts[0],
+						    slice_count));
+
+	igt_assert_eq(device_slice_count, read_slice_count(df_contexts[0],
+							   device_slice_count));
+
+
+	for (int i = 0; i < ARRAY_SIZE(pg_contexts); i++) {
+		gem_context_destroy(drm_fd, pg_contexts[i]);
+		gem_context_destroy(drm_fd, df_contexts[i]);
+	}
+}
+
+/*
+ * Verify that we can program the subslice count.
+ */
+static void
+test_sseu_subslice_program_gt(int pg_subslice_count)
+{
+	uint32_t pg_subslice_mask =
+		mask_minus(device_subslice_mask, pg_subslice_count);
+	uint32_t subslice_count = __builtin_popcount(pg_subslice_mask);
+	uint32_t subslice_mask;
+	uint32_t context1, context2;
+
+	igt_debug("Running with %i subslices powergated\n", pg_subslice_count);
+
+	context1 = gem_context_create(drm_fd);
+	context2 = gem_context_create(drm_fd);
+
+	context_set_subslice_mask(context1, pg_subslice_mask);
+	context_set_subslice_mask(context2, device_subslice_mask);
+
+	context_get_sseu_masks(context1, NULL, &subslice_mask);
+	igt_assert_eq(pg_subslice_mask, subslice_mask);
+	context_get_sseu_masks(context2, NULL, &subslice_mask);
+	igt_assert_eq(device_subslice_mask, subslice_mask);
+
+	igt_assert_eq(subslice_count, read_subslice_count(context1));
+	igt_assert_eq(device_subslice_count, read_subslice_count(context2));
+
+	context_set_subslice_mask(context1, device_subslice_mask);
+	context_set_subslice_mask(context2, pg_subslice_mask);
+
+	context_get_sseu_masks(context1, NULL, &subslice_mask);
+	igt_assert_eq(device_subslice_mask, subslice_mask);
+	context_get_sseu_masks(context2, NULL, &subslice_mask);
+	igt_assert_eq(pg_subslice_mask, subslice_mask);
+
+	igt_assert_eq(device_subslice_count, read_subslice_count(context1));
+	igt_assert_eq(subslice_count, read_subslice_count(context2));
+
+	gem_context_destroy(drm_fd, context1);
+	gem_context_destroy(drm_fd, context2);
+}
+
+/*
+ * Verify that invalid engine class/instance is properly rejected.
+ */
+static void
+test_sseu_invalid_engine(void)
+{
+	struct drm_i915_gem_context_param arg;
+	struct drm_i915_gem_context_param_sseu sseu;
+
+	memset(&sseu, 0, sizeof(sseu));
+
+	memset(&arg, 0, sizeof(arg));
+	arg.ctx_id = 0; /* default context */
+	arg.param = I915_CONTEXT_PARAM_SSEU;
+	arg.value = (uintptr_t) &sseu;
+
+	sseu.class = I915_ENGINE_CLASS_VIDEO_ENHANCE + 1; /* invalid */
+	sseu.instance = 0;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg, EINVAL);
+
+	sseu.class = 0;
+	sseu.instance = 0xffff; /* assumed invalid */
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg, EINVAL);
+
+	/*
+	 * Get some proper values before trying to reprogram them onto
+	 * an invalid engine.
+	 */
+	sseu.class = 0;
+	sseu.instance = 0;
+	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
+
+
+	sseu.class = I915_ENGINE_CLASS_VIDEO_ENHANCE + 2; /* invalid */
+	sseu.instance = 0;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	sseu.class = 0;
+	sseu.instance = 0xffff; /* assumed invalid */
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+}
+
+/*
+ * Verify that invalid values are rejected.
+ */
+static void
+test_sseu_invalid_values(void)
+{
+	struct drm_i915_gem_context_param arg;
+	struct drm_i915_gem_context_param_sseu default_sseu, sseu;
+	int i;
+
+	memset(&default_sseu, 0, sizeof(default_sseu));
+	default_sseu.class = 0; /* rcs */
+	default_sseu.instance = 0;
+
+	memset(&arg, 0, sizeof(arg));
+	arg.ctx_id = 0; /* default context */
+	arg.param = I915_CONTEXT_PARAM_SSEU;
+	arg.value = (uintptr_t) &default_sseu;
+
+	do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
+
+	arg.value = (uintptr_t) &sseu;
+
+        /* Try non 0 rsvd fields. */
+	sseu = default_sseu;
+	sseu.rsvd1 = 1;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	sseu = default_sseu;
+	sseu.rsvd1 = 0xff00ff00;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	sseu = default_sseu;
+	sseu.rsvd2 = 1;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	sseu = default_sseu;
+	sseu.rsvd2 = 0xff00ff00;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	sseu = default_sseu;
+	sseu.rsvd1 = 42;
+	sseu.rsvd2 = 42 * 42;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	/* Try all slice masks known to be invalid. */
+	sseu = default_sseu;
+	for (i = 1; i <= (8 - device_slice_count); i++) {
+		sseu.slice_mask = mask_plus(device_slice_mask, i);
+		do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+	}
+
+	/* 0 slices. */
+	sseu.slice_mask = 0;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	/* Try all subslice masks known to be invalid. */
+	sseu = default_sseu;
+	for (i = 1; i <= (8 - device_subslice_count); i++) {
+		sseu.subslice_mask = mask_plus(device_subslice_mask, i);
+		do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+	}
+
+	/* 0 subslices. */
+	sseu.subslice_mask = 0;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	/* Try number of EUs superior to the max available. */
+	sseu = default_sseu;
+	sseu.min_eus_per_subslice = default_sseu.max_eus_per_subslice + 1;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	sseu = default_sseu;
+	sseu.max_eus_per_subslice = default_sseu.max_eus_per_subslice + 1;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+
+	/* Try to program 0 max EUs. */
+	sseu = default_sseu;
+	sseu.max_eus_per_subslice = 0;
+	do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg, EINVAL);
+}
+
+/* Helper: create contexts and program the device/powergated slice masks on the first two. */
+static void
+init_contexts(uint32_t *contexts,
+	      int n_contexts,
+	      uint32_t device_slice_mask,
+	      uint32_t pg_slice_mask)
+{
+	int i;
+
+	for (i = 0; i < n_contexts; i++)
+		contexts[i] = gem_context_create(drm_fd);
+
+	context_set_slice_mask(contexts[0], device_slice_mask);
+	context_set_slice_mask(contexts[1], pg_slice_mask);
+}
+
+/*
+ * Verify that powergating settings are put on hold while i915/perf is
+ * active.
+ */
+static void
+test_sseu_perf(void)
+{
+	uint64_t properties[] = {
+		/* Include OA reports in samples */
+		DRM_I915_PERF_PROP_SAMPLE_OA, true,
+
+		/* OA unit configuration */
+		DRM_I915_PERF_PROP_OA_METRICS_SET, 1, /* test metric */
+		DRM_I915_PERF_PROP_OA_FORMAT, I915_OA_FORMAT_A32u40_A4u32_B8_C8,
+		DRM_I915_PERF_PROP_OA_EXPONENT, 20,
+	};
+	struct drm_i915_perf_open_param param = {
+		.flags = I915_PERF_FLAG_FD_CLOEXEC |
+		I915_PERF_FLAG_FD_NONBLOCK,
+		.num_properties = ARRAY_SIZE(properties) / 2,
+		.properties_ptr = to_user_pointer(properties),
+	};
+	uint32_t pg_slice_mask = mask_minus(device_slice_mask, 1);
+	uint32_t slice_count = __builtin_popcount(pg_slice_mask);
+	uint32_t contexts[2];
+	int i, perf_fd;
+
+	init_contexts(contexts, 2, device_slice_mask, pg_slice_mask);
+
+	/*
+	 * Test false positives with predicates (only available on
+	 * before Gen10).
+	 */
+	if (intel_gen(devid) < 10) {
+		igt_assert_eq(0, read_slice_count(contexts[1],
+						  device_slice_count));
+	}
+	igt_assert_eq(device_slice_count, read_slice_count(contexts[0],
+							   device_slice_count));
+	igt_assert_eq(slice_count, read_slice_count(contexts[1],
+						    slice_count));
+
+	/*
+	 * Now open i915/perf and verify that all contexts have been
+	 * reconfigured to the device's default.
+	 */
+	perf_fd = igt_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
+	igt_assert(perf_fd >= 0);
+
+	if (intel_gen(devid) < 10) {
+		igt_assert_eq(0, read_slice_count(contexts[1], slice_count));
+	}
+	igt_assert_eq(device_slice_count, read_slice_count(contexts[0],
+							   device_slice_count));
+	igt_assert_eq(device_slice_count, read_slice_count(contexts[1],
+							   device_slice_count));
+
+	close(perf_fd);
+
+	/*
+	 * After closing the perf stream, configurations should be
+	 * back to the programmed values.
+	 */
+	if (intel_gen(devid) < 10) {
+		igt_assert_eq(0, read_slice_count(contexts[1],
+						  device_slice_count));
+	}
+	igt_assert_eq(device_slice_count, read_slice_count(contexts[0],
+							   device_slice_count));
+	igt_assert_eq(slice_count, read_slice_count(contexts[1],
+						    slice_count));
+
+	for (i = 0; i < ARRAY_SIZE(contexts); i++)
+		gem_context_destroy(drm_fd, contexts[i]);
+
+	/*
+	 * Open i915/perf first and verify that all contexts created
+	 * afterward are reconfigured to the device's default.
+	 */
+	perf_fd = igt_ioctl(drm_fd, DRM_IOCTL_I915_PERF_OPEN, &param);
+	igt_assert(perf_fd >= 0);
+
+	init_contexts(contexts, 2, device_slice_mask, pg_slice_mask);
+
+	/*
+	 * Check the device's default values, despite setting
+	 * otherwise.
+	 */
+	if (intel_gen(devid) < 10) {
+		igt_assert_eq(0, read_slice_count(contexts[1],
+						  slice_count));
+	}
+	igt_assert_eq(device_slice_count, read_slice_count(contexts[0],
+							   device_slice_count));
+	igt_assert_eq(device_slice_count, read_slice_count(contexts[1],
+							   device_slice_count));
+
+	close(perf_fd);
+
+	/*
+	 * After closing the perf stream, configurations should be
+	 * back to the programmed values.
+	 */
+	if (intel_gen(devid) < 10) {
+		igt_assert_eq(0, read_slice_count(contexts[1],
+						  device_slice_count));
+	}
+	igt_assert_eq(device_slice_count, read_slice_count(contexts[0],
+							   device_slice_count));
+	igt_assert_eq(slice_count, read_slice_count(contexts[1],
+						    slice_count));
+
+	for (i = 0; i < ARRAY_SIZE(contexts); i++)
+		gem_context_destroy(drm_fd, contexts[i]);
+}
+
+static bool get_allow_dynamic_sseu(int fd)
+{
+	int sysfs;
+	bool ret;
+
+	sysfs = igt_sysfs_open(fd, NULL);
+	igt_assert_lte(0, sysfs);
+
+	ret = igt_sysfs_get_boolean(sysfs, "allow_dynamic_sseu");
+
+	close(sysfs);
+	return ret;
+}
+
+static void set_allow_dynamic_sseu(int fd, bool allowed)
+{
+	int sysfs;
+
+	sysfs = igt_sysfs_open(fd, NULL);
+	igt_assert_lte(0, sysfs);
+
+	igt_assert_eq(true,
+                      igt_sysfs_set_boolean(sysfs,
+                                            "allow_dynamic_sseu",
+                                            allowed));
+
+	close(sysfs);
+}
+
+/*
+ * Verify that an unprivileged process can only change the sseu
+ * configuration when allowed through the allow_dynamic_sseu sysfs entry.
+ */
+static void
+test_dynamic_sseu(bool set_allowed, bool allowed)
+{
+	if (set_allowed)
+		set_allow_dynamic_sseu(drm_fd, allowed);
+
+	igt_fork(child, 1) {
+		uint32_t pg_slice_mask = mask_minus(device_slice_mask, 1);
+		struct drm_i915_gem_context_param arg;
+		struct drm_i915_gem_context_param_sseu sseu;
+
+		igt_drop_root();
+
+		memset(&sseu, 0, sizeof(sseu));
+		sseu.class = 0; /* rcs */
+		sseu.instance = 0;
+
+		memset(&arg, 0, sizeof(arg));
+		arg.ctx_id = 0; /* default context */
+		arg.param = I915_CONTEXT_PARAM_SSEU;
+		arg.value = (uintptr_t) &sseu;
+
+		do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
+
+		sseu.slice_mask = pg_slice_mask;
+
+		if (allowed) {
+			do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
+		} else {
+			do_ioctl_err(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM,
+				     &arg, EPERM);
+		}
+	}
+
+	igt_waitchildren();
+}
+
+igt_main
+{
+	int i, max_slices = 3, max_subslices = 3;
+	drm_i915_getparam_t gp;
+
+	igt_fixture {
+		/* Use drm_open_driver to verify device existence */
+		drm_fd = drm_open_driver(DRIVER_INTEL);
+		igt_require_gem(drm_fd);
+
+		devid = intel_get_drm_devid(drm_fd);
+
+		/* Old kernel? */
+		igt_require(kernel_has_per_context_sseu_support());
+
+		gp.param = I915_PARAM_SLICE_MASK;
+		gp.value = (int *) &device_slice_mask;
+		do_ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
+		device_slice_count = __builtin_popcount(device_slice_mask);
+
+		gp.param = I915_PARAM_SUBSLICE_MASK;
+		gp.value = (int *) &device_subslice_mask;
+		do_ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);
+		device_subslice_count = __builtin_popcount(device_subslice_mask);
+
+		igt_require(!get_allow_dynamic_sseu(drm_fd));
+	}
+
+	igt_subtest("default-dynamic-sseu-disallowed") {
+		igt_require(platform_has_per_context_sseu_support());
+		igt_require(device_slice_count > 1);
+		test_dynamic_sseu(false, false);
+	}
+
+	igt_subtest("sseu-invalid-engine") {
+		igt_require(platform_has_per_context_sseu_support());
+		test_sseu_invalid_engine();
+	}
+
+	igt_subtest("sseu-invalid-values") {
+		igt_require(platform_has_per_context_sseu_support());
+		test_sseu_invalid_values();
+	}
+
+	for (i = 1; i < max_slices; i++) {
+		igt_subtest_f("sseu-%i-pg-slice-program-rcs", i) {
+			igt_require(device_slice_count > i);
+			igt_require(platform_has_per_context_sseu_support());
+
+			test_sseu_slice_program_gt(i);
+		}
+	}
+
+	for (i = 1; i < max_subslices; i++) {
+		igt_subtest_f("sseu-%i-pg-subslice-program-rcs", i) {
+			igt_require(device_subslice_count >= 2);
+			igt_require(platform_has_per_context_sseu_support());
+
+			/* Only available on some Atom platforms and Gen10+. */
+			igt_require(IS_BROXTON(devid) || IS_GEMINILAKE(devid) ||
+				    intel_gen(devid) >= 10);
+
+			test_sseu_subslice_program_gt(i);
+		}
+	}
+
+	igt_subtest("dynamic-sseu-disallow") {
+		igt_require(platform_has_per_context_sseu_support());
+		igt_require(device_slice_count > 1);
+		test_dynamic_sseu(true, false);
+	}
+
+	igt_subtest("dynamic-sseu-allow") {
+		igt_require(platform_has_per_context_sseu_support());
+		igt_require(device_slice_count > 1);
+		test_dynamic_sseu(true, true);
+	}
+
+	igt_subtest("sseu-perf") {
+		igt_require(platform_has_per_context_sseu_support());
+		igt_require(device_slice_count > 1);
+		test_sseu_perf();
+	}
+
+	igt_fixture {
+		set_allow_dynamic_sseu(drm_fd, false);
+
+		close(drm_fd);
+	}
+}
diff --git a/tests/meson.build b/tests/meson.build
index cedb4ff1..74111554 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -37,6 +37,7 @@ test_progs = [
 	'gem_ctx_exec',
 	'gem_ctx_isolation',
 	'gem_ctx_param',
+	'gem_ctx_sseu',
 	'gem_ctx_switch',
 	'gem_ctx_thrash',
 	'gem_double_irq_loop',
-- 
2.17.0

_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply related	[flat|nested] 7+ messages in thread

* Re: [igt-dev] [PATCH i-g-t v6 2/2] tests: add slice power programming test
  2018-05-25 15:26 ` [igt-dev] [PATCH i-g-t v6 2/2] tests: add slice power programming test Lionel Landwerlin
@ 2018-05-25 15:41   ` Chris Wilson
  2018-05-25 15:57     ` Lionel Landwerlin
  0 siblings, 1 reply; 7+ messages in thread
From: Chris Wilson @ 2018-05-25 15:41 UTC (permalink / raw)
  To: Lionel Landwerlin, igt-dev

Quoting Lionel Landwerlin (2018-05-25 16:26:40)
> Verifies that the kernel programs slices correctly based on reading
> the value of the PWR_CLK_STATE register, or MI_SET_PREDICATE on platforms
> before Cannonlake.
> 
> v2: Add subslice tests (Lionel)
>     Use MI_SET_PREDICATE for further verification when available (Lionel)
> 
> v3: Rename to gem_ctx_rpcs (Lionel)
> 
> v4: Update kernel API (Lionel)
>     Add 0 value test (Lionel)
>     Exercise invalid values (Lionel)
> 
> v5: Add perf tests (Lionel)
> 
> v6: Add new sysfs entry tests (Lionel)
> 
> v7: Test rsvd fields
>     Update for kernel series changes
> 
> v8: Drop test_no_sseu_support() test (Kelvin)
>     Drop drm_intel_*() apis (Chris)
> 
> Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> ---
>  tests/Makefile.sources |   1 +
>  tests/gem_ctx_param.c  |   4 +-
>  tests/gem_ctx_sseu.c   | 881 +++++++++++++++++++++++++++++++++++++++++
>  tests/meson.build      |   1 +
>  4 files changed, 886 insertions(+), 1 deletion(-)
>  create mode 100644 tests/gem_ctx_sseu.c
> 
> diff --git a/tests/Makefile.sources b/tests/Makefile.sources
> index ad62611f..fd44b720 100644
> --- a/tests/Makefile.sources
> +++ b/tests/Makefile.sources
> @@ -60,6 +60,7 @@ TESTS_progs = \
>         gem_ctx_exec \
>         gem_ctx_isolation \
>         gem_ctx_param \
> +       gem_ctx_sseu \
>         gem_ctx_switch \
>         gem_ctx_thrash \
>         gem_double_irq_loop \
> diff --git a/tests/gem_ctx_param.c b/tests/gem_ctx_param.c
> index c46fd709..af1afeaa 100644
> --- a/tests/gem_ctx_param.c
> +++ b/tests/gem_ctx_param.c
> @@ -294,11 +294,13 @@ igt_main
>                         set_priority(fd);
>         }
>  
> +       /* I915_CONTEXT_PARAM_SSEU tests are located in gem_ctx_sseu.c */
> +
>         /* NOTE: This testcase intentionally tests for the next free parameter
>          * to catch ABI extensions. Don't "fix" this testcase without adding all
>          * the tests for the new param first.
>          */
> -       arg.param = I915_CONTEXT_PARAM_PRIORITY + 1;
> +       arg.param = I915_CONTEXT_PARAM_SSEU + 1;
>  
>         igt_subtest("invalid-param-get") {
>                 arg.ctx_id = ctx;
> diff --git a/tests/gem_ctx_sseu.c b/tests/gem_ctx_sseu.c
> new file mode 100644
> index 00000000..75095a6b
> --- /dev/null
> +++ b/tests/gem_ctx_sseu.c
> @@ -0,0 +1,881 @@
> +/*
> + * Copyright © 2017 Intel Corporation
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice (including the next
> + * paragraph) shall be included in all copies or substantial portions of the
> + * Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
> + * IN THE SOFTWARE.
> + *
> + * Authors:
> + *    Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> + *
> + */
> +
> +#include "igt.h"
> +#include <stdio.h>
> +#include <stdlib.h>
> +#include <string.h>
> +#include <unistd.h>
> +#include <fcntl.h>
> +#include <signal.h>
> +#include <errno.h>
> +#include <time.h>
> +#include <sys/wait.h>
> +
> +#include "igt_sysfs.h"
> +#include "ioctl_wrappers.h"
> +
> +IGT_TEST_DESCRIPTION("Test context render powergating programming.");
> +
> +#define MI_STORE_REGISTER_MEM (0x24 << 23)
> +
> +#define MI_SET_PREDICATE      (0x1 << 23)
> +#define  MI_SET_PREDICATE_NOOP_NEVER (0)
> +#define  MI_SET_PREDICATE_1_SLICES   (5)
> +#define  MI_SET_PREDICATE_2_SLICES   (6)
> +#define  MI_SET_PREDICATE_3_SLICES   (7)
> +
> +#define GEN8_R_PWR_CLK_STATE           0x20C8
> +#define   GEN8_RPCS_ENABLE             (1 << 31)
> +#define   GEN8_RPCS_S_CNT_ENABLE       (1 << 18)
> +#define   GEN8_RPCS_S_CNT_SHIFT                15
> +#define   GEN8_RPCS_S_CNT_MASK         (0x7 << GEN8_RPCS_S_CNT_SHIFT)
> +#define   GEN8_RPCS_SS_CNT_ENABLE      (1 << 11)
> +#define   GEN8_RPCS_SS_CNT_SHIFT       8
> +#define   GEN8_RPCS_SS_CNT_MASK                (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
> +#define   GEN8_RPCS_EU_MAX_SHIFT       4
> +#define   GEN8_RPCS_EU_MAX_MASK                (0xf << GEN8_RPCS_EU_MAX_SHIFT)
> +#define   GEN8_RPCS_EU_MIN_SHIFT       0
> +#define   GEN8_RPCS_EU_MIN_MASK                (0xf << GEN8_RPCS_EU_MIN_SHIFT)
> +
> +#define RCS_TIMESTAMP (0x2000 + 0x358)
> +
> +static int drm_fd;
> +static int devid;
> +static uint64_t device_slice_mask = 0;
> +static uint64_t device_subslice_mask = 0;
> +static uint32_t device_slice_count = 0;
> +static uint32_t device_subslice_count = 0;
> +
> +static uint64_t mask_minus_one(uint64_t mask)
> +{
> +       int i;
> +
> +       for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
> +               if ((1UL << i) & mask) {
> +                       return mask & ~(1UL << i);
> +               }
> +       }
> +
> +       igt_assert(!"reached");
> +       return 0;
> +}
> +
> +static uint64_t mask_plus_one(uint64_t mask)
> +{
> +       int i;
> +
> +       for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
> +               if (((1UL << i) & mask) == 0) {
> +                       return mask | (1UL << i);
> +               }
> +       }
> +
> +       igt_assert(!"reached");
> +       return 0;
> +}
> +
> +static uint64_t mask_minus(uint64_t mask, int n)
> +{
> +       int i;
> +
> +       for (i = 0; i < n; i++)
> +               mask = mask_minus_one(mask);
> +
> +       return mask;
> +}
> +
> +static uint64_t mask_plus(uint64_t mask, int n)
> +{
> +       int i;
> +
> +       for (i = 0; i < n; i++)
> +               mask = mask_plus_one(mask);
> +
> +       return mask;
> +}
> +
> +static uint32_t *
> +fill_relocation(uint32_t *batch,
> +               struct drm_i915_gem_relocation_entry *reloc,
> +               uint32_t gem_handle, uint32_t delta, /* in bytes */
> +               uint32_t offset, /* in dwords */
> +               uint32_t read_domains, uint32_t write_domains)
> +{
> +       reloc->target_handle = gem_handle;
> +       reloc->delta = delta;
> +       reloc->offset = offset * sizeof(uint32_t);
> +       reloc->presumed_offset = 0;
> +       reloc->read_domains = read_domains;
> +       reloc->write_domain = write_domains;
> +
> +       *batch++ = delta;
> +       *batch++ = 0;
> +
> +       return batch;
> +}
> +
> +
> +static uint32_t
> +read_rpcs_reg(uint32_t context,
> +             uint32_t expected_slices)
> +{
> +       struct drm_i915_gem_execbuffer2 execbuf;
> +       struct drm_i915_gem_exec_object2 obj[2];
> +       struct drm_i915_gem_relocation_entry relocs[2];
> +       uint32_t *batch, *b, *data;
> +       uint32_t rpcs;
> +       int n_relocs = 0;
> +
> +       memset(obj, 0, sizeof(obj));
> +       obj[0].handle = gem_create(drm_fd, 4096);
> +       obj[1].handle = gem_create(drm_fd, 4096);
> +
> +       batch = b = gem_mmap__cpu(drm_fd, obj[1].handle, 0, 4096,
> +                                 PROT_READ | PROT_WRITE);
> +
> +       if (expected_slices != 0 && intel_gen(devid) < 10) {
> +               *b++ = MI_SET_PREDICATE | (1 - 1) |
> +                       (MI_SET_PREDICATE_1_SLICES + expected_slices - 1);
> +       }
> +
> +       *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
> +       *b++ = RCS_TIMESTAMP;
> +       b = fill_relocation(b, &relocs[n_relocs++], obj[0].handle,
> +                           0, b - batch,
> +                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
> +
> +       *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
> +       *b++ = GEN8_R_PWR_CLK_STATE;
> +       b = fill_relocation(b, &relocs[n_relocs++], obj[0].handle,
> +                           4, b - batch,
> +                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
> +
> +       if (expected_slices != 0 && intel_gen(devid) < 10)
> +               *b++ = MI_SET_PREDICATE | (1 - 1) | MI_SET_PREDICATE_NOOP_NEVER;
> +
> +       *b++ = MI_BATCH_BUFFER_END;
> +
> +       gem_munmap(batch, 4096);
> +
> +       obj[1].relocation_count = n_relocs;
> +       obj[1].relocs_ptr = to_user_pointer(relocs);
> +
> +       memset(&execbuf, 0, sizeof(execbuf));
> +       execbuf.buffers_ptr = to_user_pointer(obj);
> +       execbuf.buffer_count = ARRAY_SIZE(obj);
> +       i915_execbuffer2_set_context_id(execbuf, context);

Heh.

> +       data = gem_mmap__cpu(drm_fd, obj[0].handle, 0, 4096, PROT_READ | PROT_WRITE);
> +       memset(data, 0, 4096);

You know it's already zero.

> +       gem_munmap(data, 4096);
> +
> +       gem_execbuf(drm_fd, &execbuf);
> +       gem_sync(drm_fd, obj[0].handle);
> +
> +       data = gem_mmap__cpu(drm_fd, obj[0].handle, 0, 4096, PROT_READ);
> +
> +       rpcs = data[1];
> +

u32 data[2];
gem_read(drm_fd, obj[0].handle, 0, data, sizeof(data));

No need for the gem_sync, the mmap__cpu or the missing gem_set_domain.

> +       igt_debug("rcs_timestamp=0x%x rpcs=0x%x/0x%x\n", data[0], data[1], ((data[1] & GEN8_RPCS_S_CNT_MASK) >> GEN8_RPCS_S_CNT_SHIFT));
> +
> +       gem_munmap(data, 4096);
> +
> +       gem_close(drm_fd, obj[0].handle);
> +       gem_close(drm_fd, obj[1].handle);
> +
> +       return rpcs;
> +}
> +
> +static uint32_t
> +read_slice_count(uint32_t context,
> +                uint32_t expected_slice_count)
> +{
> +       return (read_rpcs_reg(context, expected_slice_count) & GEN8_RPCS_S_CNT_MASK)
> +               >> GEN8_RPCS_S_CNT_SHIFT;
> +}
> +
> +static uint32_t
> +read_subslice_count(uint32_t context)
> +{
> +       return (read_rpcs_reg(context, 0) & GEN8_RPCS_SS_CNT_MASK)
> +               >> GEN8_RPCS_SS_CNT_SHIFT;
> +}
> +
> +static bool
> +kernel_has_per_context_sseu_support(void)
> +{
> +       struct drm_i915_gem_context_param arg;
> +       struct drm_i915_gem_context_param_sseu sseu;
> +
> +       memset(&sseu, 0, sizeof(sseu));
> +       sseu.class = 0; /* rcs */
> +       sseu.instance = 0;
> +
> +       memset(&arg, 0, sizeof(arg));
> +       arg.ctx_id = 0; /* default context */
> +       arg.param = I915_CONTEXT_PARAM_SSEU;
> +       arg.value = (uintptr_t) &sseu;

arg.value = to_user_pointer(&sseu);

> +
> +       if (igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg))
> +               return false;

__gem_context_get_param

> +
> +       return true;
> +}
> +
> +static bool
> +platform_has_per_context_sseu_support(void)
> +{
> +       struct drm_i915_gem_context_param arg;
> +       struct drm_i915_gem_context_param_sseu sseu;
> +       int ret;
> +
> +       memset(&sseu, 0, sizeof(sseu));
> +       sseu.class = 0; /* rcs */
> +       sseu.instance = 0;
> +
> +       memset(&arg, 0, sizeof(arg));
> +       arg.ctx_id = 0; /* default context */
> +       arg.param = I915_CONTEXT_PARAM_SSEU;
> +       arg.value = (uintptr_t) &sseu;
> +
> +       ret = igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> +       igt_assert(ret == 0 || errno == EINVAL);
> +       if (ret)
> +               return false;
> +
> +       ret = igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
> +       igt_assert(ret == 0 || errno == ENODEV);
> +       if (ret)
> +               return false;
> +
> +       return true;
> +}
> +
> +static void
> +context_get_sseu_masks(uint32_t context,
> +                      uint32_t *slice_mask,
> +                      uint32_t *subslice_mask)
> +{
> +       struct drm_i915_gem_context_param arg;
> +       struct drm_i915_gem_context_param_sseu sseu;
> +
> +       memset(&sseu, 0, sizeof(sseu));
> +       sseu.class = 0; /* rcs */
> +       sseu.instance = 0;
> +
> +       memset(&arg, 0, sizeof(arg));
> +       arg.ctx_id = context;
> +       arg.param = I915_CONTEXT_PARAM_SSEU;
> +       arg.value = (uintptr_t) &sseu;
> +
> +       do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);

gem_context_get_param etc
-Chris
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 7+ messages in thread

* Re: [igt-dev] [PATCH i-g-t v6 2/2] tests: add slice power programming test
  2018-05-25 15:41   ` Chris Wilson
@ 2018-05-25 15:57     ` Lionel Landwerlin
  0 siblings, 0 replies; 7+ messages in thread
From: Lionel Landwerlin @ 2018-05-25 15:57 UTC (permalink / raw)
  To: Chris Wilson, igt-dev

Thanks, all applied.

On 25/05/18 16:41, Chris Wilson wrote:
> Quoting Lionel Landwerlin (2018-05-25 16:26:40)
>> Verifies that the kernel programs slices correctly based on reading
>> the value of the PWR_CLK_STATE register, or MI_SET_PREDICATE on platforms
>> before Cannonlake.
>>
>> v2: Add subslice tests (Lionel)
>>      Use MI_SET_PREDICATE for further verification when available (Lionel)
>>
>> v3: Rename to gem_ctx_rpcs (Lionel)
>>
>> v4: Update kernel API (Lionel)
>>      Add 0 value test (Lionel)
>>      Exercise invalid values (Lionel)
>>
>> v5: Add perf tests (Lionel)
>>
>> v6: Add new sysfs entry tests (Lionel)
>>
>> v7: Test rsvd fields
>>      Update for kernel series changes
>>
>> v8: Drop test_no_sseu_support() test (Kelvin)
>>      Drop drm_intel_*() apis (Chris)
>>
>> Signed-off-by: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>> ---
>>   tests/Makefile.sources |   1 +
>>   tests/gem_ctx_param.c  |   4 +-
>>   tests/gem_ctx_sseu.c   | 881 +++++++++++++++++++++++++++++++++++++++++
>>   tests/meson.build      |   1 +
>>   4 files changed, 886 insertions(+), 1 deletion(-)
>>   create mode 100644 tests/gem_ctx_sseu.c
>>
>> diff --git a/tests/Makefile.sources b/tests/Makefile.sources
>> index ad62611f..fd44b720 100644
>> --- a/tests/Makefile.sources
>> +++ b/tests/Makefile.sources
>> @@ -60,6 +60,7 @@ TESTS_progs = \
>>          gem_ctx_exec \
>>          gem_ctx_isolation \
>>          gem_ctx_param \
>> +       gem_ctx_sseu \
>>          gem_ctx_switch \
>>          gem_ctx_thrash \
>>          gem_double_irq_loop \
>> diff --git a/tests/gem_ctx_param.c b/tests/gem_ctx_param.c
>> index c46fd709..af1afeaa 100644
>> --- a/tests/gem_ctx_param.c
>> +++ b/tests/gem_ctx_param.c
>> @@ -294,11 +294,13 @@ igt_main
>>                          set_priority(fd);
>>          }
>>   
>> +       /* I915_CONTEXT_PARAM_SSEU tests are located in gem_ctx_sseu.c */
>> +
>>          /* NOTE: This testcase intentionally tests for the next free parameter
>>           * to catch ABI extensions. Don't "fix" this testcase without adding all
>>           * the tests for the new param first.
>>           */
>> -       arg.param = I915_CONTEXT_PARAM_PRIORITY + 1;
>> +       arg.param = I915_CONTEXT_PARAM_SSEU + 1;
>>   
>>          igt_subtest("invalid-param-get") {
>>                  arg.ctx_id = ctx;
>> diff --git a/tests/gem_ctx_sseu.c b/tests/gem_ctx_sseu.c
>> new file mode 100644
>> index 00000000..75095a6b
>> --- /dev/null
>> +++ b/tests/gem_ctx_sseu.c
>> @@ -0,0 +1,881 @@
>> +/*
>> + * Copyright © 2017 Intel Corporation
>> + *
>> + * Permission is hereby granted, free of charge, to any person obtaining a
>> + * copy of this software and associated documentation files (the "Software"),
>> + * to deal in the Software without restriction, including without limitation
>> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
>> + * and/or sell copies of the Software, and to permit persons to whom the
>> + * Software is furnished to do so, subject to the following conditions:
>> + *
>> + * The above copyright notice and this permission notice (including the next
>> + * paragraph) shall be included in all copies or substantial portions of the
>> + * Software.
>> + *
>> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
>> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
>> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
>> + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
>> + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
>> + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
>> + * IN THE SOFTWARE.
>> + *
>> + * Authors:
>> + *    Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>> + *
>> + */
>> +
>> +#include "igt.h"
>> +#include <stdio.h>
>> +#include <stdlib.h>
>> +#include <string.h>
>> +#include <unistd.h>
>> +#include <fcntl.h>
>> +#include <signal.h>
>> +#include <errno.h>
>> +#include <time.h>
>> +#include <sys/wait.h>
>> +
>> +#include "igt_sysfs.h"
>> +#include "ioctl_wrappers.h"
>> +
>> +IGT_TEST_DESCRIPTION("Test context render powergating programming.");
>> +
>> +#define MI_STORE_REGISTER_MEM (0x24 << 23)
>> +
>> +#define MI_SET_PREDICATE      (0x1 << 23)
>> +#define  MI_SET_PREDICATE_NOOP_NEVER (0)
>> +#define  MI_SET_PREDICATE_1_SLICES   (5)
>> +#define  MI_SET_PREDICATE_2_SLICES   (6)
>> +#define  MI_SET_PREDICATE_3_SLICES   (7)
>> +
>> +#define GEN8_R_PWR_CLK_STATE           0x20C8
>> +#define   GEN8_RPCS_ENABLE             (1 << 31)
>> +#define   GEN8_RPCS_S_CNT_ENABLE       (1 << 18)
>> +#define   GEN8_RPCS_S_CNT_SHIFT                15
>> +#define   GEN8_RPCS_S_CNT_MASK         (0x7 << GEN8_RPCS_S_CNT_SHIFT)
>> +#define   GEN8_RPCS_SS_CNT_ENABLE      (1 << 11)
>> +#define   GEN8_RPCS_SS_CNT_SHIFT       8
>> +#define   GEN8_RPCS_SS_CNT_MASK                (0x7 << GEN8_RPCS_SS_CNT_SHIFT)
>> +#define   GEN8_RPCS_EU_MAX_SHIFT       4
>> +#define   GEN8_RPCS_EU_MAX_MASK                (0xf << GEN8_RPCS_EU_MAX_SHIFT)
>> +#define   GEN8_RPCS_EU_MIN_SHIFT       0
>> +#define   GEN8_RPCS_EU_MIN_MASK                (0xf << GEN8_RPCS_EU_MIN_SHIFT)
>> +
>> +#define RCS_TIMESTAMP (0x2000 + 0x358)
>> +
>> +static int drm_fd;
>> +static int devid;
>> +static uint64_t device_slice_mask = 0;
>> +static uint64_t device_subslice_mask = 0;
>> +static uint32_t device_slice_count = 0;
>> +static uint32_t device_subslice_count = 0;
>> +
>> +static uint64_t mask_minus_one(uint64_t mask)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
>> +               if ((1UL << i) & mask) {
>> +                       return mask & ~(1UL << i);
>> +               }
>> +       }
>> +
>> +       igt_assert(!"reached");
>> +       return 0;
>> +}
>> +
>> +static uint64_t mask_plus_one(uint64_t mask)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < (sizeof(mask) * 8 - 1); i++) {
>> +               if (((1UL << i) & mask) == 0) {
>> +                       return mask | (1UL << i);
>> +               }
>> +       }
>> +
>> +       igt_assert(!"reached");
>> +       return 0;
>> +}
>> +
>> +static uint64_t mask_minus(uint64_t mask, int n)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < n; i++)
>> +               mask = mask_minus_one(mask);
>> +
>> +       return mask;
>> +}
>> +
>> +static uint64_t mask_plus(uint64_t mask, int n)
>> +{
>> +       int i;
>> +
>> +       for (i = 0; i < n; i++)
>> +               mask = mask_plus_one(mask);
>> +
>> +       return mask;
>> +}
>> +
>> +static uint32_t *
>> +fill_relocation(uint32_t *batch,
>> +               struct drm_i915_gem_relocation_entry *reloc,
>> +               uint32_t gem_handle, uint32_t delta, /* in bytes */
>> +               uint32_t offset, /* in dwords */
>> +               uint32_t read_domains, uint32_t write_domains)
>> +{
>> +       reloc->target_handle = gem_handle;
>> +       reloc->delta = delta;
>> +       reloc->offset = offset * sizeof(uint32_t);
>> +       reloc->presumed_offset = 0;
>> +       reloc->read_domains = read_domains;
>> +       reloc->write_domain = write_domains;
>> +
>> +       *batch++ = delta;
>> +       *batch++ = 0;
>> +
>> +       return batch;
>> +}
>> +
>> +
>> +static uint32_t
>> +read_rpcs_reg(uint32_t context,
>> +             uint32_t expected_slices)
>> +{
>> +       struct drm_i915_gem_execbuffer2 execbuf;
>> +       struct drm_i915_gem_exec_object2 obj[2];
>> +       struct drm_i915_gem_relocation_entry relocs[2];
>> +       uint32_t *batch, *b, *data;
>> +       uint32_t rpcs;
>> +       int n_relocs = 0;
>> +
>> +       memset(obj, 0, sizeof(obj));
>> +       obj[0].handle = gem_create(drm_fd, 4096);
>> +       obj[1].handle = gem_create(drm_fd, 4096);
>> +
>> +       batch = b = gem_mmap__cpu(drm_fd, obj[1].handle, 0, 4096,
>> +                                 PROT_READ | PROT_WRITE);
>> +
>> +       if (expected_slices != 0 && intel_gen(devid) < 10) {
>> +               *b++ = MI_SET_PREDICATE | (1 - 1) |
>> +                       (MI_SET_PREDICATE_1_SLICES + expected_slices - 1);
>> +       }
>> +
>> +       *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
>> +       *b++ = RCS_TIMESTAMP;
>> +       b = fill_relocation(b, &relocs[n_relocs++], obj[0].handle,
>> +                           0, b - batch,
>> +                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
>> +
>> +       *b++ = MI_STORE_REGISTER_MEM | (4 - 2);
>> +       *b++ = GEN8_R_PWR_CLK_STATE;
>> +       b = fill_relocation(b, &relocs[n_relocs++], obj[0].handle,
>> +                           4, b - batch,
>> +                           I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
>> +
>> +       if (expected_slices != 0 && intel_gen(devid) < 10)
>> +               *b++ = MI_SET_PREDICATE | (1 - 1) | MI_SET_PREDICATE_NOOP_NEVER;
>> +
>> +       *b++ = MI_BATCH_BUFFER_END;
>> +
>> +       gem_munmap(batch, 4096);
>> +
>> +       obj[1].relocation_count = n_relocs;
>> +       obj[1].relocs_ptr = to_user_pointer(relocs);
>> +
>> +       memset(&execbuf, 0, sizeof(execbuf));
>> +       execbuf.buffers_ptr = to_user_pointer(obj);
>> +       execbuf.buffer_count = ARRAY_SIZE(obj);
>> +       i915_execbuffer2_set_context_id(execbuf, context);
> Heh.
>
>> +       data = gem_mmap__cpu(drm_fd, obj[0].handle, 0, 4096, PROT_READ | PROT_WRITE);
>> +       memset(data, 0, 4096);
> You know it's already zero.
>
>> +       gem_munmap(data, 4096);
>> +
>> +       gem_execbuf(drm_fd, &execbuf);
>> +       gem_sync(drm_fd, obj[0].handle);
>> +
>> +       data = gem_mmap__cpu(drm_fd, obj[0].handle, 0, 4096, PROT_READ);
>> +
>> +       rpcs = data[1];
>> +
> u32 data[2];
> gem_read(drm_fd, obj[0].handle, 0, data, sizeof(data));
>
> No need for the gem_sync, the mmap__cpu or the missing gem_set_domain.
>
>> +       igt_debug("rcs_timestamp=0x%x rpcs=0x%x/0x%x\n", data[0], data[1], ((data[1] & GEN8_RPCS_S_CNT_MASK) >> GEN8_RPCS_S_CNT_SHIFT));
>> +
>> +       gem_munmap(data, 4096);
>> +
>> +       gem_close(drm_fd, obj[0].handle);
>> +       gem_close(drm_fd, obj[1].handle);
>> +
>> +       return rpcs;
>> +}
>> +
>> +static uint32_t
>> +read_slice_count(uint32_t context,
>> +                uint32_t expected_slice_count)
>> +{
>> +       return (read_rpcs_reg(context, expected_slice_count) & GEN8_RPCS_S_CNT_MASK)
>> +               >> GEN8_RPCS_S_CNT_SHIFT;
>> +}
>> +
>> +static uint32_t
>> +read_subslice_count(uint32_t context)
>> +{
>> +       return (read_rpcs_reg(context, 0) & GEN8_RPCS_SS_CNT_MASK)
>> +               >> GEN8_RPCS_SS_CNT_SHIFT;
>> +}
>> +
>> +static bool
>> +kernel_has_per_context_sseu_support(void)
>> +{
>> +       struct drm_i915_gem_context_param arg;
>> +       struct drm_i915_gem_context_param_sseu sseu;
>> +
>> +       memset(&sseu, 0, sizeof(sseu));
>> +       sseu.class = 0; /* rcs */
>> +       sseu.instance = 0;
>> +
>> +       memset(&arg, 0, sizeof(arg));
>> +       arg.ctx_id = 0; /* default context */
>> +       arg.param = I915_CONTEXT_PARAM_SSEU;
>> +       arg.value = (uintptr_t) &sseu;
> arg.value = to_user_pointer(&sseu);
>
>> +
>> +       if (igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg))
>> +               return false;
> __gem_context_get_param
>
>> +
>> +       return true;
>> +}
>> +
>> +static bool
>> +platform_has_per_context_sseu_support(void)
>> +{
>> +       struct drm_i915_gem_context_param arg;
>> +       struct drm_i915_gem_context_param_sseu sseu;
>> +       int ret;
>> +
>> +       memset(&sseu, 0, sizeof(sseu));
>> +       sseu.class = 0; /* rcs */
>> +       sseu.instance = 0;
>> +
>> +       memset(&arg, 0, sizeof(arg));
>> +       arg.ctx_id = 0; /* default context */
>> +       arg.param = I915_CONTEXT_PARAM_SSEU;
>> +       arg.value = (uintptr_t) &sseu;
>> +
>> +       ret = igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
>> +       igt_assert(ret == 0 || errno == EINVAL);
>> +       if (ret)
>> +               return false;
>> +
>> +       ret = igt_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &arg);
>> +       igt_assert(ret == 0 || errno == ENODEV);
>> +       if (ret)
>> +               return false;
>> +
>> +       return true;
>> +}
>> +
>> +static void
>> +context_get_sseu_masks(uint32_t context,
>> +                      uint32_t *slice_mask,
>> +                      uint32_t *subslice_mask)
>> +{
>> +       struct drm_i915_gem_context_param arg;
>> +       struct drm_i915_gem_context_param_sseu sseu;
>> +
>> +       memset(&sseu, 0, sizeof(sseu));
>> +       sseu.class = 0; /* rcs */
>> +       sseu.instance = 0;
>> +
>> +       memset(&arg, 0, sizeof(arg));
>> +       arg.ctx_id = context;
>> +       arg.param = I915_CONTEXT_PARAM_SSEU;
>> +       arg.value = (uintptr_t) &sseu;
>> +
>> +       do_ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_GETPARAM, &arg);
> gem_context_get_param etc
> -Chris
>

_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [igt-dev] ✓ Fi.CI.BAT: success for intel: per context powergating tests & benchmark (rev6)
  2018-05-25 15:26 [igt-dev] [PATCH i-g-t v6 0/2] intel: per context powergating tests & benchmark Lionel Landwerlin
  2018-05-25 15:26 ` [igt-dev] [PATCH i-g-t v6 1/2] headers: bump Lionel Landwerlin
  2018-05-25 15:26 ` [igt-dev] [PATCH i-g-t v6 2/2] tests: add slice power programming test Lionel Landwerlin
@ 2018-05-25 17:01 ` Patchwork
  2018-05-26  2:18 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
  3 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2018-05-25 17:01 UTC (permalink / raw)
  To: Lionel Landwerlin; +Cc: igt-dev

== Series Details ==

Series: intel: per context powergating tests & benchmark (rev6)
URL   : https://patchwork.freedesktop.org/series/42286/
State : success

== Summary ==

= CI Bug Log - changes from CI_DRM_4238 -> IGTPW_1403 =

== Summary - SUCCESS ==

  No regressions found.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/42286/revisions/6/mbox/

== Known issues ==

  Here are the changes found in IGTPW_1403 that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-c:
      fi-bxt-dsi:         PASS -> INCOMPLETE (fdo#103927)

    
    ==== Possible fixes ====

    igt@kms_pipe_crc_basic@suspend-read-crc-pipe-b:
      fi-snb-2520m:       INCOMPLETE (fdo#103713) -> PASS

    
  fdo#103713 https://bugs.freedesktop.org/show_bug.cgi?id=103713
  fdo#103927 https://bugs.freedesktop.org/show_bug.cgi?id=103927


== Participating hosts (44 -> 38) ==

  Missing    (6): fi-ilk-m540 fi-cnl-y3 fi-byt-squawks fi-bsw-cyan fi-ctg-p8600 fi-skl-6700hq 


== Build changes ==

    * IGT: IGT_4498 -> IGTPW_1403

  CI_DRM_4238: 2771a5e6347eb63e43fdfc432a9f15ffb55ef209 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_1403: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1403/
  IGT_4498: f9ecb79ad8b02278cfdb5b82495df47061c04f8f @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools



== Testlist changes ==

+igt@gem_ctx_sseu@default-dynamic-sseu-disallowed
+igt@gem_ctx_sseu@dynamic-sseu-allow
+igt@gem_ctx_sseu@dynamic-sseu-disallow
+igt@gem_ctx_sseu@sseu-1-pg-slice-program-rcs
+igt@gem_ctx_sseu@sseu-1-pg-subslice-program-rcs
+igt@gem_ctx_sseu@sseu-2-pg-slice-program-rcs
+igt@gem_ctx_sseu@sseu-2-pg-subslice-program-rcs
+igt@gem_ctx_sseu@sseu-invalid-engine
+igt@gem_ctx_sseu@sseu-invalid-values
+igt@gem_ctx_sseu@sseu-perf

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1403/issues.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 7+ messages in thread

* [igt-dev] ✗ Fi.CI.IGT: failure for intel: per context powergating tests & benchmark (rev6)
  2018-05-25 15:26 [igt-dev] [PATCH i-g-t v6 0/2] intel: per context powergating tests & benchmark Lionel Landwerlin
                   ` (2 preceding siblings ...)
  2018-05-25 17:01 ` [igt-dev] ✓ Fi.CI.BAT: success for intel: per context powergating tests & benchmark (rev6) Patchwork
@ 2018-05-26  2:18 ` Patchwork
  3 siblings, 0 replies; 7+ messages in thread
From: Patchwork @ 2018-05-26  2:18 UTC (permalink / raw)
  To: Lionel Landwerlin; +Cc: igt-dev

== Series Details ==

Series: intel: per context powergating tests & benchmark (rev6)
URL   : https://patchwork.freedesktop.org/series/42286/
State : failure

== Summary ==

= CI Bug Log - changes from IGT_4498_full -> IGTPW_1403_full =

== Summary - FAILURE ==

  Serious unknown changes introduced with IGTPW_1403_full absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in IGTPW_1403_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://patchwork.freedesktop.org/api/1.0/series/42286/revisions/6/mbox/

== Possible new issues ==

  Here are the unknown changes that may have been introduced in IGTPW_1403_full:

  === IGT changes ===

    ==== Possible regressions ====

    igt@drv_missed_irq:
      shard-glk:          PASS -> FAIL

    
    ==== Warnings ====

    igt@gem_mocs_settings@mocs-rc6-vebox:
      shard-kbl:          SKIP -> PASS +1

    igt@kms_chv_cursor_fail@pipe-a-256x256-bottom-edge:
      shard-snb:          SKIP -> PASS

    
== Known issues ==

  Here are the changes found in IGTPW_1403_full that come from known issues:

  === IGT changes ===

    ==== Issues hit ====

    igt@kms_cursor_legacy@2x-long-flip-vs-cursor-atomic:
      shard-glk:          PASS -> FAIL (fdo#104873)

    igt@kms_cursor_legacy@2x-nonblocking-modeset-vs-cursor-atomic:
      shard-glk:          PASS -> FAIL (fdo#105454, fdo#106509)

    igt@kms_flip@2x-dpms-vs-vblank-race:
      shard-glk:          PASS -> FAIL (fdo#103060)

    igt@kms_flip@2x-plain-flip-fb-recreate-interruptible:
      shard-hsw:          PASS -> FAIL (fdo#100368)

    igt@kms_flip_tiling@flip-x-tiled:
      shard-glk:          PASS -> FAIL (fdo#103822, fdo#104724) +1

    igt@kms_rotation_crc@primary-rotation-180:
      shard-snb:          PASS -> FAIL (fdo#104724, fdo#103925)

    
    ==== Possible fixes ====

    igt@drv_selftest@live_gtt:
      shard-kbl:          INCOMPLETE (fdo#103665) -> PASS +1

    igt@gem_ppgtt@blt-vs-render-ctx0:
      shard-kbl:          INCOMPLETE (fdo#106023, fdo#103665) -> PASS

    igt@kms_atomic_transition@1x-modeset-transitions-nonblocking-fencing:
      shard-glk:          FAIL (fdo#105703) -> PASS

    {igt@kms_available_modes_crc@available_mode_test_crc}:
      shard-apl:          FAIL (fdo#106641) -> PASS

    igt@kms_flip@2x-plain-flip-fb-recreate:
      shard-hsw:          FAIL (fdo#103928) -> PASS

    igt@kms_flip@flip-vs-expired-vblank:
      shard-glk:          FAIL (fdo#102887) -> PASS

    igt@kms_flip@flip-vs-expired-vblank-interruptible:
      shard-glk:          FAIL (fdo#102887, fdo#105363) -> PASS

    igt@kms_flip@plain-flip-fb-recreate:
      shard-glk:          FAIL (fdo#100368) -> PASS

    igt@kms_flip_tiling@flip-to-x-tiled:
      shard-glk:          FAIL (fdo#103822, fdo#104724) -> PASS

    igt@kms_plane_multiple@atomic-pipe-a-tiling-x:
      shard-snb:          FAIL (fdo#104724, fdo#103166) -> PASS

    
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  fdo#100368 https://bugs.freedesktop.org/show_bug.cgi?id=100368
  fdo#102887 https://bugs.freedesktop.org/show_bug.cgi?id=102887
  fdo#103060 https://bugs.freedesktop.org/show_bug.cgi?id=103060
  fdo#103166 https://bugs.freedesktop.org/show_bug.cgi?id=103166
  fdo#103665 https://bugs.freedesktop.org/show_bug.cgi?id=103665
  fdo#103822 https://bugs.freedesktop.org/show_bug.cgi?id=103822
  fdo#103925 https://bugs.freedesktop.org/show_bug.cgi?id=103925
  fdo#103928 https://bugs.freedesktop.org/show_bug.cgi?id=103928
  fdo#104724 https://bugs.freedesktop.org/show_bug.cgi?id=104724
  fdo#104873 https://bugs.freedesktop.org/show_bug.cgi?id=104873
  fdo#105363 https://bugs.freedesktop.org/show_bug.cgi?id=105363
  fdo#105454 https://bugs.freedesktop.org/show_bug.cgi?id=105454
  fdo#105703 https://bugs.freedesktop.org/show_bug.cgi?id=105703
  fdo#106023 https://bugs.freedesktop.org/show_bug.cgi?id=106023
  fdo#106509 https://bugs.freedesktop.org/show_bug.cgi?id=106509
  fdo#106641 https://bugs.freedesktop.org/show_bug.cgi?id=106641


== Participating hosts (5 -> 5) ==

  No changes in participating hosts


== Build changes ==

    * IGT: IGT_4498 -> IGTPW_1403
    * Linux: CI_DRM_4227 -> CI_DRM_4238

  CI_DRM_4227: a8727d3fe03770e4d523468dfbc487dfe01597d3 @ git://anongit.freedesktop.org/gfx-ci/linux
  CI_DRM_4238: 2771a5e6347eb63e43fdfc432a9f15ffb55ef209 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGTPW_1403: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1403/
  IGT_4498: f9ecb79ad8b02278cfdb5b82495df47061c04f8f @ git://anongit.freedesktop.org/xorg/app/intel-gpu-tools

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/IGTPW_1403/shards.html
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

^ permalink raw reply	[flat|nested] 7+ messages in thread

end of thread, other threads:[~2018-05-26  2:18 UTC | newest]

Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-05-25 15:26 [igt-dev] [PATCH i-g-t v6 0/2] intel: per context powergating tests & benchmark Lionel Landwerlin
2018-05-25 15:26 ` [igt-dev] [PATCH i-g-t v6 1/2] headers: bump Lionel Landwerlin
2018-05-25 15:26 ` [igt-dev] [PATCH i-g-t v6 2/2] tests: add slice power programming test Lionel Landwerlin
2018-05-25 15:41   ` Chris Wilson
2018-05-25 15:57     ` Lionel Landwerlin
2018-05-25 17:01 ` [igt-dev] ✓ Fi.CI.BAT: success for intel: per context powergating tests & benchmark (rev6) Patchwork
2018-05-26  2:18 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.