* [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-04-20 17:13 ` Matthew Auld
  0 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-04-20 17:13 UTC (permalink / raw)
  To: intel-gfx
  Cc: Thomas Hellström, Jordan Justen, Lionel Landwerlin,
	Kenneth Graunke, Jon Bloomfield, dri-devel, Daniel Vetter,
	mesa-dev, Akeem G Abodunrin

Add an entry for the new uapi needed for small BAR on DG2+.

v2:
  - Some spelling fixes and other small tweaks. (Akeem & Thomas)
  - Rework error capture interactions, including no longer needing
    NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
  - Add probed_cpu_visible_size. (Lionel)

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Jordan Justen <jordan.l.justen@intel.com>
Cc: Kenneth Graunke <kenneth@whitecape.org>
Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
Cc: mesa-dev@lists.freedesktop.org
---
 Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
 Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
 Documentation/gpu/rfc/index.rst          |   4 +
 3 files changed, 252 insertions(+)
 create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
 create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst

diff --git a/Documentation/gpu/rfc/i915_small_bar.h b/Documentation/gpu/rfc/i915_small_bar.h
new file mode 100644
index 000000000000..7bfd0cf44d35
--- /dev/null
+++ b/Documentation/gpu/rfc/i915_small_bar.h
@@ -0,0 +1,190 @@
+/**
+ * struct __drm_i915_memory_region_info - Describes one region as known to the
+ * driver.
+ *
+ * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
+ * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
+ * at &drm_i915_query_item.query_id.
+ */
+struct __drm_i915_memory_region_info {
+	/** @region: The class:instance pair encoding */
+	struct drm_i915_gem_memory_class_instance region;
+
+	/** @rsvd0: MBZ */
+	__u32 rsvd0;
+
+	/** @probed_size: Memory probed by the driver (-1 = unknown) */
+	__u64 probed_size;
+
+	/** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
+	__u64 unallocated_size;
+
+	union {
+		/** @rsvd1: MBZ */
+		__u64 rsvd1[8];
+		struct {
+			/**
+			 * @probed_cpu_visible_size: Memory probed by the driver
+			 * that is CPU accessible. (-1 = unknown).
+			 *
+			 * This will always be <= @probed_size, and the
+			 * remainder (if there is any) will not be CPU
+			 * accessible.
+			 */
+			__u64 probed_cpu_visible_size;
+		};
+	};
+};
+
+/**
+ * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, with added
+ * extension support using struct i915_user_extension.
+ *
+ * Note that new buffer flags should be added here, at least for the stuff that
+ * is immutable. Previously we would have two ioctls: one to create the object
+ * with gem_create, and another to apply various parameters. However, that
+ * creates some ambiguity for the params which are considered immutable. Also in
+ * general we're phasing out the various SET/GET ioctls.
+ */
+struct __drm_i915_gem_create_ext {
+	/**
+	 * @size: Requested size for the object.
+	 *
+	 * The (page-aligned) allocated size for the object will be returned.
+	 *
+	 * Note that for some devices we might have further minimum
+	 * page-size restrictions (larger than 4K), like for device local-memory.
+	 * However in general the final size here should always reflect any
+	 * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
+	 * extension to place the object in device local-memory.
+	 */
+	__u64 size;
+	/**
+	 * @handle: Returned handle for the object.
+	 *
+	 * Object handles are nonzero.
+	 */
+	__u32 handle;
+	/**
+	 * @flags: Optional flags.
+	 *
+	 * Supported values:
+	 *
+	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
+	 * the object will need to be accessed via the CPU.
+	 *
+	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
+	 * only strictly required on platforms where only some of the device
+	 * memory is directly visible or mappable through the CPU, like on DG2+.
+	 *
+	 * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
+	 * ensure we can always spill the allocation to system memory, if we
+	 * can't place the object in the mappable part of
+	 * I915_MEMORY_CLASS_DEVICE.
+	 *
+	 * Note that since the kernel only supports flat-CCS on objects that can
+	 * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
+	 * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
+	 * flat-CCS.
+	 *
+	 * Without this hint, the kernel will assume that non-mappable
+	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
+	 * kernel can still migrate the object to the mappable part, as a last
+	 * resort, if userspace ever CPU faults this object, but this might be
+	 * expensive, and so ideally should be avoided.
+	 */
+#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
+	__u32 flags;
+	/**
+	 * @extensions: The chain of extensions to apply to this object.
+	 *
+	 * This will be useful in the future when we need to support several
+	 * different extensions, and we need to apply more than one when
+	 * creating the object. See struct i915_user_extension.
+	 *
+	 * If we don't supply any extensions then we get the same old gem_create
+	 * behaviour.
+	 *
+	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
+	 * struct drm_i915_gem_create_ext_memory_regions.
+	 *
+	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
+	 * struct drm_i915_gem_create_ext_protected_content.
+	 */
+#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
+#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
+	__u64 extensions;
+};
+
+#define DRM_I915_QUERY_VMA_INFO	5
+
+/**
+ * struct __drm_i915_query_vma_info
+ *
+ * Given a vm and GTT address, look up the corresponding vma, returning its set
+ * of attributes.
+ *
+ * .. code-block:: C
+ *
+ *	struct drm_i915_query_vma_info info = {};
+ *	struct drm_i915_query_item item = {
+ *		.data_ptr = (uintptr_t)&info,
+ *		.query_id = DRM_I915_QUERY_VMA_INFO,
+ *	};
+ *	struct drm_i915_query query = {
+ *		.num_items = 1,
+ *		.items_ptr = (uintptr_t)&item,
+ *	};
+ *	int err;
+ *
+ *	// Unlike some other types of queries, there is no need to first query
+ *	// the size of the data_ptr blob here, since we already know ahead of
+ *	// time how big this needs to be.
+ *	item.length = sizeof(info);
+ *
+ *	// Next we fill in the vm_id and ppGTT address of the vma we wish
+ *	// to query, before then firing off the query.
+ *	info.vm_id = vm_id;
+ *	info.offset = gtt_address;
+ *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+ *	if (err || item.length < 0) ...
+ *
+ *	// If all went well we can now inspect the returned attributes.
+ *	if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
+ */
+struct __drm_i915_query_vma_info {
+	/**
+	 * @vm_id: The given vm id that contains the vma. The id is the value
+	 * returned by the DRM_I915_GEM_VM_CREATE ioctl. See struct
+	 * drm_i915_gem_vm_control.vm_id.
+	 */
+	__u32 vm_id;
+	/** @pad: MBZ. */
+	__u32 pad;
+	/**
+	 * @offset: The corresponding ppGTT address of the vma which the kernel
+	 * will use to perform the lookup.
+	 */
+	__u64 offset;
+	/**
+	 * @attributes: The returned attributes for the given vma.
+	 *
+	 * Possible values:
+	 *
+	 * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing the
+	 * vma are currently CPU accessible. If this is not set then the vma is
+	 * currently backed by I915_MEMORY_CLASS_DEVICE memory, which the CPU
+	 * cannot directly access (this is only possible on discrete devices with
+	 * a small BAR). Attempting to MMAP and fault such an object will
+	 * require the kernel to first synchronise any GPU work tied to the
+	 * object, and then migrate the pages, either to the CPU accessible
+	 * part of I915_MEMORY_CLASS_DEVICE, or I915_MEMORY_CLASS_SYSTEM, if the
+	 * placements permit it. See I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
+	 *
+	 * Note that this is inherently racy.
+	 */
+#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
+	__u64 attributes;
+	/** @rsvd: MBZ */
+	__u32 rsvd[4];
+};
diff --git a/Documentation/gpu/rfc/i915_small_bar.rst b/Documentation/gpu/rfc/i915_small_bar.rst
new file mode 100644
index 000000000000..be3d9bcdd86d
--- /dev/null
+++ b/Documentation/gpu/rfc/i915_small_bar.rst
@@ -0,0 +1,58 @@
+==========================
+I915 Small BAR RFC Section
+==========================
+Starting from DG2 we will have resizable BAR support for device local-memory
+(i.e. I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size might
+still be smaller than the total probed_size. In such cases, only some subset of
+I915_MEMORY_CLASS_DEVICE will be CPU accessible (for example the first 256M),
+while the remainder is only accessible via the GPU.
+
+I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
+----------------------------------------------
+New gem_create_ext flag to tell the kernel that a BO will require CPU access.
+This becomes important when placing an object in I915_MEMORY_CLASS_DEVICE, where
+the underlying device has a small BAR, meaning only some portion of it is CPU
+accessible. Without this flag the kernel will assume that CPU access is not
+required, and prioritize using the non-CPU visible portion of
+I915_MEMORY_CLASS_DEVICE.
+
+.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
+   :functions: __drm_i915_gem_create_ext
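+
+As a rough illustration only (not itself part of the proposed uapi), userspace
+might combine the new flag with the existing create_ext machinery roughly as
+below. The struct and ioctl names are taken from the current i915_drm.h, and
+error handling is omitted:
+
+.. code-block:: C
+
+	/* Sketch only: assumes the existing create_ext uapi plus the new flag. */
+	struct drm_i915_gem_memory_class_instance placements[] = {
+		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
+		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
+	};
+	struct drm_i915_gem_create_ext_memory_regions regions = {
+		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
+		.num_regions = 2,
+		.regions = (uintptr_t)placements,
+	};
+	struct drm_i915_gem_create_ext create = {
+		.size = bo_size,
+		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
+		.extensions = (uintptr_t)&regions,
+	};
+	int err;
+
+	/*
+	 * I915_MEMORY_CLASS_SYSTEM is included in the placements, as required
+	 * when asking for guaranteed CPU access.
+	 */
+	err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
+	if (err) ...
+
+	/* create.handle can now always be mmapped and CPU faulted. */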
+
+probed_cpu_visible_size attribute
+---------------------------------
+New struct __drm_i915_memory_region_info attribute which returns the total size
+of the CPU accessible portion for the particular region. This should only be
+applicable for I915_MEMORY_CLASS_DEVICE.
+
+Vulkan will need this as part of creating a separate VkMemoryHeap with the
+VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU visible portion,
+where the total size of the heap needs to be known.
+
+.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
+   :functions: __drm_i915_memory_region_info
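+
+A rough sketch of reading the new attribute, using the existing two-pass
+DRM_I915_QUERY_MEMORY_REGIONS query. The query structs below already exist in
+i915_drm.h; only the probed_cpu_visible_size field is new here:
+
+.. code-block:: C
+
+	struct drm_i915_query_item item = {
+		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
+	};
+	struct drm_i915_query query = {
+		.num_items = 1,
+		.items_ptr = (uintptr_t)&item,
+	};
+	struct drm_i915_query_memory_regions *info;
+	__u64 host_visible = 0;
+	int err, i;
+
+	/* First pass: ask the kernel how large the blob needs to be. */
+	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+	if (err || item.length <= 0) ...
+
+	info = malloc(item.length);
+	item.data_ptr = (uintptr_t)info;
+
+	/* Second pass: fetch the regions themselves. */
+	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
+	if (err || item.length <= 0) ...
+
+	/* Sketch only: size a VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT heap. */
+	for (i = 0; i < info->num_regions; i++) {
+		if (info->regions[i].region.memory_class == I915_MEMORY_CLASS_DEVICE)
+			host_visible = info->regions[i].probed_cpu_visible_size;
+	}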
+
+DRM_I915_QUERY_VMA_INFO query
+-----------------------------
+Query the attributes of some vma. Given a vm and GTT offset, find the
+respective vma, and return its set of attributes. For now we only support
+DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
+currently placed in memory that is accessible by the CPU. This should always be
+set on devices where the probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE
+matches the probed_size. If this is not set then CPU faulting the object will
+likely first require migrating the pages.
+
+.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
+   :functions: __drm_i915_query_vma_info
+
+Error Capture restrictions
+--------------------------
+With error capture we have two new restrictions:
+
+    1) Error capture is best effort on small BAR systems; if the pages are not
+    CPU accessible at the time of capture, then the kernel is free to skip
+    trying to capture them.
+
+    2) On discrete we now reject error capture on recoverable contexts. In the
+    future the kernel may want to blit during error capture, when for example
+    something is not currently CPU accessible. A sketch of the uapi involved is
+    shown below.
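+
+For reference, a minimal sketch of the uapi this interacts with. Both
+I915_CONTEXT_PARAM_RECOVERABLE and EXEC_OBJECT_CAPTURE already exist today;
+this only illustrates how they are expected to combine under the new rules:
+
+.. code-block:: C
+
+	/* Sketch only: mark the context non-recoverable so capture is allowed. */
+	struct drm_i915_gem_context_param param = {
+		.ctx_id = ctx_id,
+		.param = I915_CONTEXT_PARAM_RECOVERABLE,
+		.value = 0,
+	};
+	/* Ask for this BO to be included in any error capture. */
+	struct drm_i915_gem_exec_object2 obj = {
+		.handle = handle,
+		.flags = EXEC_OBJECT_CAPTURE,
+	};
+	int err;
+
+	err = ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &param);
+	if (err) ...
+
+	/* obj is then passed in the buffers_ptr array of the execbuf ioctl. */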
diff --git a/Documentation/gpu/rfc/index.rst b/Documentation/gpu/rfc/index.rst
index 91e93a705230..5a3bd3924ba6 100644
--- a/Documentation/gpu/rfc/index.rst
+++ b/Documentation/gpu/rfc/index.rst
@@ -23,3 +23,7 @@ host such documentation:
 .. toctree::
 
     i915_scheduler.rst
+
+.. toctree::
+
+    i915_small_bar.rst
-- 
2.34.1


* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for drm/doc: add rfc section for small BAR uapi
  2022-04-20 17:13 ` [Intel-gfx] " Matthew Auld
  (?)
@ 2022-04-20 20:47 ` Patchwork
  -1 siblings, 0 replies; 50+ messages in thread
From: Patchwork @ 2022-04-20 20:47 UTC (permalink / raw)
  To: Matthew Auld; +Cc: intel-gfx

== Series Details ==

Series: drm/doc: add rfc section for small BAR uapi
URL   : https://patchwork.freedesktop.org/series/102875/
State : warning

== Summary ==

Error: dim checkpatch failed
778440992ee2 drm/doc: add rfc section for small BAR uapi
-:28: WARNING:FILE_PATH_CHANGES: added, moved or deleted file(s), does MAINTAINERS need updating?
#28: 
new file mode 100644

-:33: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#33: FILE: Documentation/gpu/rfc/i915_small_bar.h:1:
+/**

-:218: CHECK:SPACING: spaces preferred around that '<<' (ctx:VxV)
#218: FILE: Documentation/gpu/rfc/i915_small_bar.h:186:
+#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
                                               ^

-:229: WARNING:SPDX_LICENSE_TAG: Missing or malformed SPDX-License-Identifier tag in line 1
#229: FILE: Documentation/gpu/rfc/i915_small_bar.rst:1:
+==========================

total: 0 errors, 3 warnings, 1 checks, 255 lines checked




* [Intel-gfx] ✗ Fi.CI.BAT: failure for drm/doc: add rfc section for small BAR uapi
  2022-04-20 17:13 ` [Intel-gfx] " Matthew Auld
  (?)
  (?)
@ 2022-04-20 21:14 ` Patchwork
  -1 siblings, 0 replies; 50+ messages in thread
From: Patchwork @ 2022-04-20 21:14 UTC (permalink / raw)
  To: Matthew Auld; +Cc: intel-gfx


== Series Details ==

Series: drm/doc: add rfc section for small BAR uapi
URL   : https://patchwork.freedesktop.org/series/102875/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_11528 -> Patchwork_102875v1
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes coming with Patchwork_102875v1 absolutely need to be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_102875v1, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/index.html

Participating hosts (46 -> 46)
------------------------------

  Additional (2): fi-cml-u2 fi-icl-u2 
  Missing    (2): fi-bsw-cyan bat-jsl-1 

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_102875v1:

### IGT changes ###

#### Possible regressions ####

  * igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a:
    - fi-cml-u2:          NOTRUN -> [INCOMPLETE][1]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-cml-u2/igt@kms_pipe_crc_basic@suspend-read-crc-pipe-a.html

  
Known issues
------------

  Here are the changes found in Patchwork_102875v1 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@gem_exec_fence@basic-busy@bcs0:
    - fi-cml-u2:          NOTRUN -> [SKIP][2] ([i915#1208]) +1 similar issue
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-cml-u2/igt@gem_exec_fence@basic-busy@bcs0.html

  * igt@gem_huc_copy@huc-copy:
    - fi-cml-u2:          NOTRUN -> [SKIP][3] ([i915#2190])
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-cml-u2/igt@gem_huc_copy@huc-copy.html
    - fi-rkl-11600:       NOTRUN -> [SKIP][4] ([i915#2190])
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@gem_huc_copy@huc-copy.html
    - fi-icl-u2:          NOTRUN -> [SKIP][5] ([i915#2190])
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-icl-u2/igt@gem_huc_copy@huc-copy.html

  * igt@gem_lmem_swapping@basic:
    - fi-rkl-11600:       NOTRUN -> [SKIP][6] ([i915#4613]) +3 similar issues
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@gem_lmem_swapping@basic.html

  * igt@gem_lmem_swapping@parallel-random-engines:
    - fi-icl-u2:          NOTRUN -> [SKIP][7] ([i915#4613]) +3 similar issues
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-icl-u2/igt@gem_lmem_swapping@parallel-random-engines.html

  * igt@gem_tiled_pread_basic:
    - fi-rkl-11600:       NOTRUN -> [SKIP][8] ([i915#3282])
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@gem_tiled_pread_basic.html

  * igt@i915_module_load@reload:
    - fi-kbl-soraka:      [PASS][9] -> [DMESG-WARN][10] ([i915#1982])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11528/fi-kbl-soraka/igt@i915_module_load@reload.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-kbl-soraka/igt@i915_module_load@reload.html

  * igt@i915_pm_backlight@basic-brightness:
    - fi-rkl-11600:       NOTRUN -> [SKIP][11] ([i915#3012])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@i915_pm_backlight@basic-brightness.html

  * igt@i915_selftest@live@hangcheck:
    - bat-dg1-6:          [PASS][12] -> [DMESG-FAIL][13] ([i915#4494] / [i915#4957])
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11528/bat-dg1-6/igt@i915_selftest@live@hangcheck.html
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/bat-dg1-6/igt@i915_selftest@live@hangcheck.html

  * igt@kms_chamelium@dp-crc-fast:
    - fi-rkl-11600:       NOTRUN -> [SKIP][14] ([fdo#111827]) +8 similar issues
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@kms_chamelium@dp-crc-fast.html

  * igt@kms_chamelium@dp-hpd-fast:
    - fi-cml-u2:          NOTRUN -> [SKIP][15] ([fdo#109284] / [fdo#111827]) +8 similar issues
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-cml-u2/igt@kms_chamelium@dp-hpd-fast.html

  * igt@kms_chamelium@hdmi-hpd-fast:
    - fi-icl-u2:          NOTRUN -> [SKIP][16] ([fdo#111827]) +8 similar issues
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-icl-u2/igt@kms_chamelium@hdmi-hpd-fast.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic:
    - fi-rkl-11600:       NOTRUN -> [SKIP][17] ([i915#4070] / [i915#4103]) +1 similar issue
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html
    - fi-cml-u2:          NOTRUN -> [SKIP][18] ([fdo#109278]) +1 similar issue
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-cml-u2/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy:
    - fi-icl-u2:          NOTRUN -> [SKIP][19] ([fdo#109278]) +2 similar issues
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-icl-u2/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-legacy.html

  * igt@kms_flip@basic-plain-flip@a-edp1:
    - fi-tgl-u2:          [PASS][20] -> [DMESG-WARN][21] ([i915#402]) +1 similar issue
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11528/fi-tgl-u2/igt@kms_flip@basic-plain-flip@a-edp1.html
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-tgl-u2/igt@kms_flip@basic-plain-flip@a-edp1.html

  * igt@kms_force_connector_basic@force-load-detect:
    - fi-cml-u2:          NOTRUN -> [SKIP][22] ([fdo#109285])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-cml-u2/igt@kms_force_connector_basic@force-load-detect.html
    - fi-rkl-11600:       NOTRUN -> [SKIP][23] ([fdo#109285] / [i915#4098])
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@kms_force_connector_basic@force-load-detect.html
    - fi-icl-u2:          NOTRUN -> [SKIP][24] ([fdo#109285])
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-icl-u2/igt@kms_force_connector_basic@force-load-detect.html

  * igt@kms_frontbuffer_tracking@basic:
    - fi-cml-u2:          NOTRUN -> [DMESG-WARN][25] ([i915#4269])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-cml-u2/igt@kms_frontbuffer_tracking@basic.html

  * igt@kms_pipe_crc_basic@compare-crc-sanitycheck-pipe-d:
    - fi-rkl-11600:       NOTRUN -> [SKIP][26] ([i915#4070] / [i915#533])
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@kms_pipe_crc_basic@compare-crc-sanitycheck-pipe-d.html
    - fi-cml-u2:          NOTRUN -> [SKIP][27] ([fdo#109278] / [i915#533])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-cml-u2/igt@kms_pipe_crc_basic@compare-crc-sanitycheck-pipe-d.html

  * igt@kms_psr@primary_mmap_gtt:
    - fi-rkl-11600:       NOTRUN -> [SKIP][28] ([i915#1072]) +3 similar issues
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@kms_psr@primary_mmap_gtt.html

  * igt@kms_setmode@basic-clone-single-crtc:
    - fi-icl-u2:          NOTRUN -> [SKIP][29] ([i915#3555])
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-icl-u2/igt@kms_setmode@basic-clone-single-crtc.html
    - fi-rkl-11600:       NOTRUN -> [SKIP][30] ([i915#3555] / [i915#4098])
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@kms_setmode@basic-clone-single-crtc.html

  * igt@prime_vgem@basic-userptr:
    - fi-rkl-11600:       NOTRUN -> [SKIP][31] ([i915#3301] / [i915#3708])
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@prime_vgem@basic-userptr.html
    - fi-icl-u2:          NOTRUN -> [SKIP][32] ([i915#3301])
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-icl-u2/igt@prime_vgem@basic-userptr.html

  * igt@prime_vgem@basic-write:
    - fi-rkl-11600:       NOTRUN -> [SKIP][33] ([i915#3291] / [i915#3708]) +2 similar issues
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@prime_vgem@basic-write.html

  
#### Possible fixes ####

  * igt@gem_exec_suspend@basic-s3@smem:
    - fi-rkl-11600:       [INCOMPLETE][34] ([i915#5127]) -> [PASS][35]
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11528/fi-rkl-11600/igt@gem_exec_suspend@basic-s3@smem.html
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-11600/igt@gem_exec_suspend@basic-s3@smem.html

  * igt@i915_selftest@live@gt_mocs:
    - fi-rkl-guc:         [DMESG-WARN][36] -> [PASS][37]
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11528/fi-rkl-guc/igt@i915_selftest@live@gt_mocs.html
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/fi-rkl-guc/igt@i915_selftest@live@gt_mocs.html

  * igt@i915_selftest@live@reset:
    - {bat-adlp-6}:       [DMESG-FAIL][38] ([i915#4983]) -> [PASS][39]
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11528/bat-adlp-6/igt@i915_selftest@live@reset.html
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/bat-adlp-6/igt@i915_selftest@live@reset.html

  
#### Warnings ####

  * igt@i915_selftest@live@hangcheck:
    - bat-dg1-5:          [DMESG-FAIL][40] ([i915#4494] / [i915#4957]) -> [INCOMPLETE][41] ([i915#5757])
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_11528/bat-dg1-5/igt@i915_selftest@live@hangcheck.html
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/bat-dg1-5/igt@i915_selftest@live@hangcheck.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [fdo#109278]: https://bugs.freedesktop.org/show_bug.cgi?id=109278
  [fdo#109284]: https://bugs.freedesktop.org/show_bug.cgi?id=109284
  [fdo#109285]: https://bugs.freedesktop.org/show_bug.cgi?id=109285
  [fdo#109308]: https://bugs.freedesktop.org/show_bug.cgi?id=109308
  [fdo#111825]: https://bugs.freedesktop.org/show_bug.cgi?id=111825
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [i915#1072]: https://gitlab.freedesktop.org/drm/intel/issues/1072
  [i915#1155]: https://gitlab.freedesktop.org/drm/intel/issues/1155
  [i915#1208]: https://gitlab.freedesktop.org/drm/intel/issues/1208
  [i915#1845]: https://gitlab.freedesktop.org/drm/intel/issues/1845
  [i915#1849]: https://gitlab.freedesktop.org/drm/intel/issues/1849
  [i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
  [i915#2190]: https://gitlab.freedesktop.org/drm/intel/issues/2190
  [i915#2582]: https://gitlab.freedesktop.org/drm/intel/issues/2582
  [i915#3012]: https://gitlab.freedesktop.org/drm/intel/issues/3012
  [i915#3282]: https://gitlab.freedesktop.org/drm/intel/issues/3282
  [i915#3291]: https://gitlab.freedesktop.org/drm/intel/issues/3291
  [i915#3301]: https://gitlab.freedesktop.org/drm/intel/issues/3301
  [i915#3555]: https://gitlab.freedesktop.org/drm/intel/issues/3555
  [i915#3576]: https://gitlab.freedesktop.org/drm/intel/issues/3576
  [i915#3637]: https://gitlab.freedesktop.org/drm/intel/issues/3637
  [i915#3708]: https://gitlab.freedesktop.org/drm/intel/issues/3708
  [i915#402]: https://gitlab.freedesktop.org/drm/intel/issues/402
  [i915#4070]: https://gitlab.freedesktop.org/drm/intel/issues/4070
  [i915#4098]: https://gitlab.freedesktop.org/drm/intel/issues/4098
  [i915#4103]: https://gitlab.freedesktop.org/drm/intel/issues/4103
  [i915#4269]: https://gitlab.freedesktop.org/drm/intel/issues/4269
  [i915#4312]: https://gitlab.freedesktop.org/drm/intel/issues/4312
  [i915#4391]: https://gitlab.freedesktop.org/drm/intel/issues/4391
  [i915#4494]: https://gitlab.freedesktop.org/drm/intel/issues/4494
  [i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
  [i915#4897]: https://gitlab.freedesktop.org/drm/intel/issues/4897
  [i915#4957]: https://gitlab.freedesktop.org/drm/intel/issues/4957
  [i915#4983]: https://gitlab.freedesktop.org/drm/intel/issues/4983
  [i915#5127]: https://gitlab.freedesktop.org/drm/intel/issues/5127
  [i915#533]: https://gitlab.freedesktop.org/drm/intel/issues/533
  [i915#5341]: https://gitlab.freedesktop.org/drm/intel/issues/5341
  [i915#5757]: https://gitlab.freedesktop.org/drm/intel/issues/5757


Build changes
-------------

  * Linux: CI_DRM_11528 -> Patchwork_102875v1

  CI-20190529: 20190529
  CI_DRM_11528: bd638cf6c04abbc39e46649f820253a303131df5 @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_6441: a44d1d4c9e7198a2d59d7dcaae38e340f7cadcf9 @ https://gitlab.freedesktop.org/drm/igt-gpu-tools.git
  Patchwork_102875v1: bd638cf6c04abbc39e46649f820253a303131df5 @ git://anongit.freedesktop.org/gfx-ci/linux


== Linux commits ==

d0658ae6e664 drm/doc: add rfc section for small BAR uapi

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_102875v1/index.html



* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-20 17:13 ` [Intel-gfx] " Matthew Auld
@ 2022-04-27  6:35   ` Lionel Landwerlin
  -1 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-04-27  6:35 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Jordan Justen, dri-devel, Kenneth Graunke,
	Jon Bloomfield, Daniel Vetter, mesa-dev, Akeem G Abodunrin

Hi Matt,


The proposal looks good to me.

Looking forward to trying it on drm-tip.


-Lionel

On 20/04/2022 20:13, Matthew Auld wrote:
> Add an entry for the new uapi needed for small BAR on DG2+.
>
> v2:
>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>    - Rework error capture interactions, including no longer needing
>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>    - Add probed_cpu_visible_size. (Lionel)
>
> [snip]



^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-04-27  6:35   ` Lionel Landwerlin
  0 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-04-27  6:35 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

Hi Matt,


The proposal looks good to me.

Looking forward to try it on drm-tip.


-Lionel

On 20/04/2022 20:13, Matthew Auld wrote:
> Add an entry for the new uapi needed for small BAR on DG2+.
>
> v2:
>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>    - Rework error capture interactions, including no longer needing
>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>    - Add probed_cpu_visible_size. (Lionel)
>
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Cc: Jordan Justen <jordan.l.justen@intel.com>
> Cc: Kenneth Graunke <kenneth@whitecape.org>
> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
> Cc: mesa-dev@lists.freedesktop.org
> ---
>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>   Documentation/gpu/rfc/index.rst          |   4 +
>   3 files changed, 252 insertions(+)
>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>
> diff --git a/Documentation/gpu/rfc/i915_small_bar.h b/Documentation/gpu/rfc/i915_small_bar.h
> new file mode 100644
> index 000000000000..7bfd0cf44d35
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_small_bar.h
> @@ -0,0 +1,190 @@
> +/**
> + * struct __drm_i915_memory_region_info - Describes one region as known to the
> + * driver.
> + *
> + * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
> + * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
> + * at &drm_i915_query_item.query_id.
> + */
> +struct __drm_i915_memory_region_info {
> +	/** @region: The class:instance pair encoding */
> +	struct drm_i915_gem_memory_class_instance region;
> +
> +	/** @rsvd0: MBZ */
> +	__u32 rsvd0;
> +
> +	/** @probed_size: Memory probed by the driver (-1 = unknown) */
> +	__u64 probed_size;
> +
> +	/** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
> +	__u64 unallocated_size;
> +
> +	union {
> +		/** @rsvd1: MBZ */
> +		__u64 rsvd1[8];
> +		struct {
> +			/**
> +			 * @probed_cpu_visible_size: Memory probed by the driver
> +			 * that is CPU accessible. (-1 = unknown).
> +			 *
> +			 * This will be always be <= @probed_size, and the
> +			 * remainder(if there is any) will not be CPU
> +			 * accessible.
> +			 */
> +			__u64 probed_cpu_visible_size;
> +		};
> +	};
> +};
> +
> +/**
> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, with added
> + * extension support using struct i915_user_extension.
> + *
> + * Note that new buffer flags should be added here, at least for the stuff that
> + * is immutable. Previously we would have two ioctls, one to create the object
> + * with gem_create, and another to apply various parameters, however this
> + * creates some ambiguity for the params which are considered immutable. Also in
> + * general we're phasing out the various SET/GET ioctls.
> + */
> +struct __drm_i915_gem_create_ext {
> +	/**
> +	 * @size: Requested size for the object.
> +	 *
> +	 * The (page-aligned) allocated size for the object will be returned.
> +	 *
> +	 * Note that for some devices we have might have further minimum
> +	 * page-size restrictions(larger than 4K), like for device local-memory.
> +	 * However in general the final size here should always reflect any
> +	 * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
> +	 * extension to place the object in device local-memory.
> +	 */
> +	__u64 size;
> +	/**
> +	 * @handle: Returned handle for the object.
> +	 *
> +	 * Object handles are nonzero.
> +	 */
> +	__u32 handle;
> +	/**
> +	 * @flags: Optional flags.
> +	 *
> +	 * Supported values:
> +	 *
> +	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
> +	 * the object will need to be accessed via the CPU.
> +	 *
> +	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
> +	 * only strictly required on platforms where only some of the device
> +	 * memory is directly visible or mappable through the CPU, like on DG2+.
> +	 *
> +	 * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
> +	 * ensure we can always spill the allocation to system memory, if we
> +	 * can't place the object in the mappable part of
> +	 * I915_MEMORY_CLASS_DEVICE.
> +	 *
> +	 * Note that since the kernel only supports flat-CCS on objects that can
> +	 * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
> +	 * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
> +	 * flat-CCS.
> +	 *
> +	 * Without this hint, the kernel will assume that non-mappable
> +	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
> +	 * kernel can still migrate the object to the mappable part, as a last
> +	 * resort, if userspace ever CPU faults this object, but this might be
> +	 * expensive, and so ideally should be avoided.
> +	 */
> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
> +	__u32 flags;
> +	/**
> +	 * @extensions: The chain of extensions to apply to this object.
> +	 *
> +	 * This will be useful in the future when we need to support several
> +	 * different extensions, and we need to apply more than one when
> +	 * creating the object. See struct i915_user_extension.
> +	 *
> +	 * If we don't supply any extensions then we get the same old gem_create
> +	 * behaviour.
> +	 *
> +	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
> +	 * struct drm_i915_gem_create_ext_memory_regions.
> +	 *
> +	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
> +	 * struct drm_i915_gem_create_ext_protected_content.
> +	 */
> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
> +	__u64 extensions;
> +};
> +
> +#define DRM_I915_QUERY_VMA_INFO	5
> +
> +/**
> + * struct __drm_i915_query_vma_info
> + *
> + * Given a vm and GTT address, look up the corresponding vma, returning its
> + * set of attributes.
> + *
> + * .. code-block:: C
> + *
> + *	struct drm_i915_query_vma_info info = {};
> + *	struct drm_i915_query_item item = {
> + *		.data_ptr = (uintptr_t)&info,
> + *		.query_id = DRM_I915_QUERY_VMA_INFO,
> + *	};
> + *	struct drm_i915_query query = {
> + *		.num_items = 1,
> + *		.items_ptr = (uintptr_t)&item,
> + *	};
> + *	int err;
> + *
> + *	// Unlike some other types of queries, there is no need to first query
> + *	// the size of the data_ptr blob here, since we already know ahead of
> + *	// time how big this needs to be.
> + *	item.length = sizeof(info);
> + *
> + *	// Next we fill in the vm_id and ppGTT address of the vma we wish
> + *	// to query, before then firing off the query.
> + *	info.vm_id = vm_id;
> + *	info.offset = gtt_address;
> + *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
> + *	if (err || item.length < 0) ...
> + *
> + *	// If all went well we can now inspect the returned attributes.
> + *	if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
> + */
> +struct __drm_i915_query_vma_info {
> +	/**
> +	 * @vm_id: The given vm id that contains the vma. The id is the value
> +	 * returned by the DRM_I915_GEM_VM_CREATE ioctl. See struct
> +	 * drm_i915_gem_vm_control.vm_id.
> +	 */
> +	__u32 vm_id;
> +	/** @pad: MBZ. */
> +	__u32 pad;
> +	/**
> +	 * @offset: The corresponding ppGTT address of the vma which the kernel
> +	 * will use to perform the lookup.
> +	 */
> +	__u64 offset;
> +	/**
> +	 * @attributes: The returned attributes for the given vma.
> +	 *
> +	 * Possible values:
> +	 *
> +	 * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing the
> +	 * vma are currently CPU accessible. If this is not set then the vma is
> +	 * currently backed by I915_MEMORY_CLASS_DEVICE memory, which the CPU
> +	 * cannot directly access (this is only possible on discrete devices with
> +	 * a small BAR). Attempting to MMAP and fault such an object will
> +	 * require the kernel to first synchronise any GPU work tied to the
> +	 * object, before then migrating the pages, either to the CPU accessible
> +	 * part of I915_MEMORY_CLASS_DEVICE, or I915_MEMORY_CLASS_SYSTEM, if the
> +	 * placements permit it. See I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
> +	 *
> +	 * Note that this is inherently racy.
> +	 */
> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
> +	__u64 attributes;
> +	/** @rsvd: MBZ */
> +	__u32 rsvd[4];
> +};
> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst b/Documentation/gpu/rfc/i915_small_bar.rst
> new file mode 100644
> index 000000000000..be3d9bcdd86d
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
> @@ -0,0 +1,58 @@
> +==========================
> +I915 Small BAR RFC Section
> +==========================
> +Starting from DG2 we will have resizable BAR support for device local-memory
> +(i.e. I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size might
> +still be smaller than the total probed_size. In such cases, only some subset of
> +I915_MEMORY_CLASS_DEVICE will be CPU accessible (for example the first 256M),
> +while the remainder is only accessible via the GPU.
> +
> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
> +----------------------------------------------
> +New gem_create_ext flag to tell the kernel that a BO will require CPU access.
> +This becomes important when placing an object in I915_MEMORY_CLASS_DEVICE on a
> +device with a small BAR, where only some portion of that memory is CPU
> +accessible. Without this flag the kernel will assume that CPU access is not
> +required, and prioritize using the non-CPU visible portion of
> +I915_MEMORY_CLASS_DEVICE.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_gem_create_ext
> +
> +probed_cpu_visible_size attribute
> +---------------------------------
> +New struct __drm_i915_memory_region_info attribute which returns the total size
> +of the CPU accessible portion for the particular region. This should only be
> +applicable for I915_MEMORY_CLASS_DEVICE.
> +
> +Vulkan will need this as part of creating a separate VkMemoryHeap with the
> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU visible portion,
> +where the total size of the heap needs to be known.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_memory_region_info
> +
> +DRM_I915_QUERY_VMA_INFO query
> +-----------------------------
> +Query the attributes of some vma. Given a vm and GTT offset, find the
> +respective vma, and return its set of attributes. For now we only support
> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
> +currently placed in memory that is accessible by the CPU. This should always be
> +set on devices where the probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE
> +matches the probed_size. If this is not set then CPU faulting the object will
> +likely first require migrating the pages.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_query_vma_info
> +
> +Error Capture restrictions
> +--------------------------
> +With error capture we have two new restrictions:
> +
> +    1) Error capture is best effort on small BAR systems; if the pages are not
> +    CPU accessible at the time of capture, then the kernel is free to skip
> +    trying to capture them.
> +
> +    2) On discrete we now reject error capture on recoverable contexts. In the
> +    future the kernel may want to blit during error capture, when for example
> +    something is not currently CPU accessible.
> diff --git a/Documentation/gpu/rfc/index.rst b/Documentation/gpu/rfc/index.rst
> index 91e93a705230..5a3bd3924ba6 100644
> --- a/Documentation/gpu/rfc/index.rst
> +++ b/Documentation/gpu/rfc/index.rst
> @@ -23,3 +23,7 @@ host such documentation:
>   .. toctree::
>   
>       i915_scheduler.rst
> +
> +.. toctree::
> +
> +    i915_small_bar.rst



^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-27  6:35   ` [Intel-gfx] " Lionel Landwerlin
@ 2022-04-27  6:48     ` Lionel Landwerlin
  -1 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-04-27  6:48 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Jordan Justen, dri-devel, Kenneth Graunke,
	Jon Bloomfield, Daniel Vetter, mesa-dev, Akeem G Abodunrin

One question though, how do we detect that this flag 
(I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given kernel?
I assume older kernels are going to reject object creation if we use 
this flag?
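
If the answer is just "try it and check for -EINVAL", a probe along these
lines would presumably do (purely a sketch, not from the patch: it assumes
the final uapi headers grow the proposed flags field and flag value from
the RFC, and that older kernels reject the unknown bit with -EINVAL):

  #include <stdbool.h>
  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <drm/i915_drm.h>

  /* Proposed in the RFC above; not in any released uapi header yet. */
  #ifndef I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
  #define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
  #endif

  static bool has_needs_cpu_access(int fd)
  {
          /* Device + system placement, since the flag is only valid for
           * objects that can be placed in I915_MEMORY_CLASS_DEVICE. */
          struct drm_i915_gem_memory_class_instance placements[] = {
                  { I915_MEMORY_CLASS_DEVICE, 0 },
                  { I915_MEMORY_CLASS_SYSTEM, 0 },
          };
          struct drm_i915_gem_create_ext_memory_regions regions = {
                  .base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
                  .num_regions = 2,
                  .regions = (uintptr_t)placements,
          };
          struct drm_i915_gem_create_ext create = {
                  .size = 4096,
                  .flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
                  .extensions = (uintptr_t)&regions,
          };
          struct drm_gem_close close_bo = {};

          /* Older kernels treat the flag as unknown and fail the ioctl. */
          if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
                  return false;

          close_bo.handle = create.handle;
          ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
          return true;
  }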

I didn't plan to use __drm_i915_query_vma_info, but isn't it 
inconsistent to select the placement on the GEM object and then query 
whether it's mappable by address?
You made a comment stating this is racy; wouldn't querying on the GEM
object prevent this?
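
What I'm imagining is something handle-based instead, roughly of this
shape (completely hypothetical, neither the query id nor the struct exist
anywhere; it's only meant to illustrate querying on the GEM object rather
than by ppGTT address):

  #include <linux/types.h>

  /* Hypothetical, handle-based counterpart to DRM_I915_QUERY_VMA_INFO. */
  #define HYPOTHETICAL_DRM_I915_QUERY_OBJECT_INFO 6

  struct hypothetical_drm_i915_query_object_info {
          __u32 handle;     /* GEM handle from gem_create_ext */
          __u32 pad;        /* MBZ */
          __u64 attributes; /* e.g. a CPU_VISIBLE bit, as in the vma query */
  };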

Thanks,

-Lionel

On 27/04/2022 09:35, Lionel Landwerlin wrote:
> Hi Matt,
>
>
> The proposal looks good to me.
>
> Looking forward to trying it on drm-tip.
>
>
> -Lionel
>
> On 20/04/2022 20:13, Matthew Auld wrote:
>> Add an entry for the new uapi needed for small BAR on DG2+.
>>
>> [snip]
>
>


^ permalink raw reply	[flat|nested] 50+ messages in thread


* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-27  6:48     ` [Intel-gfx] " Lionel Landwerlin
@ 2022-04-27  6:55       ` Christian König
  -1 siblings, 0 replies; 50+ messages in thread
From: Christian König @ 2022-04-27  6:55 UTC (permalink / raw)
  To: Lionel Landwerlin, Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Daniel Vetter, dri-devel, Kenneth Graunke,
	Jon Bloomfield, mesa-dev, Akeem G Abodunrin

Well, usually we increment the drm minor version when adding some new
flags on amdgpu.
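
E.g. something like this on the userspace side, using libdrm (the actual
minor number i915 would bump to for this feature is of course just a
placeholder here):

  #include <stdbool.h>
  #include <xf86drm.h>

  /* Placeholder: whatever drm minor the feature actually lands in. */
  #define MINOR_WITH_NEEDS_CPU_ACCESS 99

  static bool kernel_has_small_bar_uapi(int fd)
  {
          drmVersionPtr ver = drmGetVersion(fd);
          bool ok;

          if (!ver)
                  return false;

          ok = ver->version_minor >= MINOR_WITH_NEEDS_CPU_ACCESS;
          drmFreeVersion(ver);
          return ok;
  }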

In addition to that, just one comment from our experience with this: you
don't just need one flag, but two. The first one is a hint which says
"CPU access needed" and the second is a promise which says "CPU access
never needed".

The background is that for a whole bunch of buffers you can say with 100%
certainty that you will never ever need CPU access.

Then we have a whole bunch of buffers where we might need CPU access, but
can't tell for sure.

And last we have stuff like transfer buffers where you can be 100% sure
that you need CPU access.

Separating it like this helped a lot with performance on small BAR systems.
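
To make that split concrete, roughly this kind of categorisation on the
userspace side (on amdgpu the pair is spelled
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED / AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
the second i915 flag below is purely made up, only the NEEDS_CPU_ACCESS
one is actually proposed in this RFC):

  #include <linux/types.h>

  /* From the RFC; the "never needed" flag is hypothetical. */
  #define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
  #define HYPOTHETICAL_I915_FLAG_NO_CPU_ACCESS      (1 << 1)

  enum bo_usage {
          BO_STAGING_UPLOAD,   /* CPU writes it: 100% sure we map it */
          BO_GPU_ONLY_SURFACE, /* render targets etc.: never mapped */
          BO_MAYBE_MAPPED,     /* can't tell up front */
  };

  static __u32 small_bar_flags(enum bo_usage usage)
  {
          switch (usage) {
          case BO_STAGING_UPLOAD:
                  return I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS;
          case BO_GPU_ONLY_SURFACE:
                  return HYPOTHETICAL_I915_FLAG_NO_CPU_ACCESS;
          case BO_MAYBE_MAPPED:
          default:
                  return 0;
          }
  }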

Regards,
Christian.

On 27.04.22 at 08:48, Lionel Landwerlin wrote:
> One question though, how do we detect that this flag 
> (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given 
> kernel?
> I assume older kernels are going to reject object creation if we use 
> this flag?
>
> I didn't plan to use __drm_i915_query_vma_info, but isn't it 
> inconsistent to select the placement on the GEM object and then query 
> whether it's mappable by address?
> You made a comment stating this is racy; wouldn't querying on the GEM
> object prevent this?
>
> Thanks,
>
> -Lionel
>
> On 27/04/2022 09:35, Lionel Landwerlin wrote:
>> Hi Matt,
>>
>>
>> The proposal looks good to me.
>>
>> Looking forward to trying it on drm-tip.
>>
>>
>> -Lionel
>>
>> On 20/04/2022 20:13, Matthew Auld wrote:
>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>
>>> [snip]
>>
>>
>


^ permalink raw reply	[flat|nested] 50+ messages in thread

>>> the kernel
>>> +     * will use to perform the lookup.
>>> +     */
>>> +    __u64 offset;
>>> +    /**
>>> +     * @attributes: The returned attributes for the given vma.
>>> +     *
>>> +     * Possible values:
>>> +     *
>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>> backing the
>>> +     * vma are currently CPU accessible. If this is not set then 
>>> the vma is
>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>> the CPU
>>> +     * cannot directly access(this is only possible on discrete 
>>> devices with
>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>> +     * require the kernel first synchronising any GPU work tied to the
>>> +     * object, before then migrating the pages, either to the CPU 
>>> accessible
>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>> I915_MEMORY_CLASS_SYSTEM, if the
>>> +     * placements permit it. See 
>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>> +     *
>>> +     * Note that this is inherently racy.
>>> +     */
>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>> +    __u64 attributes;
>>> +    /** @rsvd: MBZ */
>>> +    __u32 rsvd[4];
>>> +};
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>> new file mode 100644
>>> index 000000000000..be3d9bcdd86d
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>> @@ -0,0 +1,58 @@
>>> +==========================
>>> +I915 Small BAR RFC Section
>>> +==========================
>>> +Starting from DG2 we will have resizable BAR support for device 
>>> local-memory(i.e
>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>> might still be
>>> +smaller than the total probed_size. In such cases, only some subset of
>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>> first 256M),
>>> +while the remainder is only accessible via the GPU.
>>> +
>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>> +----------------------------------------------
>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>> CPU access.
>>> +This becomes important when placing an object in 
>>> I915_MEMORY_CLASS_DEVICE, where
>>> +underneath the device has a small BAR, meaning only some portion of 
>>> it is CPU
>>> +accessible. Without this flag the kernel will assume that CPU 
>>> access is not
>>> +required, and prioritize using the non-CPU visible portion of
>>> +I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_gem_create_ext
>>> +
>>> +probed_cpu_visible_size attribute
>>> +---------------------------------
>>> +New struct__drm_i915_memory_region attribute which returns the 
>>> total size of the
>>> +CPU accessible portion, for the particular region. This should only be
>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>> with the
>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>> visible portion,
>>> +where the total size of the heap needs to be known.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_memory_region_info
>>> +
>>> +DRM_I915_QUERY_VMA_INFO query
>>> +-----------------------------
>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>> +respective vma, and return its set of attributes. For now we only 
>>> support
>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>>> +currently placed in memory that is accessible by the CPU. This 
>>> should always be
>>> +set on devices where the CPU probed_cpu_visible_size of 
>>> I915_MEMORY_CLASS_DEVICE
>>> +matches the probed_size. If this is not set then CPU faulting the 
>>> object will
>>> +likely first require migrating the pages.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_query_vma_info
>>> +
>>> +Error Capture restrictions
>>> +--------------------------
>>> +With error capture we have two new restrictions:
>>> +
>>> +    1) Error capture is best effort on small BAR systems; if the 
>>> pages are not
>>> +    CPU accessible, at the time of capture, then the kernel is free 
>>> to skip
>>> +    trying to capture them.
>>> +
>>> +    2) On discrete we now reject error capture on recoverable 
>>> contexts. In the
>>> +    future the kernel may want to blit during error capture, when 
>>> for example
>>> +    something is not currently CPU accessible.
>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>> b/Documentation/gpu/rfc/index.rst
>>> index 91e93a705230..5a3bd3924ba6 100644
>>> --- a/Documentation/gpu/rfc/index.rst
>>> +++ b/Documentation/gpu/rfc/index.rst
>>> @@ -23,3 +23,7 @@ host such documentation:
>>>   .. toctree::
>>>         i915_scheduler.rst
>>> +
>>> +.. toctree::
>>> +
>>> +    i915_small_bar.rst
>>
>>
>


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-20 17:13 ` [Intel-gfx] " Matthew Auld
                   ` (3 preceding siblings ...)
  (?)
@ 2022-04-27  8:36 ` Tvrtko Ursulin
  2022-04-27 17:36   ` Matthew Auld
  -1 siblings, 1 reply; 50+ messages in thread
From: Tvrtko Ursulin @ 2022-04-27  8:36 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Kenneth Graunke, mesa-dev, dri-devel,
	Daniel Vetter


On 20/04/2022 18:13, Matthew Auld wrote:
> Add an entry for the new uapi needed for small BAR on DG2+.
> 
> v2:
>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>    - Rework error capture interactions, including no longer needing
>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>    - Add probed_cpu_visible_size. (Lionel)
> 
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Cc: Jordan Justen <jordan.l.justen@intel.com>
> Cc: Kenneth Graunke <kenneth@whitecape.org>
> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
> Cc: mesa-dev@lists.freedesktop.org
> ---
>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>   Documentation/gpu/rfc/index.rst          |   4 +
>   3 files changed, 252 insertions(+)
>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
> 
> diff --git a/Documentation/gpu/rfc/i915_small_bar.h b/Documentation/gpu/rfc/i915_small_bar.h
> new file mode 100644
> index 000000000000..7bfd0cf44d35
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_small_bar.h
> @@ -0,0 +1,190 @@
> +/**
> + * struct __drm_i915_memory_region_info - Describes one region as known to the
> + * driver.
> + *
> + * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
> + * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
> + * at &drm_i915_query_item.query_id.
> + */
> +struct __drm_i915_memory_region_info {
> +	/** @region: The class:instance pair encoding */
> +	struct drm_i915_gem_memory_class_instance region;
> +
> +	/** @rsvd0: MBZ */
> +	__u32 rsvd0;
> +
> +	/** @probed_size: Memory probed by the driver (-1 = unknown) */
> +	__u64 probed_size;
> +
> +	/** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
> +	__u64 unallocated_size;
> +
> +	union {
> +		/** @rsvd1: MBZ */
> +		__u64 rsvd1[8];
> +		struct {
> +			/**
> +			 * @probed_cpu_visible_size: Memory probed by the driver
> +			 * that is CPU accessible. (-1 = unknown).
> +			 *
> +			 * This will be always be <= @probed_size, and the
> +			 * remainder(if there is any) will not be CPU
> +			 * accessible.
> +			 */
> +			__u64 probed_cpu_visible_size;

Would unallocated_cpu_visible_size be useful, to follow the total 
unallocated_size?
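
i.e. something along these lines next to the probed field (hypothetical,
just to illustrate the suggestion; not something the RFC currently
proposes):

			/*
			 * Hypothetical: estimate of CPU visible memory
			 * remaining in this region (-1 = unknown).
			 */
			__u64 unallocated_cpu_visible_size;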

Btw, have we ever considered whether unallocated_size should require 
CAP_SYS_ADMIN/PERFMON or something?

> +		};
> +	};
> +};
> +
> +/**
> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, with added
> + * extension support using struct i915_user_extension.
> + *
> + * Note that new buffer flags should be added here, at least for the stuff that
> + * is immutable. Previously we would have two ioctls, one to create the object
> + * with gem_create, and another to apply various parameters, however this
> + * creates some ambiguity for the params which are considered immutable. Also in
> + * general we're phasing out the various SET/GET ioctls.
> + */
> +struct __drm_i915_gem_create_ext {
> +	/**
> +	 * @size: Requested size for the object.
> +	 *
> +	 * The (page-aligned) allocated size for the object will be returned.
> +	 *
> +	 * Note that for some devices we have might have further minimum
> +	 * page-size restrictions(larger than 4K), like for device local-memory.
> +	 * However in general the final size here should always reflect any
> +	 * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
> +	 * extension to place the object in device local-memory.
> +	 */
> +	__u64 size;
> +	/**
> +	 * @handle: Returned handle for the object.
> +	 *
> +	 * Object handles are nonzero.
> +	 */
> +	__u32 handle;
> +	/**
> +	 * @flags: Optional flags.
> +	 *
> +	 * Supported values:
> +	 *
> +	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
> +	 * the object will need to be accessed via the CPU.
> +	 *
> +	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
> +	 * only strictly required on platforms where only some of the device
> +	 * memory is directly visible or mappable through the CPU, like on DG2+.
> +	 *
> +	 * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
> +	 * ensure we can always spill the allocation to system memory, if we
> +	 * can't place the object in the mappable part of
> +	 * I915_MEMORY_CLASS_DEVICE.
> +	 *
> +	 * Note that since the kernel only supports flat-CCS on objects that can
> +	 * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
> +	 * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
> +	 * flat-CCS.
> +	 *
> +	 * Without this hint, the kernel will assume that non-mappable
> +	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
> +	 * kernel can still migrate the object to the mappable part, as a last
> +	 * resort, if userspace ever CPU faults this object, but this might be
> +	 * expensive, and so ideally should be avoided.
> +	 */

So the "needs_cpu_access" flag could almost be viewed as a sub-region 
placement priority? What I mean is this:

1)
placements=device,system flags=

This results in placement priorities: device, device_cpu_mappable, system.

2)
placements=device,system flags=needs_cpu_access

This results in placement priorities: device_cpu_mappable, device, system.

Is this correct?

The benefit of the flag is that i915 can place the object in the right 
place from the start, instead of only on the first CPU access? Is that 
worth it, or is there more to it?
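
For reference, case 2) might look roughly like the below from the
userspace side, combining the proposed flag with the existing
I915_GEM_CREATE_EXT_MEMORY_REGIONS extension (illustrative sketch only;
fd, bo_size and err assumed, error handling omitted):

	struct drm_i915_gem_memory_class_instance placements[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
	};
	struct drm_i915_gem_create_ext_memory_regions regions = {
		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
		.num_regions = 2,
		.regions = (uintptr_t)placements,
	};
	struct drm_i915_gem_create_ext create = {
		.size = bo_size,
		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
		.extensions = (uintptr_t)&regions,
	};

	/* With the flag set, mappable device memory would be preferred,
	 * then system; without it, non-mappable device memory is preferred. */
	err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);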

> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
> +	__u32 flags;
> +	/**
> +	 * @extensions: The chain of extensions to apply to this object.
> +	 *
> +	 * This will be useful in the future when we need to support several
> +	 * different extensions, and we need to apply more than one when
> +	 * creating the object. See struct i915_user_extension.
> +	 *
> +	 * If we don't supply any extensions then we get the same old gem_create
> +	 * behaviour.
> +	 *
> +	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
> +	 * struct drm_i915_gem_create_ext_memory_regions.
> +	 *
> +	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
> +	 * struct drm_i915_gem_create_ext_protected_content.
> +	 */
> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
> +	__u64 extensions;
> +};
> +
> +#define DRM_I915_QUERY_VMA_INFO	5
> +
> +/**
> + * struct __drm_i915_query_vma_info
> + *
> + * Given a vm and GTT address, lookup the corresponding vma, returning its set
> + * of attributes.
> + *
> + * .. code-block:: C
> + *
> + *	struct drm_i915_query_vma_info info = {};
> + *	struct drm_i915_query_item item = {
> + *		.data_ptr = (uintptr_t)&info,
> + *		.query_id = DRM_I915_QUERY_VMA_INFO,
> + *	};
> + *	struct drm_i915_query query = {
> + *		.num_items = 1,
> + *		.items_ptr = (uintptr_t)&item,
> + *	};
> + *	int err;
> + *
> + *	// Unlike some other types of queries, there is no need to first query
> + *	// the size of the data_ptr blob here, since we already know ahead of
> + *	// time how big this needs to be.
> + *	item.length = sizeof(info);
> + *
> + *	// Next we fill in the vm_id and ppGTT address of the vma we wish
> + *	// to query, before then firing off the query.
> + *	info.vm_id = vm_id;
> + *	info.offset = gtt_address;
> + *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
> + *	if (err || item.length < 0) ...
> + *
> + *	// If all went well we can now inspect the returned attributes.
> + *	if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
> + */
> +struct __drm_i915_query_vma_info {
> +	/**
> +	 * @vm_id: The given vm id that contains the vma. The id is the value
> +	 * returned by the DRM_I915_GEM_VM_CREATE. See struct
> +	 * drm_i915_gem_vm_control.vm_id.
> +	 */
> +	__u32 vm_id;
> +	/** @pad: MBZ. */
> +	__u32 pad;
> +	/**
> +	 * @offset: The corresponding ppGTT address of the vma which the kernel
> +	 * will use to perform the lookup.
> +	 */
> +	__u64 offset;
> +	/**
> +	 * @attributes: The returned attributes for the given vma.
> +	 *
> +	 * Possible values:
> +	 *
> +	 * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing the
> +	 * vma are currently CPU accessible. If this is not set then the vma is
> +	 * currently backed by I915_MEMORY_CLASS_DEVICE memory, which the CPU
> +	 * cannot directly access(this is only possible on discrete devices with
> +	 * a small BAR). Attempting to MMAP and fault such an object will
> +	 * require the kernel first synchronising any GPU work tied to the
> +	 * object, before then migrating the pages, either to the CPU accessible
> +	 * part of I915_MEMORY_CLASS_DEVICE, or I915_MEMORY_CLASS_SYSTEM, if the
> +	 * placements permit it. See I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
> +	 *
> +	 * Note that this is inherently racy.
> +	 */
> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
> +	__u64 attributes;
> +	/** @rsvd: MBZ */
> +	__u32 rsvd[4];
> +};
> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst b/Documentation/gpu/rfc/i915_small_bar.rst
> new file mode 100644
> index 000000000000..be3d9bcdd86d
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
> @@ -0,0 +1,58 @@
> +==========================
> +I915 Small BAR RFC Section
> +==========================
> +Starting from DG2 we will have resizable BAR support for device local-memory(i.e
> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size might still be
> +smaller than the total probed_size. In such cases, only some subset of
> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the first 256M),
> +while the remainder is only accessible via the GPU.
> +
> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
> +----------------------------------------------
> +New gem_create_ext flag to tell the kernel that a BO will require CPU access.
> +This becomes important when placing an object in I915_MEMORY_CLASS_DEVICE, where
> +underneath the device has a small BAR, meaning only some portion of it is CPU
> +accessible. Without this flag the kernel will assume that CPU access is not
> +required, and prioritize using the non-CPU visible portion of
> +I915_MEMORY_CLASS_DEVICE.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_gem_create_ext
> +
> +probed_cpu_visible_size attribute
> +---------------------------------
> +New struct__drm_i915_memory_region attribute which returns the total size of the
> +CPU accessible portion, for the particular region. This should only be
> +applicable for I915_MEMORY_CLASS_DEVICE.
> +
> +Vulkan will need this as part of creating a separate VkMemoryHeap with the
> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU visible portion,
> +where the total size of the heap needs to be known.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_memory_region_info
> +
> +DRM_I915_QUERY_VMA_INFO query
> +-----------------------------
> +Query the attributes of some vma. Given a vm and GTT offset, find the
> +respective vma, and return its set of attributes. For now we only support
> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
> +currently placed in memory that is accessible by the CPU. This should always be
> +set on devices where the CPU probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE
> +matches the probed_size. If this is not set then CPU faulting the object will
> +likely first require migrating the pages.

I think the justification for the new query should be documented as 
well. (Why it is needed, on top of what already exists.)

Without it, I can't immediately understand the disconnect between the 
object based and the VMA based API. Userspace has to do some intervening 
operation, like execbuf, or vm bind in the future, to make this query 
usable after object creation. So the question is: wouldn't userspace 
already know which placements it allowed, and therefore whether i915 
would auto-migrate this particular object or not? Or, in other words, 
why isn't this an object based query, since the question it answers is 
about the object's backing store and not the VMA.
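
A rough sketch of what an object handle based variant could look like
(purely hypothetical, just to illustrate the point; not something this
RFC proposes):

	struct __drm_i915_query_object_info {
		/** @handle: GEM handle of the object to look up. */
		__u32 handle;
		/** @pad: MBZ. */
		__u32 pad;
		/**
		 * @attributes: Same attribute bits as the vma query, e.g.
		 * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, but answered for the
		 * object's current backing store rather than for a vma.
		 */
		__u64 attributes;
		/** @rsvd: MBZ. */
		__u32 rsvd[4];
	};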

Regards,

Tvrtko

> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_query_vma_info
> +
> +Error Capture restrictions
> +--------------------------
> +With error capture we have two new restrictions:
> +
> +    1) Error capture is best effort on small BAR systems; if the pages are not
> +    CPU accessible, at the time of capture, then the kernel is free to skip
> +    trying to capture them.
> +
> +    2) On discrete we now reject error capture on recoverable contexts. In the
> +    future the kernel may want to blit during error capture, when for example
> +    something is not currently CPU accessible.
> diff --git a/Documentation/gpu/rfc/index.rst b/Documentation/gpu/rfc/index.rst
> index 91e93a705230..5a3bd3924ba6 100644
> --- a/Documentation/gpu/rfc/index.rst
> +++ b/Documentation/gpu/rfc/index.rst
> @@ -23,3 +23,7 @@ host such documentation:
>   .. toctree::
>   
>       i915_scheduler.rst
> +
> +.. toctree::
> +
> +    i915_small_bar.rst

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-27  6:55       ` [Intel-gfx] " Christian König
@ 2022-04-27 15:02         ` Matthew Auld
  -1 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-04-27 15:02 UTC (permalink / raw)
  To: Christian König, Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, Daniel Vetter, dri-devel, Kenneth Graunke,
	Jon Bloomfield, mesa-dev, Akeem G Abodunrin

On 27/04/2022 07:55, Christian König wrote:
> Well, usually we increment the drm minor version when adding some new 
> flags on amdgpu.
> 
> In addition, just one comment from our experience with this: you don't 
> just need one flag, but two. The first one is a hint which says "CPU 
> access needed" and the second is a promise which says "CPU access 
> never needed".
> 
> The background is that for a whole bunch of buffers you can say with 
> 100% certainty that you will never ever need CPU access.
> 
> Then we have a whole bunch of buffers where we might need CPU access, 
> but can't tell for sure.
> 
> And last we have stuff like transfer buffers where you can be 100% 
> sure that you need CPU access.
> 
> Separating it like this helped a lot with performance on small BAR systems.

Thanks for the comments. For the "CPU access never needed" flag, what 
extra stuff does that do on the kernel side vs not specifying any 
flag/hint? I assume it still prioritizes using the non-CPU visible 
portion first? What else does it do?
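
For reference, the amdgpu scheme being described maps out roughly as
below. The two amdgpu flag names exist in amdgpu_drm.h today; an i915
equivalent of the "never needed" promise would be new, so the last
define is purely hypothetical and not part of this RFC:

	/*
	 * amdgpu, roughly:
	 *
	 * AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED - hint: CPU access needed,
	 *	so prefer the CPU visible part of VRAM from the start.
	 *
	 * AMDGPU_GEM_CREATE_NO_CPU_ACCESS - promise: CPU access never
	 *	needed, so the buffer can always stay in non-visible VRAM.
	 *
	 * Neither flag - CPU access is possible but not known up front;
	 *	place anywhere and migrate on a CPU fault if needed.
	 */

	/* Hypothetical i915 counterpart of the promise: */
	#define I915_GEM_CREATE_EXT_FLAG_NO_CPU_ACCESS (1 << 1)
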

> 
> Regards,
> Christian.
> 
> Am 27.04.22 um 08:48 schrieb Lionel Landwerlin:
>> One question though, how do we detect that this flag 
>> (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given 
>> kernel?
>> I assume older kernels are going to reject object creation if we use 
>> this flag?
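
One way userspace could detect this (an illustrative sketch only, not
something the RFC mandates) is to probe once at init time with a small
throw-away creation and check whether the kernel rejects the flag. Note
that the flag is only valid together with a local-memory placement,
hence the regions extension in the probe:

	struct drm_i915_gem_memory_class_instance placements[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
	};
	struct drm_i915_gem_create_ext_memory_regions regions = {
		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
		.num_regions = 2,
		.regions = (uintptr_t)placements,
	};
	struct drm_i915_gem_create_ext create = {
		.size = 4096,
		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
		.extensions = (uintptr_t)&regions,
	};
	bool has_needs_cpu_access = false;

	/* Older kernels reject unknown flags with -EINVAL. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create) == 0) {
		struct drm_gem_close args = { .handle = create.handle };

		has_needs_cpu_access = true;
		ioctl(fd, DRM_IOCTL_GEM_CLOSE, &args);
	}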
>>
>> I didn't plan to use __drm_i915_query_vma_info, but isn't it 
>> inconsistent to select the placement on the GEM object and then query 
>> whether it's mappable by address?
>> You made a comment stating this is racy, wouldn't querying on the GEM 
>> object prevent this?
>>
>> Thanks,
>>
>> -Lionel
>>
>> On 27/04/2022 09:35, Lionel Landwerlin wrote:
>>> Hi Matt,
>>>
>>>
>>> The proposal looks good to me.
>>>
>>> Looking forward to trying it on drm-tip.
>>>
>>>
>>> -Lionel
>>>
>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>    - Rework error capture interactions, including no longer needing
>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>> +++++++++++++++++++++++
>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>   3 files changed, 252 insertions(+)
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>> known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id 
>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +    /** @region: The class:instance pair encoding */
>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +    /** @rsvd0: MBZ */
>>>> +    __u32 rsvd0;
>>>> +
>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +    __u64 probed_size;
>>>> +
>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>> unknown) */
>>>> +    __u64 unallocated_size;
>>>> +
>>>> +    union {
>>>> +        /** @rsvd1: MBZ */
>>>> +        __u64 rsvd1[8];
>>>> +        struct {
>>>> +            /**
>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>> +             * that is CPU accessible. (-1 = unknown).
>>>> +             *
>>>> +             * This will be always be <= @probed_size, and the
>>>> +             * remainder(if there is any) will not be CPU
>>>> +             * accessible.
>>>> +             */
>>>> +            __u64 probed_cpu_visible_size;
>>>> +        };
>>>> +    };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>> behaviour, with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for 
>>>> the stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to create 
>>>> the object
>>>> + * with gem_create, and another to apply various parameters, 
>>>> however this
>>>> + * creates some ambiguity for the params which are considered 
>>>> immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +    /**
>>>> +     * @size: Requested size for the object.
>>>> +     *
>>>> +     * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +     *
>>>> +     * Note that for some devices we have might have further minimum
>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>> local-memory.
>>>> +     * However in general the final size here should always reflect 
>>>> any
>>>> +     * rounding up, if for example using the 
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +     * extension to place the object in device local-memory.
>>>> +     */
>>>> +    __u64 size;
>>>> +    /**
>>>> +     * @handle: Returned handle for the object.
>>>> +     *
>>>> +     * Object handles are nonzero.
>>>> +     */
>>>> +    __u32 handle;
>>>> +    /**
>>>> +     * @flags: Optional flags.
>>>> +     *
>>>> +     * Supported values:
>>>> +     *
>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>> kernel that
>>>> +     * the object will need to be accessed via the CPU.
>>>> +     *
>>>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, 
>>>> and
>>>> +     * only strictly required on platforms where only some of the 
>>>> device
>>>> +     * memory is directly visible or mappable through the CPU, like 
>>>> on DG2+.
>>>> +     *
>>>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>>>> +     * ensure we can always spill the allocation to system memory, 
>>>> if we
>>>> +     * can't place the object in the mappable part of
>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>> +     *
>>>> +     * Note that since the kernel only supports flat-CCS on objects 
>>>> that can
>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>> don't
>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>>>> +     * flat-CCS.
>>>> +     *
>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>> that the
>>>> +     * kernel can still migrate the object to the mappable part, as 
>>>> a last
>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +     * expensive, and so ideally should be avoided.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +    __u32 flags;
>>>> +    /**
>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>> +     *
>>>> +     * This will be useful in the future when we need to support 
>>>> several
>>>> +     * different extensions, and we need to apply more than one when
>>>> +     * creating the object. See struct i915_user_extension.
>>>> +     *
>>>> +     * If we don't supply any extensions then we get the same old 
>>>> gem_create
>>>> +     * behaviour.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +    __u64 extensions;
>>>> +};
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>> returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + *    struct drm_i915_query_vma_info info = {};
>>>> + *    struct drm_i915_query_item item = {
>>>> + *        .data_ptr = (uintptr_t)&info,
>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + *    };
>>>> + *    struct drm_i915_query query = {
>>>> + *        .num_items = 1,
>>>> + *        .items_ptr = (uintptr_t)&item,
>>>> + *    };
>>>> + *    int err;
>>>> + *
>>>> + *    // Unlike some other types of queries, there is no need to 
>>>> first query
>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + *    // time how big this needs to be.
>>>> + *    item.length = sizeof(info);
>>>> + *
>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>> wish
>>>> + *    // to query, before then firing off the query.
>>>> + *    info.vm_id = vm_id;
>>>> + *    info.offset = gtt_address;
>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + *    if (err || item.length < 0) ...
>>>> + *
>>>> + *    // If all went well we can now inspect the returned attributes.
>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +    /**
>>>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>>>> value
>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>> +     */
>>>> +    __u32 vm_id;
>>>> +    /** @pad: MBZ. */
>>>> +    __u32 pad;
>>>> +    /**
>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>> the kernel
>>>> +     * will use to perform the lookup.
>>>> +     */
>>>> +    __u64 offset;
>>>> +    /**
>>>> +     * @attributes: The returned attributes for the given vma.
>>>> +     *
>>>> +     * Possible values:
>>>> +     *
>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>> backing the
>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>> the vma is
>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>> the CPU
>>>> +     * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>> +     * require the kernel first synchronising any GPU work tied to the
>>>> +     * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +     * placements permit it. See 
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +     *
>>>> +     * Note that this is inherently racy.
>>>> +     */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +    __u64 attributes;
>>>> +    /** @rsvd: MBZ */
>>>> +    __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>> local-memory(i.e
>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>> might still be
>>>> +smaller than the total probed_size. In such cases, only some subset of
>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>> first 256M),
>>>> +while the remainder is only accessible via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in 
>>>> I915_MEMORY_CLASS_DEVICE, where
>>>> +underneath the device has a small BAR, meaning only some portion of 
>>>> it is CPU
>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>> access is not
>>>> +required, and prioritize using the non-CPU visible portion of
>>>> +I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct__drm_i915_memory_region attribute which returns the 
>>>> total size of the
>>>> +CPU accessible portion, for the particular region. This should only be
>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>> with the
>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>> visible portion,
>>>> +where the total size of the heap needs to be known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only 
>>>> support
>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>>>> +currently placed in memory that is accessible by the CPU. This 
>>>> should always be
>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>> I915_MEMORY_CLASS_DEVICE
>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>> object will
>>>> +likely first require migrating the pages.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_query_vma_info
>>>> +
>>>> +Error Capture restrictions
>>>> +--------------------------
>>>> +With error capture we have two new restrictions:
>>>> +
>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>> pages are not
>>>> +    CPU accessible, at the time of capture, then the kernel is free 
>>>> to skip
>>>> +    trying to capture them.
>>>> +
>>>> +    2) On discrete we now reject error capture on recoverable 
>>>> contexts. In the
>>>> +    future the kernel may want to blit during error capture, when 
>>>> for example
>>>> +    something is not currently CPU accessible.
>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>> b/Documentation/gpu/rfc/index.rst
>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>> --- a/Documentation/gpu/rfc/index.rst
>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>   .. toctree::
>>>>         i915_scheduler.rst
>>>> +
>>>> +.. toctree::
>>>> +
>>>> +    i915_small_bar.rst
>>>
>>>
>>
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-04-27 15:02         ` Matthew Auld
  0 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-04-27 15:02 UTC (permalink / raw)
  To: Christian König, Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, Daniel Vetter, dri-devel, Kenneth Graunke,
	mesa-dev

On 27/04/2022 07:55, Christian König wrote:
> Well, usually we increment the drm minor version when adding some new 
> flags on amdgpu.
> 
> In addition, just one comment from our experience with this: you don't 
> just need one flag, but two. The first one is a hint which says "CPU 
> access needed" and the second is a promise which says "CPU access 
> never needed".
> 
> The background is that for a whole bunch of buffers you can say with 
> 100% certainty that you will never ever need CPU access.
> 
> Then we have a whole bunch of buffers where we might need CPU access, 
> but can't tell for sure.
> 
> And last we have stuff like transfer buffers where you can be 100% 
> sure that you need CPU access.
> 
> Separating it like this helped a lot with performance on small BAR systems.

Thanks for the comments. For the "CPU access never needed" flag, what 
extra stuff does that do on the kernel side vs not specifying any 
flag/hint? I assume it still prioritizes using the non-CPU visible 
portion first? What else does it do?

> 
> Regards,
> Christian.
> 
> Am 27.04.22 um 08:48 schrieb Lionel Landwerlin:
>> One question though, how do we detect that this flag 
>> (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given 
>> kernel?
>> I assume older kernels are going to reject object creation if we use 
>> this flag?
>>
>> I didn't plan to use __drm_i915_query_vma_info, but isn't it 
>> inconsistent to select the placement on the GEM object and then query 
>> whether it's mappable by address?
>> You made a comment stating this is racy, wouldn't querying on the GEM 
>> object prevent this?
>>
>> Thanks,
>>
>> -Lionel
>>
>> On 27/04/2022 09:35, Lionel Landwerlin wrote:
>>> Hi Matt,
>>>
>>>
>>> The proposal looks good to me.
>>>
>>> Looking forward to trying it on drm-tip.
>>>
>>>
>>> -Lionel
>>>
>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>    - Rework error capture interactions, including no longer needing
>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>> +++++++++++++++++++++++
>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>   3 files changed, 252 insertions(+)
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>> known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id 
>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +    /** @region: The class:instance pair encoding */
>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +    /** @rsvd0: MBZ */
>>>> +    __u32 rsvd0;
>>>> +
>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +    __u64 probed_size;
>>>> +
>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>> unknown) */
>>>> +    __u64 unallocated_size;
>>>> +
>>>> +    union {
>>>> +        /** @rsvd1: MBZ */
>>>> +        __u64 rsvd1[8];
>>>> +        struct {
>>>> +            /**
>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>> +             * that is CPU accessible. (-1 = unknown).
>>>> +             *
>>>> +             * This will be always be <= @probed_size, and the
>>>> +             * remainder(if there is any) will not be CPU
>>>> +             * accessible.
>>>> +             */
>>>> +            __u64 probed_cpu_visible_size;
>>>> +        };
>>>> +    };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>> behaviour, with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for 
>>>> the stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to create 
>>>> the object
>>>> + * with gem_create, and another to apply various parameters, 
>>>> however this
>>>> + * creates some ambiguity for the params which are considered 
>>>> immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +    /**
>>>> +     * @size: Requested size for the object.
>>>> +     *
>>>> +     * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +     *
>>>> +     * Note that for some devices we have might have further minimum
>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>> local-memory.
>>>> +     * However in general the final size here should always reflect 
>>>> any
>>>> +     * rounding up, if for example using the 
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +     * extension to place the object in device local-memory.
>>>> +     */
>>>> +    __u64 size;
>>>> +    /**
>>>> +     * @handle: Returned handle for the object.
>>>> +     *
>>>> +     * Object handles are nonzero.
>>>> +     */
>>>> +    __u32 handle;
>>>> +    /**
>>>> +     * @flags: Optional flags.
>>>> +     *
>>>> +     * Supported values:
>>>> +     *
>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>> kernel that
>>>> +     * the object will need to be accessed via the CPU.
>>>> +     *
>>>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, 
>>>> and
>>>> +     * only strictly required on platforms where only some of the 
>>>> device
>>>> +     * memory is directly visible or mappable through the CPU, like 
>>>> on DG2+.
>>>> +     *
>>>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>>>> +     * ensure we can always spill the allocation to system memory, 
>>>> if we
>>>> +     * can't place the object in the mappable part of
>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>> +     *
>>>> +     * Note that since the kernel only supports flat-CCS on objects 
>>>> that can
>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>> don't
>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>>>> +     * flat-CCS.
>>>> +     *
>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>> that the
>>>> +     * kernel can still migrate the object to the mappable part, as 
>>>> a last
>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +     * expensive, and so ideally should be avoided.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +    __u32 flags;
>>>> +    /**
>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>> +     *
>>>> +     * This will be useful in the future when we need to support 
>>>> several
>>>> +     * different extensions, and we need to apply more than one when
>>>> +     * creating the object. See struct i915_user_extension.
>>>> +     *
>>>> +     * If we don't supply any extensions then we get the same old 
>>>> gem_create
>>>> +     * behaviour.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +    __u64 extensions;
>>>> +};
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>> returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + *    struct drm_i915_query_vma_info info = {};
>>>> + *    struct drm_i915_query_item item = {
>>>> + *        .data_ptr = (uintptr_t)&info,
>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + *    };
>>>> + *    struct drm_i915_query query = {
>>>> + *        .num_items = 1,
>>>> + *        .items_ptr = (uintptr_t)&item,
>>>> + *    };
>>>> + *    int err;
>>>> + *
>>>> + *    // Unlike some other types of queries, there is no need to 
>>>> first query
>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + *    // time how big this needs to be.
>>>> + *    item.length = sizeof(info);
>>>> + *
>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>> wish
>>>> + *    // to query, before then firing off the query.
>>>> + *    info.vm_id = vm_id;
>>>> + *    info.offset = gtt_address;
>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + *    if (err || item.length < 0) ...
>>>> + *
>>>> + *    // If all went well we can now inspect the returned attributes.
>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +    /**
>>>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>>>> value
>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>> +     */
>>>> +    __u32 vm_id;
>>>> +    /** @pad: MBZ. */
>>>> +    __u32 pad;
>>>> +    /**
>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>> the kernel
>>>> +     * will use to perform the lookup.
>>>> +     */
>>>> +    __u64 offset;
>>>> +    /**
>>>> +     * @attributes: The returned attributes for the given vma.
>>>> +     *
>>>> +     * Possible values:
>>>> +     *
>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>> backing the
>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>> the vma is
>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>> the CPU
>>>> +     * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>> +     * require the kernel first synchronising any GPU work tied to the
>>>> +     * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +     * placements permit it. See 
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +     *
>>>> +     * Note that this is inherently racy.
>>>> +     */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +    __u64 attributes;
>>>> +    /** @rsvd: MBZ */
>>>> +    __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>> local-memory(i.e
>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>> might still be
>>>> +smaller than the total probed_size. In such cases, only some subset of
>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>> first 256M),
>>>> +while the remainder is only accessible via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in 
>>>> I915_MEMORY_CLASS_DEVICE, where
>>>> +underneath the device has a small BAR, meaning only some portion of 
>>>> it is CPU
>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>> access is not
>>>> +required, and prioritize using the non-CPU visible portion of
>>>> +I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct__drm_i915_memory_region attribute which returns the 
>>>> total size of the
>>>> +CPU accessible portion, for the particular region. This should only be
>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>> with the
>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>> visible portion,
>>>> +where the total size of the heap needs to be known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only 
>>>> support
>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>>>> +currently placed in memory that is accessible by the CPU. This 
>>>> should always be
>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>> I915_MEMORY_CLASS_DEVICE
>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>> object will
>>>> +likely first require migrating the pages.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_query_vma_info
>>>> +
>>>> +Error Capture restrictions
>>>> +--------------------------
>>>> +With error capture we have two new restrictions:
>>>> +
>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>> pages are not
>>>> +    CPU accessible, at the time of capture, then the kernel is free 
>>>> to skip
>>>> +    trying to capture them.
>>>> +
>>>> +    2) On discrete we now reject error capture on recoverable 
>>>> contexts. In the
>>>> +    future the kernel may want to blit during error capture, when 
>>>> for example
>>>> +    something is not currently CPU accessible.
>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>> b/Documentation/gpu/rfc/index.rst
>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>> --- a/Documentation/gpu/rfc/index.rst
>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>   .. toctree::
>>>>         i915_scheduler.rst
>>>> +
>>>> +.. toctree::
>>>> +
>>>> +    i915_small_bar.rst
>>>
>>>
>>
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-27 15:02         ` [Intel-gfx] " Matthew Auld
@ 2022-04-27 15:04           ` Christian König
  -1 siblings, 0 replies; 50+ messages in thread
From: Christian König @ 2022-04-27 15:04 UTC (permalink / raw)
  To: Matthew Auld, Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, Daniel Vetter, dri-devel, Kenneth Graunke,
	Jon Bloomfield, mesa-dev, Akeem G Abodunrin

Am 27.04.22 um 17:02 schrieb Matthew Auld:
> On 27/04/2022 07:55, Christian König wrote:
>> Well, usually we increment the drm minor version when adding some new 
>> flags on amdgpu.
>>
>> In addition, just one comment from our experience with this: you don't 
>> just need one flag, but two. The first one is a hint which says "CPU 
>> access needed" and the second is a promise which says "CPU access 
>> never needed".
>>
>> The background is that for a whole bunch of buffers you can say with 
>> 100% certainty that you will never ever need CPU access.
>>
>> Then we have a whole bunch of buffers where we might need CPU access, 
>> but can't tell for sure.
>>
>> And last we have stuff like transfer buffers where you can be 100% 
>> sure that you need CPU access.
>>
>> Separating it like this helped a lot with performance on small BAR 
>> systems.
>
> Thanks for the comments. For the "CPU access never needed" flag, what 
> extra stuff does that do on the kernel side vs not specifying any 
> flag/hint? I assume it still prioritizes using the non-CPU visible 
> portion first? What else does it do?

It's used as a hint when you need to pin BOs for scanout, for example.

In general we try to allocate BOs which are marked "CPU access needed" 
in the CPU visible window if possible, but fall back to any memory if 
that won't fit.
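
Sketched as pseudo-logic (illustrative only, not actual amdgpu or i915
code), the preference described above is roughly:

	/* Should the allocator try the CPU visible part of VRAM first? */
	static bool try_cpu_visible_vram_first(u64 flags)
	{
		/* "CPU access needed" hint: try the visible window first,
		 * but fall back to any memory if it doesn't fit there. */
		if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
			return true;

		/* "Never needed" promise, or no flag at all: prefer the
		 * non-visible part; a fault-time migration only ever
		 * happens in the "no flag" case. */
		return false;
	}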

Christian.

>
>>
>> Regards,
>> Christian.
>>
>> Am 27.04.22 um 08:48 schrieb Lionel Landwerlin:
>>> One question though, how do we detect that this flag 
>>> (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given 
>>> kernel?
>>> I assume older kernels are going to reject object creation if we use 
>>> this flag?
>>>
>>> I didn't plan to use __drm_i915_query_vma_info, but isn't it 
>>> inconsistent to select the placement on the GEM object and then 
>>> query whether it's mappable by address?
>>> You made a comment stating this is racy, wouldn't querying on the 
>>> GEM object prevent this?
>>>
>>> Thanks,
>>>
>>> -Lionel
>>>
>>> On 27/04/2022 09:35, Lionel Landwerlin wrote:
>>>> Hi Matt,
>>>>
>>>>
>>>> The proposal looks good to me.
>>>>
>>>> Looking forward to trying it on drm-tip.
>>>>
>>>>
>>>> -Lionel
>>>>
>>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>>
>>>>> v2:
>>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>>    - Rework error capture interactions, including no longer needing
>>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>>
>>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>>> Cc: mesa-dev@lists.freedesktop.org
>>>>> ---
>>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>>> +++++++++++++++++++++++
>>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>>   3 files changed, 252 insertions(+)
>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>>
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> new file mode 100644
>>>>> index 000000000000..7bfd0cf44d35
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> @@ -0,0 +1,190 @@
>>>>> +/**
>>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>>> known to the
>>>>> + * driver.
>>>>> + *
>>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>>> drm_i915_query.
>>>>> + * For this new query we are adding the new query id 
>>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>>> + * at &drm_i915_query_item.query_id.
>>>>> + */
>>>>> +struct __drm_i915_memory_region_info {
>>>>> +    /** @region: The class:instance pair encoding */
>>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>>> +
>>>>> +    /** @rsvd0: MBZ */
>>>>> +    __u32 rsvd0;
>>>>> +
>>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>>> +    __u64 probed_size;
>>>>> +
>>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>>> unknown) */
>>>>> +    __u64 unallocated_size;
>>>>> +
>>>>> +    union {
>>>>> +        /** @rsvd1: MBZ */
>>>>> +        __u64 rsvd1[8];
>>>>> +        struct {
>>>>> +            /**
>>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>>> +             * that is CPU accessible. (-1 = unknown).
>>>>> +             *
>>>>> +             * This will always be <= @probed_size, and the
>>>>> +             * remainder(if there is any) will not be CPU
>>>>> +             * accessible.
>>>>> +             */
>>>>> +            __u64 probed_cpu_visible_size;
>>>>> +        };
>>>>> +    };
>>>>> +};
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>>> behaviour, with added
>>>>> + * extension support using struct i915_user_extension.
>>>>> + *
>>>>> + * Note that new buffer flags should be added here, at least for 
>>>>> the stuff that
>>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>>> create the object
>>>>> + * with gem_create, and another to apply various parameters, 
>>>>> however this
>>>>> + * creates some ambiguity for the params which are considered 
>>>>> immutable. Also in
>>>>> + * general we're phasing out the various SET/GET ioctls.
>>>>> + */
>>>>> +struct __drm_i915_gem_create_ext {
>>>>> +    /**
>>>>> +     * @size: Requested size for the object.
>>>>> +     *
>>>>> +     * The (page-aligned) allocated size for the object will be 
>>>>> returned.
>>>>> +     *
>>>>> +     * Note that for some devices we might have further minimum
>>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>>> local-memory.
>>>>> +     * However in general the final size here should always 
>>>>> reflect any
>>>>> +     * rounding up, if for example using the 
>>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>>> +     * extension to place the object in device local-memory.
>>>>> +     */
>>>>> +    __u64 size;
>>>>> +    /**
>>>>> +     * @handle: Returned handle for the object.
>>>>> +     *
>>>>> +     * Object handles are nonzero.
>>>>> +     */
>>>>> +    __u32 handle;
>>>>> +    /**
>>>>> +     * @flags: Optional flags.
>>>>> +     *
>>>>> +     * Supported values:
>>>>> +     *
>>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>>> kernel that
>>>>> +     * the object will need to be accessed via the CPU.
>>>>> +     *
>>>>> +     * Only valid when placing objects in 
>>>>> I915_MEMORY_CLASS_DEVICE, and
>>>>> +     * only strictly required on platforms where only some of the 
>>>>> device
>>>>> +     * memory is directly visible or mappable through the CPU, 
>>>>> like on DG2+.
>>>>> +     *
>>>>> +     * One of the placements MUST also be 
>>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>>> +     * ensure we can always spill the allocation to system 
>>>>> memory, if we
>>>>> +     * can't place the object in the mappable part of
>>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>>> +     *
>>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>>> objects that can
>>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>>> don't
>>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together 
>>>>> with
>>>>> +     * flat-CCS.
>>>>> +     *
>>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. 
>>>>> Note that the
>>>>> +     * kernel can still migrate the object to the mappable part, 
>>>>> as a last
>>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>>> might be
>>>>> +     * expensive, and so ideally should be avoided.
>>>>> +     */
>>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>>> +    __u32 flags;
>>>>> +    /**
>>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>>> +     *
>>>>> +     * This will be useful in the future when we need to support 
>>>>> several
>>>>> +     * different extensions, and we need to apply more than one when
>>>>> +     * creating the object. See struct i915_user_extension.
>>>>> +     *
>>>>> +     * If we don't supply any extensions then we get the same old 
>>>>> gem_create
>>>>> +     * behaviour.
>>>>> +     *
>>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>>> +     *
>>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>>> +     */
>>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>>> +    __u64 extensions;
>>>>> +};
>>>>> +
>>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_query_vma_info
>>>>> + *
>>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>>> returning its set
>>>>> + * of attributes.
>>>>> + *
>>>>> + * .. code-block:: C
>>>>> + *
>>>>> + *    struct drm_i915_query_vma_info info = {};
>>>>> + *    struct drm_i915_query_item item = {
>>>>> + *        .data_ptr = (uintptr_t)&info,
>>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>>> + *    };
>>>>> + *    struct drm_i915_query query = {
>>>>> + *        .num_items = 1,
>>>>> + *        .items_ptr = (uintptr_t)&item,
>>>>> + *    };
>>>>> + *    int err;
>>>>> + *
>>>>> + *    // Unlike some other types of queries, there is no need to 
>>>>> first query
>>>>> + *    // the size of the data_ptr blob here, since we already 
>>>>> know ahead of
>>>>> + *    // time how big this needs to be.
>>>>> + *    item.length = sizeof(info);
>>>>> + *
>>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma 
>>>>> we wish
>>>>> + *    // to query, before then firing off the query.
>>>>> + *    info.vm_id = vm_id;
>>>>> + *    info.offset = gtt_address;
>>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>>> + *    if (err || item.length < 0) ...
>>>>> + *
>>>>> + *    // If all went well we can now inspect the returned 
>>>>> attributes.
>>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>>> + */
>>>>> +struct __drm_i915_query_vma_info {
>>>>> +    /**
>>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>>> the value
>>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>>> +     */
>>>>> +    __u32 vm_id;
>>>>> +    /** @pad: MBZ. */
>>>>> +    __u32 pad;
>>>>> +    /**
>>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>>> the kernel
>>>>> +     * will use to perform the lookup.
>>>>> +     */
>>>>> +    __u64 offset;
>>>>> +    /**
>>>>> +     * @attributes: The returned attributes for the given vma.
>>>>> +     *
>>>>> +     * Possible values:
>>>>> +     *
>>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>>> backing the
>>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>>> the vma is
>>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>>> the CPU
>>>>> +     * cannot directly access(this is only possible on discrete 
>>>>> devices with
>>>>> +     * a small BAR). Attempting to MMAP and fault such an object 
>>>>> will
>>>>> +     * require the kernel first synchronising any GPU work tied 
>>>>> to the
>>>>> +     * object, before then migrating the pages, either to the CPU 
>>>>> accessible
>>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>>> +     * placements permit it. See 
>>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>>> +     *
>>>>> +     * Note that this is inherently racy.
>>>>> +     */
>>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>>> +    __u64 attributes;
>>>>> +    /** @rsvd: MBZ */
>>>>> +    __u32 rsvd[4];
>>>>> +};
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> new file mode 100644
>>>>> index 000000000000..be3d9bcdd86d
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> @@ -0,0 +1,58 @@
>>>>> +==========================
>>>>> +I915 Small BAR RFC Section
>>>>> +==========================
>>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>>> local-memory(i.e
>>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>>> might still be
>>>>> +smaller than the total probed_size. In such cases, only some 
>>>>> subset of
>>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>>> first 256M),
>>>>> +while the remainder is only accessible via the GPU.
>>>>> +
>>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>>> +----------------------------------------------
>>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>>> CPU access.
>>>>> +This becomes important when placing an object in 
>>>>> I915_MEMORY_CLASS_DEVICE, where
>>>>> +underneath the device has a small BAR, meaning only some portion 
>>>>> of it is CPU
>>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>>> access is not
>>>>> +required, and prioritize using the non-CPU visible portion of
>>>>> +I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_gem_create_ext
>>>>> +
>>>>> +probed_cpu_visible_size attribute
>>>>> +---------------------------------
>>>>> +New struct __drm_i915_memory_region_info attribute which returns the 
>>>>> total size of the
>>>>> +CPU accessible portion, for the particular region. This should 
>>>>> only be
>>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>>> with the
>>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>>> visible portion,
>>>>> +where the total size of the heap needs to be known.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_memory_region_info
>>>>> +
>>>>> +DRM_I915_QUERY_VMA_INFO query
>>>>> +-----------------------------
>>>>> +Query the attributes of some vma. Given a vm and GTT offset, find 
>>>>> the
>>>>> +respective vma, and return its set of attributes. For now we only 
>>>>> support
>>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>>> object/vma is
>>>>> +currently placed in memory that is accessible by the CPU. This 
>>>>> should always be
>>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>>> I915_MEMORY_CLASS_DEVICE
>>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>>> object will
>>>>> +likely first require migrating the pages.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_query_vma_info
>>>>> +
>>>>> +Error Capture restrictions
>>>>> +--------------------------
>>>>> +With error capture we have two new restrictions:
>>>>> +
>>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>>> pages are not
>>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>>> free to skip
>>>>> +    trying to capture them.
>>>>> +
>>>>> +    2) On discrete we now reject error capture on recoverable 
>>>>> contexts. In the
>>>>> +    future the kernel may want to blit during error capture, when 
>>>>> for example
>>>>> +    something is not currently CPU accessible.
>>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>>> b/Documentation/gpu/rfc/index.rst
>>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>>> --- a/Documentation/gpu/rfc/index.rst
>>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>>   .. toctree::
>>>>>         i915_scheduler.rst
>>>>> +
>>>>> +.. toctree::
>>>>> +
>>>>> +    i915_small_bar.rst
>>>>
>>>>
>>>
>>


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-27  6:48     ` [Intel-gfx] " Lionel Landwerlin
@ 2022-04-27 15:18       ` Matthew Auld
  -1 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-04-27 15:18 UTC (permalink / raw)
  To: Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, Jordan Justen, dri-devel, Kenneth Graunke,
	Jon Bloomfield, Daniel Vetter, mesa-dev, Akeem G Abodunrin

On 27/04/2022 07:48, Lionel Landwerlin wrote:
> One question though, how do we detect that this flag 
> (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given kernel?
> I assume older kernels are going to reject object creation if we use 
> this flag?

From some offline discussion with Lionel, the plan here is to just do a 
dummy gem_create_ext to check if the kernel throws an error with the new 
flag or not.
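
Roughly, that probe could look like the sketch below. This is only an 
illustration (not actual Mesa code), using the flag value and the 
placement rule from the RFC; older kernels reject the unknown flag, so a 
failing create means the feature isn't there. Error handling is trimmed 
and the device region instance is assumed to be 0.

#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

#ifndef I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0) /* from this RFC */
#endif

static bool has_needs_cpu_access(int fd)
{
        /* The RFC requires a system memory placement alongside device
         * local-memory whenever the flag is used. */
        struct drm_i915_gem_memory_class_instance placements[] = {
                { I915_MEMORY_CLASS_DEVICE, 0 },
                { I915_MEMORY_CLASS_SYSTEM, 0 },
        };
        struct drm_i915_gem_create_ext_memory_regions regions = {
                .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
                .num_regions = 2,
                .regions = (uintptr_t)placements,
        };
        struct drm_i915_gem_create_ext create = {
                .size = 4096,
                .flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
                .extensions = (uintptr_t)&regions,
        };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
                return false; /* flag (or placements) rejected: old kernel */

        struct drm_gem_close gem_close = { .handle = create.handle };
        ioctl(fd, DRM_IOCTL_GEM_CLOSE, &gem_close);
        return true;
}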

> 
> I didn't plan to use __drm_i915_query_vma_info, but isn't it 
> inconsistent to select the placement on the GEM object and then query 
> whether it's mappable by address?
> You made a comment stating this is racy, wouldn't querying on the GEM 
> object prevent this?

Since mesa doesn't currently have a use for this one, I guess we should 
maybe just drop this part of the uapi, in this version at least, if 
there are no objections.
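
For what it's worth, even without the vma query, the CPU visible split 
is still discoverable up front through the existing memory regions query 
plus the new probed_cpu_visible_size field from this RFC. A rough sketch 
(assuming pre-RFC headers, where the new field lands in rsvd1[0], and 
keeping only minimal error handling):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static void print_lmem_cpu_visible(int fd)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
        };
        struct drm_i915_query query = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };

        /* First pass: ask how big the data blob needs to be. */
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
                return;

        struct drm_i915_query_memory_regions *info = calloc(1, item.length);
        if (!info)
                return;

        /* Second pass: fill in the region info. */
        item.data_ptr = (uintptr_t)info;
        if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) == 0) {
                for (uint32_t i = 0; i < info->num_regions; i++) {
                        struct drm_i915_memory_region_info *r = &info->regions[i];

                        if (r->region.memory_class != I915_MEMORY_CLASS_DEVICE)
                                continue;
                        /* probed_cpu_visible_size per this RFC; on kernels
                         * without the change rsvd1[0] just reads as 0. */
                        printf("lmem%u: %llu / %llu bytes CPU visible\n",
                               (unsigned)r->region.memory_instance,
                               (unsigned long long)r->rsvd1[0],
                               (unsigned long long)r->probed_size);
                }
        }
        free(info);
}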

> 
> Thanks,
> 
> -Lionel
> 
> On 27/04/2022 09:35, Lionel Landwerlin wrote:
>> Hi Matt,
>>
>>
>> The proposal looks good to me.
>>
>> Looking forward to trying it on drm-tip.
>>
>>
>> -Lionel
>>
>> On 20/04/2022 20:13, Matthew Auld wrote:
>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>
>>> v2:
>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>    - Rework error capture interactions, including no longer needing
>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>    - Add probed_cpu_visible_size. (Lionel)
>>>
>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>> Cc: mesa-dev@lists.freedesktop.org
>>> ---
>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>   3 files changed, 252 insertions(+)
>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>> new file mode 100644
>>> index 000000000000..7bfd0cf44d35
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>> @@ -0,0 +1,190 @@
>>> +/**
>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>> known to the
>>> + * driver.
>>> + *
>>> + * Note this is using both struct drm_i915_query_item and struct 
>>> drm_i915_query.
>>> + * For this new query we are adding the new query id 
>>> DRM_I915_QUERY_MEMORY_REGIONS
>>> + * at &drm_i915_query_item.query_id.
>>> + */
>>> +struct __drm_i915_memory_region_info {
>>> +    /** @region: The class:instance pair encoding */
>>> +    struct drm_i915_gem_memory_class_instance region;
>>> +
>>> +    /** @rsvd0: MBZ */
>>> +    __u32 rsvd0;
>>> +
>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>> +    __u64 probed_size;
>>> +
>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>> unknown) */
>>> +    __u64 unallocated_size;
>>> +
>>> +    union {
>>> +        /** @rsvd1: MBZ */
>>> +        __u64 rsvd1[8];
>>> +        struct {
>>> +            /**
>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>> +             * that is CPU accessible. (-1 = unknown).
>>> +             *
>>> +             * This will always be <= @probed_size, and the
>>> +             * remainder(if there is any) will not be CPU
>>> +             * accessible.
>>> +             */
>>> +            __u64 probed_cpu_visible_size;
>>> +        };
>>> +    };
>>> +};
>>> +
>>> +/**
>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, 
>>> with added
>>> + * extension support using struct i915_user_extension.
>>> + *
>>> + * Note that new buffer flags should be added here, at least for the 
>>> stuff that
>>> + * is immutable. Previously we would have two ioctls, one to create 
>>> the object
>>> + * with gem_create, and another to apply various parameters, however 
>>> this
>>> + * creates some ambiguity for the params which are considered 
>>> immutable. Also in
>>> + * general we're phasing out the various SET/GET ioctls.
>>> + */
>>> +struct __drm_i915_gem_create_ext {
>>> +    /**
>>> +     * @size: Requested size for the object.
>>> +     *
>>> +     * The (page-aligned) allocated size for the object will be 
>>> returned.
>>> +     *
>>> +     * Note that for some devices we might have further minimum
>>> +     * page-size restrictions(larger than 4K), like for device 
>>> local-memory.
>>> +     * However in general the final size here should always reflect any
>>> +     * rounding up, if for example using the 
>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>> +     * extension to place the object in device local-memory.
>>> +     */
>>> +    __u64 size;
>>> +    /**
>>> +     * @handle: Returned handle for the object.
>>> +     *
>>> +     * Object handles are nonzero.
>>> +     */
>>> +    __u32 handle;
>>> +    /**
>>> +     * @flags: Optional flags.
>>> +     *
>>> +     * Supported values:
>>> +     *
>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>> kernel that
>>> +     * the object will need to be accessed via the CPU.
>>> +     *
>>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
>>> +     * only strictly required on platforms where only some of the 
>>> device
>>> +     * memory is directly visible or mappable through the CPU, like 
>>> on DG2+.
>>> +     *
>>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>>> +     * ensure we can always spill the allocation to system memory, 
>>> if we
>>> +     * can't place the object in the mappable part of
>>> +     * I915_MEMORY_CLASS_DEVICE.
>>> +     *
>>> +     * Note that since the kernel only supports flat-CCS on objects 
>>> that can
>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>>> +     * flat-CCS.
>>> +     *
>>> +     * Without this hint, the kernel will assume that non-mappable
>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>> that the
>>> +     * kernel can still migrate the object to the mappable part, as 
>>> a last
>>> +     * resort, if userspace ever CPU faults this object, but this 
>>> might be
>>> +     * expensive, and so ideally should be avoided.
>>> +     */
>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>> +    __u32 flags;
>>> +    /**
>>> +     * @extensions: The chain of extensions to apply to this object.
>>> +     *
>>> +     * This will be useful in the future when we need to support 
>>> several
>>> +     * different extensions, and we need to apply more than one when
>>> +     * creating the object. See struct i915_user_extension.
>>> +     *
>>> +     * If we don't supply any extensions then we get the same old 
>>> gem_create
>>> +     * behaviour.
>>> +     *
>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>> +     *
>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>> +     */
>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>> +    __u64 extensions;
>>> +};
>>> +
>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>> +
>>> +/**
>>> + * struct __drm_i915_query_vma_info
>>> + *
>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>> returning its set
>>> + * of attributes.
>>> + *
>>> + * .. code-block:: C
>>> + *
>>> + *    struct drm_i915_query_vma_info info = {};
>>> + *    struct drm_i915_query_item item = {
>>> + *        .data_ptr = (uintptr_t)&info,
>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>> + *    };
>>> + *    struct drm_i915_query query = {
>>> + *        .num_items = 1,
>>> + *        .items_ptr = (uintptr_t)&item,
>>> + *    };
>>> + *    int err;
>>> + *
>>> + *    // Unlike some other types of queries, there is no need to 
>>> first query
>>> + *    // the size of the data_ptr blob here, since we already know 
>>> ahead of
>>> + *    // time how big this needs to be.
>>> + *    item.length = sizeof(info);
>>> + *
>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we wish
>>> + *    // to query, before then firing off the query.
>>> + *    info.vm_id = vm_id;
>>> + *    info.offset = gtt_address;
>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>> + *    if (err || item.length < 0) ...
>>> + *
>>> + *    // If all went well we can now inspect the returned attributes.
>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>> + */
>>> +struct __drm_i915_query_vma_info {
>>> +    /**
>>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>>> value
>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>> +     * drm_i915_gem_vm_control.vm_id.
>>> +     */
>>> +    __u32 vm_id;
>>> +    /** @pad: MBZ. */
>>> +    __u32 pad;
>>> +    /**
>>> +     * @offset: The corresponding ppGTT address of the vma which the 
>>> kernel
>>> +     * will use to perform the lookup.
>>> +     */
>>> +    __u64 offset;
>>> +    /**
>>> +     * @attributes: The returned attributes for the given vma.
>>> +     *
>>> +     * Possible values:
>>> +     *
>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>> backing the
>>> +     * vma are currently CPU accessible. If this is not set then the 
>>> vma is
>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>> the CPU
>>> +     * cannot directly access(this is only possible on discrete 
>>> devices with
>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>> +     * require the kernel first synchronising any GPU work tied to the
>>> +     * object, before then migrating the pages, either to the CPU 
>>> accessible
>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>> I915_MEMORY_CLASS_SYSTEM, if the
>>> +     * placements permit it. See 
>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>> +     *
>>> +     * Note that this is inherently racy.
>>> +     */
>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>> +    __u64 attributes;
>>> +    /** @rsvd: MBZ */
>>> +    __u32 rsvd[4];
>>> +};
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>> new file mode 100644
>>> index 000000000000..be3d9bcdd86d
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>> @@ -0,0 +1,58 @@
>>> +==========================
>>> +I915 Small BAR RFC Section
>>> +==========================
>>> +Starting from DG2 we will have resizable BAR support for device 
>>> local-memory(i.e
>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>> might still be
>>> +smaller than the total probed_size. In such cases, only some subset of
>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>> first 256M),
>>> +while the remainder is only accessible via the GPU.
>>> +
>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>> +----------------------------------------------
>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>> CPU access.
>>> +This becomes important when placing an object in 
>>> I915_MEMORY_CLASS_DEVICE, where
>>> +underneath the device has a small BAR, meaning only some portion of 
>>> it is CPU
>>> +accessible. Without this flag the kernel will assume that CPU access 
>>> is not
>>> +required, and prioritize using the non-CPU visible portion of
>>> +I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_gem_create_ext
>>> +
>>> +probed_cpu_visible_size attribute
>>> +---------------------------------
>>> +New struct __drm_i915_memory_region_info attribute which returns the total 
>>> size of the
>>> +CPU accessible portion, for the particular region. This should only be
>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>> with the
>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>> visible portion,
>>> +where the total size of the heap needs to be known.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_memory_region_info
>>> +
>>> +DRM_I915_QUERY_VMA_INFO query
>>> +-----------------------------
>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>> +respective vma, and return its set of attributes. For now we only 
>>> support
>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>>> +currently placed in memory that is accessible by the CPU. This 
>>> should always be
>>> +set on devices where the CPU probed_cpu_visible_size of 
>>> I915_MEMORY_CLASS_DEVICE
>>> +matches the probed_size. If this is not set then CPU faulting the 
>>> object will
>>> +likely first require migrating the pages.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_query_vma_info
>>> +
>>> +Error Capture restrictions
>>> +--------------------------
>>> +With error capture we have two new restrictions:
>>> +
>>> +    1) Error capture is best effort on small BAR systems; if the 
>>> pages are not
>>> +    CPU accessible, at the time of capture, then the kernel is free 
>>> to skip
>>> +    trying to capture them.
>>> +
>>> +    2) On discrete we now reject error capture on recoverable 
>>> contexts. In the
>>> +    future the kernel may want to blit during error capture, when 
>>> for example
>>> +    something is not currently CPU accessible.
>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>> b/Documentation/gpu/rfc/index.rst
>>> index 91e93a705230..5a3bd3924ba6 100644
>>> --- a/Documentation/gpu/rfc/index.rst
>>> +++ b/Documentation/gpu/rfc/index.rst
>>> @@ -23,3 +23,7 @@ host such documentation:
>>>   .. toctree::
>>>         i915_scheduler.rst
>>> +
>>> +.. toctree::
>>> +
>>> +    i915_small_bar.rst
>>
>>
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-04-27 15:18       ` Matthew Auld
  0 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-04-27 15:18 UTC (permalink / raw)
  To: Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 27/04/2022 07:48, Lionel Landwerlin wrote:
> One question though, how do we detect that this flag 
> (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given kernel?
> I assume older kernels are going to reject object creation if we use 
> this flag?

From some offline discussion with Lionel, the plan here is to just do a 
dummy gem_create_ext to check if the kernel throws an error with the new 
flag or not.

> 
> I didn't plan to use __drm_i915_query_vma_info, but isn't it 
> inconsistent to select the placement on the GEM object and then query 
> whether it's mappable by address?
> You made a comment stating this is racy, wouldn't querying on the GEM 
> object prevent this?

Since mesa doesn't currently have a use for this one, I guess we should 
maybe just drop this part of the uapi, in this version at least, if 
there are no objections.

> 
> Thanks,
> 
> -Lionel
> 
> On 27/04/2022 09:35, Lionel Landwerlin wrote:
>> Hi Matt,
>>
>>
>> The proposal looks good to me.
>>
>> Looking forward to trying it on drm-tip.
>>
>>
>> -Lionel
>>
>> On 20/04/2022 20:13, Matthew Auld wrote:
>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>
>>> v2:
>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>    - Rework error capture interactions, including no longer needing
>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>    - Add probed_cpu_visible_size. (Lionel)
>>>
>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>> Cc: mesa-dev@lists.freedesktop.org
>>> ---
>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>   3 files changed, 252 insertions(+)
>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>> new file mode 100644
>>> index 000000000000..7bfd0cf44d35
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>> @@ -0,0 +1,190 @@
>>> +/**
>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>> known to the
>>> + * driver.
>>> + *
>>> + * Note this is using both struct drm_i915_query_item and struct 
>>> drm_i915_query.
>>> + * For this new query we are adding the new query id 
>>> DRM_I915_QUERY_MEMORY_REGIONS
>>> + * at &drm_i915_query_item.query_id.
>>> + */
>>> +struct __drm_i915_memory_region_info {
>>> +    /** @region: The class:instance pair encoding */
>>> +    struct drm_i915_gem_memory_class_instance region;
>>> +
>>> +    /** @rsvd0: MBZ */
>>> +    __u32 rsvd0;
>>> +
>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>> +    __u64 probed_size;
>>> +
>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>> unknown) */
>>> +    __u64 unallocated_size;
>>> +
>>> +    union {
>>> +        /** @rsvd1: MBZ */
>>> +        __u64 rsvd1[8];
>>> +        struct {
>>> +            /**
>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>> +             * that is CPU accessible. (-1 = unknown).
>>> +             *
>>> +             * This will always be <= @probed_size, and the
>>> +             * remainder(if there is any) will not be CPU
>>> +             * accessible.
>>> +             */
>>> +            __u64 probed_cpu_visible_size;
>>> +        };
>>> +    };
>>> +};
>>> +
>>> +/**
>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, 
>>> with added
>>> + * extension support using struct i915_user_extension.
>>> + *
>>> + * Note that new buffer flags should be added here, at least for the 
>>> stuff that
>>> + * is immutable. Previously we would have two ioctls, one to create 
>>> the object
>>> + * with gem_create, and another to apply various parameters, however 
>>> this
>>> + * creates some ambiguity for the params which are considered 
>>> immutable. Also in
>>> + * general we're phasing out the various SET/GET ioctls.
>>> + */
>>> +struct __drm_i915_gem_create_ext {
>>> +    /**
>>> +     * @size: Requested size for the object.
>>> +     *
>>> +     * The (page-aligned) allocated size for the object will be 
>>> returned.
>>> +     *
>>> +     * Note that for some devices we might have further minimum
>>> +     * page-size restrictions(larger than 4K), like for device 
>>> local-memory.
>>> +     * However in general the final size here should always reflect any
>>> +     * rounding up, if for example using the 
>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>> +     * extension to place the object in device local-memory.
>>> +     */
>>> +    __u64 size;
>>> +    /**
>>> +     * @handle: Returned handle for the object.
>>> +     *
>>> +     * Object handles are nonzero.
>>> +     */
>>> +    __u32 handle;
>>> +    /**
>>> +     * @flags: Optional flags.
>>> +     *
>>> +     * Supported values:
>>> +     *
>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>> kernel that
>>> +     * the object will need to be accessed via the CPU.
>>> +     *
>>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
>>> +     * only strictly required on platforms where only some of the 
>>> device
>>> +     * memory is directly visible or mappable through the CPU, like 
>>> on DG2+.
>>> +     *
>>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>>> +     * ensure we can always spill the allocation to system memory, 
>>> if we
>>> +     * can't place the object in the mappable part of
>>> +     * I915_MEMORY_CLASS_DEVICE.
>>> +     *
>>> +     * Note that since the kernel only supports flat-CCS on objects 
>>> that can
>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>>> +     * flat-CCS.
>>> +     *
>>> +     * Without this hint, the kernel will assume that non-mappable
>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>> that the
>>> +     * kernel can still migrate the object to the mappable part, as 
>>> a last
>>> +     * resort, if userspace ever CPU faults this object, but this 
>>> might be
>>> +     * expensive, and so ideally should be avoided.
>>> +     */
>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>> +    __u32 flags;
>>> +    /**
>>> +     * @extensions: The chain of extensions to apply to this object.
>>> +     *
>>> +     * This will be useful in the future when we need to support 
>>> several
>>> +     * different extensions, and we need to apply more than one when
>>> +     * creating the object. See struct i915_user_extension.
>>> +     *
>>> +     * If we don't supply any extensions then we get the same old 
>>> gem_create
>>> +     * behaviour.
>>> +     *
>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>> +     *
>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>> +     */
>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>> +    __u64 extensions;
>>> +};
>>> +
>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>> +
>>> +/**
>>> + * struct __drm_i915_query_vma_info
>>> + *
>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>> returning its set
>>> + * of attributes.
>>> + *
>>> + * .. code-block:: C
>>> + *
>>> + *    struct drm_i915_query_vma_info info = {};
>>> + *    struct drm_i915_query_item item = {
>>> + *        .data_ptr = (uintptr_t)&info,
>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>> + *    };
>>> + *    struct drm_i915_query query = {
>>> + *        .num_items = 1,
>>> + *        .items_ptr = (uintptr_t)&item,
>>> + *    };
>>> + *    int err;
>>> + *
>>> + *    // Unlike some other types of queries, there is no need to 
>>> first query
>>> + *    // the size of the data_ptr blob here, since we already know 
>>> ahead of
>>> + *    // time how big this needs to be.
>>> + *    item.length = sizeof(info);
>>> + *
>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we wish
>>> + *    // to query, before then firing off the query.
>>> + *    info.vm_id = vm_id;
>>> + *    info.offset = gtt_address;
>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>> + *    if (err || item.length < 0) ...
>>> + *
>>> + *    // If all went well we can now inspect the returned attributes.
>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>> + */
>>> +struct __drm_i915_query_vma_info {
>>> +    /**
>>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>>> value
>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>> +     * drm_i915_gem_vm_control.vm_id.
>>> +     */
>>> +    __u32 vm_id;
>>> +    /** @pad: MBZ. */
>>> +    __u32 pad;
>>> +    /**
>>> +     * @offset: The corresponding ppGTT address of the vma which the 
>>> kernel
>>> +     * will use to perform the lookup.
>>> +     */
>>> +    __u64 offset;
>>> +    /**
>>> +     * @attributes: The returned attributes for the given vma.
>>> +     *
>>> +     * Possible values:
>>> +     *
>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing the
>>> +     * vma are currently CPU accessible. If this is not set then the vma
>>> +     * is currently backed by I915_MEMORY_CLASS_DEVICE memory, which the
>>> +     * CPU cannot directly access (this is only possible on discrete
>>> +     * devices with a small BAR). Attempting to mmap and fault such an
>>> +     * object will require the kernel to first synchronise any GPU work
>>> +     * tied to the object, and then migrate the pages, either to the CPU
>>> +     * accessible part of I915_MEMORY_CLASS_DEVICE, or to
>>> +     * I915_MEMORY_CLASS_SYSTEM, if the placements permit it. See
>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>> +     *
>>> +     * Note that this is inherently racy.
>>> +     */
>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>> +    __u64 attributes;
>>> +    /** @rsvd: MBZ */
>>> +    __u32 rsvd[4];
>>> +};
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>> new file mode 100644
>>> index 000000000000..be3d9bcdd86d
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>> @@ -0,0 +1,58 @@
>>> +==========================
>>> +I915 Small BAR RFC Section
>>> +==========================
>>> +Starting from DG2 we will have resizable BAR support for device
>>> +local-memory (i.e. I915_MEMORY_CLASS_DEVICE), but in some cases the final
>>> +BAR size might still be smaller than the total probed_size. In such cases,
>>> +only some subset of I915_MEMORY_CLASS_DEVICE will be CPU accessible (for
>>> +example the first 256M), while the remainder is only accessible via the GPU.
>>> +
>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>> +----------------------------------------------
>>> +New gem_create_ext flag to tell the kernel that a BO will require CPU
>>> +access. This becomes important when placing an object in
>>> +I915_MEMORY_CLASS_DEVICE, where the underlying device has a small BAR,
>>> +meaning only some portion of it is CPU accessible. Without this flag the
>>> +kernel will assume that CPU access is not required, and prioritize using
>>> +the non-CPU visible portion of I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_gem_create_ext
>>> +
>>> +probed_cpu_visible_size attribute
>>> +---------------------------------
>>> +New struct __drm_i915_memory_region_info attribute which returns the total
>>> +size of the CPU accessible portion, for the particular region. This should
>>> +only be applicable for I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>> with the
>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>> visible portion,
>>> +where the total size of the heap needs to be known.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_memory_region_info
>>> +
>>> +DRM_I915_QUERY_VMA_INFO query
>>> +-----------------------------
>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>> +respective vma, and return its set of attributes. For now we only 
>>> support
>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>>> +currently placed in memory that is accessible by the CPU. This should
>>> +always be set on devices where the probed_cpu_visible_size of
>>> +I915_MEMORY_CLASS_DEVICE matches the probed_size. If this is not set then
>>> +CPU faulting the object will likely first require migrating the pages.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_query_vma_info
>>> +
>>> +Error Capture restrictions
>>> +--------------------------
>>> +With error capture we have two new restrictions:
>>> +
>>> +    1) Error capture is best effort on small BAR systems; if the pages
>>> +    are not CPU accessible at the time of capture, then the kernel is
>>> +    free to skip trying to capture them.
>>> +
>>> +    2) On discrete we now reject error capture on recoverable 
>>> contexts. In the
>>> +    future the kernel may want to blit during error capture, when 
>>> for example
>>> +    something is not currently CPU accessible.
>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>> b/Documentation/gpu/rfc/index.rst
>>> index 91e93a705230..5a3bd3924ba6 100644
>>> --- a/Documentation/gpu/rfc/index.rst
>>> +++ b/Documentation/gpu/rfc/index.rst
>>> @@ -23,3 +23,7 @@ host such documentation:
>>>   .. toctree::
>>>         i915_scheduler.rst
>>> +
>>> +.. toctree::
>>> +
>>> +    i915_small_bar.rst
>>
>>
> 


* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-27 15:18       ` [Intel-gfx] " Matthew Auld
@ 2022-04-27 15:37         ` Lionel Landwerlin
  -1 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-04-27 15:37 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Jordan Justen, dri-devel, Kenneth Graunke,
	Jon Bloomfield, Daniel Vetter, mesa-dev, Akeem G Abodunrin

On 27/04/2022 18:18, Matthew Auld wrote:
> On 27/04/2022 07:48, Lionel Landwerlin wrote:
>> One question though, how do we detect that this flag 
>> (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given 
>> kernel?
>> I assume older kernels are going to reject object creation if we use 
>> this flag?
>
> From some offline discussion with Lionel, the plan here is to just do 
> a dummy gem_create_ext to check if the kernel throws an error with the 
> new flag or not.
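
For reference, a minimal probe along those lines could look roughly like
the (untested) sketch below. It assumes the flag value from this RFC, and
the final kernel might additionally require suitable placements via
I915_GEM_CREATE_EXT_MEMORY_REGIONS before it accepts the flag:

#include <stdbool.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Value proposed in this RFC; not yet in upstream i915_drm.h. */
#ifndef I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1u << 0)
#endif

static bool has_needs_cpu_access(int fd)
{
        /* Throwaway object; an unknown flag should fail with -EINVAL. */
        struct drm_i915_gem_create_ext create = {
                .size = 4096,
                .flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
        };
        struct drm_gem_close close_bo = { 0 };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
                return false;

        close_bo.handle = create.handle;
        drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
        return true;
}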
>
>>
>> I didn't plan to use __drm_i915_query_vma_info, but isn't it 
>> inconsistent to select the placement on the GEM object and then query 
>> whether it's mappable by address?
>> You made a comment stating this is racy, wouldn't querying on the GEM 
>> object prevent this?
>
> Since mesa at this time doesn't currently have a use for this one, 
> then I guess we should maybe just drop this part of the uapi, in this 
> version at least, if no objections.


Just repeating what we discussed (maybe I missed some other discussion 
and that's why I was confused) :


The way I was planning to use this is to have 3 heaps in Vulkan :

     - heap0: local only, no cpu visible

     - heap1: system, cpu visible

     - heap2: local & cpu visible


With heap2 having the reported probed_cpu_visible_size as its size.

It is an error for the application to map from heap0 [1].
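
To make the heap sizes concrete, here is a rough (untested) sketch of how
they could be derived from the region query, assuming struct
drm_i915_memory_region_info gains the probed_cpu_visible_size field as
proposed in this RFC:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int query_heap_sizes(int fd, uint64_t *heap0_lmem_only,
                            uint64_t *heap1_smem, uint64_t *heap2_lmem_cpu)
{
        struct drm_i915_query_item item = {
                .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
        };
        struct drm_i915_query query = {
                .num_items = 1,
                .items_ptr = (uintptr_t)&item,
        };
        struct drm_i915_query_memory_regions *info;
        uint32_t i;

        /* First call: the kernel fills in the required blob size. */
        if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
                return -1;

        info = calloc(1, item.length);
        if (!info)
                return -1;
        item.data_ptr = (uintptr_t)info;

        /* Second call: fetch the actual region info. */
        if (drmIoctl(fd, DRM_IOCTL_I915_QUERY, &query)) {
                free(info);
                return -1;
        }

        *heap0_lmem_only = *heap1_smem = *heap2_lmem_cpu = 0;
        for (i = 0; i < info->num_regions; i++) {
                struct drm_i915_memory_region_info *r = &info->regions[i];

                if (r->region.memory_class == I915_MEMORY_CLASS_SYSTEM) {
                        *heap1_smem += r->probed_size;
                } else if (r->region.memory_class == I915_MEMORY_CLASS_DEVICE) {
                        /* heap2 is the CPU visible slice of lmem, heap0 the
                         * rest; probed_cpu_visible_size is the RFC addition. */
                        *heap2_lmem_cpu += r->probed_cpu_visible_size;
                        *heap0_lmem_only += r->probed_size -
                                            r->probed_cpu_visible_size;
                }
        }

        free(info);
        return 0;
}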


With that said, it means if we created a GEM BO without 
I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS, we'll never mmap it.

So why the query?

I guess it would be useful when we import a buffer from another 
application. But in that case, why not have the query on the BO?


-Lionel


[1] : 
https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkMapMemory.html 
(VUID-vkMapMemory-memory-00682)

>
>>
>> Thanks,
>>
>> -Lionel
>>
>> On 27/04/2022 09:35, Lionel Landwerlin wrote:
>>> Hi Matt,
>>>
>>>
>>> The proposal looks good to me.
>>>
>>> Looking forward to try it on drm-tip.
>>>
>>>
>>> -Lionel
>>>
>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>    - Rework error capture interactions, including no longer needing
>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>> +++++++++++++++++++++++
>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>   3 files changed, 252 insertions(+)
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>> known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id 
>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +    /** @region: The class:instance pair encoding */
>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +    /** @rsvd0: MBZ */
>>>> +    __u32 rsvd0;
>>>> +
>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +    __u64 probed_size;
>>>> +
>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>> unknown) */
>>>> +    __u64 unallocated_size;
>>>> +
>>>> +    union {
>>>> +        /** @rsvd1: MBZ */
>>>> +        __u64 rsvd1[8];
>>>> +        struct {
>>>> +            /**
>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>> +             * that is CPU accessible. (-1 = unknown).
>>>> +             *
>>>> +             * This will be always be <= @probed_size, and the
>>>> +             * remainder(if there is any) will not be CPU
>>>> +             * accessible.
>>>> +             */
>>>> +            __u64 probed_cpu_visible_size;
>>>> +        };
>>>> +    };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>> behaviour, with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for 
>>>> the stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>> create the object
>>>> + * with gem_create, and another to apply various parameters, 
>>>> however this
>>>> + * creates some ambiguity for the params which are considered 
>>>> immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +    /**
>>>> +     * @size: Requested size for the object.
>>>> +     *
>>>> +     * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +     *
>>>> +     * Note that for some devices we have might have further minimum
>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>> local-memory.
>>>> +     * However in general the final size here should always 
>>>> reflect any
>>>> +     * rounding up, if for example using the 
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +     * extension to place the object in device local-memory.
>>>> +     */
>>>> +    __u64 size;
>>>> +    /**
>>>> +     * @handle: Returned handle for the object.
>>>> +     *
>>>> +     * Object handles are nonzero.
>>>> +     */
>>>> +    __u32 handle;
>>>> +    /**
>>>> +     * @flags: Optional flags.
>>>> +     *
>>>> +     * Supported values:
>>>> +     *
>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>> kernel that
>>>> +     * the object will need to be accessed via the CPU.
>>>> +     *
>>>> +     * Only valid when placing objects in 
>>>> I915_MEMORY_CLASS_DEVICE, and
>>>> +     * only strictly required on platforms where only some of the 
>>>> device
>>>> +     * memory is directly visible or mappable through the CPU, 
>>>> like on DG2+.
>>>> +     *
>>>> +     * One of the placements MUST also be 
>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>> +     * ensure we can always spill the allocation to system memory, 
>>>> if we
>>>> +     * can't place the object in the mappable part of
>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>> +     *
>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>> objects that can
>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>> don't
>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together 
>>>> with
>>>> +     * flat-CCS.
>>>> +     *
>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>> that the
>>>> +     * kernel can still migrate the object to the mappable part, 
>>>> as a last
>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +     * expensive, and so ideally should be avoided.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +    __u32 flags;
>>>> +    /**
>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>> +     *
>>>> +     * This will be useful in the future when we need to support 
>>>> several
>>>> +     * different extensions, and we need to apply more than one when
>>>> +     * creating the object. See struct i915_user_extension.
>>>> +     *
>>>> +     * If we don't supply any extensions then we get the same old 
>>>> gem_create
>>>> +     * behaviour.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +    __u64 extensions;
>>>> +};
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>> returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + *    struct drm_i915_query_vma_info info = {};
>>>> + *    struct drm_i915_query_item item = {
>>>> + *        .data_ptr = (uintptr_t)&info,
>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + *    };
>>>> + *    struct drm_i915_query query = {
>>>> + *        .num_items = 1,
>>>> + *        .items_ptr = (uintptr_t)&item,
>>>> + *    };
>>>> + *    int err;
>>>> + *
>>>> + *    // Unlike some other types of queries, there is no need to 
>>>> first query
>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + *    // time how big this needs to be.
>>>> + *    item.length = sizeof(info);
>>>> + *
>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>> wish
>>>> + *    // to query, before then firing off the query.
>>>> + *    info.vm_id = vm_id;
>>>> + *    info.offset = gtt_address;
>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + *    if (err || item.length < 0) ...
>>>> + *
>>>> + *    // If all went well we can now inspect the returned attributes.
>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +    /**
>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>> the value
>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>> +     */
>>>> +    __u32 vm_id;
>>>> +    /** @pad: MBZ. */
>>>> +    __u32 pad;
>>>> +    /**
>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>> the kernel
>>>> +     * will use to perform the lookup.
>>>> +     */
>>>> +    __u64 offset;
>>>> +    /**
>>>> +     * @attributes: The returned attributes for the given vma.
>>>> +     *
>>>> +     * Possible values:
>>>> +     *
>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>> backing the
>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>> the vma is
>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>> the CPU
>>>> +     * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>> +     * require the kernel first synchronising any GPU work tied to 
>>>> the
>>>> +     * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +     * placements permit it. See 
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +     *
>>>> +     * Note that this is inherently racy.
>>>> +     */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +    __u64 attributes;
>>>> +    /** @rsvd: MBZ */
>>>> +    __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>> local-memory(i.e
>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>> might still be
>>>> +smaller than the total probed_size. In such cases, only some 
>>>> subset of
>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>> first 256M),
>>>> +while the remainder is only accessible via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in 
>>>> I915_MEMORY_CLASS_DEVICE, where
>>>> +underneath the device has a small BAR, meaning only some portion 
>>>> of it is CPU
>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>> access is not
>>>> +required, and prioritize using the non-CPU visible portion of
>>>> +I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct__drm_i915_memory_region attribute which returns the 
>>>> total size of the
>>>> +CPU accessible portion, for the particular region. This should 
>>>> only be
>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>> with the
>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>> visible portion,
>>>> +where the total size of the heap needs to be known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only 
>>>> support
>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>> object/vma is
>>>> +currently placed in memory that is accessible by the CPU. This 
>>>> should always be
>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>> I915_MEMORY_CLASS_DEVICE
>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>> object will
>>>> +likely first require migrating the pages.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_query_vma_info
>>>> +
>>>> +Error Capture restrictions
>>>> +--------------------------
>>>> +With error capture we have two new restrictions:
>>>> +
>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>> pages are not
>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>> free to skip
>>>> +    trying to capture them.
>>>> +
>>>> +    2) On discrete we now reject error capture on recoverable 
>>>> contexts. In the
>>>> +    future the kernel may want to blit during error capture, when 
>>>> for example
>>>> +    something is not currently CPU accessible.
>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>> b/Documentation/gpu/rfc/index.rst
>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>> --- a/Documentation/gpu/rfc/index.rst
>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>   .. toctree::
>>>>         i915_scheduler.rst
>>>> +
>>>> +.. toctree::
>>>> +
>>>> +    i915_small_bar.rst
>>>
>>>
>>



* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-04-27 15:37         ` Lionel Landwerlin
  0 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-04-27 15:37 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 27/04/2022 18:18, Matthew Auld wrote:
> On 27/04/2022 07:48, Lionel Landwerlin wrote:
>> One question though, how do we detect that this flag 
>> (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given 
>> kernel?
>> I assume older kernels are going to reject object creation if we use 
>> this flag?
>
> From some offline discussion with Lionel, the plan here is to just do 
> a dummy gem_create_ext to check if the kernel throws an error with the 
> new flag or not.
>
>>
>> I didn't plan to use __drm_i915_query_vma_info, but isn't it 
>> inconsistent to select the placement on the GEM object and then query 
>> whether it's mappable by address?
>> You made a comment stating this is racy, wouldn't querying on the GEM 
>> object prevent this?
>
> Since mesa at this time doesn't currently have a use for this one, 
> then I guess we should maybe just drop this part of the uapi, in this 
> version at least, if no objections.


Just repeating what we discussed (maybe I missed some other discussion 
and that's why I was confused) :


The way I was planning to use this is to have 3 heaps in Vulkan :

     - heap0: local only, no cpu visible

     - heap1: system, cpu visible

     - heap2: local & cpu visible


With heap2 having the reported probed_cpu_visible_size as its size.

It is an error for the application to map from heap0 [1].


With that said, it means if we created a GEM BO without 
I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS, we'll never mmap it.

So why the query?

I guess it would be useful when we import a buffer from another 
application. But in that case, why not have the query on the BO?


-Lionel


[1] : 
https://www.khronos.org/registry/vulkan/specs/1.3-extensions/man/html/vkMapMemory.html 
(VUID-vkMapMemory-memory-00682)

>
>>
>> Thanks,
>>
>> -Lionel
>>
>> On 27/04/2022 09:35, Lionel Landwerlin wrote:
>>> Hi Matt,
>>>
>>>
>>> The proposal looks good to me.
>>>
>>> Looking forward to try it on drm-tip.
>>>
>>>
>>> -Lionel
>>>
>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>    - Rework error capture interactions, including no longer needing
>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>> +++++++++++++++++++++++
>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>   3 files changed, 252 insertions(+)
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>> known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id 
>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +    /** @region: The class:instance pair encoding */
>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +    /** @rsvd0: MBZ */
>>>> +    __u32 rsvd0;
>>>> +
>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +    __u64 probed_size;
>>>> +
>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>> unknown) */
>>>> +    __u64 unallocated_size;
>>>> +
>>>> +    union {
>>>> +        /** @rsvd1: MBZ */
>>>> +        __u64 rsvd1[8];
>>>> +        struct {
>>>> +            /**
>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>> +             * that is CPU accessible. (-1 = unknown).
>>>> +             *
>>>> +             * This will be always be <= @probed_size, and the
>>>> +             * remainder(if there is any) will not be CPU
>>>> +             * accessible.
>>>> +             */
>>>> +            __u64 probed_cpu_visible_size;
>>>> +        };
>>>> +    };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>> behaviour, with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for 
>>>> the stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>> create the object
>>>> + * with gem_create, and another to apply various parameters, 
>>>> however this
>>>> + * creates some ambiguity for the params which are considered 
>>>> immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +    /**
>>>> +     * @size: Requested size for the object.
>>>> +     *
>>>> +     * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +     *
>>>> +     * Note that for some devices we have might have further minimum
>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>> local-memory.
>>>> +     * However in general the final size here should always 
>>>> reflect any
>>>> +     * rounding up, if for example using the 
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +     * extension to place the object in device local-memory.
>>>> +     */
>>>> +    __u64 size;
>>>> +    /**
>>>> +     * @handle: Returned handle for the object.
>>>> +     *
>>>> +     * Object handles are nonzero.
>>>> +     */
>>>> +    __u32 handle;
>>>> +    /**
>>>> +     * @flags: Optional flags.
>>>> +     *
>>>> +     * Supported values:
>>>> +     *
>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>> kernel that
>>>> +     * the object will need to be accessed via the CPU.
>>>> +     *
>>>> +     * Only valid when placing objects in 
>>>> I915_MEMORY_CLASS_DEVICE, and
>>>> +     * only strictly required on platforms where only some of the 
>>>> device
>>>> +     * memory is directly visible or mappable through the CPU, 
>>>> like on DG2+.
>>>> +     *
>>>> +     * One of the placements MUST also be 
>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>> +     * ensure we can always spill the allocation to system memory, 
>>>> if we
>>>> +     * can't place the object in the mappable part of
>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>> +     *
>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>> objects that can
>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>> don't
>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together 
>>>> with
>>>> +     * flat-CCS.
>>>> +     *
>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>> that the
>>>> +     * kernel can still migrate the object to the mappable part, 
>>>> as a last
>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +     * expensive, and so ideally should be avoided.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +    __u32 flags;
>>>> +    /**
>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>> +     *
>>>> +     * This will be useful in the future when we need to support 
>>>> several
>>>> +     * different extensions, and we need to apply more than one when
>>>> +     * creating the object. See struct i915_user_extension.
>>>> +     *
>>>> +     * If we don't supply any extensions then we get the same old 
>>>> gem_create
>>>> +     * behaviour.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +    __u64 extensions;
>>>> +};
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>> returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + *    struct drm_i915_query_vma_info info = {};
>>>> + *    struct drm_i915_query_item item = {
>>>> + *        .data_ptr = (uintptr_t)&info,
>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + *    };
>>>> + *    struct drm_i915_query query = {
>>>> + *        .num_items = 1,
>>>> + *        .items_ptr = (uintptr_t)&item,
>>>> + *    };
>>>> + *    int err;
>>>> + *
>>>> + *    // Unlike some other types of queries, there is no need to 
>>>> first query
>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + *    // time how big this needs to be.
>>>> + *    item.length = sizeof(info);
>>>> + *
>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>> wish
>>>> + *    // to query, before then firing off the query.
>>>> + *    info.vm_id = vm_id;
>>>> + *    info.offset = gtt_address;
>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + *    if (err || item.length < 0) ...
>>>> + *
>>>> + *    // If all went well we can now inspect the returned attributes.
>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +    /**
>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>> the value
>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>> +     */
>>>> +    __u32 vm_id;
>>>> +    /** @pad: MBZ. */
>>>> +    __u32 pad;
>>>> +    /**
>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>> the kernel
>>>> +     * will use to perform the lookup.
>>>> +     */
>>>> +    __u64 offset;
>>>> +    /**
>>>> +     * @attributes: The returned attributes for the given vma.
>>>> +     *
>>>> +     * Possible values:
>>>> +     *
>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>> backing the
>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>> the vma is
>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>> the CPU
>>>> +     * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>> +     * require the kernel first synchronising any GPU work tied to 
>>>> the
>>>> +     * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +     * placements permit it. See 
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +     *
>>>> +     * Note that this is inherently racy.
>>>> +     */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +    __u64 attributes;
>>>> +    /** @rsvd: MBZ */
>>>> +    __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>> local-memory(i.e
>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>> might still be
>>>> +smaller than the total probed_size. In such cases, only some 
>>>> subset of
>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>> first 256M),
>>>> +while the remainder is only accessible via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in 
>>>> I915_MEMORY_CLASS_DEVICE, where
>>>> +underneath the device has a small BAR, meaning only some portion 
>>>> of it is CPU
>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>> access is not
>>>> +required, and prioritize using the non-CPU visible portion of
>>>> +I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct__drm_i915_memory_region attribute which returns the 
>>>> total size of the
>>>> +CPU accessible portion, for the particular region. This should 
>>>> only be
>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>> with the
>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>> visible portion,
>>>> +where the total size of the heap needs to be known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only 
>>>> support
>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>> object/vma is
>>>> +currently placed in memory that is accessible by the CPU. This 
>>>> should always be
>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>> I915_MEMORY_CLASS_DEVICE
>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>> object will
>>>> +likely first require migrating the pages.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_query_vma_info
>>>> +
>>>> +Error Capture restrictions
>>>> +--------------------------
>>>> +With error capture we have two new restrictions:
>>>> +
>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>> pages are not
>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>> free to skip
>>>> +    trying to capture them.
>>>> +
>>>> +    2) On discrete we now reject error capture on recoverable 
>>>> contexts. In the
>>>> +    future the kernel may want to blit during error capture, when 
>>>> for example
>>>> +    something is not currently CPU accessible.
>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>> b/Documentation/gpu/rfc/index.rst
>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>> --- a/Documentation/gpu/rfc/index.rst
>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>   .. toctree::
>>>>         i915_scheduler.rst
>>>> +
>>>> +.. toctree::
>>>> +
>>>> +    i915_small_bar.rst
>>>
>>>
>>



* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-27  6:55       ` [Intel-gfx] " Christian König
@ 2022-04-27 15:38         ` Daniel Vetter
  -1 siblings, 0 replies; 50+ messages in thread
From: Daniel Vetter @ 2022-04-27 15:38 UTC (permalink / raw)
  To: Christian König
  Cc: Thomas Hellström, Daniel Vetter, intel-gfx, dri-devel,
	Kenneth Graunke, Jon Bloomfield, Matthew Auld, mesa-dev,
	Lionel Landwerlin, Akeem G Abodunrin

On Wed, Apr 27, 2022 at 08:55:07AM +0200, Christian König wrote:
> Well usually we increment the drm minor version when adding some new flags
> on amdgpu.
> 
> Additional to that just one comment from our experience with that: You don't
> just need one flag, but two. The first one is a hint which says "CPU access
> needed" and the second is a promise which says "CPU access never needed".
> 
> The background is that on a whole bunch of buffers you can 100% certain say
> that you will never ever need CPU access.
> 
> Then at least we have a whole bunch of buffers where we might need CPU
> access, but can't tell for sure.
> 
> And last we have stuff like transfer buffers you can be 100% sure that you
> need CPU access.
> 
> Separating it like this helped a lot with performance on small BAR systems.

So my assumption was that for transfer buffers you'd fill them with the
cpu first anyway, so no need for the extra flag.

I guess this is for transfer buffers for gpu -> cpu transfers, where it
would result in a costly bo move and stalls, and it's better to make sure
it's cpu accessible from the start? At least on the current gpus we have,
where there's no coherent interconnect, those buffers have to be in system
memory or your cpu access will be a disaster, so again they're naturally
cpu accessible.

What's the use-case for the "cpu access required" flag where "cpu access
before gpu access" isn't a good enough hint already to get the same perf
benefits?

Also for scanout my idea at least is that we just fail mmap when you
haven't set the flag and the scanout is pinned to unmappable, for two
reasons:
- 4k buffers are big; if we force them all into mappable, things are
  non-pretty.
- You need mesa anyway to access tiled buffers, and mesa knows how to use
  a transfer buffer. That should work even when you do desktop switching
  and fastboot and stuff like that; with the getfb2 ioctl it should all
  work (and without getfb2 it's doomed to garbage anyway).

So only dumb kms buffers (which are linear) would ever get the
NEEDS_CPU_ACCESS flag, and only those we'd ever pin into cpu accessible
range for scanout. Is there a hole in that plan?
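
To make that concrete, here is a rough (untested) sketch of what such a
CPU mappable allocation could look like with the proposed uapi. The flag
value is taken from this RFC, and the placement instances are just
placeholders that real code would get from the memory regions query:

#include <stdint.h>
#include <sys/ioctl.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Value proposed in this RFC; not yet in upstream i915_drm.h. */
#ifndef I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1u << 0)
#endif

static int create_cpu_mappable_bo(int fd, uint64_t size, uint32_t *handle)
{
        /* Example placements; real code should take the instances from
         * the DRM_I915_QUERY_MEMORY_REGIONS query. */
        struct drm_i915_gem_memory_class_instance placements[] = {
                { .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
                { .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
        };
        struct drm_i915_gem_create_ext_memory_regions regions = {
                .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
                .num_regions = 2,
                .regions = (uintptr_t)placements,
        };
        struct drm_i915_gem_create_ext create = {
                .size = size,
                /* Hint from this RFC: the BO will be mmapped by the CPU, so
                 * it must end up in the mappable part of lmem, or in smem. */
                .flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
                .extensions = (uintptr_t)&regions,
        };

        if (drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
                return -1;

        *handle = create.handle;
        return 0;
}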

Cheers, Daniel

> 
> Regards,
> Christian.
> 
> Am 27.04.22 um 08:48 schrieb Lionel Landwerlin:
> > One question though, how do we detect that this flag
> > (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given
> > kernel?
> > I assume older kernels are going to reject object creation if we use
> > this flag?
> > 
> > I didn't plan to use __drm_i915_query_vma_info, but isn't it
> > inconsistent to select the placement on the GEM object and then query
> > whether it's mappable by address?
> > You made a comment stating this is racy, wouldn't querying on the GEM
> > object prevent this?
> > 
> > Thanks,
> > 
> > -Lionel
> > 
> > On 27/04/2022 09:35, Lionel Landwerlin wrote:
> > > Hi Matt,
> > > 
> > > 
> > > The proposal looks good to me.
> > > 
> > > Looking forward to try it on drm-tip.
> > > 
> > > 
> > > -Lionel
> > > 
> > > On 20/04/2022 20:13, Matthew Auld wrote:
> > > > Add an entry for the new uapi needed for small BAR on DG2+.
> > > > 
> > > > v2:
> > > >    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
> > > >    - Rework error capture interactions, including no longer needing
> > > >      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
> > > >    - Add probed_cpu_visible_size. (Lionel)
> > > > 
> > > > Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> > > > Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> > > > Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> > > > Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> > > > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > > > Cc: Jordan Justen <jordan.l.justen@intel.com>
> > > > Cc: Kenneth Graunke <kenneth@whitecape.org>
> > > > Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
> > > > Cc: mesa-dev@lists.freedesktop.org
> > > > ---
> > > >   Documentation/gpu/rfc/i915_small_bar.h   | 190
> > > > +++++++++++++++++++++++
> > > >   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
> > > >   Documentation/gpu/rfc/index.rst          |   4 +
> > > >   3 files changed, 252 insertions(+)
> > > >   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
> > > >   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
> > > > 
> > > > diff --git a/Documentation/gpu/rfc/i915_small_bar.h
> > > > b/Documentation/gpu/rfc/i915_small_bar.h
> > > > new file mode 100644
> > > > index 000000000000..7bfd0cf44d35
> > > > --- /dev/null
> > > > +++ b/Documentation/gpu/rfc/i915_small_bar.h
> > > > @@ -0,0 +1,190 @@
> > > > +/**
> > > > + * struct __drm_i915_memory_region_info - Describes one region
> > > > as known to the
> > > > + * driver.
> > > > + *
> > > > + * Note this is using both struct drm_i915_query_item and
> > > > struct drm_i915_query.
> > > > + * For this new query we are adding the new query id
> > > > DRM_I915_QUERY_MEMORY_REGIONS
> > > > + * at &drm_i915_query_item.query_id.
> > > > + */
> > > > +struct __drm_i915_memory_region_info {
> > > > +    /** @region: The class:instance pair encoding */
> > > > +    struct drm_i915_gem_memory_class_instance region;
> > > > +
> > > > +    /** @rsvd0: MBZ */
> > > > +    __u32 rsvd0;
> > > > +
> > > > +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
> > > > +    __u64 probed_size;
> > > > +
> > > > +    /** @unallocated_size: Estimate of memory remaining (-1 =
> > > > unknown) */
> > > > +    __u64 unallocated_size;
> > > > +
> > > > +    union {
> > > > +        /** @rsvd1: MBZ */
> > > > +        __u64 rsvd1[8];
> > > > +        struct {
> > > > +            /**
> > > > +             * @probed_cpu_visible_size: Memory probed by the driver
> > > > +             * that is CPU accessible. (-1 = unknown).
> > > > +             *
> > > > +             * This will be always be <= @probed_size, and the
> > > > +             * remainder(if there is any) will not be CPU
> > > > +             * accessible.
> > > > +             */
> > > > +            __u64 probed_cpu_visible_size;
> > > > +        };
> > > > +    };
> > > > +};
> > > > +
> > > > +/**
> > > > + * struct __drm_i915_gem_create_ext - Existing gem_create
> > > > behaviour, with added
> > > > + * extension support using struct i915_user_extension.
> > > > + *
> > > > + * Note that new buffer flags should be added here, at least
> > > > for the stuff that
> > > > + * is immutable. Previously we would have two ioctls, one to
> > > > create the object
> > > > + * with gem_create, and another to apply various parameters,
> > > > however this
> > > > + * creates some ambiguity for the params which are considered
> > > > immutable. Also in
> > > > + * general we're phasing out the various SET/GET ioctls.
> > > > + */
> > > > +struct __drm_i915_gem_create_ext {
> > > > +    /**
> > > > +     * @size: Requested size for the object.
> > > > +     *
> > > > +     * The (page-aligned) allocated size for the object will be
> > > > returned.
> > > > +     *
> > > > +     * Note that for some devices we have might have further minimum
> > > > +     * page-size restrictions(larger than 4K), like for device
> > > > local-memory.
> > > > +     * However in general the final size here should always
> > > > reflect any
> > > > +     * rounding up, if for example using the
> > > > I915_GEM_CREATE_EXT_MEMORY_REGIONS
> > > > +     * extension to place the object in device local-memory.
> > > > +     */
> > > > +    __u64 size;
> > > > +    /**
> > > > +     * @handle: Returned handle for the object.
> > > > +     *
> > > > +     * Object handles are nonzero.
> > > > +     */
> > > > +    __u32 handle;
> > > > +    /**
> > > > +     * @flags: Optional flags.
> > > > +     *
> > > > +     * Supported values:
> > > > +     *
> > > > +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to
> > > > the kernel that
> > > > +     * the object will need to be accessed via the CPU.
> > > > +     *
> > > > +     * Only valid when placing objects in
> > > > I915_MEMORY_CLASS_DEVICE, and
> > > > +     * only strictly required on platforms where only some of
> > > > the device
> > > > +     * memory is directly visible or mappable through the CPU,
> > > > like on DG2+.
> > > > +     *
> > > > +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
> > > > +     * ensure we can always spill the allocation to system
> > > > memory, if we
> > > > +     * can't place the object in the mappable part of
> > > > +     * I915_MEMORY_CLASS_DEVICE.
> > > > +     *
> > > > +     * Note that since the kernel only supports flat-CCS on
> > > > objects that can
> > > > +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we
> > > > therefore don't
> > > > +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
> > > > +     * flat-CCS.
> > > > +     *
> > > > +     * Without this hint, the kernel will assume that non-mappable
> > > > +     * I915_MEMORY_CLASS_DEVICE is preferred for this object.
> > > > Note that the
> > > > +     * kernel can still migrate the object to the mappable
> > > > part, as a last
> > > > +     * resort, if userspace ever CPU faults this object, but
> > > > this might be
> > > > +     * expensive, and so ideally should be avoided.
> > > > +     */
> > > > +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
> > > > +    __u32 flags;
> > > > +    /**
> > > > +     * @extensions: The chain of extensions to apply to this object.
> > > > +     *
> > > > +     * This will be useful in the future when we need to
> > > > support several
> > > > +     * different extensions, and we need to apply more than one when
> > > > +     * creating the object. See struct i915_user_extension.
> > > > +     *
> > > > +     * If we don't supply any extensions then we get the same
> > > > old gem_create
> > > > +     * behaviour.
> > > > +     *
> > > > +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
> > > > +     * struct drm_i915_gem_create_ext_memory_regions.
> > > > +     *
> > > > +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
> > > > +     * struct drm_i915_gem_create_ext_protected_content.
> > > > +     */
> > > > +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
> > > > +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
> > > > +    __u64 extensions;
> > > > +};
> > > > +
> > > > +#define DRM_I915_QUERY_VMA_INFO    5
> > > > +
> > > > +/**
> > > > + * struct __drm_i915_query_vma_info
> > > > + *
> > > > + * Given a vm and GTT address, lookup the corresponding vma,
> > > > returning its set
> > > > + * of attributes.
> > > > + *
> > > > + * .. code-block:: C
> > > > + *
> > > > + *    struct drm_i915_query_vma_info info = {};
> > > > + *    struct drm_i915_query_item item = {
> > > > + *        .data_ptr = (uintptr_t)&info,
> > > > + *        .query_id = DRM_I915_QUERY_VMA_INFO,
> > > > + *    };
> > > > + *    struct drm_i915_query query = {
> > > > + *        .num_items = 1,
> > > > + *        .items_ptr = (uintptr_t)&item,
> > > > + *    };
> > > > + *    int err;
> > > > + *
> > > > + *    // Unlike some other types of queries, there is no need
> > > > to first query
> > > > + *    // the size of the data_ptr blob here, since we already
> > > > know ahead of
> > > > + *    // time how big this needs to be.
> > > > + *    item.length = sizeof(info);
> > > > + *
> > > > + *    // Next we fill in the vm_id and ppGTT address of the vma
> > > > we wish
> > > > + *    // to query, before then firing off the query.
> > > > + *    info.vm_id = vm_id;
> > > > + *    info.offset = gtt_address;
> > > > + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
> > > > + *    if (err || item.length < 0) ...
> > > > + *
> > > > + *    // If all went well we can now inspect the returned attributes.
> > > > + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
> > > > + */
> > > > +struct __drm_i915_query_vma_info {
> > > > +    /**
> > > > +     * @vm_id: The given vm id that contains the vma. The id is
> > > > the value
> > > > +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
> > > > +     * drm_i915_gem_vm_control.vm_id.
> > > > +     */
> > > > +    __u32 vm_id;
> > > > +    /** @pad: MBZ. */
> > > > +    __u32 pad;
> > > > +    /**
> > > > +     * @offset: The corresponding ppGTT address of the vma
> > > > which the kernel
> > > > +     * will use to perform the lookup.
> > > > +     */
> > > > +    __u64 offset;
> > > > +    /**
> > > > +     * @attributes: The returned attributes for the given vma.
> > > > +     *
> > > > +     * Possible values:
> > > > +     *
> > > > +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages
> > > > backing the
> > > > +     * vma are currently CPU accessible. If this is not set
> > > > then the vma is
> > > > +     * currently backed by I915_MEMORY_CLASS_DEVICE memory,
> > > > which the CPU
> > > > +     * cannot directly access(this is only possible on discrete
> > > > devices with
> > > > +     * a small BAR). Attempting to MMAP and fault such an object will
> > > > +     * require the kernel first synchronising any GPU work tied to the
> > > > +     * object, before then migrating the pages, either to the
> > > > CPU accessible
> > > > +     * part of I915_MEMORY_CLASS_DEVICE, or
> > > > I915_MEMORY_CLASS_SYSTEM, if the
> > > > +     * placements permit it. See
> > > > I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
> > > > +     *
> > > > +     * Note that this is inherently racy.
> > > > +     */
> > > > +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
> > > > +    __u64 attributes;
> > > > +    /** @rsvd: MBZ */
> > > > +    __u32 rsvd[4];
> > > > +};
> > > > diff --git a/Documentation/gpu/rfc/i915_small_bar.rst
> > > > b/Documentation/gpu/rfc/i915_small_bar.rst
> > > > new file mode 100644
> > > > index 000000000000..be3d9bcdd86d
> > > > --- /dev/null
> > > > +++ b/Documentation/gpu/rfc/i915_small_bar.rst
> > > > @@ -0,0 +1,58 @@
> > > > +==========================
> > > > +I915 Small BAR RFC Section
> > > > +==========================
> > > > +Starting from DG2 we will have resizable BAR support for device
> > > > local-memory(i.e
> > > > +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size
> > > > might still be
> > > > +smaller than the total probed_size. In such cases, only some subset of
> > > > +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the
> > > > first 256M),
> > > > +while the remainder is only accessible via the GPU.
> > > > +
> > > > +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
> > > > +----------------------------------------------
> > > > +New gem_create_ext flag to tell the kernel that a BO will
> > > > require CPU access.
> > > > +This becomes important when placing an object in
> > > > I915_MEMORY_CLASS_DEVICE, where
> > > > +underneath the device has a small BAR, meaning only some
> > > > portion of it is CPU
> > > > +accessible. Without this flag the kernel will assume that CPU
> > > > access is not
> > > > +required, and prioritize using the non-CPU visible portion of
> > > > +I915_MEMORY_CLASS_DEVICE.
> > > > +
> > > > +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> > > > +   :functions: __drm_i915_gem_create_ext
> > > > +
> > > > +probed_cpu_visible_size attribute
> > > > +---------------------------------
> > > > +New struct__drm_i915_memory_region attribute which returns the
> > > > total size of the
> > > > +CPU accessible portion, for the particular region. This should only be
> > > > +applicable for I915_MEMORY_CLASS_DEVICE.
> > > > +
> > > > +Vulkan will need this as part of creating a separate
> > > > VkMemoryHeap with the
> > > > +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU
> > > > visible portion,
> > > > +where the total size of the heap needs to be known.
> > > > +
> > > > +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> > > > +   :functions: __drm_i915_memory_region_info
> > > > +
> > > > +DRM_I915_QUERY_VMA_INFO query
> > > > +-----------------------------
> > > > +Query the attributes of some vma. Given a vm and GTT offset, find the
> > > > +respective vma, and return its set of attributes. For now we
> > > > only support
> > > > +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
> > > > +currently placed in memory that is accessible by the CPU. This
> > > > should always be
> > > > +set on devices where the CPU probed_cpu_visible_size of
> > > > I915_MEMORY_CLASS_DEVICE
> > > > +matches the probed_size. If this is not set then CPU faulting
> > > > the object will
> > > > +likely first require migrating the pages.
> > > > +
> > > > +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> > > > +   :functions: __drm_i915_query_vma_info
> > > > +
> > > > +Error Capture restrictions
> > > > +--------------------------
> > > > +With error capture we have two new restrictions:
> > > > +
> > > > +    1) Error capture is best effort on small BAR systems; if
> > > > the pages are not
> > > > +    CPU accessible, at the time of capture, then the kernel is
> > > > free to skip
> > > > +    trying to capture them.
> > > > +
> > > > +    2) On discrete we now reject error capture on recoverable
> > > > contexts. In the
> > > > +    future the kernel may want to blit during error capture,
> > > > when for example
> > > > +    something is not currently CPU accessible.
> > > > diff --git a/Documentation/gpu/rfc/index.rst
> > > > b/Documentation/gpu/rfc/index.rst
> > > > index 91e93a705230..5a3bd3924ba6 100644
> > > > --- a/Documentation/gpu/rfc/index.rst
> > > > +++ b/Documentation/gpu/rfc/index.rst
> > > > @@ -23,3 +23,7 @@ host such documentation:
> > > >   .. toctree::
> > > >         i915_scheduler.rst
> > > > +
> > > > +.. toctree::
> > > > +
> > > > +    i915_small_bar.rst
> > > 
> > > 
> > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch


* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-04-27 15:38         ` Daniel Vetter
  0 siblings, 0 replies; 50+ messages in thread
From: Daniel Vetter @ 2022-04-27 15:38 UTC (permalink / raw)
  To: Christian König
  Cc: Thomas Hellström, Daniel Vetter, intel-gfx, dri-devel,
	Kenneth Graunke, Matthew Auld, mesa-dev

On Wed, Apr 27, 2022 at 08:55:07AM +0200, Christian König wrote:
> Well usually we increment the drm minor version when adding some new flags
> on amdgpu.
> 
> Additional to that just one comment from our experience with that: You don't
> just need one flag, but two. The first one is a hint which says "CPU access
> needed" and the second is a promise which says "CPU access never needed".
> 
> The background is that on a whole bunch of buffers you can 100% certain say
> that you will never ever need CPU access.
> 
> Then at least we have a whole bunch of buffers where we might need CPU
> access, but can't tell for sure.
> 
> And last we have stuff like transfer buffers you can be 100% sure that you
> need CPU access.
> 
> Separating it like this helped a lot with performance on small BAR systems.

So my assumption was that for transfer buffers you'd fill them with the
cpu first anyway, so no need for the extra flag.

I guess this is for transfer buffers for gpu -> cpu transfers, where it
would result in a costly bo move and stalls, and it's better to make sure
it's cpu accessible from the start? At least on the current gpus we have,
where there's no coherent interconnect, those buffers have to be in system
memory or your cpu access will be a disaster, so again they're naturally
cpu accessible.

What's the use-case for the "cpu access required" flag where "cpu access
before gpu access" isn't a good enough hint already to get the same perf
benefits?

Also for scanout my idea at least is that we just fail mmap when you
haven't set the flag and the scanout is pinned to unmappable, for two
reasons:
- 4k buffers are big; if we force them all into mappable, things get
  non-pretty.
- You need mesa anyway to access tiled buffers, and mesa knows how to use
  a transfer buffer. That should work even when you do desktop switching
  and fastboot and stuff like that; with the getfb2 ioctl it should all
  work (and without getfb2 it's doomed to garbage anyway).

So only dumb kms buffers (which are linear) would ever get the
NEEDS_CPU_ACCESS flag, and only those we'd ever pin into cpu accessible
range for scanout. Is there a hole in that plan?
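
Purely to illustrate the distinction, here is a rough sketch of how
userspace might pick its creation flags under such a two-flag scheme. The
EXAMPLE_FLAG_NEVER_NEEDS_CPU_ACCESS name is made up for illustration and is
not part of this RFC; only I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS is
actually proposed here:

  #include <stdbool.h>

  #ifndef I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
  #define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1u << 0) /* from this RFC */
  #endif

  /* Hypothetical "promise" flag, not part of this RFC. */
  #define EXAMPLE_FLAG_NEVER_NEEDS_CPU_ACCESS (1u << 1)

  static unsigned int pick_create_flags(bool cpu_never_touches,
                                        bool cpu_access_expected)
  {
          if (cpu_never_touches)
                  /* promise: the kernel may keep this permanently in the
                   * non-mappable part of local memory */
                  return EXAMPLE_FLAG_NEVER_NEEDS_CPU_ACCESS;

          if (cpu_access_expected)
                  /* hint: place the object somewhere CPU visible from the
                   * start, avoiding a move on first CPU access */
                  return I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS;

          /* unsure: no flag, the kernel prefers non-mappable local memory
           * and may have to migrate on a CPU fault */
          return 0;
  }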

Cheers, Daniel

> 
> Regards,
> Christian.
> 
> Am 27.04.22 um 08:48 schrieb Lionel Landwerlin:
> > One question though, how do we detect that this flag
> > (I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS) is accepted on a given
> > kernel?
> > I assume older kernels are going to reject object creation if we use
> > this flag?
> > 
> > I didn't plan to use __drm_i915_query_vma_info, but isn't it
> > inconsistent to select the placement on the GEM object and then query
> > whether it's mappable by address?
> > You made a comment stating this is racy, wouldn't querying on the GEM
> > object prevent this?
> > 
> > Thanks,
> > 
> > -Lionel
> > 
> > On 27/04/2022 09:35, Lionel Landwerlin wrote:
> > > Hi Matt,
> > > 
> > > 
> > > The proposal looks good to me.
> > > 
> > > Looking forward to try it on drm-tip.
> > > 
> > > 
> > > -Lionel
> > > 
> > > On 20/04/2022 20:13, Matthew Auld wrote:
> > > > Add an entry for the new uapi needed for small BAR on DG2+.
> > > > 
> > > > v2:
> > > >    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
> > > >    - Rework error capture interactions, including no longer needing
> > > >      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
> > > >    - Add probed_cpu_visible_size. (Lionel)
> > > > 
> > > > Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> > > > Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> > > > Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> > > > Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> > > > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > > > Cc: Jordan Justen <jordan.l.justen@intel.com>
> > > > Cc: Kenneth Graunke <kenneth@whitecape.org>
> > > > Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
> > > > Cc: mesa-dev@lists.freedesktop.org
> > > > ---
> > > >   Documentation/gpu/rfc/i915_small_bar.h   | 190
> > > > +++++++++++++++++++++++
> > > >   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
> > > >   Documentation/gpu/rfc/index.rst          |   4 +
> > > >   3 files changed, 252 insertions(+)
> > > >   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
> > > >   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
> > > > 
> > > > diff --git a/Documentation/gpu/rfc/i915_small_bar.h
> > > > b/Documentation/gpu/rfc/i915_small_bar.h
> > > > new file mode 100644
> > > > index 000000000000..7bfd0cf44d35
> > > > --- /dev/null
> > > > +++ b/Documentation/gpu/rfc/i915_small_bar.h
> > > > @@ -0,0 +1,190 @@
> > > > +/**
> > > > + * struct __drm_i915_memory_region_info - Describes one region
> > > > as known to the
> > > > + * driver.
> > > > + *
> > > > + * Note this is using both struct drm_i915_query_item and
> > > > struct drm_i915_query.
> > > > + * For this new query we are adding the new query id
> > > > DRM_I915_QUERY_MEMORY_REGIONS
> > > > + * at &drm_i915_query_item.query_id.
> > > > + */
> > > > +struct __drm_i915_memory_region_info {
> > > > +    /** @region: The class:instance pair encoding */
> > > > +    struct drm_i915_gem_memory_class_instance region;
> > > > +
> > > > +    /** @rsvd0: MBZ */
> > > > +    __u32 rsvd0;
> > > > +
> > > > +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
> > > > +    __u64 probed_size;
> > > > +
> > > > +    /** @unallocated_size: Estimate of memory remaining (-1 =
> > > > unknown) */
> > > > +    __u64 unallocated_size;
> > > > +
> > > > +    union {
> > > > +        /** @rsvd1: MBZ */
> > > > +        __u64 rsvd1[8];
> > > > +        struct {
> > > > +            /**
> > > > +             * @probed_cpu_visible_size: Memory probed by the driver
> > > > +             * that is CPU accessible. (-1 = unknown).
> > > > +             *
> > > > +             * This will always be <= @probed_size, and the
> > > > +             * remainder(if there is any) will not be CPU
> > > > +             * accessible.
> > > > +             */
> > > > +            __u64 probed_cpu_visible_size;
> > > > +        };
> > > > +    };
> > > > +};
> > > > +
> > > > +/**
> > > > + * struct __drm_i915_gem_create_ext - Existing gem_create
> > > > behaviour, with added
> > > > + * extension support using struct i915_user_extension.
> > > > + *
> > > > + * Note that new buffer flags should be added here, at least
> > > > for the stuff that
> > > > + * is immutable. Previously we would have two ioctls, one to
> > > > create the object
> > > > + * with gem_create, and another to apply various parameters,
> > > > however this
> > > > + * creates some ambiguity for the params which are considered
> > > > immutable. Also in
> > > > + * general we're phasing out the various SET/GET ioctls.
> > > > + */
> > > > +struct __drm_i915_gem_create_ext {
> > > > +    /**
> > > > +     * @size: Requested size for the object.
> > > > +     *
> > > > +     * The (page-aligned) allocated size for the object will be
> > > > returned.
> > > > +     *
> > > > +     * Note that for some devices we might have further minimum
> > > > +     * page-size restrictions(larger than 4K), like for device
> > > > local-memory.
> > > > +     * However in general the final size here should always
> > > > reflect any
> > > > +     * rounding up, if for example using the
> > > > I915_GEM_CREATE_EXT_MEMORY_REGIONS
> > > > +     * extension to place the object in device local-memory.
> > > > +     */
> > > > +    __u64 size;
> > > > +    /**
> > > > +     * @handle: Returned handle for the object.
> > > > +     *
> > > > +     * Object handles are nonzero.
> > > > +     */
> > > > +    __u32 handle;
> > > > +    /**
> > > > +     * @flags: Optional flags.
> > > > +     *
> > > > +     * Supported values:
> > > > +     *
> > > > +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to
> > > > the kernel that
> > > > +     * the object will need to be accessed via the CPU.
> > > > +     *
> > > > +     * Only valid when placing objects in
> > > > I915_MEMORY_CLASS_DEVICE, and
> > > > +     * only strictly required on platforms where only some of
> > > > the device
> > > > +     * memory is directly visible or mappable through the CPU,
> > > > like on DG2+.
> > > > +     *
> > > > +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
> > > > +     * ensure we can always spill the allocation to system
> > > > memory, if we
> > > > +     * can't place the object in the mappable part of
> > > > +     * I915_MEMORY_CLASS_DEVICE.
> > > > +     *
> > > > +     * Note that since the kernel only supports flat-CCS on
> > > > objects that can
> > > > +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we
> > > > therefore don't
> > > > +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
> > > > +     * flat-CCS.
> > > > +     *
> > > > +     * Without this hint, the kernel will assume that non-mappable
> > > > +     * I915_MEMORY_CLASS_DEVICE is preferred for this object.
> > > > Note that the
> > > > +     * kernel can still migrate the object to the mappable
> > > > part, as a last
> > > > +     * resort, if userspace ever CPU faults this object, but
> > > > this might be
> > > > +     * expensive, and so ideally should be avoided.
> > > > +     */
> > > > +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
> > > > +    __u32 flags;
> > > > +    /**
> > > > +     * @extensions: The chain of extensions to apply to this object.
> > > > +     *
> > > > +     * This will be useful in the future when we need to
> > > > support several
> > > > +     * different extensions, and we need to apply more than one when
> > > > +     * creating the object. See struct i915_user_extension.
> > > > +     *
> > > > +     * If we don't supply any extensions then we get the same
> > > > old gem_create
> > > > +     * behaviour.
> > > > +     *
> > > > +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
> > > > +     * struct drm_i915_gem_create_ext_memory_regions.
> > > > +     *
> > > > +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
> > > > +     * struct drm_i915_gem_create_ext_protected_content.
> > > > +     */
> > > > +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
> > > > +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
> > > > +    __u64 extensions;
> > > > +};
> > > > +
> > > > +#define DRM_I915_QUERY_VMA_INFO    5
> > > > +
> > > > +/**
> > > > + * struct __drm_i915_query_vma_info
> > > > + *
> > > > + * Given a vm and GTT address, lookup the corresponding vma,
> > > > returning its set
> > > > + * of attributes.
> > > > + *
> > > > + * .. code-block:: C
> > > > + *
> > > > + *    struct drm_i915_query_vma_info info = {};
> > > > + *    struct drm_i915_query_item item = {
> > > > + *        .data_ptr = (uintptr_t)&info,
> > > > + *        .query_id = DRM_I915_QUERY_VMA_INFO,
> > > > + *    };
> > > > + *    struct drm_i915_query query = {
> > > > + *        .num_items = 1,
> > > > + *        .items_ptr = (uintptr_t)&item,
> > > > + *    };
> > > > + *    int err;
> > > > + *
> > > > + *    // Unlike some other types of queries, there is no need
> > > > to first query
> > > > + *    // the size of the data_ptr blob here, since we already
> > > > know ahead of
> > > > + *    // time how big this needs to be.
> > > > + *    item.length = sizeof(info);
> > > > + *
> > > > + *    // Next we fill in the vm_id and ppGTT address of the vma
> > > > we wish
> > > > + *    // to query, before then firing off the query.
> > > > + *    info.vm_id = vm_id;
> > > > + *    info.offset = gtt_address;
> > > > + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
> > > > + *    if (err || item.length < 0) ...
> > > > + *
> > > > + *    // If all went well we can now inspect the returned attributes.
> > > > + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
> > > > + */
> > > > +struct __drm_i915_query_vma_info {
> > > > +    /**
> > > > +     * @vm_id: The given vm id that contains the vma. The id is
> > > > the value
> > > > +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
> > > > +     * drm_i915_gem_vm_control.vm_id.
> > > > +     */
> > > > +    __u32 vm_id;
> > > > +    /** @pad: MBZ. */
> > > > +    __u32 pad;
> > > > +    /**
> > > > +     * @offset: The corresponding ppGTT address of the vma
> > > > which the kernel
> > > > +     * will use to perform the lookup.
> > > > +     */
> > > > +    __u64 offset;
> > > > +    /**
> > > > +     * @attributes: The returned attributes for the given vma.
> > > > +     *
> > > > +     * Possible values:
> > > > +     *
> > > > +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages
> > > > backing the
> > > > +     * vma are currently CPU accessible. If this is not set
> > > > then the vma is
> > > > +     * currently backed by I915_MEMORY_CLASS_DEVICE memory,
> > > > which the CPU
> > > > +     * cannot directly access(this is only possible on discrete
> > > > devices with
> > > > +     * a small BAR). Attempting to MMAP and fault such an object will
> > > > +     * require the kernel first synchronising any GPU work tied to the
> > > > +     * object, before then migrating the pages, either to the
> > > > CPU accessible
> > > > +     * part of I915_MEMORY_CLASS_DEVICE, or
> > > > I915_MEMORY_CLASS_SYSTEM, if the
> > > > +     * placements permit it. See
> > > > I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
> > > > +     *
> > > > +     * Note that this is inherently racy.
> > > > +     */
> > > > +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
> > > > +    __u64 attributes;
> > > > +    /** @rsvd: MBZ */
> > > > +    __u32 rsvd[4];
> > > > +};
> > > > diff --git a/Documentation/gpu/rfc/i915_small_bar.rst
> > > > b/Documentation/gpu/rfc/i915_small_bar.rst
> > > > new file mode 100644
> > > > index 000000000000..be3d9bcdd86d
> > > > --- /dev/null
> > > > +++ b/Documentation/gpu/rfc/i915_small_bar.rst
> > > > @@ -0,0 +1,58 @@
> > > > +==========================
> > > > +I915 Small BAR RFC Section
> > > > +==========================
> > > > +Starting from DG2 we will have resizable BAR support for device
> > > > local-memory(i.e
> > > > +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size
> > > > might still be
> > > > +smaller than the total probed_size. In such cases, only some subset of
> > > > +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the
> > > > first 256M),
> > > > +while the remainder is only accessible via the GPU.
> > > > +
> > > > +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
> > > > +----------------------------------------------
> > > > +New gem_create_ext flag to tell the kernel that a BO will
> > > > require CPU access.
> > > > +This becomes important when placing an object in
> > > > I915_MEMORY_CLASS_DEVICE, where
> > > > +underneath the device has a small BAR, meaning only some
> > > > portion of it is CPU
> > > > +accessible. Without this flag the kernel will assume that CPU
> > > > access is not
> > > > +required, and prioritize using the non-CPU visible portion of
> > > > +I915_MEMORY_CLASS_DEVICE.
> > > > +
> > > > +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> > > > +   :functions: __drm_i915_gem_create_ext
> > > > +
> > > > +probed_cpu_visible_size attribute
> > > > +---------------------------------
> > > > +New struct __drm_i915_memory_region_info attribute which returns the
> > > > total size of the
> > > > +CPU accessible portion, for the particular region. This should only be
> > > > +applicable for I915_MEMORY_CLASS_DEVICE.
> > > > +
> > > > +Vulkan will need this as part of creating a separate
> > > > VkMemoryHeap with the
> > > > +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU
> > > > visible portion,
> > > > +where the total size of the heap needs to be known.
> > > > +
> > > > +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> > > > +   :functions: __drm_i915_memory_region_info
> > > > +
> > > > +DRM_I915_QUERY_VMA_INFO query
> > > > +-----------------------------
> > > > +Query the attributes of some vma. Given a vm and GTT offset, find the
> > > > +respective vma, and return its set of attributes. For now we
> > > > only support
> > > > +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
> > > > +currently placed in memory that is accessible by the CPU. This
> > > > should always be
> > > > +set on devices where the CPU probed_cpu_visible_size of
> > > > I915_MEMORY_CLASS_DEVICE
> > > > +matches the probed_size. If this is not set then CPU faulting
> > > > the object will
> > > > +likely first require migrating the pages.
> > > > +
> > > > +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> > > > +   :functions: __drm_i915_query_vma_info
> > > > +
> > > > +Error Capture restrictions
> > > > +--------------------------
> > > > +With error capture we have two new restrictions:
> > > > +
> > > > +    1) Error capture is best effort on small BAR systems; if
> > > > the pages are not
> > > > +    CPU accessible, at the time of capture, then the kernel is
> > > > free to skip
> > > > +    trying to capture them.
> > > > +
> > > > +    2) On discrete we now reject error capture on recoverable
> > > > contexts. In the
> > > > +    future the kernel may want to blit during error capture,
> > > > when for example
> > > > +    something is not currently CPU accessible.
> > > > diff --git a/Documentation/gpu/rfc/index.rst
> > > > b/Documentation/gpu/rfc/index.rst
> > > > index 91e93a705230..5a3bd3924ba6 100644
> > > > --- a/Documentation/gpu/rfc/index.rst
> > > > +++ b/Documentation/gpu/rfc/index.rst
> > > > @@ -23,3 +23,7 @@ host such documentation:
> > > >   .. toctree::
> > > >         i915_scheduler.rst
> > > > +
> > > > +.. toctree::
> > > > +
> > > > +    i915_small_bar.rst
> > > 
> > > 
> > 
> 

-- 
Daniel Vetter
Software Engineer, Intel Corporation
http://blog.ffwll.ch

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-27  8:36 ` Tvrtko Ursulin
@ 2022-04-27 17:36   ` Matthew Auld
  2022-04-28  8:55     ` Tvrtko Ursulin
  0 siblings, 1 reply; 50+ messages in thread
From: Matthew Auld @ 2022-04-27 17:36 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx
  Cc: Thomas Hellström, Kenneth Graunke, mesa-dev, dri-devel,
	Daniel Vetter

On 27/04/2022 09:36, Tvrtko Ursulin wrote:
> 
> On 20/04/2022 18:13, Matthew Auld wrote:
>> Add an entry for the new uapi needed for small BAR on DG2+.
>>
>> v2:
>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>    - Rework error capture interactions, including no longer needing
>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>    - Add probed_cpu_visible_size. (Lionel)
>>
>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>> Cc: mesa-dev@lists.freedesktop.org
>> ---
>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>   Documentation/gpu/rfc/index.rst          |   4 +
>>   3 files changed, 252 insertions(+)
>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>
>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>> b/Documentation/gpu/rfc/i915_small_bar.h
>> new file mode 100644
>> index 000000000000..7bfd0cf44d35
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>> @@ -0,0 +1,190 @@
>> +/**
>> + * struct __drm_i915_memory_region_info - Describes one region as 
>> known to the
>> + * driver.
>> + *
>> + * Note this is using both struct drm_i915_query_item and struct 
>> drm_i915_query.
>> + * For this new query we are adding the new query id 
>> DRM_I915_QUERY_MEMORY_REGIONS
>> + * at &drm_i915_query_item.query_id.
>> + */
>> +struct __drm_i915_memory_region_info {
>> +    /** @region: The class:instance pair encoding */
>> +    struct drm_i915_gem_memory_class_instance region;
>> +
>> +    /** @rsvd0: MBZ */
>> +    __u32 rsvd0;
>> +
>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>> +    __u64 probed_size;
>> +
>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>> unknown) */
>> +    __u64 unallocated_size;
>> +
>> +    union {
>> +        /** @rsvd1: MBZ */
>> +        __u64 rsvd1[8];
>> +        struct {
>> +            /**
>> +             * @probed_cpu_visible_size: Memory probed by the driver
>> +             * that is CPU accessible. (-1 = unknown).
>> +             *
>> +             * This will always be <= @probed_size, and the
>> +             * remainder(if there is any) will not be CPU
>> +             * accessible.
>> +             */
>> +            __u64 probed_cpu_visible_size;
> 
> Would unallocated_cpu_visible_size be useful, to follow the total 
> unallocated_size?

Makes sense. But I don't think unallocated_size has actually been
properly wired up yet. It still just gives the same value as 
probed_size. IIRC for unallocated_size we still need a real 
user/usecase/umd, before wiring that up for real with the existing avail 
tracking. Once we have that we can also add unallocated_cpu_visible_size.
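
As an aside, a rough sketch of how a UMD would consume
probed_cpu_visible_size once this lands, e.g. for sizing the HOST_VISIBLE
VkMemoryHeap. The new field name comes from the RFC header in this thread,
so treat it as provisional; the two-pass query pattern itself is existing
uapi, fd is an open DRM fd, and error handling is omitted:

  struct drm_i915_query_item item = {
          .query_id = DRM_I915_QUERY_MEMORY_REGIONS,
  };
  struct drm_i915_query query = {
          .num_items = 1,
          .items_ptr = (uintptr_t)&item,
  };
  struct drm_i915_query_memory_regions *info;
  __u64 mappable_heap_size = 0;
  unsigned int i;

  /* First pass: the kernel reports the required blob size in item.length. */
  ioctl(fd, DRM_IOCTL_I915_QUERY, &query);

  info = calloc(1, item.length);
  item.data_ptr = (uintptr_t)info;

  /* Second pass: fetch the actual region info. */
  ioctl(fd, DRM_IOCTL_I915_QUERY, &query);

  for (i = 0; i < info->num_regions; i++) {
          struct drm_i915_memory_region_info *r = &info->regions[i];

          if (r->region.memory_class != I915_MEMORY_CLASS_DEVICE)
                  continue;

          /* Always <= probed_size; the remainder is not CPU accessible. */
          mappable_heap_size += r->probed_cpu_visible_size;
  }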

> 
> Btw, have we ever considered whether unallocated_size should require 
> CAP_SYS_ADMIN/PERFMON or something?

Not sure. But just in case we do add it for real at some point, why the
added restriction?

> 
>> +        };
>> +    };
>> +};
>> +
>> +/**
>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, 
>> with added
>> + * extension support using struct i915_user_extension.
>> + *
>> + * Note that new buffer flags should be added here, at least for the 
>> stuff that
>> + * is immutable. Previously we would have two ioctls, one to create 
>> the object
>> + * with gem_create, and another to apply various parameters, however 
>> this
>> + * creates some ambiguity for the params which are considered 
>> immutable. Also in
>> + * general we're phasing out the various SET/GET ioctls.
>> + */
>> +struct __drm_i915_gem_create_ext {
>> +    /**
>> +     * @size: Requested size for the object.
>> +     *
>> +     * The (page-aligned) allocated size for the object will be 
>> returned.
>> +     *
>> +     * Note that for some devices we might have further minimum
>> +     * page-size restrictions(larger than 4K), like for device
>> local-memory.
>> +     * However in general the final size here should always reflect any
>> +     * rounding up, if for example using the 
>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>> +     * extension to place the object in device local-memory.
>> +     */
>> +    __u64 size;
>> +    /**
>> +     * @handle: Returned handle for the object.
>> +     *
>> +     * Object handles are nonzero.
>> +     */
>> +    __u32 handle;
>> +    /**
>> +     * @flags: Optional flags.
>> +     *
>> +     * Supported values:
>> +     *
>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>> kernel that
>> +     * the object will need to be accessed via the CPU.
>> +     *
>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
>> +     * only strictly required on platforms where only some of the device
>> +     * memory is directly visible or mappable through the CPU, like 
>> on DG2+.
>> +     *
>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>> +     * ensure we can always spill the allocation to system memory, if we
>> +     * can't place the object in the mappable part of
>> +     * I915_MEMORY_CLASS_DEVICE.
>> +     *
>> +     * Note that since the kernel only supports flat-CCS on objects 
>> that can
>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>> +     * flat-CCS.
>> +     *
>> +     * Without this hint, the kernel will assume that non-mappable
>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>> that the
>> +     * kernel can still migrate the object to the mappable part, as a 
>> last
>> +     * resort, if userspace ever CPU faults this object, but this 
>> might be
>> +     * expensive, and so ideally should be avoided.
>> +     */
> 
> So "needs_cpu_access" flag could almost be viewed as a sub-region 
> placement priority? What I mean is this:
> 
> 1)
> placements=device,system flags=
> 
> This results in placement priorities: device, device_cpu_mappable, system.

Yup.

> 
> 2)
> placements=device,system flags=needs_cpu_access
> 
> This results in placement priorities: device_cpu_mappable, device, system.

Here it would only be: device_cpu_mappable, system. We would completely 
ignore "device" in this case.

> 
> Is this correct?
> 
> The benefit of the flag is that i915 can place the object to the right 
> place from the start instead of on the first CPU access? Is that worth 
> it or is there more to it?

Yeah, the object will only be placed somewhere that is also CPU 
mappable, with the flag set.
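
To make that concrete, a rough sketch of both cases on top of the proposed
uapi. The flag and its exact semantics are taken from the RFC header in
this thread, so still subject to change; fd is an open DRM fd and error
handling is omitted:

  struct drm_i915_gem_memory_class_instance placements[] = {
          { .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
          { .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
  };
  struct drm_i915_gem_create_ext_memory_regions regions = {
          .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
          .num_regions = 2,
          .regions = (uintptr_t)placements,
  };

  /* 1) No flag: the kernel prefers non-mappable device memory, and only
   * falls back to the mappable part (or system memory) as a last resort,
   * e.g. on a CPU fault. */
  struct drm_i915_gem_create_ext rt = {
          .size = 2 * 1024 * 1024,
          .extensions = (uintptr_t)&regions,
  };
  ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &rt);

  /* 2) With the flag: only the CPU mappable part of device memory and
   * system memory are considered; plain non-mappable device memory is
   * ignored. */
  struct drm_i915_gem_create_ext staging = {
          .size = 2 * 1024 * 1024,
          .flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
          .extensions = (uintptr_t)&regions,
  };
  ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &staging);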

> 
>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>> +    __u32 flags;
>> +    /**
>> +     * @extensions: The chain of extensions to apply to this object.
>> +     *
>> +     * This will be useful in the future when we need to support several
>> +     * different extensions, and we need to apply more than one when
>> +     * creating the object. See struct i915_user_extension.
>> +     *
>> +     * If we don't supply any extensions then we get the same old 
>> gem_create
>> +     * behaviour.
>> +     *
>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>> +     * struct drm_i915_gem_create_ext_memory_regions.
>> +     *
>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>> +     * struct drm_i915_gem_create_ext_protected_content.
>> +     */
>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>> +    __u64 extensions;
>> +};
>> +
>> +#define DRM_I915_QUERY_VMA_INFO    5
>> +
>> +/**
>> + * struct __drm_i915_query_vma_info
>> + *
>> + * Given a vm and GTT address, lookup the corresponding vma, 
>> returning its set
>> + * of attributes.
>> + *
>> + * .. code-block:: C
>> + *
>> + *    struct drm_i915_query_vma_info info = {};
>> + *    struct drm_i915_query_item item = {
>> + *        .data_ptr = (uintptr_t)&info,
>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>> + *    };
>> + *    struct drm_i915_query query = {
>> + *        .num_items = 1,
>> + *        .items_ptr = (uintptr_t)&item,
>> + *    };
>> + *    int err;
>> + *
>> + *    // Unlike some other types of queries, there is no need to
>> first query
>> + *    // the size of the data_ptr blob here, since we already know 
>> ahead of
>> + *    // time how big this needs to be.
>> + *    item.length = sizeof(info);
>> + *
>> + *    // Next we fill in the vm_id and ppGTT address of the vma we wish
>> + *    // to query, before then firing off the query.
>> + *    info.vm_id = vm_id;
>> + *    info.offset = gtt_address;
>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>> + *    if (err || item.length < 0) ...
>> + *
>> + *    // If all went well we can now inspect the returned attributes.
>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>> + */
>> +struct __drm_i915_query_vma_info {
>> +    /**
>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>> value
>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>> +     * drm_i915_gem_vm_control.vm_id.
>> +     */
>> +    __u32 vm_id;
>> +    /** @pad: MBZ. */
>> +    __u32 pad;
>> +    /**
>> +     * @offset: The corresponding ppGTT address of the vma which the 
>> kernel
>> +     * will use to perform the lookup.
>> +     */
>> +    __u64 offset;
>> +    /**
>> +     * @attributes: The returned attributes for the given vma.
>> +     *
>> +     * Possible values:
>> +     *
>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing
>> the
>> +     * vma are currently CPU accessible. If this is not set then the
>> vma is
>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which the
>> CPU
>> +     * cannot directly access(this is only possible on discrete
>> devices with
>> +     * a small BAR). Attempting to MMAP and fault such an object will
>> +     * require the kernel first synchronising any GPU work tied to the
>> +     * object, before then migrating the pages, either to the CPU 
>> accessible
>> +     * part of I915_MEMORY_CLASS_DEVICE, or I915_MEMORY_CLASS_SYSTEM, 
>> if the
>> +     * placements permit it. See 
>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>> +     *
>> +     * Note that this is inherently racy.
>> +     */
>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>> +    __u64 attributes;
>> +    /** @rsvd: MBZ */
>> +    __u32 rsvd[4];
>> +};
>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>> b/Documentation/gpu/rfc/i915_small_bar.rst
>> new file mode 100644
>> index 000000000000..be3d9bcdd86d
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>> @@ -0,0 +1,58 @@
>> +==========================
>> +I915 Small BAR RFC Section
>> +==========================
>> +Starting from DG2 we will have resizable BAR support for device 
>> local-memory(i.e
>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size might 
>> still be
>> +smaller than the total probed_size. In such cases, only some subset of
>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the first 
>> 256M),
>> +while the remainder is only accessible via the GPU.
>> +
>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>> +----------------------------------------------
>> +New gem_create_ext flag to tell the kernel that a BO will require CPU 
>> access.
>> +This becomes important when placing an object in 
>> I915_MEMORY_CLASS_DEVICE, where
>> +underneath the device has a small BAR, meaning only some portion of 
>> it is CPU
>> +accessible. Without this flag the kernel will assume that CPU access 
>> is not
>> +required, and prioritize using the non-CPU visible portion of
>> +I915_MEMORY_CLASS_DEVICE.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_gem_create_ext
>> +
>> +probed_cpu_visible_size attribute
>> +---------------------------------
>> +New struct __drm_i915_memory_region_info attribute which returns the total
>> size of the
>> +CPU accessible portion, for the particular region. This should only be
>> +applicable for I915_MEMORY_CLASS_DEVICE.
>> +
>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>> with the
>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU visible 
>> portion,
>> +where the total size of the heap needs to be known.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_memory_region_info
>> +
>> +DRM_I915_QUERY_VMA_INFO query
>> +-----------------------------
>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>> +respective vma, and return its set of attributes. For now we only 
>> support
>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>> +currently placed in memory that is accessible by the CPU. This should 
>> always be
>> +set on devices where the CPU probed_cpu_visible_size of 
>> I915_MEMORY_CLASS_DEVICE
>> +matches the probed_size. If this is not set then CPU faulting the 
>> object will
>> +likely first require migrating the pages.
> 
> I think there should be justification for the new query documented as 
> well. (Why on top of what.)

Yeah, I'm wondering now if we can just drop this part of the uapi, for 
now at least, and focus on landing the new flag stuff first.

> 
> Without it personally I can't immediately understand why the disconnect 
> between the object based and VMA based API. Userspace has to do some 
> intervening operations like either execbuf, or vm bind in the future, to 
> make this query usable after object creation. So question is why 
> wouldn't it know already which placements it allowed and so would i915 
> auto-migrate or not for this particular object. No? Or in other words 
> why this wouldn't be an object based query since the question it is 
> answering is about the object backing store and not the VMA.

Yeah, just using the object handle or so I guess would also work. Thanks 
for the comments.

> 
> Regards,
> 
> Tvrtko
> 
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_query_vma_info
>> +
>> +Error Capture restrictions
>> +--------------------------
>> +With error capture we have two new restrictions:
>> +
>> +    1) Error capture is best effort on small BAR systems; if the 
>> pages are not
>> +    CPU accessible, at the time of capture, then the kernel is free 
>> to skip
>> +    trying to capture them.
>> +
>> +    2) On discrete we now reject error capture on recoverable 
>> contexts. In the
>> +    future the kernel may want to blit during error capture, when for 
>> example
>> +    something is not currently CPU accessible.
>> diff --git a/Documentation/gpu/rfc/index.rst 
>> b/Documentation/gpu/rfc/index.rst
>> index 91e93a705230..5a3bd3924ba6 100644
>> --- a/Documentation/gpu/rfc/index.rst
>> +++ b/Documentation/gpu/rfc/index.rst
>> @@ -23,3 +23,7 @@ host such documentation:
>>   .. toctree::
>>       i915_scheduler.rst
>> +
>> +.. toctree::
>> +
>> +    i915_small_bar.rst

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-27 17:36   ` Matthew Auld
@ 2022-04-28  8:55     ` Tvrtko Ursulin
  2022-04-28 10:25       ` Matthew Auld
  0 siblings, 1 reply; 50+ messages in thread
From: Tvrtko Ursulin @ 2022-04-28  8:55 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Kenneth Graunke, mesa-dev, dri-devel,
	Daniel Vetter


On 27/04/2022 18:36, Matthew Auld wrote:
> On 27/04/2022 09:36, Tvrtko Ursulin wrote:
>>
>> On 20/04/2022 18:13, Matthew Auld wrote:
>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>
>>> v2:
>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>    - Rework error capture interactions, including no longer needing
>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>    - Add probed_cpu_visible_size. (Lionel)
>>>
>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>> Cc: mesa-dev@lists.freedesktop.org
>>> ---
>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>   3 files changed, 252 insertions(+)
>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>> new file mode 100644
>>> index 000000000000..7bfd0cf44d35
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>> @@ -0,0 +1,190 @@
>>> +/**
>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>> known to the
>>> + * driver.
>>> + *
>>> + * Note this is using both struct drm_i915_query_item and struct 
>>> drm_i915_query.
>>> + * For this new query we are adding the new query id 
>>> DRM_I915_QUERY_MEMORY_REGIONS
>>> + * at &drm_i915_query_item.query_id.
>>> + */
>>> +struct __drm_i915_memory_region_info {
>>> +    /** @region: The class:instance pair encoding */
>>> +    struct drm_i915_gem_memory_class_instance region;
>>> +
>>> +    /** @rsvd0: MBZ */
>>> +    __u32 rsvd0;
>>> +
>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>> +    __u64 probed_size;
>>> +
>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>> unknown) */
>>> +    __u64 unallocated_size;
>>> +
>>> +    union {
>>> +        /** @rsvd1: MBZ */
>>> +        __u64 rsvd1[8];
>>> +        struct {
>>> +            /**
>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>> +             * that is CPU accessible. (-1 = unknown).
>>> +             *
>>> +             * This will always be <= @probed_size, and the
>>> +             * remainder(if there is any) will not be CPU
>>> +             * accessible.
>>> +             */
>>> +            __u64 probed_cpu_visible_size;
>>
>> Would unallocated_cpu_visible_size be useful, to follow the total 
>> unallocated_size?
> 
>> Makes sense. But I don't think unallocated_size has actually been
> properly wired up yet. It still just gives the same value as 
> probed_size. IIRC for unallocated_size we still need a real 
> user/usecase/umd, before wiring that up for real with the existing avail 
> tracking. Once we have that we can also add unallocated_cpu_visible_size.

So this does nothing at the moment:

  info.unallocated_size = mr->avail;

Right, it is set to "mem->avail = mem->total;" at region init time and I 
indeed can't find it ever getting modified. Okay.

>> Btw, have we ever considered whether unallocated_size should require 
>> CAP_SYS_ADMIN/PERFMON or something?
> 
> Not sure. But just in case we do add it for real at some point, why the
> added restriction?

To avoid a side channel, albeit perhaps a very weak one. For engine 
utilization we require CAP_SYS_PERFMON, but that is implied by the perf 
core API. It's open for discussion. I guess it may make sense to limit 
it also because it is questionable whether the field(s) are even useful.

> 
>>
>>> +        };
>>> +    };
>>> +};
>>> +
>>> +/**
>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, 
>>> with added
>>> + * extension support using struct i915_user_extension.
>>> + *
>>> + * Note that new buffer flags should be added here, at least for the 
>>> stuff that
>>> + * is immutable. Previously we would have two ioctls, one to create 
>>> the object
>>> + * with gem_create, and another to apply various parameters, however 
>>> this
>>> + * creates some ambiguity for the params which are considered 
>>> immutable. Also in
>>> + * general we're phasing out the various SET/GET ioctls.
>>> + */
>>> +struct __drm_i915_gem_create_ext {
>>> +    /**
>>> +     * @size: Requested size for the object.
>>> +     *
>>> +     * The (page-aligned) allocated size for the object will be 
>>> returned.
>>> +     *
>>> +     * Note that for some devices we might have further minimum
>>> +     * page-size restrictions(larger than 4K), like for device
>>> local-memory.
>>> +     * However in general the final size here should always reflect any
>>> +     * rounding up, if for example using the 
>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>> +     * extension to place the object in device local-memory.
>>> +     */
>>> +    __u64 size;
>>> +    /**
>>> +     * @handle: Returned handle for the object.
>>> +     *
>>> +     * Object handles are nonzero.
>>> +     */
>>> +    __u32 handle;
>>> +    /**
>>> +     * @flags: Optional flags.
>>> +     *
>>> +     * Supported values:
>>> +     *
>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>> kernel that
>>> +     * the object will need to be accessed via the CPU.
>>> +     *
>>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
>>> +     * only strictly required on platforms where only some of the 
>>> device
>>> +     * memory is directly visible or mappable through the CPU, like 
>>> on DG2+.
>>> +     *
>>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>>> +     * ensure we can always spill the allocation to system memory, if we
>>> +     * can't place the object in the mappable part of
>>> +     * I915_MEMORY_CLASS_DEVICE.
>>> +     *
>>> +     * Note that since the kernel only supports flat-CCS on objects 
>>> that can
>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>>> +     * flat-CCS.
>>> +     *
>>> +     * Without this hint, the kernel will assume that non-mappable
>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>> that the
>>> +     * kernel can still migrate the object to the mappable part, as 
>>> a last
>>> +     * resort, if userspace ever CPU faults this object, but this 
>>> might be
>>> +     * expensive, and so ideally should be avoided.
>>> +     */
>>
>> So "needs_cpu_access" flag could almost be viewed as a sub-region 
>> placement priority? What I mean is this:
>>
>> 1)
>> placements=device,system flags=
>>
>> This results in placement priorities: device, device_cpu_mappable, 
>> system.
> 
> Yup.
> 
>>
>> 2)
>> placements=device,system flags=needs_cpu_access
>>
>> This results in placement priorities: device_cpu_mappable, device, 
>> system.
> 
> Here it would only be: device_cpu_mappable, system. We would completely 
> ignore "device" in this case.
> 
>>
>> Is this correct?
>>
>> The benefit of the flag is that i915 can place the object to the right 
>> place from the start instead of on the first CPU access? Is that worth 
>> it or is there more to it?
> 
> Yeah, the object will only be placed somewhere that is also CPU 
> mappable, with the flag set.

Hm, wouldn't it be more efficient to be able to migrate it over to 
non-mappable in cases when mappable is over-subscribed?

>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>> +    __u32 flags;
>>> +    /**
>>> +     * @extensions: The chain of extensions to apply to this object.
>>> +     *
>>> +     * This will be useful in the future when we need to support 
>>> several
>>> +     * different extensions, and we need to apply more than one when
>>> +     * creating the object. See struct i915_user_extension.
>>> +     *
>>> +     * If we don't supply any extensions then we get the same old 
>>> gem_create
>>> +     * behaviour.
>>> +     *
>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>> +     *
>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>> +     */
>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>> +    __u64 extensions;
>>> +};
>>> +
>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>> +
>>> +/**
>>> + * struct __drm_i915_query_vma_info
>>> + *
>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>> returning its set
>>> + * of attributes.
>>> + *
>>> + * .. code-block:: C
>>> + *
>>> + *    struct drm_i915_query_vma_info info = {};
>>> + *    struct drm_i915_query_item item = {
>>> + *        .data_ptr = (uintptr_t)&info,
>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>> + *    };
>>> + *    struct drm_i915_query query = {
>>> + *        .num_items = 1,
>>> + *        .items_ptr = (uintptr_t)&item,
>>> + *    };
>>> + *    int err;
>>> + *
>>> + *    // Unlike some other types of queries, there is no need to
>>> first query
>>> + *    // the size of the data_ptr blob here, since we already know 
>>> ahead of
>>> + *    // time how big this needs to be.
>>> + *    item.length = sizeof(info);
>>> + *
>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we wish
>>> + *    // to query, before then firing off the query.
>>> + *    info.vm_id = vm_id;
>>> + *    info.offset = gtt_address;
>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>> + *    if (err || item.length < 0) ...
>>> + *
>>> + *    // If all went well we can now inspect the returned attributes.
>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>> + */
>>> +struct __drm_i915_query_vma_info {
>>> +    /**
>>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>>> value
>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>> +     * drm_i915_gem_vm_control.vm_id.
>>> +     */
>>> +    __u32 vm_id;
>>> +    /** @pad: MBZ. */
>>> +    __u32 pad;
>>> +    /**
>>> +     * @offset: The corresponding ppGTT address of the vma which the 
>>> kernel
>>> +     * will use to perform the lookup.
>>> +     */
>>> +    __u64 offset;
>>> +    /**
>>> +     * @attributes: The returned attributes for the given vma.
>>> +     *
>>> +     * Possible values:
>>> +     *
>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing
>>> the
>>> +     * vma are currently CPU accessible. If this is not set then the
>>> vma is
>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which the
>>> CPU
>>> +     * cannot directly access(this is only possible on discrete
>>> devices with
>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>> +     * require the kernel first synchronising any GPU work tied to the
>>> +     * object, before then migrating the pages, either to the CPU 
>>> accessible
>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>> I915_MEMORY_CLASS_SYSTEM, if the
>>> +     * placements permit it. See 
>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>> +     *
>>> +     * Note that this is inherently racy.
>>> +     */
>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>> +    __u64 attributes;
>>> +    /** @rsvd: MBZ */
>>> +    __u32 rsvd[4];
>>> +};
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>> new file mode 100644
>>> index 000000000000..be3d9bcdd86d
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>> @@ -0,0 +1,58 @@
>>> +==========================
>>> +I915 Small BAR RFC Section
>>> +==========================
>>> +Starting from DG2 we will have resizable BAR support for device 
>>> local-memory(i.e
>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>> might still be
>>> +smaller than the total probed_size. In such cases, only some subset of
>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>> first 256M),
>>> +while the remainder is only accessible via the GPU.
>>> +
>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>> +----------------------------------------------
>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>> CPU access.
>>> +This becomes important when placing an object in 
>>> I915_MEMORY_CLASS_DEVICE, where
>>> +underneath the device has a small BAR, meaning only some portion of 
>>> it is CPU
>>> +accessible. Without this flag the kernel will assume that CPU access 
>>> is not
>>> +required, and prioritize using the non-CPU visible portion of
>>> +I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_gem_create_ext
>>> +
>>> +probed_cpu_visible_size attribute
>>> +---------------------------------
>>> +New struct __drm_i915_memory_region_info attribute which returns the total
>>> size of the
>>> +CPU accessible portion, for the particular region. This should only be
>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>> with the
>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>> visible portion,
>>> +where the total size of the heap needs to be known.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_memory_region_info
>>> +
>>> +DRM_I915_QUERY_VMA_INFO query
>>> +-----------------------------
>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>> +respective vma, and return its set of attributes. For now we only 
>>> support
>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>>> +currently placed in memory that is accessible by the CPU. This 
>>> should always be
>>> +set on devices where the CPU probed_cpu_visible_size of 
>>> I915_MEMORY_CLASS_DEVICE
>>> +matches the probed_size. If this is not set then CPU faulting the 
>>> object will
>>> +likely first require migrating the pages.
>>
>> I think there should be justification for the new query documented as 
>> well. (Why on top of what.)
> 
> Yeah, I'm wondering now if we can just drop this part of the uapi, for 
> now at least, and focus on landing the new flag stuff first.
> 
>>
>> Without it personally I can't immediately understand why the 
>> disconnect between the object based and VMA based API. Userspace has 
>> to do some intervening operations like either execbuf, or vm bind in 
>> the future, to make this query usable after object creation. So 
>> question is why wouldn't it know already which placements it allowed 
>> and so would i915 auto-migrate or not for this particular object. No? 
>> Or in other words why this wouldn't be an object based query since the 
>> question it is answering is about the object backing store and not the 
>> VMA.
> 
> Yeah, just using the object handle or so I guess would also work. Thanks 
> for the comments.

I saw other folks have said the same so omitting for now sounds good to 
me indeed.

Regards,

Tvrtko

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-28  8:55     ` Tvrtko Ursulin
@ 2022-04-28 10:25       ` Matthew Auld
  2022-04-28 11:11         ` Tvrtko Ursulin
  0 siblings, 1 reply; 50+ messages in thread
From: Matthew Auld @ 2022-04-28 10:25 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx
  Cc: Thomas Hellström, Kenneth Graunke, mesa-dev, dri-devel,
	Daniel Vetter

On 28/04/2022 09:55, Tvrtko Ursulin wrote:
> 
> On 27/04/2022 18:36, Matthew Auld wrote:
>> On 27/04/2022 09:36, Tvrtko Ursulin wrote:
>>>
>>> On 20/04/2022 18:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>    - Rework error capture interactions, including no longer needing
>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>> +++++++++++++++++++++++
>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>   3 files changed, 252 insertions(+)
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>> known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id 
>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +    /** @region: The class:instance pair encoding */
>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +    /** @rsvd0: MBZ */
>>>> +    __u32 rsvd0;
>>>> +
>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +    __u64 probed_size;
>>>> +
>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>> unknown) */
>>>> +    __u64 unallocated_size;
>>>> +
>>>> +    union {
>>>> +        /** @rsvd1: MBZ */
>>>> +        __u64 rsvd1[8];
>>>> +        struct {
>>>> +            /**
>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>> +             * that is CPU accessible. (-1 = unknown).
>>>> +             *
>>>> +             * This will always be <= @probed_size, and the
>>>> +             * remainder(if there is any) will not be CPU
>>>> +             * accessible.
>>>> +             */
>>>> +            __u64 probed_cpu_visible_size;
>>>
>>> Would unallocated_cpu_visible_size be useful, to follow the total 
>>> unallocated_size?
>>
>> Makes sense. But I don't think unallocated_size has actually been
>> properly wired up yet. It still just gives the same value as 
>> probed_size. IIRC for unallocated_size we still need a real 
>> user/usecase/umd, before wiring that up for real with the existing 
>> avail tracking. Once we have that we can also add 
>> unallocated_cpu_visible_size.
> 
> So this does nothing at the moment:
> 
>   info.unallocated_size = mr->avail;
> 
> Right, it is set to "mem->avail = mem->total;" at region init time and I 
> indeed can't find it ever getting modified. Okay.
> 
>>> Btw, have we ever considered whether unallocated_size should require 
>>> CAP_SYS_ADMIN/PERFMON or something?
>>
>> Not sure. But just in case we do add it for real at some point, why
>> the added restriction?
> 
> To avoid a side channel, albeit perhaps a very weak one. For engine 
> utilization we require CAP_SYS_PERFMON, but that is implied by the perf 
> core API. It's open for discussion. I guess it may make sense to limit 
> it also because it is questionable whether the field(s) are even useful.
> 
>>
>>>
>>>> +        };
>>>> +    };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>> behaviour, with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for 
>>>> the stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to create 
>>>> the object
>>>> + * with gem_create, and another to apply various parameters, 
>>>> however this
>>>> + * creates some ambiguity for the params which are considered 
>>>> immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +    /**
>>>> +     * @size: Requested size for the object.
>>>> +     *
>>>> +     * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +     *
>>>> +     * Note that for some devices we might have further minimum
>>>> +     * page-size restrictions(larger than 4K), like for device
>>>> local-memory.
>>>> +     * However in general the final size here should always reflect 
>>>> any
>>>> +     * rounding up, if for example using the 
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +     * extension to place the object in device local-memory.
>>>> +     */
>>>> +    __u64 size;
>>>> +    /**
>>>> +     * @handle: Returned handle for the object.
>>>> +     *
>>>> +     * Object handles are nonzero.
>>>> +     */
>>>> +    __u32 handle;
>>>> +    /**
>>>> +     * @flags: Optional flags.
>>>> +     *
>>>> +     * Supported values:
>>>> +     *
>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>> kernel that
>>>> +     * the object will need to be accessed via the CPU.
>>>> +     *
>>>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, 
>>>> and
>>>> +     * only strictly required on platforms where only some of the 
>>>> device
>>>> +     * memory is directly visible or mappable through the CPU, like 
>>>> on DG2+.
>>>> +     *
>>>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>>>> +     * ensure we can always spill the allocation to system memory,
>>>> if we
>>>> +     * can't place the object in the mappable part of
>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>> +     *
>>>> +     * Note that since the kernel only supports flat-CCS on objects 
>>>> that can
>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>> don't
>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>>>> +     * flat-CCS.
>>>> +     *
>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>> that the
>>>> +     * kernel can still migrate the object to the mappable part, as 
>>>> a last
>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +     * expensive, and so ideally should be avoided.
>>>> +     */
>>>
>>> So "needs_cpu_access" flag could almost be viewed as a sub-region 
>>> placement priority? What I mean is this:
>>>
>>> 1)
>>> placements=device,system flags=
>>>
>>> This results in placement priorities: device, device_cpu_mappable, 
>>> system.
>>
>> Yup.
>>
>>>
>>> 2)
>>> placements=device,system flags=needs_cpu_access
>>>
>>> This results in placement priorities: device_cpu_mappable, device, 
>>> system.
>>
>> Here it would only be: device_cpu_mappable, system. We would 
>> completely ignore "device" in this case.
>>
>>>
>>> Is this correct?
>>>
>>> The benefit of the flag is that i915 can place the object to the 
>>> right place from the start instead of on the first CPU access? Is 
>>> that worth it or is there more to it?
>>
>> Yeah, the object will only be placed somewhere that is also CPU 
>> mappable, with the flag set.
> 
> Hm, wouldn't it be more efficient to be able to migrate it over to 
> non-mappable in cases when mappable is over-subscribed?

Not sure. As an alternative strategy, I guess that might be interesting, 
and if userspace wants something like that we can always add a new flag 
I guess? It's a toss up whether just using system memory is better/worse 
than incurring an extra move at fault time?
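
To make the placement semantics concrete, here is a minimal userspace
sketch, assuming the final uapi matches the __drm_i915_gem_create_ext
proposal in this RFC. I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS is what
the RFC adds and is not upstream yet, and the helper below is purely
illustrative:

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Create a BO with placements = {device, system}, optionally with the
 * proposed NEEDS_CPU_ACCESS flag. With the flag set the kernel would only
 * consider CPU mappable placements (mappable lmem or smem); without it
 * the non-mappable part of lmem is preferred. */
static int create_lmem_bo(int fd, uint64_t size, bool needs_cpu_access,
			  uint32_t *handle)
{
	struct drm_i915_gem_memory_class_instance placements[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM },
	};
	struct drm_i915_gem_create_ext_memory_regions regions = {
		.base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
		.num_regions = 2,
		.regions = (uintptr_t)placements,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.flags = needs_cpu_access ?
			 I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS : 0,
		.extensions = (uintptr_t)&regions,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return -errno;

	*handle = create.handle;
	return 0;
}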

> 
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +    __u32 flags;
>>>> +    /**
>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>> +     *
>>>> +     * This will be useful in the future when we need to support 
>>>> several
>>>> +     * different extensions, and we need to apply more than one when
>>>> +     * creating the object. See struct i915_user_extension.
>>>> +     *
>>>> +     * If we don't supply any extensions then we get the same old 
>>>> gem_create
>>>> +     * behaviour.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +    __u64 extensions;
>>>> +};
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>> returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + *    struct drm_i915_query_vma_info info = {};
>>>> + *    struct drm_i915_query_item item = {
>>>> + *        .data_ptr = (uintptr_t)&info,
>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + *    };
>>>> + *    struct drm_i915_query query = {
>>>> + *        .num_items = 1,
>>>> + *        .items_ptr = (uintptr_t)&item,
>>>> + *    };
>>>> + *    int err;
>>>> + *
>>>> + *    // Unlike some other types of queries, there is no need to 
>>>> first query
>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + *    // time how big this needs to be.
>>>> + *    item.length = sizeof(info);
>>>> + *
>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>> wish
>>>> + *    // to query, before then firing off the query.
>>>> + *    info.vm_id = vm_id;
>>>> + *    info.offset = gtt_address;
>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + *    if (err || item.length < 0) ...
>>>> + *
>>>> + *    // If all went well we can now inspect the returned attributes.
>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +    /**
>>>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>>>> value
>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>> +     */
>>>> +    __u32 vm_id;
>>>> +    /** @pad: MBZ. */
>>>> +    __u32 pad;
>>>> +    /**
>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>> the kernel
>>>> +     * will use to perform the lookup.
>>>> +     */
>>>> +    __u64 offset;
>>>> +    /**
>>>> +     * @attributes: The returned attributes for the given vma.
>>>> +     *
>>>> +     * Possible values:
>>>> +     *
>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>> backing the
>>>> +     * vma are currently CPU accessible. If this is not set then the 
>>>> vma is
>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>> the CPU
>>>> +     * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>> +     * require the kernel first synchronising any GPU work tied to the
>>>> +     * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +     * placements permit it. See 
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +     *
>>>> +     * Note that this is inherently racy.
>>>> +     */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +    __u64 attributes;
>>>> +    /** @rsvd: MBZ */
>>>> +    __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>> local-memory(i.e
>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>> might still be
>>>> +smaller than the total probed_size. In such cases, only some subset of
>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>> first 256M),
>>>> +while the remainder is only accessible via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in 
>>>> I915_MEMORY_CLASS_DEVICE, where
>>>> +underneath the device has a small BAR, meaning only some portion of 
>>>> it is CPU
>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>> access is not
>>>> +required, and prioritize using the non-CPU visible portion of
>>>> +I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct __drm_i915_memory_region attribute which returns the 
>>>> total size of the
>>>> +CPU accessible portion, for the particular region. This should only be
>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>> with the
>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>> visible portion,
>>>> +where the total size of the heap needs to be known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only 
>>>> support
>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>>>> +currently placed in memory that is accessible by the CPU. This 
>>>> should always be
>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>> I915_MEMORY_CLASS_DEVICE
>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>> object will
>>>> +likely first require migrating the pages.
>>>
>>> I think there should be justification for the new query documented as 
>>> well. (Why on top of what.)
>>
>> Yeah, I'm wondering now if we can just drop this part of the uapi, for 
>> now at least, and focus on landing the new flag stuff first.
>>
>>>
>>> Without it personally I can't immediately understand why the 
>>> disconnect between the object based and VMA based API. Userspace has 
>>> to do some intervening operations like either execbuf, or vm bind in 
>>> the future, to make this query usable after object creation. So 
>>> question is why wouldn't it know already which placements it allowed 
>>> and so would i915 auto-migrate or not for this particular object. No? 
>>> Or in other words why this wouldn't be an object based query since 
>>> the question it is answering is about the object backing store and 
>>> not the VMA.
>>
>> Yeah, just using the object handle or so I guess would also work. 
>> Thanks for the comments.
> 
> I saw other folks have said the same so omitting for now sounds good to 
> me indeed.
> 
> Regards,
> 
> Tvrtko

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-28 10:25       ` Matthew Auld
@ 2022-04-28 11:11         ` Tvrtko Ursulin
  2022-05-03 14:40           ` Matthew Auld
  0 siblings, 1 reply; 50+ messages in thread
From: Tvrtko Ursulin @ 2022-04-28 11:11 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Kenneth Graunke, mesa-dev, dri-devel,
	Daniel Vetter


On 28/04/2022 11:25, Matthew Auld wrote:
> On 28/04/2022 09:55, Tvrtko Ursulin wrote:
>>
>> On 27/04/2022 18:36, Matthew Auld wrote:
>>> On 27/04/2022 09:36, Tvrtko Ursulin wrote:
>>>>
>>>> On 20/04/2022 18:13, Matthew Auld wrote:
>>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>>
>>>>> v2:
>>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>>    - Rework error capture interactions, including no longer needing
>>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>>
>>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>>> Cc: mesa-dev@lists.freedesktop.org
>>>>> ---
>>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>>> +++++++++++++++++++++++
>>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>>   3 files changed, 252 insertions(+)
>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>>
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> new file mode 100644
>>>>> index 000000000000..7bfd0cf44d35
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> @@ -0,0 +1,190 @@
>>>>> +/**
>>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>>> known to the
>>>>> + * driver.
>>>>> + *
>>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>>> drm_i915_query.
>>>>> + * For this new query we are adding the new query id 
>>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>>> + * at &drm_i915_query_item.query_id.
>>>>> + */
>>>>> +struct __drm_i915_memory_region_info {
>>>>> +    /** @region: The class:instance pair encoding */
>>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>>> +
>>>>> +    /** @rsvd0: MBZ */
>>>>> +    __u32 rsvd0;
>>>>> +
>>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>>> +    __u64 probed_size;
>>>>> +
>>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>>> unknown) */
>>>>> +    __u64 unallocated_size;
>>>>> +
>>>>> +    union {
>>>>> +        /** @rsvd1: MBZ */
>>>>> +        __u64 rsvd1[8];
>>>>> +        struct {
>>>>> +            /**
>>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>>> +             * that is CPU accessible. (-1 = unknown).
>>>>> +             *
>>>>> +             * This will always be <= @probed_size, and the
>>>>> +             * remainder(if there is any) will not be CPU
>>>>> +             * accessible.
>>>>> +             */
>>>>> +            __u64 probed_cpu_visible_size;
>>>>
>>>> Would unallocated_cpu_visible_size be useful, to follow the total 
>>>> unallocated_size?
>>>
>>> Makes sense. But I don't think unallocated_size has actually been 
>>> properly wired up yet. It still just gives the same value as 
>>> probed_size. IIRC for unallocated_size we still need a real 
>>> user/usecase/umd, before wiring that up for real with the existing 
>>> avail tracking. Once we have that we can also add 
>>> unallocated_cpu_visible_size.
>>
>> So this does nothing at the moment:
>>
>>   info.unallocated_size = mr->avail;
>>
>> Right, it is set to "mem->avail = mem->total;" at region init time and 
>> I indeed can't find it ever getting modified. Okay.
>>
>>>> Btw, have we ever considered whether unallocated_size should require 
>>>> CAP_SYS_ADMIN/PERFMON or something?
>>>
>>> Not sure. But just in case we do add it for real at some point, why 
>>> the added restriction?
>>
>> To avoid a side channel, albeit perhaps a very weak one. For engine 
>> utilization we require CAP_SYS_PERFMON, but that is implied by the 
>> perf core API. It's open for discussion. I guess it may make sense to 
>> limit it also because it is questionable whether the field(s) are even useful.
>>
>>>
>>>>
>>>>> +        };
>>>>> +    };
>>>>> +};
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>>> behaviour, with added
>>>>> + * extension support using struct i915_user_extension.
>>>>> + *
>>>>> + * Note that new buffer flags should be added here, at least for 
>>>>> the stuff that
>>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>>> create the object
>>>>> + * with gem_create, and another to apply various parameters, 
>>>>> however this
>>>>> + * creates some ambiguity for the params which are considered 
>>>>> immutable. Also in
>>>>> + * general we're phasing out the various SET/GET ioctls.
>>>>> + */
>>>>> +struct __drm_i915_gem_create_ext {
>>>>> +    /**
>>>>> +     * @size: Requested size for the object.
>>>>> +     *
>>>>> +     * The (page-aligned) allocated size for the object will be 
>>>>> returned.
>>>>> +     *
>>>>> +     * Note that for some devices we might have further minimum
>>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>>> local-memory.
>>>>> +     * However in general the final size here should always 
>>>>> reflect any
>>>>> +     * rounding up, if for example using the 
>>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>>> +     * extension to place the object in device local-memory.
>>>>> +     */
>>>>> +    __u64 size;
>>>>> +    /**
>>>>> +     * @handle: Returned handle for the object.
>>>>> +     *
>>>>> +     * Object handles are nonzero.
>>>>> +     */
>>>>> +    __u32 handle;
>>>>> +    /**
>>>>> +     * @flags: Optional flags.
>>>>> +     *
>>>>> +     * Supported values:
>>>>> +     *
>>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>>> kernel that
>>>>> +     * the object will need to be accessed via the CPU.
>>>>> +     *
>>>>> +     * Only valid when placing objects in 
>>>>> I915_MEMORY_CLASS_DEVICE, and
>>>>> +     * only strictly required on platforms where only some of the 
>>>>> device
>>>>> +     * memory is directly visible or mappable through the CPU, 
>>>>> like on DG2+.
>>>>> +     *
>>>>> +     * One of the placements MUST also be 
>>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>>> +     * ensure we can always spill the allocation to system memory, 
>>>>> if we
>>>>> +     * can't place the object in the mappable part of
>>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>>> +     *
>>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>>> objects that can
>>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>>> don't
>>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together 
>>>>> with
>>>>> +     * flat-CCS.
>>>>> +     *
>>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>>> that the
>>>>> +     * kernel can still migrate the object to the mappable part, 
>>>>> as a last
>>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>>> might be
>>>>> +     * expensive, and so ideally should be avoided.
>>>>> +     */
>>>>
>>>> So "needs_cpu_access" flag could almost be viewed as a sub-region 
>>>> placement priority? What I mean is this:
>>>>
>>>> 1)
>>>> placements=device,system flags=
>>>>
>>>> This results in placement priorities: device, device_cpu_mappable, 
>>>> system.
>>>
>>> Yup.
>>>
>>>>
>>>> 2)
>>>> placements=device,system flags=needs_cpu_access
>>>>
>>>> This results in placement priorities: device_cpu_mappable, device, 
>>>> system.
>>>
>>> Here it would only be: device_cpu_mappable, system. We would 
>>> completely ignore "device" in this case.
>>>
>>>>
>>>> Is this correct?
>>>>
>>>> The benefit of the flag is that i915 can place the object to the 
>>>> right place from the start instead of on the first CPU access? Is 
>>>> that worth it or is there more to it?
>>>
>>> Yeah, the object will only be placed somewhere that is also CPU 
>>> mappable, with the flag set.
>>
>> Hm, wouldn't it be more efficient to be able to migrate it over to 
>> non-mappable in cases when mappable is over-subscribed?
> 
> Not sure. As an alternative strategy, I guess that might be interesting, 
> and if userspace wants something like that we can always add a new flag 
> I guess? It's a toss up whether just using system memory is better/worse 
> than incurring an extra move at fault time?

For me it doesn't make sense to allow BOs without the cpu mappable flag to 
use the mappable section (albeit as 2nd priority) while not allowing the 
cpu mappable ones to temporarily go anywhere in lmem.

From the i915 side it needs to know the allowed regions (for this argument 
I speak of mappable / non-mappable as separate regions, even if the uapi 
does not expose them as separate memory regions). i915 does not know in 
advance the exact usage pattern.

In case of multiple clients, one might touch a buffer from the CPU once 
and then render with the GPU many times. Another client might touch it 
from the CPU a lot more. With mappable space contention this would cause 
the buffer from the first client to constantly get migrated between smem 
and lmem, while in reality it could have been migrated to non-mappable 
lmem and used by the GPU without any problems.

As some sort of diagram:

	Client A BO	Client B BO
	--------------	--------------
	CPU access		
			CPU access (Client A BO "evicted" to smem)
	GPU access	GPU access
	GPU access	CPU access
	GPU access	GPU access

If we assume there is only space for one BO in mappable, allowing 
non-mappable placement lets Client A be unaffected by Client B's 
activity, while with the current proposal it needlessly takes a hit on 
every, or every other, GPU access.

So I think, unless there is a fundamental reason to disallow it which I 
am missing, not limiting the implied placement when the cpu access flag 
is given is beneficial to the flexibility of the migration decisions 
i915 can make.

Regards,

Tvrtko
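
For readers skimming the thread, the two behaviours being debated for a BO
created with placements = {device, system} boil down to the orderings
below. This is not uapi, just a restatement of the discussion, and the
last variant is only one possible reading of the suggestion above:

enum lmem_subregion { LMEM_NON_MAPPABLE, LMEM_MAPPABLE, SMEM };

/* Current proposal, flag not set: non-mappable lmem is preferred. */
static const enum lmem_subregion prio_default[] = {
	LMEM_NON_MAPPABLE, LMEM_MAPPABLE, SMEM,
};

/* Current proposal, NEEDS_CPU_ACCESS set: non-mappable lmem is never used. */
static const enum lmem_subregion prio_needs_cpu_access[] = {
	LMEM_MAPPABLE, SMEM,
};

/* Suggested alternative: keep non-mappable lmem as a lower priority fallback. */
static const enum lmem_subregion prio_needs_cpu_access_alt[] = {
	LMEM_MAPPABLE, LMEM_NON_MAPPABLE, SMEM,
};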

> 
>>
>>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>>> +    __u32 flags;
>>>>> +    /**
>>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>>> +     *
>>>>> +     * This will be useful in the future when we need to support 
>>>>> several
>>>>> +     * different extensions, and we need to apply more than one when
>>>>> +     * creating the object. See struct i915_user_extension.
>>>>> +     *
>>>>> +     * If we don't supply any extensions then we get the same old 
>>>>> gem_create
>>>>> +     * behaviour.
>>>>> +     *
>>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>>> +     *
>>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>>> +     */
>>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>>> +    __u64 extensions;
>>>>> +};
>>>>> +
>>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_query_vma_info
>>>>> + *
>>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>>> returning its set
>>>>> + * of attributes.
>>>>> + *
>>>>> + * .. code-block:: C
>>>>> + *
>>>>> + *    struct drm_i915_query_vma_info info = {};
>>>>> + *    struct drm_i915_query_item item = {
>>>>> + *        .data_ptr = (uintptr_t)&info,
>>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>>> + *    };
>>>>> + *    struct drm_i915_query query = {
>>>>> + *        .num_items = 1,
>>>>> + *        .items_ptr = (uintptr_t)&item,
>>>>> + *    };
>>>>> + *    int err;
>>>>> + *
>>>>> + *    // Unlike some other types of queries, there is no need to 
>>>>> first query
>>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>>> ahead of
>>>>> + *    // time how big this needs to be.
>>>>> + *    item.length = sizeof(info);
>>>>> + *
>>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>>> wish
>>>>> + *    // to query, before then firing off the query.
>>>>> + *    info.vm_id = vm_id;
>>>>> + *    info.offset = gtt_address;
>>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>>> + *    if (err || item.length < 0) ...
>>>>> + *
>>>>> + *    // If all went well we can now inspect the returned attributes.
>>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>>> + */
>>>>> +struct __drm_i915_query_vma_info {
>>>>> +    /**
>>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>>> the value
>>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>>> +     */
>>>>> +    __u32 vm_id;
>>>>> +    /** @pad: MBZ. */
>>>>> +    __u32 pad;
>>>>> +    /**
>>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>>> the kernel
>>>>> +     * will use to perform the lookup.
>>>>> +     */
>>>>> +    __u64 offset;
>>>>> +    /**
>>>>> +     * @attributes: The returned attributes for the given vma.
>>>>> +     *
>>>>> +     * Possible values:
>>>>> +     *
>>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>>> backing the
>>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>>> the vma is
>>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>>> the CPU
>>>>> +     * cannot directly access(this is only possible on discrete 
>>>>> devices with
>>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>>> +     * require the kernel first synchronising any GPU work tied to 
>>>>> the
>>>>> +     * object, before then migrating the pages, either to the CPU 
>>>>> accessible
>>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>>> +     * placements permit it. See 
>>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>>> +     *
>>>>> +     * Note that this is inherently racy.
>>>>> +     */
>>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>>> +    __u64 attributes;
>>>>> +    /** @rsvd: MBZ */
>>>>> +    __u32 rsvd[4];
>>>>> +};
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> new file mode 100644
>>>>> index 000000000000..be3d9bcdd86d
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> @@ -0,0 +1,58 @@
>>>>> +==========================
>>>>> +I915 Small BAR RFC Section
>>>>> +==========================
>>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>>> local-memory(i.e
>>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>>> might still be
>>>>> +smaller than the total probed_size. In such cases, only some 
>>>>> subset of
>>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>>> first 256M),
>>>>> +while the remainder is only accessible via the GPU.
>>>>> +
>>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>>> +----------------------------------------------
>>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>>> CPU access.
>>>>> +This becomes important when placing an object in 
>>>>> I915_MEMORY_CLASS_DEVICE, where
>>>>> +underneath the device has a small BAR, meaning only some portion 
>>>>> of it is CPU
>>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>>> access is not
>>>>> +required, and prioritize using the non-CPU visible portion of
>>>>> +I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_gem_create_ext
>>>>> +
>>>>> +probed_cpu_visible_size attribute
>>>>> +---------------------------------
>>>>> +New struct __drm_i915_memory_region attribute which returns the 
>>>>> total size of the
>>>>> +CPU accessible portion, for the particular region. This should 
>>>>> only be
>>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>>> with the
>>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>>> visible portion,
>>>>> +where the total size of the heap needs to be known.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_memory_region_info
>>>>> +
>>>>> +DRM_I915_QUERY_VMA_INFO query
>>>>> +-----------------------------
>>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>>> +respective vma, and return its set of attributes. For now we only 
>>>>> support
>>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>>> object/vma is
>>>>> +currently placed in memory that is accessible by the CPU. This 
>>>>> should always be
>>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>>> I915_MEMORY_CLASS_DEVICE
>>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>>> object will
>>>>> +likely first require migrating the pages.
>>>>
>>>> I think there should be justification for the new query documented 
>>>> as well. (Why on top of what.)
>>>
>>> Yeah, I'm wondering now if we can just drop this part of the uapi, 
>>> for now at least, and focus on landing the new flag stuff first.
>>>
>>>>
>>>> Without it personally I can't immediately understand why the 
>>>> disconnect between the object based and VMA based API. Userspace has 
>>>> to do some intervening operations like either execbuf, or vm bind in 
>>>> the future, to make this query usable after object creation. So 
>>>> question is why wouldn't it know already which placements it allowed 
>>>> and so would i915 auto-migrate or not for this particular object. 
>>>> No? Or in other words why this wouldn't be an object based query 
>>>> since the question it is answering is about the object backing store 
>>>> and not the VMA.
>>>
>>> Yeah, just using the object handle or so I guess would also work. 
>>> Thanks for the comments.
>>
>> I saw other folks have said the same so omitting for now sounds good 
>> to me indeed.
>>
>> Regards,
>>
>> Tvrtko

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-20 17:13 ` [Intel-gfx] " Matthew Auld
@ 2022-05-02  7:54   ` Lionel Landwerlin
  -1 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-02  7:54 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Jordan Justen, dri-devel, Kenneth Graunke,
	Jon Bloomfield, Daniel Vetter, mesa-dev, Akeem G Abodunrin

On 20/04/2022 20:13, Matthew Auld wrote:
> Add an entry for the new uapi needed for small BAR on DG2+.
>
> v2:
>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>    - Rework error capture interactions, including no longer needing
>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>    - Add probed_cpu_visible_size. (Lionel)
>
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Cc: Jordan Justen <jordan.l.justen@intel.com>
> Cc: Kenneth Graunke <kenneth@whitecape.org>
> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
> Cc: mesa-dev@lists.freedesktop.org
> ---
>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>   Documentation/gpu/rfc/index.rst          |   4 +
>   3 files changed, 252 insertions(+)
>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>
> diff --git a/Documentation/gpu/rfc/i915_small_bar.h b/Documentation/gpu/rfc/i915_small_bar.h
> new file mode 100644
> index 000000000000..7bfd0cf44d35
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_small_bar.h
> @@ -0,0 +1,190 @@
> +/**
> + * struct __drm_i915_memory_region_info - Describes one region as known to the
> + * driver.
> + *
> + * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
> + * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
> + * at &drm_i915_query_item.query_id.
> + */
> +struct __drm_i915_memory_region_info {
> +	/** @region: The class:instance pair encoding */
> +	struct drm_i915_gem_memory_class_instance region;
> +
> +	/** @rsvd0: MBZ */
> +	__u32 rsvd0;
> +
> +	/** @probed_size: Memory probed by the driver (-1 = unknown) */
> +	__u64 probed_size;
> +
> +	/** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
> +	__u64 unallocated_size;
> +
> +	union {
> +		/** @rsvd1: MBZ */
> +		__u64 rsvd1[8];
> +		struct {
> +			/**
> +			 * @probed_cpu_visible_size: Memory probed by the driver
> +			 * that is CPU accessible. (-1 = unknown).
> +			 *
> +			 * This will always be <= @probed_size, and the
> +			 * remainder(if there is any) will not be CPU
> +			 * accessible.
> +			 */
> +			__u64 probed_cpu_visible_size;
> +		};


Trying to implement userspace support in Vulkan for this, I have an 
additional question about the value of probed_cpu_visible_size.

When is it set to -1?

I'm guessing that before there is support for this value it'll be 0 (MBZ).

After that it should either be the entire lmem or something smaller.


-Lionel
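
For what it's worth, the interpretation guessed at above could look like
the sketch below. This is purely illustrative and encodes the guess
rather than confirmed behaviour; the helper name is made up:

#include <stdint.h>

/* Hypothetical helper: 0 would mean the kernel predates the field (treat
 * all of lmem as CPU visible, as today), -1 would mean unknown, anything
 * else is the actual CPU visible size (<= probed_size). */
static uint64_t lmem_cpu_visible_size(uint64_t probed_size,
				      uint64_t probed_cpu_visible_size)
{
	if (probed_cpu_visible_size == 0 ||
	    probed_cpu_visible_size == (uint64_t)-1)
		return probed_size;

	return probed_cpu_visible_size;
}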


> +	};
> +};
> +
> +/**
> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, with added
> + * extension support using struct i915_user_extension.
> + *
> + * Note that new buffer flags should be added here, at least for the stuff that
> + * is immutable. Previously we would have two ioctls, one to create the object
> + * with gem_create, and another to apply various parameters, however this
> + * creates some ambiguity for the params which are considered immutable. Also in
> + * general we're phasing out the various SET/GET ioctls.
> + */
> +struct __drm_i915_gem_create_ext {
> +	/**
> +	 * @size: Requested size for the object.
> +	 *
> +	 * The (page-aligned) allocated size for the object will be returned.
> +	 *
> +	 * Note that for some devices we might have further minimum
> +	 * page-size restrictions(larger than 4K), like for device local-memory.
> +	 * However in general the final size here should always reflect any
> +	 * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
> +	 * extension to place the object in device local-memory.
> +	 */
> +	__u64 size;
> +	/**
> +	 * @handle: Returned handle for the object.
> +	 *
> +	 * Object handles are nonzero.
> +	 */
> +	__u32 handle;
> +	/**
> +	 * @flags: Optional flags.
> +	 *
> +	 * Supported values:
> +	 *
> +	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
> +	 * the object will need to be accessed via the CPU.
> +	 *
> +	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
> +	 * only strictly required on platforms where only some of the device
> +	 * memory is directly visible or mappable through the CPU, like on DG2+.
> +	 *
> +	 * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
> +	 * ensure we can always spill the allocation to system memory, if we
> +	 * can't place the object in the mappable part of
> +	 * I915_MEMORY_CLASS_DEVICE.
> +	 *
> +	 * Note that since the kernel only supports flat-CCS on objects that can
> +	 * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
> +	 * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
> +	 * flat-CCS.
> +	 *
> +	 * Without this hint, the kernel will assume that non-mappable
> +	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
> +	 * kernel can still migrate the object to the mappable part, as a last
> +	 * resort, if userspace ever CPU faults this object, but this might be
> +	 * expensive, and so ideally should be avoided.
> +	 */
> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
> +	__u32 flags;
> +	/**
> +	 * @extensions: The chain of extensions to apply to this object.
> +	 *
> +	 * This will be useful in the future when we need to support several
> +	 * different extensions, and we need to apply more than one when
> +	 * creating the object. See struct i915_user_extension.
> +	 *
> +	 * If we don't supply any extensions then we get the same old gem_create
> +	 * behaviour.
> +	 *
> +	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
> +	 * struct drm_i915_gem_create_ext_memory_regions.
> +	 *
> +	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
> +	 * struct drm_i915_gem_create_ext_protected_content.
> +	 */
> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
> +	__u64 extensions;
> +};
> +
> +#define DRM_I915_QUERY_VMA_INFO	5
> +
> +/**
> + * struct __drm_i915_query_vma_info
> + *
> + * Given a vm and GTT address, lookup the corresponding vma, returning its set
> + * of attributes.
> + *
> + * .. code-block:: C
> + *
> + *	struct drm_i915_query_vma_info info = {};
> + *	struct drm_i915_query_item item = {
> + *		.data_ptr = (uintptr_t)&info,
> + *		.query_id = DRM_I915_QUERY_VMA_INFO,
> + *	};
> + *	struct drm_i915_query query = {
> + *		.num_items = 1,
> + *		.items_ptr = (uintptr_t)&item,
> + *	};
> + *	int err;
> + *
> + *	// Unlike some other types of queries, there is no need to first query
> + *	// the size of the data_ptr blob here, since we already know ahead of
> + *	// time how big this needs to be.
> + *	item.length = sizeof(info);
> + *
> + *	// Next we fill in the vm_id and ppGTT address of the vma we wish
> + *	// to query, before then firing off the query.
> + *	info.vm_id = vm_id;
> + *	info.offset = gtt_address;
> + *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
> + *	if (err || item.length < 0) ...
> + *
> + *	// If all went well we can now inspect the returned attributes.
> + *	if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
> + */
> +struct __drm_i915_query_vma_info {
> +	/**
> +	 * @vm_id: The given vm id that contains the vma. The id is the value
> +	 * returned by the DRM_I915_GEM_VM_CREATE. See struct
> +	 * drm_i915_gem_vm_control.vm_id.
> +	 */
> +	__u32 vm_id;
> +	/** @pad: MBZ. */
> +	__u32 pad;
> +	/**
> +	 * @offset: The corresponding ppGTT address of the vma which the kernel
> +	 * will use to perform the lookup.
> +	 */
> +	__u64 offset;
> +	/**
> +	 * @attributes: The returned attributes for the given vma.
> +	 *
> +	 * Possible values:
> +	 *
> +	 * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing the
> +	 * vma are currently CPU accessible. If this is not set then the vma is
> +	 * currently backed by I915_MEMORY_CLASS_DEVICE memory, which the CPU
> +	 * cannot directly access(this is only possible on discrete devices with
> +	 * a small BAR). Attempting to MMAP and fault such an object will
> +	 * require the kernel first synchronising any GPU work tied to the
> +	 * object, before then migrating the pages, either to the CPU accessible
> +	 * part of I915_MEMORY_CLASS_DEVICE, or I915_MEMORY_CLASS_SYSTEM, if the
> +	 * placements permit it. See I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
> +	 *
> +	 * Note that this is inherently racy.
> +	 */
> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
> +	__u64 attributes;
> +	/** @rsvd: MBZ */
> +	__u32 rsvd[4];
> +};
> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst b/Documentation/gpu/rfc/i915_small_bar.rst
> new file mode 100644
> index 000000000000..be3d9bcdd86d
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
> @@ -0,0 +1,58 @@
> +==========================
> +I915 Small BAR RFC Section
> +==========================
> +Starting from DG2 we will have resizable BAR support for device local-memory(i.e
> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size might still be
> +smaller than the total probed_size. In such cases, only some subset of
> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the first 256M),
> +while the remainder is only accessible via the GPU.
> +
> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
> +----------------------------------------------
> +New gem_create_ext flag to tell the kernel that a BO will require CPU access.
> +This becomes important when placing an object in I915_MEMORY_CLASS_DEVICE, where
> +underneath the device has a small BAR, meaning only some portion of it is CPU
> +accessible. Without this flag the kernel will assume that CPU access is not
> +required, and prioritize using the non-CPU visible portion of
> +I915_MEMORY_CLASS_DEVICE.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_gem_create_ext
> +
> +probed_cpu_visible_size attribute
> +---------------------------------
> +New struct __drm_i915_memory_region attribute which returns the total size of the
> +CPU accessible portion, for the particular region. This should only be
> +applicable for I915_MEMORY_CLASS_DEVICE.
> +
> +Vulkan will need this as part of creating a separate VkMemoryHeap with the
> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU visible portion,
> +where the total size of the heap needs to be known.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_memory_region_info
> +
> +DRM_I915_QUERY_VMA_INFO query
> +-----------------------------
> +Query the attributes of some vma. Given a vm and GTT offset, find the
> +respective vma, and return its set of attributes. For now we only support
> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
> +currently placed in memory that is accessible by the CPU. This should always be
> +set on devices where the CPU probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE
> +matches the probed_size. If this is not set then CPU faulting the object will
> +likely first require migrating the pages.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_query_vma_info
> +
> +Error Capture restrictions
> +--------------------------
> +With error capture we have two new restrictions:
> +
> +    1) Error capture is best effort on small BAR systems; if the pages are not
> +    CPU accessible, at the time of capture, then the kernel is free to skip
> +    trying to capture them.
> +
> +    2) On discrete we now reject error capture on recoverable contexts. In the
> +    future the kernel may want to blit during error capture, when for example
> +    something is not currently CPU accessible.
> diff --git a/Documentation/gpu/rfc/index.rst b/Documentation/gpu/rfc/index.rst
> index 91e93a705230..5a3bd3924ba6 100644
> --- a/Documentation/gpu/rfc/index.rst
> +++ b/Documentation/gpu/rfc/index.rst
> @@ -23,3 +23,7 @@ host such documentation:
>   .. toctree::
>   
>       i915_scheduler.rst
> +
> +.. toctree::
> +
> +    i915_small_bar.rst



^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-05-02  7:54   ` Lionel Landwerlin
  0 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-02  7:54 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 20/04/2022 20:13, Matthew Auld wrote:
> Add an entry for the new uapi needed for small BAR on DG2+.
>
> v2:
>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>    - Rework error capture interactions, including no longer needing
>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>    - Add probed_cpu_visible_size. (Lionel)
>
> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> Cc: Jordan Justen <jordan.l.justen@intel.com>
> Cc: Kenneth Graunke <kenneth@whitecape.org>
> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
> Cc: mesa-dev@lists.freedesktop.org
> ---
>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>   Documentation/gpu/rfc/index.rst          |   4 +
>   3 files changed, 252 insertions(+)
>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>
> diff --git a/Documentation/gpu/rfc/i915_small_bar.h b/Documentation/gpu/rfc/i915_small_bar.h
> new file mode 100644
> index 000000000000..7bfd0cf44d35
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_small_bar.h
> @@ -0,0 +1,190 @@
> +/**
> + * struct __drm_i915_memory_region_info - Describes one region as known to the
> + * driver.
> + *
> + * Note this is using both struct drm_i915_query_item and struct drm_i915_query.
> + * For this new query we are adding the new query id DRM_I915_QUERY_MEMORY_REGIONS
> + * at &drm_i915_query_item.query_id.
> + */
> +struct __drm_i915_memory_region_info {
> +	/** @region: The class:instance pair encoding */
> +	struct drm_i915_gem_memory_class_instance region;
> +
> +	/** @rsvd0: MBZ */
> +	__u32 rsvd0;
> +
> +	/** @probed_size: Memory probed by the driver (-1 = unknown) */
> +	__u64 probed_size;
> +
> +	/** @unallocated_size: Estimate of memory remaining (-1 = unknown) */
> +	__u64 unallocated_size;
> +
> +	union {
> +		/** @rsvd1: MBZ */
> +		__u64 rsvd1[8];
> +		struct {
> +			/**
> +			 * @probed_cpu_visible_size: Memory probed by the driver
> +			 * that is CPU accessible. (-1 = unknown).
> +			 *
> +			 * This will always be <= @probed_size, and the
> +			 * remainder(if there is any) will not be CPU
> +			 * accessible.
> +			 */
> +			__u64 probed_cpu_visible_size;
> +		};


Trying to implement userspace support in Vulkan for this, I have an 
additional question about the value of probed_cpu_visible_size.

When is it set to -1?

I'm guessing that before there is support for this value it'll be 0 (MBZ).

After that it should either be the entire lmem or something smaller.


-Lionel


> +	};
> +};
> +
> +/**
> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, with added
> + * extension support using struct i915_user_extension.
> + *
> + * Note that new buffer flags should be added here, at least for the stuff that
> + * is immutable. Previously we would have two ioctls, one to create the object
> + * with gem_create, and another to apply various parameters, however this
> + * creates some ambiguity for the params which are considered immutable. Also in
> + * general we're phasing out the various SET/GET ioctls.
> + */
> +struct __drm_i915_gem_create_ext {
> +	/**
> +	 * @size: Requested size for the object.
> +	 *
> +	 * The (page-aligned) allocated size for the object will be returned.
> +	 *
> +	 * Note that for some devices we might have further minimum
> +	 * page-size restrictions(larger than 4K), like for device local-memory.
> +	 * However in general the final size here should always reflect any
> +	 * rounding up, if for example using the I915_GEM_CREATE_EXT_MEMORY_REGIONS
> +	 * extension to place the object in device local-memory.
> +	 */
> +	__u64 size;
> +	/**
> +	 * @handle: Returned handle for the object.
> +	 *
> +	 * Object handles are nonzero.
> +	 */
> +	__u32 handle;
> +	/**
> +	 * @flags: Optional flags.
> +	 *
> +	 * Supported values:
> +	 *
> +	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the kernel that
> +	 * the object will need to be accessed via the CPU.
> +	 *
> +	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
> +	 * only strictly required on platforms where only some of the device
> +	 * memory is directly visible or mappable through the CPU, like on DG2+.
> +	 *
> +	 * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
> +	 * ensure we can always spill the allocation to system memory, if we
> +	 * can't place the object in the mappable part of
> +	 * I915_MEMORY_CLASS_DEVICE.
> +	 *
> +	 * Note that since the kernel only supports flat-CCS on objects that can
> +	 * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
> +	 * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
> +	 * flat-CCS.
> +	 *
> +	 * Without this hint, the kernel will assume that non-mappable
> +	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that the
> +	 * kernel can still migrate the object to the mappable part, as a last
> +	 * resort, if userspace ever CPU faults this object, but this might be
> +	 * expensive, and so ideally should be avoided.
> +	 */
> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
> +	__u32 flags;
> +	/**
> +	 * @extensions: The chain of extensions to apply to this object.
> +	 *
> +	 * This will be useful in the future when we need to support several
> +	 * different extensions, and we need to apply more than one when
> +	 * creating the object. See struct i915_user_extension.
> +	 *
> +	 * If we don't supply any extensions then we get the same old gem_create
> +	 * behaviour.
> +	 *
> +	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
> +	 * struct drm_i915_gem_create_ext_memory_regions.
> +	 *
> +	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
> +	 * struct drm_i915_gem_create_ext_protected_content.
> +	 */
> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
> +	__u64 extensions;
> +};
> +
> +#define DRM_I915_QUERY_VMA_INFO	5
> +
> +/**
> + * struct __drm_i915_query_vma_info
> + *
> + * Given a vm and GTT address, lookup the corresponding vma, returning its set
> + * of attributes.
> + *
> + * .. code-block:: C
> + *
> + *	struct drm_i915_query_vma_info info = {};
> + *	struct drm_i915_query_item item = {
> + *		.data_ptr = (uintptr_t)&info,
> + *		.query_id = DRM_I915_QUERY_VMA_INFO,
> + *	};
> + *	struct drm_i915_query query = {
> + *		.num_items = 1,
> + *		.items_ptr = (uintptr_t)&item,
> + *	};
> + *	int err;
> + *
> + *	// Unlike some other types of queries, there is no need to first query
> + *	// the size of the data_ptr blob here, since we already know ahead of
> + *	// time how big this needs to be.
> + *	item.length = sizeof(info);
> + *
> + *	// Next we fill in the vm_id and ppGTT address of the vma we wish
> + *	// to query, before then firing off the query.
> + *	info.vm_id = vm_id;
> + *	info.offset = gtt_address;
> + *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
> + *	if (err || item.length < 0) ...
> + *
> + *	// If all went well we can now inspect the returned attributes.
> + *	if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
> + */
> +struct __drm_i915_query_vma_info {
> +	/**
> +	 * @vm_id: The given vm id that contains the vma. The id is the value
> +	 * returned by the DRM_I915_GEM_VM_CREATE. See struct
> +	 * drm_i915_gem_vm_control.vm_id.
> +	 */
> +	__u32 vm_id;
> +	/** @pad: MBZ. */
> +	__u32 pad;
> +	/**
> +	 * @offset: The corresponding ppGTT address of the vma which the kernel
> +	 * will use to perform the lookup.
> +	 */
> +	__u64 offset;
> +	/**
> +	 * @attributes: The returned attributes for the given vma.
> +	 *
> +	 * Possible values:
> +	 *
> +	 * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing the
> +	 * vma are currently CPU accessible. If this is not set then the vma is
> +	 * currently backed by I915_MEMORY_CLASS_DEVICE memory, which the CPU
> +	 * cannot directly access(this is only possible on discrete devices with
> +	 * a small BAR). Attempting to MMAP and fault such an object will
> +	 * require the kernel first synchronising any GPU work tied to the
> +	 * object, before then migrating the pages, either to the CPU accessible
> +	 * part of I915_MEMORY_CLASS_DEVICE, or I915_MEMORY_CLASS_SYSTEM, if the
> +	 * placements permit it. See I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
> +	 *
> +	 * Note that this is inherently racy.
> +	 */
> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
> +	__u64 attributes;
> +	/** @rsvd: MBZ */
> +	__u32 rsvd[4];
> +};
> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst b/Documentation/gpu/rfc/i915_small_bar.rst
> new file mode 100644
> index 000000000000..be3d9bcdd86d
> --- /dev/null
> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
> @@ -0,0 +1,58 @@
> +==========================
> +I915 Small BAR RFC Section
> +==========================
> +Starting from DG2 we will have resizable BAR support for device local-memory(i.e
> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size might still be
> +smaller than the total probed_size. In such cases, only some subset of
> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the first 256M),
> +while the remainder is only accessible via the GPU.
> +
> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
> +----------------------------------------------
> +New gem_create_ext flag to tell the kernel that a BO will require CPU access.
> +This becomes important when placing an object in I915_MEMORY_CLASS_DEVICE, where
> +underneath the device has a small BAR, meaning only some portion of it is CPU
> +accessible. Without this flag the kernel will assume that CPU access is not
> +required, and prioritize using the non-CPU visible portion of
> +I915_MEMORY_CLASS_DEVICE.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_gem_create_ext
> +
> +probed_cpu_visible_size attribute
> +---------------------------------
> +New struct __drm_i915_memory_region attribute which returns the total size of the
> +CPU accessible portion, for the particular region. This should only be
> +applicable for I915_MEMORY_CLASS_DEVICE.
> +
> +Vulkan will need this as part of creating a separate VkMemoryHeap with the
> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU visible portion,
> +where the total size of the heap needs to be known.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_memory_region_info
> +
> +DRM_I915_QUERY_VMA_INFO query
> +-----------------------------
> +Query the attributes of some vma. Given a vm and GTT offset, find the
> +respective vma, and return its set of attributes. For now we only support
> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
> +currently placed in memory that is accessible by the CPU. This should always be
> +set on devices where the CPU probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE
> +matches the probed_size. If this is not set then CPU faulting the object will
> +likely first require migrating the pages.
> +
> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> +   :functions: __drm_i915_query_vma_info
> +
> +Error Capture restrictions
> +--------------------------
> +With error capture we have two new restrictions:
> +
> +    1) Error capture is best effort on small BAR systems; if the pages are not
> +    CPU accessible, at the time of capture, then the kernel is free to skip
> +    trying to capture them.
> +
> +    2) On discrete we now reject error capture on recoverable contexts. In the
> +    future the kernel may want to blit during error capture, when for example
> +    something is not currently CPU accessible.
> diff --git a/Documentation/gpu/rfc/index.rst b/Documentation/gpu/rfc/index.rst
> index 91e93a705230..5a3bd3924ba6 100644
> --- a/Documentation/gpu/rfc/index.rst
> +++ b/Documentation/gpu/rfc/index.rst
> @@ -23,3 +23,7 @@ host such documentation:
>   .. toctree::
>   
>       i915_scheduler.rst
> +
> +.. toctree::
> +
> +    i915_small_bar.rst



^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-02  7:54   ` [Intel-gfx] " Lionel Landwerlin
@ 2022-05-02  8:53     ` Lionel Landwerlin
  -1 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-02  8:53 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Jordan Justen, dri-devel, Kenneth Graunke,
	Jon Bloomfield, Daniel Vetter, mesa-dev, Akeem G Abodunrin

On 02/05/2022 10:54, Lionel Landwerlin wrote:
> On 20/04/2022 20:13, Matthew Auld wrote:
>> Add an entry for the new uapi needed for small BAR on DG2+.
>>
>> v2:
>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>    - Rework error capture interactions, including no longer needing
>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>    - Add probed_cpu_visible_size. (Lionel)
>>
>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>> Cc: mesa-dev@lists.freedesktop.org
>> ---
>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>   Documentation/gpu/rfc/index.rst          |   4 +
>>   3 files changed, 252 insertions(+)
>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>
>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>> b/Documentation/gpu/rfc/i915_small_bar.h
>> new file mode 100644
>> index 000000000000..7bfd0cf44d35
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>> @@ -0,0 +1,190 @@
>> +/**
>> + * struct __drm_i915_memory_region_info - Describes one region as 
>> known to the
>> + * driver.
>> + *
>> + * Note this is using both struct drm_i915_query_item and struct 
>> drm_i915_query.
>> + * For this new query we are adding the new query id 
>> DRM_I915_QUERY_MEMORY_REGIONS
>> + * at &drm_i915_query_item.query_id.
>> + */
>> +struct __drm_i915_memory_region_info {
>> +    /** @region: The class:instance pair encoding */
>> +    struct drm_i915_gem_memory_class_instance region;
>> +
>> +    /** @rsvd0: MBZ */
>> +    __u32 rsvd0;
>> +
>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>> +    __u64 probed_size;
>> +
>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>> unknown) */
>> +    __u64 unallocated_size;
>> +
>> +    union {
>> +        /** @rsvd1: MBZ */
>> +        __u64 rsvd1[8];
>> +        struct {
>> +            /**
>> +             * @probed_cpu_visible_size: Memory probed by the driver
>> +             * that is CPU accessible. (-1 = unknown).
>> +             *
>> +             * This will always be <= @probed_size, and the
>> +             * remainder(if there is any) will not be CPU
>> +             * accessible.
>> +             */
>> +            __u64 probed_cpu_visible_size;
>> +        };
>
>
> Trying to implement userspace support in Vulkan for this, I have an 
> additional question about the value of probed_cpu_visible_size.
>
> When is it set to -1?
>
> I'm guessing before there is support for this value it'll be 0 (MBZ).
>
> After after it should either be the entire lmem or something smaller.
>
>
> -Lionel


Another pain point of this new uAPI: previously we could query the 
unallocated size for each heap.

Now lmem is effectively divided into 2 heaps, but unallocated_size is 
tracking allocations from both parts of lmem.

Is adding a new I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE out of the question?


-Lionel
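
To illustrate the pain point, the heap split a Vulkan driver would derive
from these fields might look like the sketch below. The sizes can be split
per heap, but unallocated_size cannot, which is exactly the problem raised
above (illustrative only, assuming probed_cpu_visible_size lands as
proposed):

#include <stdint.h>

struct lmem_heaps {
	uint64_t mappable;	/* VkMemoryHeap advertised as HOST_VISIBLE */
	uint64_t non_mappable;	/* device-only VkMemoryHeap */
};

/* Derive the two heap sizes from one I915_MEMORY_CLASS_DEVICE region.
 * There is no matching per-heap unallocated/budget value to go with this. */
static struct lmem_heaps lmem_split(uint64_t probed_size,
				    uint64_t probed_cpu_visible_size)
{
	struct lmem_heaps h = {
		.mappable = probed_cpu_visible_size,
		.non_mappable = probed_size - probed_cpu_visible_size,
	};
	return h;
}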


>
>
>> +    };
>> +};
>> +
>> +/**
>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, 
>> with added
>> + * extension support using struct i915_user_extension.
>> + *
>> + * Note that new buffer flags should be added here, at least for the 
>> stuff that
>> + * is immutable. Previously we would have two ioctls, one to create 
>> the object
>> + * with gem_create, and another to apply various parameters, however 
>> this
>> + * creates some ambiguity for the params which are considered 
>> immutable. Also in
>> + * general we're phasing out the various SET/GET ioctls.
>> + */
>> +struct __drm_i915_gem_create_ext {
>> +    /**
>> +     * @size: Requested size for the object.
>> +     *
>> +     * The (page-aligned) allocated size for the object will be 
>> returned.
>> +     *
>> +     * Note that for some devices we might have further minimum
>> +     * page-size restrictions (larger than 4K), like for device
>> local-memory.
>> +     * However in general the final size here should always reflect any
>> +     * rounding up, if for example using the 
>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>> +     * extension to place the object in device local-memory.
>> +     */
>> +    __u64 size;
>> +    /**
>> +     * @handle: Returned handle for the object.
>> +     *
>> +     * Object handles are nonzero.
>> +     */
>> +    __u32 handle;
>> +    /**
>> +     * @flags: Optional flags.
>> +     *
>> +     * Supported values:
>> +     *
>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>> kernel that
>> +     * the object will need to be accessed via the CPU.
>> +     *
>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
>> +     * only strictly required on platforms where only some of the 
>> device
>> +     * memory is directly visible or mappable through the CPU, like 
>> on DG2+.
>> +     *
>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>> +     * ensure we can always spill the allocation to system memory, 
>> if we
>> +     * can't place the object in the mappable part of
>> +     * I915_MEMORY_CLASS_DEVICE.
>> +     *
>> +     * Note that since the kernel only supports flat-CCS on objects 
>> that can
>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>> +     * flat-CCS.
>> +     *
>> +     * Without this hint, the kernel will assume that non-mappable
>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>> that the
>> +     * kernel can still migrate the object to the mappable part, as 
>> a last
>> +     * resort, if userspace ever CPU faults this object, but this 
>> might be
>> +     * expensive, and so ideally should be avoided.
>> +     */
>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>> +    __u32 flags;
>> +    /**
>> +     * @extensions: The chain of extensions to apply to this object.
>> +     *
>> +     * This will be useful in the future when we need to support 
>> several
>> +     * different extensions, and we need to apply more than one when
>> +     * creating the object. See struct i915_user_extension.
>> +     *
>> +     * If we don't supply any extensions then we get the same old 
>> gem_create
>> +     * behaviour.
>> +     *
>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>> +     * struct drm_i915_gem_create_ext_memory_regions.
>> +     *
>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>> +     * struct drm_i915_gem_create_ext_protected_content.
>> +     */
>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>> +    __u64 extensions;
>> +};
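
For illustration, a minimal sketch of what creating such an object might look
like from userspace, assuming the proposed NEEDS_CPU_ACCESS flag is merged
as-is (the placements and memory-regions extension are the existing uapi;
needs <sys/ioctl.h>, <errno.h>, <stdint.h> and <drm/i915_drm.h>):

static int create_cpu_visible_lmem_bo(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_i915_gem_memory_class_instance placements[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
	};
	struct drm_i915_gem_create_ext_memory_regions regions = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = 2,
		.regions = (uintptr_t)placements,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
		.extensions = (uintptr_t)&regions,
	};

	/* I915_MEMORY_CLASS_SYSTEM must be in the placement list, per the
	 * documentation above, so the kernel can always spill to smem. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return -errno;

	*handle = create.handle;	/* create.size may have been rounded up */
	return 0;
}
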
>> +
>> +#define DRM_I915_QUERY_VMA_INFO    5
>> +
>> +/**
>> + * struct __drm_i915_query_vma_info
>> + *
>> + * Given a vm and GTT address, lookup the corresponding vma, 
>> returning its set
>> + * of attributes.
>> + *
>> + * .. code-block:: C
>> + *
>> + *    struct drm_i915_query_vma_info info = {};
>> + *    struct drm_i915_query_item item = {
>> + *        .data_ptr = (uintptr_t)&info,
>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>> + *    };
>> + *    struct drm_i915_query query = {
>> + *        .num_items = 1,
>> + *        .items_ptr = (uintptr_t)&item,
>> + *    };
>> + *    int err;
>> + *
>> + *    // Unlike some other types of queries, there is no need to 
>> first query
>> + *    // the size of the data_ptr blob here, since we already know 
>> ahead of
>> + *    // time how big this needs to be.
>> + *    item.length = sizeof(info);
>> + *
>> + *    // Next we fill in the vm_id and ppGTT address of the vma we wish
>> + *    // to query, before then firing off the query.
>> + *    info.vm_id = vm_id;
>> + *    info.offset = gtt_address;
>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>> + *    if (err || item.length < 0) ...
>> + *
>> + *    // If all went well we can now inspect the returned attributes.
>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>> + */
>> +struct __drm_i915_query_vma_info {
>> +    /**
>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>> value
>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>> +     * drm_i915_gem_vm_control.vm_id.
>> +     */
>> +    __u32 vm_id;
>> +    /** @pad: MBZ. */
>> +    __u32 pad;
>> +    /**
>> +     * @offset: The corresponding ppGTT address of the vma which the 
>> kernel
>> +     * will use to perform the lookup.
>> +     */
>> +    __u64 offset;
>> +    /**
>> +     * @attributes: The returned attributes for the given vma.
>> +     *
>> +     * Possible values:
>> +     *
>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>> backing the
>> +     * vma are currently CPU accessible. If this is not set then the 
>> vma is
>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>> the CPU
>> +     * cannot directly access(this is only possible on discrete 
>> devices with
>> +     * a small BAR). Attempting to MMAP and fault such an object will
>> +     * require the kernel first synchronising any GPU work tied to the
>> +     * object, before then migrating the pages, either to the CPU 
>> accessible
>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>> I915_MEMORY_CLASS_SYSTEM, if the
>> +     * placements permit it. See 
>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>> +     *
>> +     * Note that this is inherently racy.
>> +     */
>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>> +    __u64 attributes;
>> +    /** @rsvd: MBZ */
>> +    __u32 rsvd[4];
>> +};
>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>> b/Documentation/gpu/rfc/i915_small_bar.rst
>> new file mode 100644
>> index 000000000000..be3d9bcdd86d
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>> @@ -0,0 +1,58 @@
>> +==========================
>> +I915 Small BAR RFC Section
>> +==========================
>> +Starting from DG2 we will have resizable BAR support for device 
>> local-memory(i.e
>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>> might still be
>> +smaller than the total probed_size. In such cases, only some subset of
>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>> first 256M),
>> +while the remainder is only accessible via the GPU.
>> +
>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>> +----------------------------------------------
>> +New gem_create_ext flag to tell the kernel that a BO will require 
>> CPU access.
>> +This becomes important when placing an object in 
>> I915_MEMORY_CLASS_DEVICE, where
>> +underneath the device has a small BAR, meaning only some portion of 
>> it is CPU
>> +accessible. Without this flag the kernel will assume that CPU access 
>> is not
>> +required, and prioritize using the non-CPU visible portion of
>> +I915_MEMORY_CLASS_DEVICE.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_gem_create_ext
>> +
>> +probed_cpu_visible_size attribute
>> +---------------------------------
>> +New struct __drm_i915_memory_region_info attribute which returns the total
>> size of the
>> +CPU accessible portion, for the particular region. This should only be
>> +applicable for I915_MEMORY_CLASS_DEVICE.
>> +
>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>> with the
>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>> visible portion,
>> +where the total size of the heap needs to be known.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_memory_region_info
>> +
>> +DRM_I915_QUERY_VMA_INFO query
>> +-----------------------------
>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>> +respective vma, and return its set of attributes. For now we only 
>> support
>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>> +currently placed in memory that is accessible by the CPU. This 
>> should always be
>> +set on devices where the CPU probed_cpu_visible_size of 
>> I915_MEMORY_CLASS_DEVICE
>> +matches the probed_size. If this is not set then CPU faulting the 
>> object will
>> +likely first require migrating the pages.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_query_vma_info
>> +
>> +Error Capture restrictions
>> +--------------------------
>> +With error capture we have two new restrictions:
>> +
>> +    1) Error capture is best effort on small BAR systems; if the 
>> pages are not
>> +    CPU accessible, at the time of capture, then the kernel is free 
>> to skip
>> +    trying to capture them.
>> +
>> +    2) On discrete we now reject error capture on recoverable 
>> contexts. In the
>> +    future the kernel may want to blit during error capture, when 
>> for example
>> +    something is not currently CPU accessible.
>> diff --git a/Documentation/gpu/rfc/index.rst 
>> b/Documentation/gpu/rfc/index.rst
>> index 91e93a705230..5a3bd3924ba6 100644
>> --- a/Documentation/gpu/rfc/index.rst
>> +++ b/Documentation/gpu/rfc/index.rst
>> @@ -23,3 +23,7 @@ host such documentation:
>>   .. toctree::
>>         i915_scheduler.rst
>> +
>> +.. toctree::
>> +
>> +    i915_small_bar.rst
>
>


^ permalink raw reply	[flat|nested] 50+ messages in thread

* RE: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-02  7:54   ` [Intel-gfx] " Lionel Landwerlin
@ 2022-05-02 17:58     ` Abodunrin, Akeem G
  -1 siblings, 0 replies; 50+ messages in thread
From: Abodunrin, Akeem G @ 2022-05-02 17:58 UTC (permalink / raw)
  To: Landwerlin, Lionel G, Auld, Matthew, intel-gfx
  Cc: Thomas Hellström, Justen, Jordan L, dri-devel,
	Kenneth Graunke, Bloomfield, Jon, Daniel Vetter, mesa-dev



> -----Original Message-----
> From: Landwerlin, Lionel G <lionel.g.landwerlin@intel.com>
> Sent: Monday, May 2, 2022 12:55 AM
> To: Auld, Matthew <matthew.auld@intel.com>; intel-gfx@lists.freedesktop.org
> Cc: dri-devel@lists.freedesktop.org; Thomas Hellström
> <thomas.hellstrom@linux.intel.com>; Bloomfield, Jon
> <jon.bloomfield@intel.com>; Daniel Vetter <daniel.vetter@ffwll.ch>; Justen,
> Jordan L <jordan.l.justen@intel.com>; Kenneth Graunke
> <kenneth@whitecape.org>; Abodunrin, Akeem G
> <akeem.g.abodunrin@intel.com>; mesa-dev@lists.freedesktop.org
> Subject: Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
> 
> On 20/04/2022 20:13, Matthew Auld wrote:
> > Add an entry for the new uapi needed for small BAR on DG2+.
> >
> > v2:
> >    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
> >    - Rework error capture interactions, including no longer needing
> >      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
> >    - Add probed_cpu_visible_size. (Lionel)
> >
> > Signed-off-by: Matthew Auld <matthew.auld@intel.com>
> > Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
> > Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
> > Cc: Jon Bloomfield <jon.bloomfield@intel.com>
> > Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
> > Cc: Jordan Justen <jordan.l.justen@intel.com>
> > Cc: Kenneth Graunke <kenneth@whitecape.org>
> > Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
> > Cc: mesa-dev@lists.freedesktop.org
> > ---
> >   Documentation/gpu/rfc/i915_small_bar.h   | 190
> +++++++++++++++++++++++
> >   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
> >   Documentation/gpu/rfc/index.rst          |   4 +
> >   3 files changed, 252 insertions(+)
> >   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
> >   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
> >
> > diff --git a/Documentation/gpu/rfc/i915_small_bar.h
> > b/Documentation/gpu/rfc/i915_small_bar.h
> > new file mode 100644
> > index 000000000000..7bfd0cf44d35
> > --- /dev/null
> > +++ b/Documentation/gpu/rfc/i915_small_bar.h
> > @@ -0,0 +1,190 @@
> > +/**
> > + * struct __drm_i915_memory_region_info - Describes one region as
> > +known to the
> > + * driver.
> > + *
> > + * Note this is using both struct drm_i915_query_item and struct
> drm_i915_query.
> > + * For this new query we are adding the new query id
> > +DRM_I915_QUERY_MEMORY_REGIONS
> > + * at &drm_i915_query_item.query_id.
> > + */
> > +struct __drm_i915_memory_region_info {
> > +	/** @region: The class:instance pair encoding */
> > +	struct drm_i915_gem_memory_class_instance region;
> > +
> > +	/** @rsvd0: MBZ */
> > +	__u32 rsvd0;
> > +
> > +	/** @probed_size: Memory probed by the driver (-1 = unknown) */
> > +	__u64 probed_size;
> > +
> > +	/** @unallocated_size: Estimate of memory remaining (-1 = unknown)
> */
> > +	__u64 unallocated_size;
> > +
> > +	union {
> > +		/** @rsvd1: MBZ */
> > +		__u64 rsvd1[8];
> > +		struct {
> > +			/**
> > +			 * @probed_cpu_visible_size: Memory probed by the
> driver
> > +			 * that is CPU accessible. (-1 = unknown).
> > +			 *
> > +			 * This will always be <= @probed_size, and the
> > +			 * remainder (if there is any) will not be CPU
> > +			 * accessible.
> > +			 */
> > +			__u64 probed_cpu_visible_size;
> > +		};
> 
> 
> Trying to implement userspace support in Vulkan for this, I have an additional
> question about the value of probed_cpu_visible_size.
> 
> When is it set to -1?
I believe it is set to -1 if it is unknown, and/or not CPU accessible...

Cheers!
~Akeem
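
Putting those guesses together, a defensive interpretation on the userspace
side might look like the sketch below. This only reflects the assumptions in
this thread (0 = field not yet supported, -1 = unknown), not confirmed kernel
behaviour, and uses the RFC documentation struct name:

static uint64_t lmem_mappable_size(const struct __drm_i915_memory_region_info *r)
{
	if (r->probed_cpu_visible_size == 0)
		return r->probed_size;		/* old kernel: field is MBZ, assume all mappable */
	if (r->probed_cpu_visible_size == (__u64)-1)
		return 0;			/* unknown: assume the worst case */
	return r->probed_cpu_visible_size;	/* <= probed_size by definition */
}
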
> 
> I'm guessing before there is support for this value it'll be 0 (MBZ).
> 
> After that it should either be the entire lmem or something smaller.
> 
> 
> -Lionel
> 
> 
> > +	};
> > +};
> > +
> > +/**
> > + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour,
> > +with added
> > + * extension support using struct i915_user_extension.
> > + *
> > + * Note that new buffer flags should be added here, at least for the
> > +stuff that
> > + * is immutable. Previously we would have two ioctls, one to create
> > +the object
> > + * with gem_create, and another to apply various parameters, however
> > +this
> > + * creates some ambiguity for the params which are considered
> > +immutable. Also in
> > + * general we're phasing out the various SET/GET ioctls.
> > + */
> > +struct __drm_i915_gem_create_ext {
> > +	/**
> > +	 * @size: Requested size for the object.
> > +	 *
> > +	 * The (page-aligned) allocated size for the object will be returned.
> > +	 *
> > +	 * Note that for some devices we might have further minimum
> > +	 * page-size restrictions (larger than 4K), like for device local-memory.
> > +	 * However in general the final size here should always reflect any
> > +	 * rounding up, if for example using the
> I915_GEM_CREATE_EXT_MEMORY_REGIONS
> > +	 * extension to place the object in device local-memory.
> > +	 */
> > +	__u64 size;
> > +	/**
> > +	 * @handle: Returned handle for the object.
> > +	 *
> > +	 * Object handles are nonzero.
> > +	 */
> > +	__u32 handle;
> > +	/**
> > +	 * @flags: Optional flags.
> > +	 *
> > +	 * Supported values:
> > +	 *
> > +	 * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the
> kernel that
> > +	 * the object will need to be accessed via the CPU.
> > +	 *
> > +	 * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE,
> and
> > +	 * only strictly required on platforms where only some of the device
> > +	 * memory is directly visible or mappable through the CPU, like on DG2+.
> > +	 *
> > +	 * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM,
> to
> > +	 * ensure we can always spill the allocation to system memory, if we
> > +	 * can't place the object in the mappable part of
> > +	 * I915_MEMORY_CLASS_DEVICE.
> > +	 *
> > +	 * Note that since the kernel only supports flat-CCS on objects that can
> > +	 * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
> don't
> > +	 * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
> together with
> > +	 * flat-CCS.
> > +	 *
> > +	 * Without this hint, the kernel will assume that non-mappable
> > +	 * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that
> the
> > +	 * kernel can still migrate the object to the mappable part, as a last
> > +	 * resort, if userspace ever CPU faults this object, but this might be
> > +	 * expensive, and so ideally should be avoided.
> > +	 */
> > +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
> > +	__u32 flags;
> > +	/**
> > +	 * @extensions: The chain of extensions to apply to this object.
> > +	 *
> > +	 * This will be useful in the future when we need to support several
> > +	 * different extensions, and we need to apply more than one when
> > +	 * creating the object. See struct i915_user_extension.
> > +	 *
> > +	 * If we don't supply any extensions then we get the same old
> gem_create
> > +	 * behaviour.
> > +	 *
> > +	 * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
> > +	 * struct drm_i915_gem_create_ext_memory_regions.
> > +	 *
> > +	 * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
> > +	 * struct drm_i915_gem_create_ext_protected_content.
> > +	 */
> > +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0 #define
> > +I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
> > +	__u64 extensions;
> > +};
> > +
> > +#define DRM_I915_QUERY_VMA_INFO	5
> > +
> > +/**
> > + * struct __drm_i915_query_vma_info
> > + *
> > + * Given a vm and GTT address, lookup the corresponding vma,
> > +returning its set
> > + * of attributes.
> > + *
> > + * .. code-block:: C
> > + *
> > + *	struct drm_i915_query_vma_info info = {};
> > + *	struct drm_i915_query_item item = {
> > + *		.data_ptr = (uintptr_t)&info,
> > + *		.query_id = DRM_I915_QUERY_VMA_INFO,
> > + *	};
> > + *	struct drm_i915_query query = {
> > + *		.num_items = 1,
> > + *		.items_ptr = (uintptr_t)&item,
> > + *	};
> > + *	int err;
> > + *
> > + *	// Unlike some other types of queries, there is no need to first query
> > + *	// the size of the data_ptr blob here, since we already know ahead of
> > + *	// time how big this needs to be.
> > + *	item.length = sizeof(info);
> > + *
> > + *	// Next we fill in the vm_id and ppGTT address of the vma we wish
> > + *	// to query, before then firing off the query.
> > + *	info.vm_id = vm_id;
> > + *	info.offset = gtt_address;
> > + *	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
> > + *	if (err || item.length < 0) ...
> > + *
> > + *	// If all went well we can now inspect the returned attributes.
> > + *	if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
> > + */
> > +struct __drm_i915_query_vma_info {
> > +	/**
> > +	 * @vm_id: The given vm id that contains the vma. The id is the value
> > +	 * returned by the DRM_I915_GEM_VM_CREATE. See struct
> > +	 * drm_i915_gem_vm_control.vm_id.
> > +	 */
> > +	__u32 vm_id;
> > +	/** @pad: MBZ. */
> > +	__u32 pad;
> > +	/**
> > +	 * @offset: The corresponding ppGTT address of the vma which the
> kernel
> > +	 * will use to perform the lookup.
> > +	 */
> > +	__u64 offset;
> > +	/**
> > +	 * @attributes: The returned attributes for the given vma.
> > +	 *
> > +	 * Possible values:
> > +	 *
> > +	 * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages
> backing the
> > +	 * vma are currently CPU accessible. If this is not set then the vma is
> > +	 * currently backed by I915_MEMORY_CLASS_DEVICE memory, which
> the CPU
> > +	 * cannot directly access(this is only possible on discrete devices with
> > +	 * a small BAR). Attempting to MMAP and fault such an object will
> > +	 * require the kernel first synchronising any GPU work tied to the
> > +	 * object, before then migrating the pages, either to the CPU accessible
> > +	 * part of I915_MEMORY_CLASS_DEVICE, or
> I915_MEMORY_CLASS_SYSTEM, if the
> > +	 * placements permit it. See
> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
> > +	 *
> > +	 * Note that this is inherently racy.
> > +	 */
> > +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
> > +	__u64 attributes;
> > +	/** @rsvd: MBZ */
> > +	__u32 rsvd[4];
> > +};
> > diff --git a/Documentation/gpu/rfc/i915_small_bar.rst
> > b/Documentation/gpu/rfc/i915_small_bar.rst
> > new file mode 100644
> > index 000000000000..be3d9bcdd86d
> > --- /dev/null
> > +++ b/Documentation/gpu/rfc/i915_small_bar.rst
> > @@ -0,0 +1,58 @@
> > +==========================
> > +I915 Small BAR RFC Section
> > +==========================
> > +Starting from DG2 we will have resizable BAR support for device
> > +local-memory(i.e I915_MEMORY_CLASS_DEVICE), but in some cases the
> > +final BAR size might still be smaller than the total probed_size. In
> > +such cases, only some subset of I915_MEMORY_CLASS_DEVICE will be CPU
> > +accessible(for example the first 256M), while the remainder is only accessible
> via the GPU.
> > +
> > +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
> > +----------------------------------------------
> > +New gem_create_ext flag to tell the kernel that a BO will require CPU access.
> > +This becomes important when placing an object in
> > +I915_MEMORY_CLASS_DEVICE, where underneath the device has a small
> > +BAR, meaning only some portion of it is CPU accessible. Without this
> > +flag the kernel will assume that CPU access is not required, and
> > +prioritize using the non-CPU visible portion of
> I915_MEMORY_CLASS_DEVICE.
> > +
> > +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> > +   :functions: __drm_i915_gem_create_ext
> > +
> > +probed_cpu_visible_size attribute
> > +---------------------------------
> > +New struct __drm_i915_memory_region_info attribute which returns the total
> > +size of the CPU accessible portion, for the particular region. This
> > +should only be applicable for I915_MEMORY_CLASS_DEVICE.
> > +
> > +Vulkan will need this as part of creating a separate VkMemoryHeap
> > +with the VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the
> > +CPU visible portion, where the total size of the heap needs to be known.
> > +
> > +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> > +   :functions: __drm_i915_memory_region_info
> > +
> > +DRM_I915_QUERY_VMA_INFO query
> > +-----------------------------
> > +Query the attributes of some vma. Given a vm and GTT offset, find the
> > +respective vma, and return its set of attributes. For now we only
> > +support DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the
> > +object/vma is currently placed in memory that is accessible by the
> > +CPU. This should always be set on devices where the CPU
> > +probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE matches the
> > +probed_size. If this is not set then CPU faulting the object will likely first
> require migrating the pages.
> > +
> > +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
> > +   :functions: __drm_i915_query_vma_info
> > +
> > +Error Capture restrictions
> > +--------------------------
> > +With error capture we have two new restrictions:
> > +
> > +    1) Error capture is best effort on small BAR systems; if the pages are not
> > +    CPU accessible, at the time of capture, then the kernel is free to skip
> > +    trying to capture them.
> > +
> > +    2) On discrete we now reject error capture on recoverable contexts. In the
> > +    future the kernel may want to blit during error capture, when for example
> > +    something is not currently CPU accessible.
> > diff --git a/Documentation/gpu/rfc/index.rst
> > b/Documentation/gpu/rfc/index.rst index 91e93a705230..5a3bd3924ba6
> > 100644
> > --- a/Documentation/gpu/rfc/index.rst
> > +++ b/Documentation/gpu/rfc/index.rst
> > @@ -23,3 +23,7 @@ host such documentation:
> >   .. toctree::
> >
> >       i915_scheduler.rst
> > +
> > +.. toctree::
> > +
> > +    i915_small_bar.rst
> 


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-02 17:58     ` [Intel-gfx] " Abodunrin, Akeem G
@ 2022-05-02 18:03       ` Lionel Landwerlin
  -1 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-02 18:03 UTC (permalink / raw)
  To: Abodunrin, Akeem G, Auld, Matthew, intel-gfx
  Cc: Thomas Hellström, Justen, Jordan L, dri-devel,
	Kenneth Graunke, Bloomfield, Jon, Daniel Vetter, mesa-dev

On 02/05/2022 20:58, Abodunrin, Akeem G wrote:
>
>> -----Original Message-----
>> From: Landwerlin, Lionel G <lionel.g.landwerlin@intel.com>
>> Sent: Monday, May 2, 2022 12:55 AM
>> To: Auld, Matthew <matthew.auld@intel.com>; intel-gfx@lists.freedesktop.org
>> Cc: dri-devel@lists.freedesktop.org; Thomas Hellström
>> <thomas.hellstrom@linux.intel.com>; Bloomfield, Jon
>> <jon.bloomfield@intel.com>; Daniel Vetter <daniel.vetter@ffwll.ch>; Justen,
>> Jordan L <jordan.l.justen@intel.com>; Kenneth Graunke
>> <kenneth@whitecape.org>; Abodunrin, Akeem G
>> <akeem.g.abodunrin@intel.com>; mesa-dev@lists.freedesktop.org
>> Subject: Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
>>
>> On 20/04/2022 20:13, Matthew Auld wrote:
>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>
>>> v2:
>>>     - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>     - Rework error capture interactions, including no longer needing
>>>       NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>     - Add probed_cpu_visible_size. (Lionel)
>>>
>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>> Cc: mesa-dev@lists.freedesktop.org
>>> ---
>>>    Documentation/gpu/rfc/i915_small_bar.h   | 190
>> +++++++++++++++++++++++
>>>    Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>    Documentation/gpu/rfc/index.rst          |   4 +
>>>    3 files changed, 252 insertions(+)
>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h
>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>> new file mode 100644
>>> index 000000000000..7bfd0cf44d35
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>> @@ -0,0 +1,190 @@
>>> +/**
>>> + * struct __drm_i915_memory_region_info - Describes one region as
>>> +known to the
>>> + * driver.
>>> + *
>>> + * Note this is using both struct drm_i915_query_item and struct
>> drm_i915_query.
>>> + * For this new query we are adding the new query id
>>> +DRM_I915_QUERY_MEMORY_REGIONS
>>> + * at &drm_i915_query_item.query_id.
>>> + */
>>> +struct __drm_i915_memory_region_info {
>>> +   /** @region: The class:instance pair encoding */
>>> +   struct drm_i915_gem_memory_class_instance region;
>>> +
>>> +   /** @rsvd0: MBZ */
>>> +   __u32 rsvd0;
>>> +
>>> +   /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>> +   __u64 probed_size;
>>> +
>>> +   /** @unallocated_size: Estimate of memory remaining (-1 = unknown)
>> */
>>> +   __u64 unallocated_size;
>>> +
>>> +   union {
>>> +           /** @rsvd1: MBZ */
>>> +           __u64 rsvd1[8];
>>> +           struct {
>>> +                   /**
>>> +                    * @probed_cpu_visible_size: Memory probed by the
>> driver
>>> +                    * that is CPU accessible. (-1 = unknown).
>>> +                    *
>>> +                    * This will always be <= @probed_size, and the
>>> +                    * remainder (if there is any) will not be CPU
>>> +                    * accessible.
>>> +                    */
>>> +                   __u64 probed_cpu_visible_size;
>>> +           };
>>
>> Trying to implement userspace support in Vulkan for this, I have an additional
>> question about the value of probed_cpu_visible_size.
>>
>> When is it set to -1?
> I believe it is set to -1 if it is unknown, and/or not CPU accessible...
>
> Cheers!
> ~Akeem


So what should I expect on system memory?

What value is returned when all of probed_size is CPU visible on local 
memory?


Thanks,


-Lionel
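
For reference, the decision those answers would feed into looks roughly like
the sketch below on the Vulkan side. vk_add_heap() and the VK_HEAP_* flags are
illustrative placeholders, and the smem and full-BAR cases are exactly the
ones in question:

static void setup_heaps_for_region(const struct __drm_i915_memory_region_info *r)
{
	if (r->region.memory_class == I915_MEMORY_CLASS_SYSTEM) {
		/* Open question: is probed_cpu_visible_size meaningful for
		 * smem, or simply left at 0/-1? Presumably smem is always
		 * host visible. */
		vk_add_heap(r->probed_size, VK_HEAP_HOST_VISIBLE);
	} else if (r->probed_cpu_visible_size == r->probed_size) {
		/* Full BAR: a single host-visible, device-local heap. */
		vk_add_heap(r->probed_size,
			    VK_HEAP_DEVICE_LOCAL | VK_HEAP_HOST_VISIBLE);
	} else {
		/* Small BAR: split lmem into mappable and non-mappable heaps. */
		vk_add_heap(r->probed_cpu_visible_size,
			    VK_HEAP_DEVICE_LOCAL | VK_HEAP_HOST_VISIBLE);
		vk_add_heap(r->probed_size - r->probed_cpu_visible_size,
			    VK_HEAP_DEVICE_LOCAL);
	}
}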


>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>
>> After that it should either be the entire lmem or something smaller.
>>
>>
>> -Lionel
>>
>>
>>> +   };
>>> +};
>>> +
>>> +/**
>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour,
>>> +with added
>>> + * extension support using struct i915_user_extension.
>>> + *
>>> + * Note that new buffer flags should be added here, at least for the
>>> +stuff that
>>> + * is immutable. Previously we would have two ioctls, one to create
>>> +the object
>>> + * with gem_create, and another to apply various parameters, however
>>> +this
>>> + * creates some ambiguity for the params which are considered
>>> +immutable. Also in
>>> + * general we're phasing out the various SET/GET ioctls.
>>> + */
>>> +struct __drm_i915_gem_create_ext {
>>> +   /**
>>> +    * @size: Requested size for the object.
>>> +    *
>>> +    * The (page-aligned) allocated size for the object will be returned.
>>> +    *
>>> +    * Note that for some devices we might have further minimum
>>> +    * page-size restrictions(larger than 4K), like for device local-memory.
>>> +    * However in general the final size here should always reflect any
>>> +    * rounding up, if for example using the
>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>> +    * extension to place the object in device local-memory.
>>> +    */
>>> +   __u64 size;
>>> +   /**
>>> +    * @handle: Returned handle for the object.
>>> +    *
>>> +    * Object handles are nonzero.
>>> +    */
>>> +   __u32 handle;
>>> +   /**
>>> +    * @flags: Optional flags.
>>> +    *
>>> +    * Supported values:
>>> +    *
>>> +    * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the
>> kernel that
>>> +    * the object will need to be accessed via the CPU.
>>> +    *
>>> +    * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE,
>> and
>>> +    * only strictly required on platforms where only some of the device
>>> +    * memory is directly visible or mappable through the CPU, like on DG2+.
>>> +    *
>>> +    * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM,
>> to
>>> +    * ensure we can always spill the allocation to system memory, if we
>>> +    * can't place the object in the mappable part of
>>> +    * I915_MEMORY_CLASS_DEVICE.
>>> +    *
>>> +    * Note that since the kernel only supports flat-CCS on objects that can
>>> +    * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
>> don't
>>> +    * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
>> together with
>>> +    * flat-CCS.
>>> +    *
>>> +    * Without this hint, the kernel will assume that non-mappable
>>> +    * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that
>> the
>>> +    * kernel can still migrate the object to the mappable part, as a last
>>> +    * resort, if userspace ever CPU faults this object, but this might be
>>> +    * expensive, and so ideally should be avoided.
>>> +    */
>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>> +   __u32 flags;
>>> +   /**
>>> +    * @extensions: The chain of extensions to apply to this object.
>>> +    *
>>> +    * This will be useful in the future when we need to support several
>>> +    * different extensions, and we need to apply more than one when
>>> +    * creating the object. See struct i915_user_extension.
>>> +    *
>>> +    * If we don't supply any extensions then we get the same old
>> gem_create
>>> +    * behaviour.
>>> +    *
>>> +    * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>> +    * struct drm_i915_gem_create_ext_memory_regions.
>>> +    *
>>> +    * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>> +    * struct drm_i915_gem_create_ext_protected_content.
>>> +    */
>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>> +   __u64 extensions;
>>> +};
>>> +
>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>> +
>>> +/**
>>> + * struct __drm_i915_query_vma_info
>>> + *
>>> + * Given a vm and GTT address, lookup the corresponding vma,
>>> +returning its set
>>> + * of attributes.
>>> + *
>>> + * .. code-block:: C
>>> + *
>>> + * struct drm_i915_query_vma_info info = {};
>>> + * struct drm_i915_query_item item = {
>>> + *         .data_ptr = (uintptr_t)&info,
>>> + *         .query_id = DRM_I915_QUERY_VMA_INFO,
>>> + * };
>>> + * struct drm_i915_query query = {
>>> + *         .num_items = 1,
>>> + *         .items_ptr = (uintptr_t)&item,
>>> + * };
>>> + * int err;
>>> + *
>>> + * // Unlike some other types of queries, there is no need to first query
>>> + * // the size of the data_ptr blob here, since we already know ahead of
>>> + * // time how big this needs to be.
>>> + * item.length = sizeof(info);
>>> + *
>>> + * // Next we fill in the vm_id and ppGTT address of the vma we wish
>>> + * // to query, before then firing off the query.
>>> + * info.vm_id = vm_id;
>>> + * info.offset = gtt_address;
>>> + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>> + * if (err || item.length < 0) ...
>>> + *
>>> + * // If all went well we can now inspect the returned attributes.
>>> + * if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>> + */
>>> +struct __drm_i915_query_vma_info {
>>> +   /**
>>> +    * @vm_id: The given vm id that contains the vma. The id is the value
>>> +    * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>> +    * drm_i915_gem_vm_control.vm_id.
>>> +    */
>>> +   __u32 vm_id;
>>> +   /** @pad: MBZ. */
>>> +   __u32 pad;
>>> +   /**
>>> +    * @offset: The corresponding ppGTT address of the vma which the
>> kernel
>>> +    * will use to perform the lookup.
>>> +    */
>>> +   __u64 offset;
>>> +   /**
>>> +    * @attributes: The returned attributes for the given vma.
>>> +    *
>>> +    * Possible values:
>>> +    *
>>> +    * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages
>> backing the
>>> +    * vma are currently CPU accessible. If this is not set then the vma is
>>> +    * currently backed by I915_MEMORY_CLASS_DEVICE memory, which
>> the CPU
>>> +    * cannot directly access(this is only possible on discrete devices with
>>> +    * a small BAR). Attempting to MMAP and fault such an object will
>>> +    * require the kernel first synchronising any GPU work tied to the
>>> +    * object, before then migrating the pages, either to the CPU accessible
>>> +    * part of I915_MEMORY_CLASS_DEVICE, or
>> I915_MEMORY_CLASS_SYSTEM, if the
>>> +    * placements permit it. See
>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>> +    *
>>> +    * Note that this is inherently racy.
>>> +    */
>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>> +   __u64 attributes;
>>> +   /** @rsvd: MBZ */
>>> +   __u32 rsvd[4];
>>> +};
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst
>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>> new file mode 100644
>>> index 000000000000..be3d9bcdd86d
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>> @@ -0,0 +1,58 @@
>>> +==========================
>>> +I915 Small BAR RFC Section
>>> +==========================
>>> +Starting from DG2 we will have resizable BAR support for device
>>> +local-memory(i.e I915_MEMORY_CLASS_DEVICE), but in some cases the
>>> +final BAR size might still be smaller than the total probed_size. In
>>> +such cases, only some subset of I915_MEMORY_CLASS_DEVICE will be CPU
>>> +accessible(for example the first 256M), while the remainder is only accessible
>> via the GPU.
>>> +
>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>> +----------------------------------------------
>>> +New gem_create_ext flag to tell the kernel that a BO will require CPU access.
>>> +This becomes important when placing an object in
>>> +I915_MEMORY_CLASS_DEVICE, where underneath the device has a small
>>> +BAR, meaning only some portion of it is CPU accessible. Without this
>>> +flag the kernel will assume that CPU access is not required, and
>>> +prioritize using the non-CPU visible portion of
>> I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_gem_create_ext
>>> +
>>> +probed_cpu_visible_size attribute
>>> +---------------------------------
>>> +New struct __drm_i915_memory_region_info attribute which returns the total
>>> +size of the CPU accessible portion, for the particular region. This
>>> +should only be applicable for I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +Vulkan will need this as part of creating a separate VkMemoryHeap
>>> +with the VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the
>>> +CPU visible portion, where the total size of the heap needs to be known.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_memory_region_info
>>> +
>>> +DRM_I915_QUERY_VMA_INFO query
>>> +-----------------------------
>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>> +respective vma, and return its set of attributes. For now we only
>>> +support DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the
>>> +object/vma is currently placed in memory that is accessible by the
>>> +CPU. This should always be set on devices where the CPU
>>> +probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE matches the
>>> +probed_size. If this is not set then CPU faulting the object will likely first
>> require migrating the pages.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_query_vma_info
>>> +
>>> +Error Capture restrictions
>>> +--------------------------
>>> +With error capture we have two new restrictions:
>>> +
>>> +    1) Error capture is best effort on small BAR systems; if the pages are not
>>> +    CPU accessible, at the time of capture, then the kernel is free to skip
>>> +    trying to capture them.
>>> +
>>> +    2) On discrete we now reject error capture on recoverable contexts. In the
>>> +    future the kernel may want to blit during error capture, when for example
>>> +    something is not currently CPU accessible.
>>> diff --git a/Documentation/gpu/rfc/index.rst
>>> b/Documentation/gpu/rfc/index.rst index 91e93a705230..5a3bd3924ba6
>>> 100644
>>> --- a/Documentation/gpu/rfc/index.rst
>>> +++ b/Documentation/gpu/rfc/index.rst
>>> @@ -23,3 +23,7 @@ host such documentation:
>>>    .. toctree::
>>>
>>>        i915_scheduler.rst
>>> +
>>> +.. toctree::
>>> +
>>> +    i915_small_bar.rst



^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-05-02 18:03       ` Lionel Landwerlin
  0 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-02 18:03 UTC (permalink / raw)
  To: Abodunrin, Akeem G, Auld, Matthew, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 02/05/2022 20:58, Abodunrin, Akeem G wrote:
>
>> -----Original Message-----
>> From: Landwerlin, Lionel G <lionel.g.landwerlin@intel.com>
>> Sent: Monday, May 2, 2022 12:55 AM
>> To: Auld, Matthew <matthew.auld@intel.com>; intel-gfx@lists.freedesktop.org
>> Cc: dri-devel@lists.freedesktop.org; Thomas Hellström
>> <thomas.hellstrom@linux.intel.com>; Bloomfield, Jon
>> <jon.bloomfield@intel.com>; Daniel Vetter <daniel.vetter@ffwll.ch>; Justen,
>> Jordan L <jordan.l.justen@intel.com>; Kenneth Graunke
>> <kenneth@whitecape.org>; Abodunrin, Akeem G
>> <akeem.g.abodunrin@intel.com>; mesa-dev@lists.freedesktop.org
>> Subject: Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
>>
>> On 20/04/2022 20:13, Matthew Auld wrote:
>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>
>>> v2:
>>>     - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>     - Rework error capture interactions, including no longer needing
>>>       NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>     - Add probed_cpu_visible_size. (Lionel)
>>>
>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>> Cc: mesa-dev@lists.freedesktop.org
>>> ---
>>>    Documentation/gpu/rfc/i915_small_bar.h   | 190
>> +++++++++++++++++++++++
>>>    Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>    Documentation/gpu/rfc/index.rst          |   4 +
>>>    3 files changed, 252 insertions(+)
>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h
>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>> new file mode 100644
>>> index 000000000000..7bfd0cf44d35
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>> @@ -0,0 +1,190 @@
>>> +/**
>>> + * struct __drm_i915_memory_region_info - Describes one region as
>>> +known to the
>>> + * driver.
>>> + *
>>> + * Note this is using both struct drm_i915_query_item and struct
>> drm_i915_query.
>>> + * For this new query we are adding the new query id
>>> +DRM_I915_QUERY_MEMORY_REGIONS
>>> + * at &drm_i915_query_item.query_id.
>>> + */
>>> +struct __drm_i915_memory_region_info {
>>> +   /** @region: The class:instance pair encoding */
>>> +   struct drm_i915_gem_memory_class_instance region;
>>> +
>>> +   /** @rsvd0: MBZ */
>>> +   __u32 rsvd0;
>>> +
>>> +   /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>> +   __u64 probed_size;
>>> +
>>> +   /** @unallocated_size: Estimate of memory remaining (-1 = unknown)
>> */
>>> +   __u64 unallocated_size;
>>> +
>>> +   union {
>>> +           /** @rsvd1: MBZ */
>>> +           __u64 rsvd1[8];
>>> +           struct {
>>> +                   /**
>>> +                    * @probed_cpu_visible_size: Memory probed by the
>> driver
>>> +                    * that is CPU accessible. (-1 = unknown).
>>> +                    *
>>> +                    * This will always be <= @probed_size, and the
>>> +                    * remainder(if there is any) will not be CPU
>>> +                    * accessible.
>>> +                    */
>>> +                   __u64 probed_cpu_visible_size;
>>> +           };
>>
>> Trying to implement userspace support in Vulkan for this, I have an additional
>> question about the value of probed_cpu_visible_size.
>>
>> When is it set to -1?
> I believe it is set to -1 if it is unknown, and/or not cpu accessible...
>
> Cheers!
> ~Akeem


So what should I expect on system memory?

What value is returned when all of probed_size is CPU visible on local 
memory?


Thanks,


-Lionel


>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>
>> After that it should either be the entire lmem or something smaller.
>>
>>
>> -Lionel
>>
>>
>>> +   };
>>> +};
>>> +
>>> +/**
>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour,
>>> +with added
>>> + * extension support using struct i915_user_extension.
>>> + *
>>> + * Note that new buffer flags should be added here, at least for the
>>> +stuff that
>>> + * is immutable. Previously we would have two ioctls, one to create
>>> +the object
>>> + * with gem_create, and another to apply various parameters, however
>>> +this
>>> + * creates some ambiguity for the params which are considered
>>> +immutable. Also in
>>> + * general we're phasing out the various SET/GET ioctls.
>>> + */
>>> +struct __drm_i915_gem_create_ext {
>>> +   /**
>>> +    * @size: Requested size for the object.
>>> +    *
>>> +    * The (page-aligned) allocated size for the object will be returned.
>>> +    *
>>> +    * Note that for some devices we might have further minimum
>>> +    * page-size restrictions(larger than 4K), like for device local-memory.
>>> +    * However in general the final size here should always reflect any
>>> +    * rounding up, if for example using the
>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>> +    * extension to place the object in device local-memory.
>>> +    */
>>> +   __u64 size;
>>> +   /**
>>> +    * @handle: Returned handle for the object.
>>> +    *
>>> +    * Object handles are nonzero.
>>> +    */
>>> +   __u32 handle;
>>> +   /**
>>> +    * @flags: Optional flags.
>>> +    *
>>> +    * Supported values:
>>> +    *
>>> +    * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the
>> kernel that
>>> +    * the object will need to be accessed via the CPU.
>>> +    *
>>> +    * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE,
>> and
>>> +    * only strictly required on platforms where only some of the device
>>> +    * memory is directly visible or mappable through the CPU, like on DG2+.
>>> +    *
>>> +    * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM,
>> to
>>> +    * ensure we can always spill the allocation to system memory, if we
>>> +    * can't place the object in the mappable part of
>>> +    * I915_MEMORY_CLASS_DEVICE.
>>> +    *
>>> +    * Note that since the kernel only supports flat-CCS on objects that can
>>> +    * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
>> don't
>>> +    * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
>> together with
>>> +    * flat-CCS.
>>> +    *
>>> +    * Without this hint, the kernel will assume that non-mappable
>>> +    * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that
>> the
>>> +    * kernel can still migrate the object to the mappable part, as a last
>>> +    * resort, if userspace ever CPU faults this object, but this might be
>>> +    * expensive, and so ideally should be avoided.
>>> +    */
>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>> +   __u32 flags;
>>> +   /**
>>> +    * @extensions: The chain of extensions to apply to this object.
>>> +    *
>>> +    * This will be useful in the future when we need to support several
>>> +    * different extensions, and we need to apply more than one when
>>> +    * creating the object. See struct i915_user_extension.
>>> +    *
>>> +    * If we don't supply any extensions then we get the same old
>> gem_create
>>> +    * behaviour.
>>> +    *
>>> +    * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>> +    * struct drm_i915_gem_create_ext_memory_regions.
>>> +    *
>>> +    * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>> +    * struct drm_i915_gem_create_ext_protected_content.
>>> +    */
>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>> +   __u64 extensions;
>>> +};
>>> +
>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>> +
>>> +/**
>>> + * struct __drm_i915_query_vma_info
>>> + *
>>> + * Given a vm and GTT address, lookup the corresponding vma,
>>> +returning its set
>>> + * of attributes.
>>> + *
>>> + * .. code-block:: C
>>> + *
>>> + * struct drm_i915_query_vma_info info = {};
>>> + * struct drm_i915_query_item item = {
>>> + *         .data_ptr = (uintptr_t)&info,
>>> + *         .query_id = DRM_I915_QUERY_VMA_INFO,
>>> + * };
>>> + * struct drm_i915_query query = {
>>> + *         .num_items = 1,
>>> + *         .items_ptr = (uintptr_t)&item,
>>> + * };
>>> + * int err;
>>> + *
>>> + * // Unlike some other types of queries, there is no need to first query
>>> + * // the size of the data_ptr blob here, since we already know ahead of
>>> + * // time how big this needs to be.
>>> + * item.length = sizeof(info);
>>> + *
>>> + * // Next we fill in the vm_id and ppGTT address of the vma we wish
>>> + * // to query, before then firing off the query.
>>> + * info.vm_id = vm_id;
>>> + * info.offset = gtt_address;
>>> + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>> + * if (err || item.length < 0) ...
>>> + *
>>> + * // If all went well we can now inspect the returned attributes.
>>> + * if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>> + */
>>> +struct __drm_i915_query_vma_info {
>>> +   /**
>>> +    * @vm_id: The given vm id that contains the vma. The id is the value
>>> +    * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>> +    * drm_i915_gem_vm_control.vm_id.
>>> +    */
>>> +   __u32 vm_id;
>>> +   /** @pad: MBZ. */
>>> +   __u32 pad;
>>> +   /**
>>> +    * @offset: The corresponding ppGTT address of the vma which the
>> kernel
>>> +    * will use to perform the lookup.
>>> +    */
>>> +   __u64 offset;
>>> +   /**
>>> +    * @attributes: The returned attributes for the given vma.
>>> +    *
>>> +    * Possible values:
>>> +    *
>>> +    * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages
>> backing the
>>> +    * vma are currently CPU accessible. If this is not set then the vma is
>>> +    * currently backed by I915_MEMORY_CLASS_DEVICE memory, which
>> the CPU
>>> +    * cannot directly access(this is only possible on discrete devices with
>>> +    * a small BAR). Attempting to MMAP and fault such an object will
>>> +    * require the kernel first synchronising any GPU work tied to the
>>> +    * object, before then migrating the pages, either to the CPU accessible
>>> +    * part of I915_MEMORY_CLASS_DEVICE, or
>> I915_MEMORY_CLASS_SYSTEM, if the
>>> +    * placements permit it. See
>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>> +    *
>>> +    * Note that this is inherently racy.
>>> +    */
>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>> +   __u64 attributes;
>>> +   /** @rsvd: MBZ */
>>> +   __u32 rsvd[4];
>>> +};
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst
>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>> new file mode 100644
>>> index 000000000000..be3d9bcdd86d
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>> @@ -0,0 +1,58 @@
>>> +==========================
>>> +I915 Small BAR RFC Section
>>> +==========================
>>> +Starting from DG2 we will have resizable BAR support for device
>>> +local-memory(i.e I915_MEMORY_CLASS_DEVICE), but in some cases the
>>> +final BAR size might still be smaller than the total probed_size. In
>>> +such cases, only some subset of I915_MEMORY_CLASS_DEVICE will be CPU
>>> +accessible(for example the first 256M), while the remainder is only accessible
>> via the GPU.
>>> +
>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>> +----------------------------------------------
>>> +New gem_create_ext flag to tell the kernel that a BO will require CPU access.
>>> +This becomes important when placing an object in
>>> +I915_MEMORY_CLASS_DEVICE, where underneath the device has a small
>>> +BAR, meaning only some portion of it is CPU accessible. Without this
>>> +flag the kernel will assume that CPU access is not required, and
>>> +prioritize using the non-CPU visible portion of
>> I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_gem_create_ext
>>> +
>>> +probed_cpu_visible_size attribute
>>> +---------------------------------
>>> +New struct __drm_i915_memory_region_info attribute which returns the total
>>> +size of the CPU accessible portion, for the particular region. This
>>> +should only be applicable for I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +Vulkan will need this as part of creating a separate VkMemoryHeap
>>> +with the VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the
>>> +CPU visible portion, where the total size of the heap needs to be known.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_memory_region_info
>>> +
>>> +DRM_I915_QUERY_VMA_INFO query
>>> +-----------------------------
>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>> +respective vma, and return its set of attributes. For now we only
>>> +support DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the
>>> +object/vma is currently placed in memory that is accessible by the
>>> +CPU. This should always be set on devices where the CPU
>>> +probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE matches the
>>> +probed_size. If this is not set then CPU faulting the object will likely first
>> require migrating the pages.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_query_vma_info
>>> +
>>> +Error Capture restrictions
>>> +--------------------------
>>> +With error capture we have two new restrictions:
>>> +
>>> +    1) Error capture is best effort on small BAR systems; if the pages are not
>>> +    CPU accessible, at the time of capture, then the kernel is free to skip
>>> +    trying to capture them.
>>> +
>>> +    2) On discrete we now reject error capture on recoverable contexts. In the
>>> +    future the kernel may want to blit during error capture, when for example
>>> +    something is not currently CPU accessible.
>>> diff --git a/Documentation/gpu/rfc/index.rst
>>> b/Documentation/gpu/rfc/index.rst index 91e93a705230..5a3bd3924ba6
>>> 100644
>>> --- a/Documentation/gpu/rfc/index.rst
>>> +++ b/Documentation/gpu/rfc/index.rst
>>> @@ -23,3 +23,7 @@ host such documentation:
>>>    .. toctree::
>>>
>>>        i915_scheduler.rst
>>> +
>>> +.. toctree::
>>> +
>>> +    i915_small_bar.rst



^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-02  7:54   ` [Intel-gfx] " Lionel Landwerlin
@ 2022-05-03  9:01     ` Matthew Auld
  -1 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-05-03  9:01 UTC (permalink / raw)
  To: Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, Jordan Justen, dri-devel, Kenneth Graunke,
	Jon Bloomfield, Daniel Vetter, mesa-dev, Akeem G Abodunrin

On 02/05/2022 08:54, Lionel Landwerlin wrote:
> On 20/04/2022 20:13, Matthew Auld wrote:
>> Add an entry for the new uapi needed for small BAR on DG2+.
>>
>> v2:
>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>    - Rework error capture interactions, including no longer needing
>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>    - Add probed_cpu_visible_size. (Lionel)
>>
>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>> Cc: mesa-dev@lists.freedesktop.org
>> ---
>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>   Documentation/gpu/rfc/index.rst          |   4 +
>>   3 files changed, 252 insertions(+)
>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>
>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>> b/Documentation/gpu/rfc/i915_small_bar.h
>> new file mode 100644
>> index 000000000000..7bfd0cf44d35
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>> @@ -0,0 +1,190 @@
>> +/**
>> + * struct __drm_i915_memory_region_info - Describes one region as 
>> known to the
>> + * driver.
>> + *
>> + * Note this is using both struct drm_i915_query_item and struct 
>> drm_i915_query.
>> + * For this new query we are adding the new query id 
>> DRM_I915_QUERY_MEMORY_REGIONS
>> + * at &drm_i915_query_item.query_id.
>> + */
>> +struct __drm_i915_memory_region_info {
>> +    /** @region: The class:instance pair encoding */
>> +    struct drm_i915_gem_memory_class_instance region;
>> +
>> +    /** @rsvd0: MBZ */
>> +    __u32 rsvd0;
>> +
>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>> +    __u64 probed_size;
>> +
>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>> unknown) */
>> +    __u64 unallocated_size;
>> +
>> +    union {
>> +        /** @rsvd1: MBZ */
>> +        __u64 rsvd1[8];
>> +        struct {
>> +            /**
>> +             * @probed_cpu_visible_size: Memory probed by the driver
>> +             * that is CPU accessible. (-1 = unknown).
>> +             *
>> +             * This will always be <= @probed_size, and the
>> +             * remainder(if there is any) will not be CPU
>> +             * accessible.
>> +             */
>> +            __u64 probed_cpu_visible_size;
>> +        };
> 
> 
> Trying to implement userspace support in Vulkan for this, I have an 
> additional question about the value of probed_cpu_visible_size.
> 
> When is it set to -1?

I don't think anything is currently using -1 for any of these fields.

> 
> I'm guessing before there is support for this value it'll be 0 (MBZ).
> 
> After that it should either be the entire lmem or something smaller.

Yup.
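
For completeness, a rough sketch of what I'd expect on the userspace side,
sizing a host-visible heap from the region query. Everything below is
existing uapi except probed_cpu_visible_size itself, which assumes the final
header keeps the name proposed in this RFC; treating zero as "old kernel,
field not filled in" matches the MBZ point above:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int get_lmem_sizes(int fd, uint64_t *total, uint64_t *mappable)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_memory_regions *regions;
	uint32_t i;

	*total = *mappable = 0;

	/* First call: the kernel reports the required blob size in item.length. */
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return -1;

	regions = calloc(1, item.length);
	if (!regions)
		return -1;
	item.data_ptr = (uintptr_t)regions;

	/* Second call: actually fetch the region info. */
	if (ioctl(fd, DRM_IOCTL_I915_QUERY, &query)) {
		free(regions);
		return -1;
	}

	for (i = 0; i < regions->num_regions; i++) {
		const struct drm_i915_memory_region_info *info =
			&regions->regions[i];

		if (info->region.memory_class != I915_MEMORY_CLASS_DEVICE)
			continue;

		*total += info->probed_size;
		/*
		 * probed_cpu_visible_size is the field proposed in this RFC;
		 * zero would mean an older kernel that leaves the rsvd space
		 * cleared, so fall back to assuming the whole region is
		 * mappable, like before small BAR.
		 */
		*mappable += info->probed_cpu_visible_size ?
			     info->probed_cpu_visible_size :
			     info->probed_size;
	}

	free(regions);
	return 0;
}

The mappable total would then back the VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
heap, and the plain total the device-local one.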

> 
> 
> -Lionel
> 
> 
>> +    };
>> +};
>> +
>> +/**
>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, 
>> with added
>> + * extension support using struct i915_user_extension.
>> + *
>> + * Note that new buffer flags should be added here, at least for the 
>> stuff that
>> + * is immutable. Previously we would have two ioctls, one to create 
>> the object
>> + * with gem_create, and another to apply various parameters, however 
>> this
>> + * creates some ambiguity for the params which are considered 
>> immutable. Also in
>> + * general we're phasing out the various SET/GET ioctls.
>> + */
>> +struct __drm_i915_gem_create_ext {
>> +    /**
>> +     * @size: Requested size for the object.
>> +     *
>> +     * The (page-aligned) allocated size for the object will be 
>> returned.
>> +     *
>> +     * Note that for some devices we might have further minimum
>> +     * page-size restrictions(larger than 4K), like for device 
>> local-memory.
>> +     * However in general the final size here should always reflect any
>> +     * rounding up, if for example using the 
>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>> +     * extension to place the object in device local-memory.
>> +     */
>> +    __u64 size;
>> +    /**
>> +     * @handle: Returned handle for the object.
>> +     *
>> +     * Object handles are nonzero.
>> +     */
>> +    __u32 handle;
>> +    /**
>> +     * @flags: Optional flags.
>> +     *
>> +     * Supported values:
>> +     *
>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>> kernel that
>> +     * the object will need to be accessed via the CPU.
>> +     *
>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
>> +     * only strictly required on platforms where only some of the device
>> +     * memory is directly visible or mappable through the CPU, like 
>> on DG2+.
>> +     *
>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>> +     * ensure we can always spill the allocation to system memory, if we
>> +     * can't place the object in the mappable part of
>> +     * I915_MEMORY_CLASS_DEVICE.
>> +     *
>> +     * Note that since the kernel only supports flat-CCS on objects 
>> that can
>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>> +     * flat-CCS.
>> +     *
>> +     * Without this hint, the kernel will assume that non-mappable
>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>> that the
>> +     * kernel can still migrate the object to the mappable part, as a 
>> last
>> +     * resort, if userspace ever CPU faults this object, but this 
>> might be
>> +     * expensive, and so ideally should be avoided.
>> +     */
>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>> +    __u32 flags;
>> +    /**
>> +     * @extensions: The chain of extensions to apply to this object.
>> +     *
>> +     * This will be useful in the future when we need to support several
>> +     * different extensions, and we need to apply more than one when
>> +     * creating the object. See struct i915_user_extension.
>> +     *
>> +     * If we don't supply any extensions then we get the same old 
>> gem_create
>> +     * behaviour.
>> +     *
>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>> +     * struct drm_i915_gem_create_ext_memory_regions.
>> +     *
>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>> +     * struct drm_i915_gem_create_ext_protected_content.
>> +     */
>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>> +    __u64 extensions;
>> +};
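
(As an aside, for anyone wiring this up: allocating a BO against the above
would look roughly like the sketch below, with the same includes as the query
sketch earlier in this mail. The NEEDS_CPU_ACCESS flag is of course still only
the proposal from this header; the rest is the existing gem_create_ext uapi.
Note the mandatory SMEM placement when the flag is set.)

static int create_mappable_lmem_bo(int fd, uint64_t size, uint32_t *handle)
{
	struct drm_i915_gem_memory_class_instance placements[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE, .memory_instance = 0 },
		/* SMEM placement is required when asking for CPU access. */
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM, .memory_instance = 0 },
	};
	struct drm_i915_gem_create_ext_memory_regions regions_ext = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = 2,
		.regions = (uintptr_t)placements,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		/* The flag proposed in this RFC. */
		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
		.extensions = (uintptr_t)&regions_ext,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return -1;

	*handle = create.handle;
	return 0;
}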
>> +
>> +#define DRM_I915_QUERY_VMA_INFO    5
>> +
>> +/**
>> + * struct __drm_i915_query_vma_info
>> + *
>> + * Given a vm and GTT address, lookup the corresponding vma, 
>> returning its set
>> + * of attributes.
>> + *
>> + * .. code-block:: C
>> + *
>> + *    struct drm_i915_query_vma_info info = {};
>> + *    struct drm_i915_query_item item = {
>> + *        .data_ptr = (uintptr_t)&info,
>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>> + *    };
>> + *    struct drm_i915_query query = {
>> + *        .num_items = 1,
>> + *        .items_ptr = (uintptr_t)&item,
>> + *    };
>> + *    int err;
>> + *
>> + *    // Unlike some other types of queries, there is no need to 
>> first query
>> + *    // the size of the data_ptr blob here, since we already know 
>> ahead of
>> + *    // time how big this needs to be.
>> + *    item.length = sizeof(info);
>> + *
>> + *    // Next we fill in the vm_id and ppGTT address of the vma we wish
>> + *    // to query, before then firing off the query.
>> + *    info.vm_id = vm_id;
>> + *    info.offset = gtt_address;
>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>> + *    if (err || item.length < 0) ...
>> + *
>> + *    // If all went well we can now inspect the returned attributes.
>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>> + */
>> +struct __drm_i915_query_vma_info {
>> +    /**
>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>> value
>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>> +     * drm_i915_gem_vm_control.vm_id.
>> +     */
>> +    __u32 vm_id;
>> +    /** @pad: MBZ. */
>> +    __u32 pad;
>> +    /**
>> +     * @offset: The corresponding ppGTT address of the vma which the 
>> kernel
>> +     * will use to perform the lookup.
>> +     */
>> +    __u64 offset;
>> +    /**
>> +     * @attributes: The returned attributes for the given vma.
>> +     *
>> +     * Possible values:
>> +     *
>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing 
>> the
>> +     * vma are currently CPU accessible. If this is not set then the 
>> vma is
>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which the 
>> CPU
>> +     * cannot directly access(this is only possible on discrete 
>> devices with
>> +     * a small BAR). Attempting to MMAP and fault such an object will
>> +     * require the kernel first synchronising any GPU work tied to the
>> +     * object, before then migrating the pages, either to the CPU 
>> accessible
>> +     * part of I915_MEMORY_CLASS_DEVICE, or I915_MEMORY_CLASS_SYSTEM, 
>> if the
>> +     * placements permit it. See 
>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>> +     *
>> +     * Note that this is inherently racy.
>> +     */
>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>> +    __u64 attributes;
>> +    /** @rsvd: MBZ */
>> +    __u32 rsvd[4];
>> +};
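
(And since the attribute is only ever a racy hint, I'd picture it being
consumed purely to pick a fast path, along the lines of the sketch below.
upload_wc_mmap() and upload_blit_staging() are just placeholders for whatever
direct and staged upload paths the driver already has; nothing here is real
API beyond the proposed CPU_VISIBLE bit.)

static void upload(int fd, uint32_t handle, const void *data, size_t len,
		   uint64_t vma_attributes)
{
	if (vma_attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE)
		/* Pages were CPU visible at query time; write via a mapping. */
		upload_wc_mmap(fd, handle, data, len);
	else
		/* Avoid the fault-and-migrate path; blit from a staging BO. */
		upload_blit_staging(fd, handle, data, len);
}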
>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>> b/Documentation/gpu/rfc/i915_small_bar.rst
>> new file mode 100644
>> index 000000000000..be3d9bcdd86d
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>> @@ -0,0 +1,58 @@
>> +==========================
>> +I915 Small BAR RFC Section
>> +==========================
>> +Starting from DG2 we will have resizable BAR support for device 
>> local-memory(i.e
>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size might 
>> still be
>> +smaller than the total probed_size. In such cases, only some subset of
>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the first 
>> 256M),
>> +while the remainder is only accessible via the GPU.
>> +
>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>> +----------------------------------------------
>> +New gem_create_ext flag to tell the kernel that a BO will require CPU 
>> access.
>> +This becomes important when placing an object in 
>> I915_MEMORY_CLASS_DEVICE, where
>> +underneath the device has a small BAR, meaning only some portion of 
>> it is CPU
>> +accessible. Without this flag the kernel will assume that CPU access 
>> is not
>> +required, and prioritize using the non-CPU visible portion of
>> +I915_MEMORY_CLASS_DEVICE.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_gem_create_ext
>> +
>> +probed_cpu_visible_size attribute
>> +---------------------------------
>> +New struct __drm_i915_memory_region_info attribute which returns the total
>> size of the
>> +CPU accessible portion, for the particular region. This should only be
>> +applicable for I915_MEMORY_CLASS_DEVICE.
>> +
>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>> with the
>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU visible 
>> portion,
>> +where the total size of the heap needs to be known.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_memory_region_info
>> +
>> +DRM_I915_QUERY_VMA_INFO query
>> +-----------------------------
>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>> +respective vma, and return its set of attributes. For now we only 
>> support
>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>> +currently placed in memory that is accessible by the CPU. This should 
>> always be
>> +set on devices where the CPU probed_cpu_visible_size of 
>> I915_MEMORY_CLASS_DEVICE
>> +matches the probed_size. If this is not set then CPU faulting the 
>> object will
>> +likely first require migrating the pages.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_query_vma_info
>> +
>> +Error Capture restrictions
>> +--------------------------
>> +With error capture we have two new restrictions:
>> +
>> +    1) Error capture is best effort on small BAR systems; if the 
>> pages are not
>> +    CPU accessible, at the time of capture, then the kernel is free 
>> to skip
>> +    trying to capture them.
>> +
>> +    2) On discrete we now reject error capture on recoverable 
>> contexts. In the
>> +    future the kernel may want to blit during error capture, when for 
>> example
>> +    something is not currently CPU accessible.
>> diff --git a/Documentation/gpu/rfc/index.rst 
>> b/Documentation/gpu/rfc/index.rst
>> index 91e93a705230..5a3bd3924ba6 100644
>> --- a/Documentation/gpu/rfc/index.rst
>> +++ b/Documentation/gpu/rfc/index.rst
>> @@ -23,3 +23,7 @@ host such documentation:
>>   .. toctree::
>>       i915_scheduler.rst
>> +
>> +.. toctree::
>> +
>> +    i915_small_bar.rst
> 
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-05-03  9:01     ` Matthew Auld
  0 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-05-03  9:01 UTC (permalink / raw)
  To: Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 02/05/2022 08:54, Lionel Landwerlin wrote:
> On 20/04/2022 20:13, Matthew Auld wrote:
>> Add an entry for the new uapi needed for small BAR on DG2+.
>>
>> v2:
>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>    - Rework error capture interactions, including no longer needing
>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>    - Add probed_cpu_visible_size. (Lionel)
>>
>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>> Cc: mesa-dev@lists.freedesktop.org
>> ---
>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>   Documentation/gpu/rfc/index.rst          |   4 +
>>   3 files changed, 252 insertions(+)
>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>
>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>> b/Documentation/gpu/rfc/i915_small_bar.h
>> new file mode 100644
>> index 000000000000..7bfd0cf44d35
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>> @@ -0,0 +1,190 @@
>> +/**
>> + * struct __drm_i915_memory_region_info - Describes one region as 
>> known to the
>> + * driver.
>> + *
>> + * Note this is using both struct drm_i915_query_item and struct 
>> drm_i915_query.
>> + * For this new query we are adding the new query id 
>> DRM_I915_QUERY_MEMORY_REGIONS
>> + * at &drm_i915_query_item.query_id.
>> + */
>> +struct __drm_i915_memory_region_info {
>> +    /** @region: The class:instance pair encoding */
>> +    struct drm_i915_gem_memory_class_instance region;
>> +
>> +    /** @rsvd0: MBZ */
>> +    __u32 rsvd0;
>> +
>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>> +    __u64 probed_size;
>> +
>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>> unknown) */
>> +    __u64 unallocated_size;
>> +
>> +    union {
>> +        /** @rsvd1: MBZ */
>> +        __u64 rsvd1[8];
>> +        struct {
>> +            /**
>> +             * @probed_cpu_visible_size: Memory probed by the driver
>> +             * that is CPU accessible. (-1 = unknown).
>> +             *
>> +             * This will always be <= @probed_size, and the
>> +             * remainder(if there is any) will not be CPU
>> +             * accessible.
>> +             */
>> +            __u64 probed_cpu_visible_size;
>> +        };
> 
> 
> Trying to implement userspace support in Vulkan for this, I have an 
> additional question about the value of probed_cpu_visible_size.
> 
> When is it set to -1?

I don't think anything is currently using -1 for any of these fields.

> 
> I'm guessing before there is support for this value it'll be 0 (MBZ).
> 
> After that it should either be the entire lmem or something smaller.

Yup.

> 
> 
> -Lionel
> 
> 
>> +    };
>> +};
>> +
>> +/**
>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, 
>> with added
>> + * extension support using struct i915_user_extension.
>> + *
>> + * Note that new buffer flags should be added here, at least for the 
>> stuff that
>> + * is immutable. Previously we would have two ioctls, one to create 
>> the object
>> + * with gem_create, and another to apply various parameters, however 
>> this
>> + * creates some ambiguity for the params which are considered 
>> immutable. Also in
>> + * general we're phasing out the various SET/GET ioctls.
>> + */
>> +struct __drm_i915_gem_create_ext {
>> +    /**
>> +     * @size: Requested size for the object.
>> +     *
>> +     * The (page-aligned) allocated size for the object will be 
>> returned.
>> +     *
>> +     * Note that for some devices we might have further minimum
>> +     * page-size restrictions(larger than 4K), like for device 
>> local-memory.
>> +     * However in general the final size here should always reflect any
>> +     * rounding up, if for example using the 
>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>> +     * extension to place the object in device local-memory.
>> +     */
>> +    __u64 size;
>> +    /**
>> +     * @handle: Returned handle for the object.
>> +     *
>> +     * Object handles are nonzero.
>> +     */
>> +    __u32 handle;
>> +    /**
>> +     * @flags: Optional flags.
>> +     *
>> +     * Supported values:
>> +     *
>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>> kernel that
>> +     * the object will need to be accessed via the CPU.
>> +     *
>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
>> +     * only strictly required on platforms where only some of the device
>> +     * memory is directly visible or mappable through the CPU, like 
>> on DG2+.
>> +     *
>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>> +     * ensure we can always spill the allocation to system memory, if we
>> +     * can't place the object in the mappable part of
>> +     * I915_MEMORY_CLASS_DEVICE.
>> +     *
>> +     * Note that since the kernel only supports flat-CCS on objects 
>> that can
>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>> +     * flat-CCS.
>> +     *
>> +     * Without this hint, the kernel will assume that non-mappable
>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>> that the
>> +     * kernel can still migrate the object to the mappable part, as a 
>> last
>> +     * resort, if userspace ever CPU faults this object, but this 
>> might be
>> +     * expensive, and so ideally should be avoided.
>> +     */
>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>> +    __u32 flags;
>> +    /**
>> +     * @extensions: The chain of extensions to apply to this object.
>> +     *
>> +     * This will be useful in the future when we need to support several
>> +     * different extensions, and we need to apply more than one when
>> +     * creating the object. See struct i915_user_extension.
>> +     *
>> +     * If we don't supply any extensions then we get the same old 
>> gem_create
>> +     * behaviour.
>> +     *
>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>> +     * struct drm_i915_gem_create_ext_memory_regions.
>> +     *
>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>> +     * struct drm_i915_gem_create_ext_protected_content.
>> +     */
>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>> +    __u64 extensions;
>> +};
>> +
>> +#define DRM_I915_QUERY_VMA_INFO    5
>> +
>> +/**
>> + * struct __drm_i915_query_vma_info
>> + *
>> + * Given a vm and GTT address, lookup the corresponding vma, 
>> returning its set
>> + * of attributes.
>> + *
>> + * .. code-block:: C
>> + *
>> + *    struct drm_i915_query_vma_info info = {};
>> + *    struct drm_i915_query_item item = {
>> + *        .data_ptr = (uintptr_t)&info,
>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>> + *    };
>> + *    struct drm_i915_query query = {
>> + *        .num_items = 1,
>> + *        .items_ptr = (uintptr_t)&item,
>> + *    };
>> + *    int err;
>> + *
>> + *    // Unlike some other types of queries, there is no need to 
>> first query
>> + *    // the size of the data_ptr blob here, since we already know 
>> ahead of
>> + *    // time how big this needs to be.
>> + *    item.length = sizeof(info);
>> + *
>> + *    // Next we fill in the vm_id and ppGTT address of the vma we wish
>> + *    // to query, before then firing off the query.
>> + *    info.vm_id = vm_id;
>> + *    info.offset = gtt_address;
>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>> + *    if (err || item.length < 0) ...
>> + *
>> + *    // If all went well we can now inspect the returned attributes.
>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>> + */
>> +struct __drm_i915_query_vma_info {
>> +    /**
>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>> value
>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>> +     * drm_i915_gem_vm_control.vm_id.
>> +     */
>> +    __u32 vm_id;
>> +    /** @pad: MBZ. */
>> +    __u32 pad;
>> +    /**
>> +     * @offset: The corresponding ppGTT address of the vma which the 
>> kernel
>> +     * will use to perform the lookup.
>> +     */
>> +    __u64 offset;
>> +    /**
>> +     * @attributes: The returned attributes for the given vma.
>> +     *
>> +     * Possible values:
>> +     *
>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages backing 
>> the
>> +     * vma are currently CPU accessible. If this is not set then the 
>> vma is
>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which the 
>> CPU
>> +     * cannot directly access(this is only possible on discrete 
>> devices with
>> +     * a small BAR). Attempting to MMAP and fault such an object will
>> +     * require the kernel first synchronising any GPU work tied to the
>> +     * object, before then migrating the pages, either to the CPU 
>> accessible
>> +     * part of I915_MEMORY_CLASS_DEVICE, or I915_MEMORY_CLASS_SYSTEM, 
>> if the
>> +     * placements permit it. See 
>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>> +     *
>> +     * Note that this is inherently racy.
>> +     */
>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>> +    __u64 attributes;
>> +    /** @rsvd: MBZ */
>> +    __u32 rsvd[4];
>> +};
>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>> b/Documentation/gpu/rfc/i915_small_bar.rst
>> new file mode 100644
>> index 000000000000..be3d9bcdd86d
>> --- /dev/null
>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>> @@ -0,0 +1,58 @@
>> +==========================
>> +I915 Small BAR RFC Section
>> +==========================
>> +Starting from DG2 we will have resizable BAR support for device 
>> local-memory(i.e
>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size might 
>> still be
>> +smaller than the total probed_size. In such cases, only some subset of
>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the first 
>> 256M),
>> +while the remainder is only accessible via the GPU.
>> +
>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>> +----------------------------------------------
>> +New gem_create_ext flag to tell the kernel that a BO will require CPU 
>> access.
>> +This becomes important when placing an object in 
>> I915_MEMORY_CLASS_DEVICE, where
>> +underneath the device has a small BAR, meaning only some portion of 
>> it is CPU
>> +accessible. Without this flag the kernel will assume that CPU access 
>> is not
>> +required, and prioritize using the non-CPU visible portion of
>> +I915_MEMORY_CLASS_DEVICE.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_gem_create_ext
>> +
>> +probed_cpu_visible_size attribute
>> +---------------------------------
>> +New struct __drm_i915_memory_region_info attribute which returns the total
>> size of the
>> +CPU accessible portion, for the particular region. This should only be
>> +applicable for I915_MEMORY_CLASS_DEVICE.
>> +
>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>> with the
>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU visible 
>> portion,
>> +where the total size of the heap needs to be known.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_memory_region_info
>> +
>> +DRM_I915_QUERY_VMA_INFO query
>> +-----------------------------
>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>> +respective vma, and return its set of attributes. For now we only 
>> support
>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>> +currently placed in memory that is accessible by the CPU. This should 
>> always be
>> +set on devices where the CPU probed_cpu_visible_size of 
>> I915_MEMORY_CLASS_DEVICE
>> +matches the probed_size. If this is not set then CPU faulting the 
>> object will
>> +likely first require migrating the pages.
>> +
>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>> +   :functions: __drm_i915_query_vma_info
>> +
>> +Error Capture restrictions
>> +--------------------------
>> +With error capture we have two new restrictions:
>> +
>> +    1) Error capture is best effort on small BAR systems; if the 
>> pages are not
>> +    CPU accessible, at the time of capture, then the kernel is free 
>> to skip
>> +    trying to capture them.
>> +
>> +    2) On discrete we now reject error capture on recoverable 
>> contexts. In the
>> +    future the kernel may want to blit during error capture, when for 
>> example
>> +    something is not currently CPU accessible.
>> diff --git a/Documentation/gpu/rfc/index.rst 
>> b/Documentation/gpu/rfc/index.rst
>> index 91e93a705230..5a3bd3924ba6 100644
>> --- a/Documentation/gpu/rfc/index.rst
>> +++ b/Documentation/gpu/rfc/index.rst
>> @@ -23,3 +23,7 @@ host such documentation:
>>   .. toctree::
>>       i915_scheduler.rst
>> +
>> +.. toctree::
>> +
>> +    i915_small_bar.rst
> 
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-02 18:03       ` [Intel-gfx] " Lionel Landwerlin
@ 2022-05-03  9:07         ` Matthew Auld
  -1 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-05-03  9:07 UTC (permalink / raw)
  To: Lionel Landwerlin, Abodunrin, Akeem G, intel-gfx
  Cc: Thomas Hellström, Justen, Jordan L, dri-devel,
	Kenneth Graunke, Bloomfield, Jon, Daniel Vetter, mesa-dev

On 02/05/2022 19:03, Lionel Landwerlin wrote:
> On 02/05/2022 20:58, Abodunrin, Akeem G wrote:
>>
>>> -----Original Message-----
>>> From: Landwerlin, Lionel G <lionel.g.landwerlin@intel.com>
>>> Sent: Monday, May 2, 2022 12:55 AM
>>> To: Auld, Matthew <matthew.auld@intel.com>; 
>>> intel-gfx@lists.freedesktop.org
>>> Cc: dri-devel@lists.freedesktop.org; Thomas Hellström
>>> <thomas.hellstrom@linux.intel.com>; Bloomfield, Jon
>>> <jon.bloomfield@intel.com>; Daniel Vetter <daniel.vetter@ffwll.ch>; 
>>> Justen,
>>> Jordan L <jordan.l.justen@intel.com>; Kenneth Graunke
>>> <kenneth@whitecape.org>; Abodunrin, Akeem G
>>> <akeem.g.abodunrin@intel.com>; mesa-dev@lists.freedesktop.org
>>> Subject: Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
>>>
>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>     - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>     - Rework error capture interactions, including no longer needing
>>>>       NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>     - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>    Documentation/gpu/rfc/i915_small_bar.h   | 190
>>> +++++++++++++++++++++++
>>>>    Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>    Documentation/gpu/rfc/index.rst          |   4 +
>>>>    3 files changed, 252 insertions(+)
>>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as
>>>> +known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct
>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id
>>>> +DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +   /** @region: The class:instance pair encoding */
>>>> +   struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +   /** @rsvd0: MBZ */
>>>> +   __u32 rsvd0;
>>>> +
>>>> +   /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +   __u64 probed_size;
>>>> +
>>>> +   /** @unallocated_size: Estimate of memory remaining (-1 = unknown)
>>> */
>>>> +   __u64 unallocated_size;
>>>> +
>>>> +   union {
>>>> +           /** @rsvd1: MBZ */
>>>> +           __u64 rsvd1[8];
>>>> +           struct {
>>>> +                   /**
>>>> +                    * @probed_cpu_visible_size: Memory probed by the
>>> driver
>>>> +                    * that is CPU accessible. (-1 = unknown).
>>>> +                    *
>>>> +                    * This will always be <= @probed_size, and the
>>>> +                    * remainder(if there is any) will not be CPU
>>>> +                    * accessible.
>>>> +                    */
>>>> +                   __u64 probed_cpu_visible_size;
>>>> +           };
>>>
>>> Trying to implement userspace support in Vulkan for this, I have an 
>>> additional
>>> question about the value of probed_cpu_visible_size.
>>>
>>> When is it set to -1?
>> I believe it is set to -1 if it is unknown, and/or not cpu accessible...
>>
>> Cheers!
>> ~Akeem
> 
> 
> So what should I expect on system memory?

I guess just probed_cpu_visible_size == probed_size. Or maybe we can 
just use -1 here?

> 
> What value is returned when all of probed_size is CPU visible on local 
> memory?

probed_size == probed_cpu_visible_size.
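
To make that a bit more concrete, a rough userspace sketch against the 
RFC header could look like the below (the probed_cpu_visible_size field 
is still only a proposal here, so treat the names as provisional; error 
handling trimmed):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch only: assumes a uapi header that already carries the
 * probed_cpu_visible_size field proposed in this RFC. */
static void report_lmem_bar(int fd)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_memory_regions *info;
	uint32_t i;

	ioctl(fd, DRM_IOCTL_I915_QUERY, &query); /* first pass: blob size */
	info = calloc(1, item.length);
	item.data_ptr = (uintptr_t)info;
	ioctl(fd, DRM_IOCTL_I915_QUERY, &query); /* second pass: fill blob */

	for (i = 0; i < info->num_regions; i++) {
		struct drm_i915_memory_region_info *r = &info->regions[i];

		if (r->region.memory_class != I915_MEMORY_CLASS_DEVICE)
			continue; /* smem: cpu_visible == probed_size */

		if (r->probed_cpu_visible_size < r->probed_size)
			printf("small BAR: %llu of %llu bytes CPU visible\n",
			       (unsigned long long)r->probed_cpu_visible_size,
			       (unsigned long long)r->probed_size);
	}
	free(info);
}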

> 
> 
> Thanks,
> 
> 
> -Lionel
> 
> 
>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>
>>> After that it should either be the entire lmem or something smaller.
>>>
>>>
>>> -Lionel
>>>
>>>
>>>> +   };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour,
>>>> +with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for the
>>>> +stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to create
>>>> +the object
>>>> + * with gem_create, and another to apply various parameters, however
>>>> +this
>>>> + * creates some ambiguity for the params which are considered
>>>> +immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +   /**
>>>> +    * @size: Requested size for the object.
>>>> +    *
>>>> +    * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +    *
>>>> +    * Note that for some devices we might have further minimum
>>>> +    * page-size restrictions(larger than 4K), like for device 
>>>> local-memory.
>>>> +    * However in general the final size here should always reflect any
>>>> +    * rounding up, if for example using the
>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +    * extension to place the object in device local-memory.
>>>> +    */
>>>> +   __u64 size;
>>>> +   /**
>>>> +    * @handle: Returned handle for the object.
>>>> +    *
>>>> +    * Object handles are nonzero.
>>>> +    */
>>>> +   __u32 handle;
>>>> +   /**
>>>> +    * @flags: Optional flags.
>>>> +    *
>>>> +    * Supported values:
>>>> +    *
>>>> +    * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the
>>> kernel that
>>>> +    * the object will need to be accessed via the CPU.
>>>> +    *
>>>> +    * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE,
>>> and
>>>> +    * only strictly required on platforms where only some of the 
>>>> device
>>>> +    * memory is directly visible or mappable through the CPU, like 
>>>> on DG2+.
>>>> +    *
>>>> +    * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM,
>>> to
>>>> +    * ensure we can always spill the allocation to system memory, 
>>>> if we
>>>> +    * can't place the object in the mappable part of
>>>> +    * I915_MEMORY_CLASS_DEVICE.
>>>> +    *
>>>> +    * Note that since the kernel only supports flat-CCS on objects 
>>>> that can
>>>> +    * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
>>> don't
>>>> +    * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
>>> together with
>>>> +    * flat-CCS.
>>>> +    *
>>>> +    * Without this hint, the kernel will assume that non-mappable
>>>> +    * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that
>>> the
>>>> +    * kernel can still migrate the object to the mappable part, as 
>>>> a last
>>>> +    * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +    * expensive, and so ideally should be avoided.
>>>> +    */
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +   __u32 flags;
>>>> +   /**
>>>> +    * @extensions: The chain of extensions to apply to this object.
>>>> +    *
>>>> +    * This will be useful in the future when we need to support 
>>>> several
>>>> +    * different extensions, and we need to apply more than one when
>>>> +    * creating the object. See struct i915_user_extension.
>>>> +    *
>>>> +    * If we don't supply any extensions then we get the same old
>>> gem_create
>>>> +    * behaviour.
>>>> +    *
>>>> +    * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +    * struct drm_i915_gem_create_ext_memory_regions.
>>>> +    *
>>>> +    * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +    * struct drm_i915_gem_create_ext_protected_content.
>>>> +    */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +   __u64 extensions;
>>>> +};
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma,
>>>> +returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + * struct drm_i915_query_vma_info info = {};
>>>> + * struct drm_i915_query_item item = {
>>>> + *         .data_ptr = (uintptr_t)&info,
>>>> + *         .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + * };
>>>> + * struct drm_i915_query query = {
>>>> + *         .num_items = 1,
>>>> + *         .items_ptr = (uintptr_t)&item,
>>>> + * };
>>>> + * int err;
>>>> + *
>>>> + * // Unlike some other types of queries, there is no need to first 
>>>> query
>>>> + * // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + * // time how big this needs to be.
>>>> + * item.length = sizeof(info);
>>>> + *
>>>> + * // Next we fill in the vm_id and ppGTT address of the vma we wish
>>>> + * // to query, before then firing off the query.
>>>> + * info.vm_id = vm_id;
>>>> + * info.offset = gtt_address;
>>>> + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + * if (err || item.length < 0) ...
>>>> + *
>>>> + * // If all went well we can now inspect the returned attributes.
>>>> + * if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +   /**
>>>> +    * @vm_id: The given vm id that contains the vma. The id is the 
>>>> value
>>>> +    * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +    * drm_i915_gem_vm_control.vm_id.
>>>> +    */
>>>> +   __u32 vm_id;
>>>> +   /** @pad: MBZ. */
>>>> +   __u32 pad;
>>>> +   /**
>>>> +    * @offset: The corresponding ppGTT address of the vma which the
>>> kernel
>>>> +    * will use to perform the lookup.
>>>> +    */
>>>> +   __u64 offset;
>>>> +   /**
>>>> +    * @attributes: The returned attributes for the given vma.
>>>> +    *
>>>> +    * Possible values:
>>>> +    *
>>>> +    * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages
>>> backing the
>>>> +    * vma are currently CPU accessible. If this is not set then the 
>>>> vma is
>>>> +    * currently backed by I915_MEMORY_CLASS_DEVICE memory, which
>>> the CPU
>>>> +    * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +    * a small BAR). Attempting to MMAP and fault such an object will
>>>> +    * require the kernel first synchronising any GPU work tied to the
>>>> +    * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +    * part of I915_MEMORY_CLASS_DEVICE, or
>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +    * placements permit it. See
>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +    *
>>>> +    * Note that this is inherently racy.
>>>> +    */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +   __u64 attributes;
>>>> +   /** @rsvd: MBZ */
>>>> +   __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device
>>>> +local-memory(i.e I915_MEMORY_CLASS_DEVICE), but in some cases the
>>>> +final BAR size might still be smaller than the total probed_size. In
>>>> +such cases, only some subset of I915_MEMORY_CLASS_DEVICE will be CPU
>>>> +accessible(for example the first 256M), while the remainder is only 
>>>> accessible
>>> via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in
>>>> +I915_MEMORY_CLASS_DEVICE, where underneath the device has a small
>>>> +BAR, meaning only some portion of it is CPU accessible. Without this
>>>> +flag the kernel will assume that CPU access is not required, and
>>>> +prioritize using the non-CPU visible portion of
>>> I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct __drm_i915_memory_region_info attribute which returns the total
>>>> +size of the CPU accessible portion, for the particular region. This
>>>> +should only be applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap
>>>> +with the VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the
>>>> +CPU visible portion, where the total size of the heap needs to be 
>>>> known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only
>>>> +support DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the
>>>> +object/vma is currently placed in memory that is accessible by the
>>>> +CPU. This should always be set on devices where the CPU
>>>> +probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE matches the
>>>> +probed_size. If this is not set then CPU faulting the object will 
>>>> likely first
>>> require migrating the pages.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_query_vma_info
>>>> +
>>>> +Error Capture restrictions
>>>> +--------------------------
>>>> +With error capture we have two new restrictions:
>>>> +
>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>> pages are not
>>>> +    CPU accessible, at the time of capture, then the kernel is free 
>>>> to skip
>>>> +    trying to capture them.
>>>> +
>>>> +    2) On discrete we now reject error capture on recoverable 
>>>> contexts. In the
>>>> +    future the kernel may want to blit during error capture, when 
>>>> for example
>>>> +    something is not currently CPU accessible.
>>>> diff --git a/Documentation/gpu/rfc/index.rst
>>>> b/Documentation/gpu/rfc/index.rst index 91e93a705230..5a3bd3924ba6
>>>> 100644
>>>> --- a/Documentation/gpu/rfc/index.rst
>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>    .. toctree::
>>>>
>>>>        i915_scheduler.rst
>>>> +
>>>> +.. toctree::
>>>> +
>>>> +    i915_small_bar.rst
> 
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-05-03  9:07         ` Matthew Auld
  0 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-05-03  9:07 UTC (permalink / raw)
  To: Lionel Landwerlin, Abodunrin, Akeem G, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 02/05/2022 19:03, Lionel Landwerlin wrote:
> On 02/05/2022 20:58, Abodunrin, Akeem G wrote:
>>
>>> -----Original Message-----
>>> From: Landwerlin, Lionel G <lionel.g.landwerlin@intel.com>
>>> Sent: Monday, May 2, 2022 12:55 AM
>>> To: Auld, Matthew <matthew.auld@intel.com>; 
>>> intel-gfx@lists.freedesktop.org
>>> Cc: dri-devel@lists.freedesktop.org; Thomas Hellström
>>> <thomas.hellstrom@linux.intel.com>; Bloomfield, Jon
>>> <jon.bloomfield@intel.com>; Daniel Vetter <daniel.vetter@ffwll.ch>; 
>>> Justen,
>>> Jordan L <jordan.l.justen@intel.com>; Kenneth Graunke
>>> <kenneth@whitecape.org>; Abodunrin, Akeem G
>>> <akeem.g.abodunrin@intel.com>; mesa-dev@lists.freedesktop.org
>>> Subject: Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
>>>
>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>     - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>     - Rework error capture interactions, including no longer needing
>>>>       NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>     - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>    Documentation/gpu/rfc/i915_small_bar.h   | 190
>>> +++++++++++++++++++++++
>>>>    Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>    Documentation/gpu/rfc/index.rst          |   4 +
>>>>    3 files changed, 252 insertions(+)
>>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as
>>>> +known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct
>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id
>>>> +DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +   /** @region: The class:instance pair encoding */
>>>> +   struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +   /** @rsvd0: MBZ */
>>>> +   __u32 rsvd0;
>>>> +
>>>> +   /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +   __u64 probed_size;
>>>> +
>>>> +   /** @unallocated_size: Estimate of memory remaining (-1 = unknown)
>>> */
>>>> +   __u64 unallocated_size;
>>>> +
>>>> +   union {
>>>> +           /** @rsvd1: MBZ */
>>>> +           __u64 rsvd1[8];
>>>> +           struct {
>>>> +                   /**
>>>> +                    * @probed_cpu_visible_size: Memory probed by the
>>> driver
>>>> +                    * that is CPU accessible. (-1 = unknown).
>>>> +                    *
>>>> +                    * This will always be <= @probed_size, and the
>>>> +                    * remainder(if there is any) will not be CPU
>>>> +                    * accessible.
>>>> +                    */
>>>> +                   __u64 probed_cpu_visible_size;
>>>> +           };
>>>
>>> Trying to implement userspace support in Vulkan for this, I have an 
>>> additional
>>> question about the value of probed_cpu_visible_size.
>>>
>>> When is it set to -1?
>> I believe it is set to -1 if it is unknown, and/or not cpu accessible...
>>
>> Cheers!
>> ~Akeem
> 
> 
> So what should I expect on system memory?

I guess just probed_cpu_visible_size == probed_size. Or maybe we can 
just use -1 here?

> 
> What value is returned when all of probed_size is CPU visible on local 
> memory?

probed_size == probed_cpu_visible_size.
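
And for the create side, opting an object into guaranteed CPU access 
with the proposed flag would look something like the sketch below 
(again only against this RFC, the flag is not in the released uapi; the 
SMEM placement is there because the flag requires a system memory spill 
target):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Sketch: I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS is only proposed in
 * this RFC, so assume a uapi header that already defines it. */
static uint32_t create_cpu_mappable_lmem_bo(int fd, uint64_t size)
{
	struct drm_i915_gem_memory_class_instance placements[] = {
		{ I915_MEMORY_CLASS_DEVICE, 0 },
		{ I915_MEMORY_CLASS_SYSTEM, 0 }, /* mandatory spill target */
	};
	struct drm_i915_gem_create_ext_memory_regions regions = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = 2,
		.regions = (uintptr_t)placements,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
		.extensions = (uintptr_t)&regions,
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create))
		return 0; /* handles are nonzero, so 0 doubles as failure */

	return create.handle;
}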

> 
> 
> Thanks,
> 
> 
> -Lionel
> 
> 
>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>
>>> After that it should either be the entire lmem or something smaller.
>>>
>>>
>>> -Lionel
>>>
>>>
>>>> +   };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour,
>>>> +with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for the
>>>> +stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to create
>>>> +the object
>>>> + * with gem_create, and another to apply various parameters, however
>>>> +this
>>>> + * creates some ambiguity for the params which are considered
>>>> +immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +   /**
>>>> +    * @size: Requested size for the object.
>>>> +    *
>>>> +    * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +    *
>>>> +    * Note that for some devices we might have further minimum
>>>> +    * page-size restrictions(larger than 4K), like for device 
>>>> local-memory.
>>>> +    * However in general the final size here should always reflect any
>>>> +    * rounding up, if for example using the
>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +    * extension to place the object in device local-memory.
>>>> +    */
>>>> +   __u64 size;
>>>> +   /**
>>>> +    * @handle: Returned handle for the object.
>>>> +    *
>>>> +    * Object handles are nonzero.
>>>> +    */
>>>> +   __u32 handle;
>>>> +   /**
>>>> +    * @flags: Optional flags.
>>>> +    *
>>>> +    * Supported values:
>>>> +    *
>>>> +    * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the
>>> kernel that
>>>> +    * the object will need to be accessed via the CPU.
>>>> +    *
>>>> +    * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE,
>>> and
>>>> +    * only strictly required on platforms where only some of the 
>>>> device
>>>> +    * memory is directly visible or mappable through the CPU, like 
>>>> on DG2+.
>>>> +    *
>>>> +    * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM,
>>> to
>>>> +    * ensure we can always spill the allocation to system memory, 
>>>> if we
>>>> +    * can't place the object in the mappable part of
>>>> +    * I915_MEMORY_CLASS_DEVICE.
>>>> +    *
>>>> +    * Note that since the kernel only supports flat-CCS on objects 
>>>> that can
>>>> +    * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
>>> don't
>>>> +    * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
>>> together with
>>>> +    * flat-CCS.
>>>> +    *
>>>> +    * Without this hint, the kernel will assume that non-mappable
>>>> +    * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note that
>>> the
>>>> +    * kernel can still migrate the object to the mappable part, as 
>>>> a last
>>>> +    * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +    * expensive, and so ideally should be avoided.
>>>> +    */
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +   __u32 flags;
>>>> +   /**
>>>> +    * @extensions: The chain of extensions to apply to this object.
>>>> +    *
>>>> +    * This will be useful in the future when we need to support 
>>>> several
>>>> +    * different extensions, and we need to apply more than one when
>>>> +    * creating the object. See struct i915_user_extension.
>>>> +    *
>>>> +    * If we don't supply any extensions then we get the same old
>>> gem_create
>>>> +    * behaviour.
>>>> +    *
>>>> +    * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +    * struct drm_i915_gem_create_ext_memory_regions.
>>>> +    *
>>>> +    * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +    * struct drm_i915_gem_create_ext_protected_content.
>>>> +    */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +   __u64 extensions;
>>>> +};
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma,
>>>> +returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + * struct drm_i915_query_vma_info info = {};
>>>> + * struct drm_i915_query_item item = {
>>>> + *         .data_ptr = (uintptr_t)&info,
>>>> + *         .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + * };
>>>> + * struct drm_i915_query query = {
>>>> + *         .num_items = 1,
>>>> + *         .items_ptr = (uintptr_t)&item,
>>>> + * };
>>>> + * int err;
>>>> + *
>>>> + * // Unlike some other types of queries, there is no need to first 
>>>> query
>>>> + * // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + * // time how big this needs to be.
>>>> + * item.length = sizeof(info);
>>>> + *
>>>> + * // Next we fill in the vm_id and ppGTT address of the vma we wish
>>>> + * // to query, before then firing off the query.
>>>> + * info.vm_id = vm_id;
>>>> + * info.offset = gtt_address;
>>>> + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + * if (err || item.length < 0) ...
>>>> + *
>>>> + * // If all went well we can now inspect the returned attributes.
>>>> + * if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +   /**
>>>> +    * @vm_id: The given vm id that contains the vma. The id is the 
>>>> value
>>>> +    * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +    * drm_i915_gem_vm_control.vm_id.
>>>> +    */
>>>> +   __u32 vm_id;
>>>> +   /** @pad: MBZ. */
>>>> +   __u32 pad;
>>>> +   /**
>>>> +    * @offset: The corresponding ppGTT address of the vma which the
>>> kernel
>>>> +    * will use to perform the lookup.
>>>> +    */
>>>> +   __u64 offset;
>>>> +   /**
>>>> +    * @attributes: The returned attributes for the given vma.
>>>> +    *
>>>> +    * Possible values:
>>>> +    *
>>>> +    * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages
>>> backing the
>>>> +    * vma are currently CPU accessible. If this is not set then the 
>>>> vma is
>>>> +    * currently backed by I915_MEMORY_CLASS_DEVICE memory, which
>>> the CPU
>>>> +    * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +    * a small BAR). Attempting to MMAP and fault such an object will
>>>> +    * require the kernel first synchronising any GPU work tied to the
>>>> +    * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +    * part of I915_MEMORY_CLASS_DEVICE, or
>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +    * placements permit it. See
>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +    *
>>>> +    * Note that this is inherently racy.
>>>> +    */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +   __u64 attributes;
>>>> +   /** @rsvd: MBZ */
>>>> +   __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device
>>>> +local-memory(i.e I915_MEMORY_CLASS_DEVICE), but in some cases the
>>>> +final BAR size might still be smaller than the total probed_size. In
>>>> +such cases, only some subset of I915_MEMORY_CLASS_DEVICE will be CPU
>>>> +accessible(for example the first 256M), while the remainder is only 
>>>> accessible
>>> via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in
>>>> +I915_MEMORY_CLASS_DEVICE, where underneath the device has a small
>>>> +BAR, meaning only some portion of it is CPU accessible. Without this
>>>> +flag the kernel will assume that CPU access is not required, and
>>>> +prioritize using the non-CPU visible portion of
>>> I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct __drm_i915_memory_region_info attribute which returns the total
>>>> +size of the CPU accessible portion, for the particular region. This
>>>> +should only be applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap
>>>> +with the VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the
>>>> +CPU visible portion, where the total size of the heap needs to be 
>>>> known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only
>>>> +support DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the
>>>> +object/vma is currently placed in memory that is accessible by the
>>>> +CPU. This should always be set on devices where the CPU
>>>> +probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE matches the
>>>> +probed_size. If this is not set then CPU faulting the object will 
>>>> likely first
>>> require migrating the pages.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_query_vma_info
>>>> +
>>>> +Error Capture restrictions
>>>> +--------------------------
>>>> +With error capture we have two new restrictions:
>>>> +
>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>> pages are not
>>>> +    CPU accessible, at the time of capture, then the kernel is free 
>>>> to skip
>>>> +    trying to capture them.
>>>> +
>>>> +    2) On discrete we now reject error capture on recoverable 
>>>> contexts. In the
>>>> +    future the kernel may want to blit during error capture, when 
>>>> for example
>>>> +    something is not currently CPU accessible.
>>>> diff --git a/Documentation/gpu/rfc/index.rst
>>>> b/Documentation/gpu/rfc/index.rst index 91e93a705230..5a3bd3924ba6
>>>> 100644
>>>> --- a/Documentation/gpu/rfc/index.rst
>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>    .. toctree::
>>>>
>>>>        i915_scheduler.rst
>>>> +
>>>> +.. toctree::
>>>> +
>>>> +    i915_small_bar.rst
> 
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-03  9:07         ` [Intel-gfx] " Matthew Auld
@ 2022-05-03  9:15           ` Lionel Landwerlin
  -1 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-03  9:15 UTC (permalink / raw)
  To: Matthew Auld, Abodunrin, Akeem G, intel-gfx
  Cc: Thomas Hellström, Justen, Jordan L, dri-devel,
	Kenneth Graunke, Bloomfield, Jon, Daniel Vetter, mesa-dev

On 03/05/2022 12:07, Matthew Auld wrote:
> On 02/05/2022 19:03, Lionel Landwerlin wrote:
>> On 02/05/2022 20:58, Abodunrin, Akeem G wrote:
>>>
>>>> -----Original Message-----
>>>> From: Landwerlin, Lionel G <lionel.g.landwerlin@intel.com>
>>>> Sent: Monday, May 2, 2022 12:55 AM
>>>> To: Auld, Matthew <matthew.auld@intel.com>; 
>>>> intel-gfx@lists.freedesktop.org
>>>> Cc: dri-devel@lists.freedesktop.org; Thomas Hellström
>>>> <thomas.hellstrom@linux.intel.com>; Bloomfield, Jon
>>>> <jon.bloomfield@intel.com>; Daniel Vetter <daniel.vetter@ffwll.ch>; 
>>>> Justen,
>>>> Jordan L <jordan.l.justen@intel.com>; Kenneth Graunke
>>>> <kenneth@whitecape.org>; Abodunrin, Akeem G
>>>> <akeem.g.abodunrin@intel.com>; mesa-dev@lists.freedesktop.org
>>>> Subject: Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
>>>>
>>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>>
>>>>> v2:
>>>>>     - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>>     - Rework error capture interactions, including no longer needing
>>>>>       NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>>     - Add probed_cpu_visible_size. (Lionel)
>>>>>
>>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>>> Cc: mesa-dev@lists.freedesktop.org
>>>>> ---
>>>>>    Documentation/gpu/rfc/i915_small_bar.h   | 190
>>>> +++++++++++++++++++++++
>>>>> Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>>    Documentation/gpu/rfc/index.rst          |   4 +
>>>>>    3 files changed, 252 insertions(+)
>>>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>>
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h
>>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> new file mode 100644
>>>>> index 000000000000..7bfd0cf44d35
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> @@ -0,0 +1,190 @@
>>>>> +/**
>>>>> + * struct __drm_i915_memory_region_info - Describes one region as
>>>>> +known to the
>>>>> + * driver.
>>>>> + *
>>>>> + * Note this is using both struct drm_i915_query_item and struct
>>>> drm_i915_query.
>>>>> + * For this new query we are adding the new query id
>>>>> +DRM_I915_QUERY_MEMORY_REGIONS
>>>>> + * at &drm_i915_query_item.query_id.
>>>>> + */
>>>>> +struct __drm_i915_memory_region_info {
>>>>> +   /** @region: The class:instance pair encoding */
>>>>> +   struct drm_i915_gem_memory_class_instance region;
>>>>> +
>>>>> +   /** @rsvd0: MBZ */
>>>>> +   __u32 rsvd0;
>>>>> +
>>>>> +   /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>>> +   __u64 probed_size;
>>>>> +
>>>>> +   /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>>> unknown)
>>>> */
>>>>> +   __u64 unallocated_size;
>>>>> +
>>>>> +   union {
>>>>> +           /** @rsvd1: MBZ */
>>>>> +           __u64 rsvd1[8];
>>>>> +           struct {
>>>>> +                   /**
>>>>> +                    * @probed_cpu_visible_size: Memory probed by the
>>>> driver
>>>>> +                    * that is CPU accessible. (-1 = unknown).
>>>>> +                    *
>>>>> +                    * This will always be <= @probed_size, and 
>>>>> the
>>>>> +                    * remainder(if there is any) will not be CPU
>>>>> +                    * accessible.
>>>>> +                    */
>>>>> +                   __u64 probed_cpu_visible_size;
>>>>> +           };
>>>>
>>>> Trying to implement userspace support in Vulkan for this, I have an 
>>>> additional
>>>> question about the value of probed_cpu_visible_size.
>>>>
>>>> When is it set to -1?
>>> I believe it is set to -1 if it is unknown, and/or not cpu 
>>> accessible...
>>>
>>> Cheers!
>>> ~Akeem
>>
>>
>> So what should I expect on system memory?
>
> I guess just probed_cpu_visible_size == probed_size. Or maybe we can 
> just use -1 here?
>
>>
>> What value is returned when all of probed_size is CPU visible on 
>> local memory?
>
> probed_size == probed_cpu_visible_size.


Thanks, looks good to me.

Then maybe we should update the comment to say that.

Looks like there are no cases where we'll get -1.
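
For reference, the way I'd expect this to feed into the Anv heap setup 
is roughly the following sketch (hand-written here, not actual driver 
code; it just splits lmem using the proposed field):

#include <vulkan/vulkan.h>
#include <drm/i915_drm.h>

/* Sketch, not actual Anv code: split lmem into a CPU-visible heap and a
 * non-CPU-visible heap using the proposed probed_cpu_visible_size field. */
static void fill_lmem_heaps(const struct drm_i915_memory_region_info *lmem,
			    VkMemoryHeap heaps[2])
{
	/* Mappable slice of lmem: memory types on this heap would also
	 * advertise VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT. */
	heaps[0].size = lmem->probed_cpu_visible_size;
	heaps[0].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;

	/* Non-mappable remainder: device-local only, GPU access only. */
	heaps[1].size = lmem->probed_size - lmem->probed_cpu_visible_size;
	heaps[1].flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT;
}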


-Lionel


>
>>
>>
>> Thanks,
>>
>>
>> -Lionel
>>
>>
>>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>>
>>>> After that it should either be the entire lmem or something smaller.
>>>>
>>>>
>>>> -Lionel
>>>>
>>>>
>>>>> +   };
>>>>> +};
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour,
>>>>> +with added
>>>>> + * extension support using struct i915_user_extension.
>>>>> + *
>>>>> + * Note that new buffer flags should be added here, at least for the
>>>>> +stuff that
>>>>> + * is immutable. Previously we would have two ioctls, one to create
>>>>> +the object
>>>>> + * with gem_create, and another to apply various parameters, however
>>>>> +this
>>>>> + * creates some ambiguity for the params which are considered
>>>>> +immutable. Also in
>>>>> + * general we're phasing out the various SET/GET ioctls.
>>>>> + */
>>>>> +struct __drm_i915_gem_create_ext {
>>>>> +   /**
>>>>> +    * @size: Requested size for the object.
>>>>> +    *
>>>>> +    * The (page-aligned) allocated size for the object will be 
>>>>> returned.
>>>>> +    *
>>>>> +    * Note that for some devices we might have further minimum
>>>>> +    * page-size restrictions(larger than 4K), like for device 
>>>>> local-memory.
>>>>> +    * However in general the final size here should always 
>>>>> reflect any
>>>>> +    * rounding up, if for example using the
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>>> +    * extension to place the object in device local-memory.
>>>>> +    */
>>>>> +   __u64 size;
>>>>> +   /**
>>>>> +    * @handle: Returned handle for the object.
>>>>> +    *
>>>>> +    * Object handles are nonzero.
>>>>> +    */
>>>>> +   __u32 handle;
>>>>> +   /**
>>>>> +    * @flags: Optional flags.
>>>>> +    *
>>>>> +    * Supported values:
>>>>> +    *
>>>>> +    * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the
>>>> kernel that
>>>>> +    * the object will need to be accessed via the CPU.
>>>>> +    *
>>>>> +    * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE,
>>>> and
>>>>> +    * only strictly required on platforms where only some of the 
>>>>> device
>>>>> +    * memory is directly visible or mappable through the CPU, 
>>>>> like on DG2+.
>>>>> +    *
>>>>> +    * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM,
>>>> to
>>>>> +    * ensure we can always spill the allocation to system memory, 
>>>>> if we
>>>>> +    * can't place the object in the mappable part of
>>>>> +    * I915_MEMORY_CLASS_DEVICE.
>>>>> +    *
>>>>> +    * Note that since the kernel only supports flat-CCS on 
>>>>> objects that can
>>>>> +    * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
>>>> don't
>>>>> +    * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
>>>> together with
>>>>> +    * flat-CCS.
>>>>> +    *
>>>>> +    * Without this hint, the kernel will assume that non-mappable
>>>>> +    * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>>> that
>>>> the
>>>>> +    * kernel can still migrate the object to the mappable part, 
>>>>> as a last
>>>>> +    * resort, if userspace ever CPU faults this object, but this 
>>>>> might be
>>>>> +    * expensive, and so ideally should be avoided.
>>>>> +    */
>>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>>> +   __u32 flags;
>>>>> +   /**
>>>>> +    * @extensions: The chain of extensions to apply to this object.
>>>>> +    *
>>>>> +    * This will be useful in the future when we need to support 
>>>>> several
>>>>> +    * different extensions, and we need to apply more than one when
>>>>> +    * creating the object. See struct i915_user_extension.
>>>>> +    *
>>>>> +    * If we don't supply any extensions then we get the same old
>>>> gem_create
>>>>> +    * behaviour.
>>>>> +    *
>>>>> +    * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>>> +    * struct drm_i915_gem_create_ext_memory_regions.
>>>>> +    *
>>>>> +    * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>>> +    * struct drm_i915_gem_create_ext_protected_content.
>>>>> +    */
>>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>>> +   __u64 extensions;
>>>>> +};
>>>>> +
>>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_query_vma_info
>>>>> + *
>>>>> + * Given a vm and GTT address, lookup the corresponding vma,
>>>>> +returning its set
>>>>> + * of attributes.
>>>>> + *
>>>>> + * .. code-block:: C
>>>>> + *
>>>>> + * struct drm_i915_query_vma_info info = {};
>>>>> + * struct drm_i915_query_item item = {
>>>>> + *         .data_ptr = (uintptr_t)&info,
>>>>> + *         .query_id = DRM_I915_QUERY_VMA_INFO,
>>>>> + * };
>>>>> + * struct drm_i915_query query = {
>>>>> + *         .num_items = 1,
>>>>> + *         .items_ptr = (uintptr_t)&item,
>>>>> + * };
>>>>> + * int err;
>>>>> + *
>>>>> + * // Unlike some other types of queries, there is no need to 
>>>>> first query
>>>>> + * // the size of the data_ptr blob here, since we already know 
>>>>> ahead of
>>>>> + * // time how big this needs to be.
>>>>> + * item.length = sizeof(info);
>>>>> + *
>>>>> + * // Next we fill in the vm_id and ppGTT address of the vma we wish
>>>>> + * // to query, before then firing off the query.
>>>>> + * info.vm_id = vm_id;
>>>>> + * info.offset = gtt_address;
>>>>> + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>>> + * if (err || item.length < 0) ...
>>>>> + *
>>>>> + * // If all went well we can now inspect the returned attributes.
>>>>> + * if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>>> + */
>>>>> +struct __drm_i915_query_vma_info {
>>>>> +   /**
>>>>> +    * @vm_id: The given vm id that contains the vma. The id is 
>>>>> the value
>>>>> +    * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>>> +    * drm_i915_gem_vm_control.vm_id.
>>>>> +    */
>>>>> +   __u32 vm_id;
>>>>> +   /** @pad: MBZ. */
>>>>> +   __u32 pad;
>>>>> +   /**
>>>>> +    * @offset: The corresponding ppGTT address of the vma which the
>>>> kernel
>>>>> +    * will use to perform the lookup.
>>>>> +    */
>>>>> +   __u64 offset;
>>>>> +   /**
>>>>> +    * @attributes: The returned attributes for the given vma.
>>>>> +    *
>>>>> +    * Possible values:
>>>>> +    *
>>>>> +    * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages
>>>> backing the
>>>>> +    * vma are currently CPU accessible. If this is not set then 
>>>>> the vma is
>>>>> +    * currently backed by I915_MEMORY_CLASS_DEVICE memory, which
>>>> the CPU
>>>>> +    * cannot directly access(this is only possible on discrete 
>>>>> devices with
>>>>> +    * a small BAR). Attempting to MMAP and fault such an object will
>>>>> +    * require the kernel first synchronising any GPU work tied to 
>>>>> the
>>>>> +    * object, before then migrating the pages, either to the CPU 
>>>>> accessible
>>>>> +    * part of I915_MEMORY_CLASS_DEVICE, or
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>>> +    * placements permit it. See
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>>> +    *
>>>>> +    * Note that this is inherently racy.
>>>>> +    */
>>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>>> +   __u64 attributes;
>>>>> +   /** @rsvd: MBZ */
>>>>> +   __u32 rsvd[4];
>>>>> +};
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> new file mode 100644
>>>>> index 000000000000..be3d9bcdd86d
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> @@ -0,0 +1,58 @@
>>>>> +==========================
>>>>> +I915 Small BAR RFC Section
>>>>> +==========================
>>>>> +Starting from DG2 we will have resizable BAR support for device
>>>>> +local-memory(i.e I915_MEMORY_CLASS_DEVICE), but in some cases the
>>>>> +final BAR size might still be smaller than the total probed_size. In
>>>>> +such cases, only some subset of I915_MEMORY_CLASS_DEVICE will be CPU
>>>>> +accessible(for example the first 256M), while the remainder is 
>>>>> only accessible
>>>> via the GPU.
>>>>> +
>>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>>> +----------------------------------------------
>>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>>> CPU access.
>>>>> +This becomes important when placing an object in
>>>>> +I915_MEMORY_CLASS_DEVICE, where underneath the device has a small
>>>>> +BAR, meaning only some portion of it is CPU accessible. Without this
>>>>> +flag the kernel will assume that CPU access is not required, and
>>>>> +prioritize using the non-CPU visible portion of
>>>> I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_gem_create_ext
>>>>> +
>>>>> +probed_cpu_visible_size attribute
>>>>> +---------------------------------
>>>>> +New struct __drm_i915_memory_region_info attribute which returns the total
>>>>> +size of the CPU accessible portion, for the particular region. This
>>>>> +should only be applicable for I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap
>>>>> +with the VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the
>>>>> +CPU visible portion, where the total size of the heap needs to be 
>>>>> known.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_memory_region_info
>>>>> +
>>>>> +DRM_I915_QUERY_VMA_INFO query
>>>>> +-----------------------------
>>>>> +Query the attributes of some vma. Given a vm and GTT offset, find 
>>>>> the
>>>>> +respective vma, and return its set of attributes. For now we only
>>>>> +support DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the
>>>>> +object/vma is currently placed in memory that is accessible by the
>>>>> +CPU. This should always be set on devices where the CPU
>>>>> +probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE matches the
>>>>> +probed_size. If this is not set then CPU faulting the object will 
>>>>> likely first
>>>> require migrating the pages.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_query_vma_info
>>>>> +
>>>>> +Error Capture restrictions
>>>>> +--------------------------
>>>>> +With error capture we have two new restrictions:
>>>>> +
>>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>>> pages are not
>>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>>> free to skip
>>>>> +    trying to capture them.
>>>>> +
>>>>> +    2) On discrete we now reject error capture on recoverable 
>>>>> contexts. In the
>>>>> +    future the kernel may want to blit during error capture, when 
>>>>> for example
>>>>> +    something is not currently CPU accessible.
>>>>> diff --git a/Documentation/gpu/rfc/index.rst
>>>>> b/Documentation/gpu/rfc/index.rst index 91e93a705230..5a3bd3924ba6
>>>>> 100644
>>>>> --- a/Documentation/gpu/rfc/index.rst
>>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>>    .. toctree::
>>>>>
>>>>>        i915_scheduler.rst
>>>>> +
>>>>> +.. toctree::
>>>>> +
>>>>> +    i915_small_bar.rst
>>
>>


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-05-03  9:15           ` Lionel Landwerlin
  0 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-03  9:15 UTC (permalink / raw)
  To: Matthew Auld, Abodunrin, Akeem G, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 03/05/2022 12:07, Matthew Auld wrote:
> On 02/05/2022 19:03, Lionel Landwerlin wrote:
>> On 02/05/2022 20:58, Abodunrin, Akeem G wrote:
>>>
>>>> -----Original Message-----
>>>> From: Landwerlin, Lionel G <lionel.g.landwerlin@intel.com>
>>>> Sent: Monday, May 2, 2022 12:55 AM
>>>> To: Auld, Matthew <matthew.auld@intel.com>; 
>>>> intel-gfx@lists.freedesktop.org
>>>> Cc: dri-devel@lists.freedesktop.org; Thomas Hellström
>>>> <thomas.hellstrom@linux.intel.com>; Bloomfield, Jon
>>>> <jon.bloomfield@intel.com>; Daniel Vetter <daniel.vetter@ffwll.ch>; 
>>>> Justen,
>>>> Jordan L <jordan.l.justen@intel.com>; Kenneth Graunke
>>>> <kenneth@whitecape.org>; Abodunrin, Akeem G
>>>> <akeem.g.abodunrin@intel.com>; mesa-dev@lists.freedesktop.org
>>>> Subject: Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
>>>>
>>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>>
>>>>> v2:
>>>>>     - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>>     - Rework error capture interactions, including no longer needing
>>>>>       NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>>     - Add probed_cpu_visible_size. (Lionel)
>>>>>
>>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>>> Cc: mesa-dev@lists.freedesktop.org
>>>>> ---
>>>>>    Documentation/gpu/rfc/i915_small_bar.h   | 190
>>>> +++++++++++++++++++++++
>>>>> Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>>    Documentation/gpu/rfc/index.rst          |   4 +
>>>>>    3 files changed, 252 insertions(+)
>>>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>>    create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>>
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h
>>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> new file mode 100644
>>>>> index 000000000000..7bfd0cf44d35
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> @@ -0,0 +1,190 @@
>>>>> +/**
>>>>> + * struct __drm_i915_memory_region_info - Describes one region as
>>>>> +known to the
>>>>> + * driver.
>>>>> + *
>>>>> + * Note this is using both struct drm_i915_query_item and struct
>>>> drm_i915_query.
>>>>> + * For this new query we are adding the new query id
>>>>> +DRM_I915_QUERY_MEMORY_REGIONS
>>>>> + * at &drm_i915_query_item.query_id.
>>>>> + */
>>>>> +struct __drm_i915_memory_region_info {
>>>>> +   /** @region: The class:instance pair encoding */
>>>>> +   struct drm_i915_gem_memory_class_instance region;
>>>>> +
>>>>> +   /** @rsvd0: MBZ */
>>>>> +   __u32 rsvd0;
>>>>> +
>>>>> +   /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>>> +   __u64 probed_size;
>>>>> +
>>>>> +   /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>>> unknown)
>>>> */
>>>>> +   __u64 unallocated_size;
>>>>> +
>>>>> +   union {
>>>>> +           /** @rsvd1: MBZ */
>>>>> +           __u64 rsvd1[8];
>>>>> +           struct {
>>>>> +                   /**
>>>>> +                    * @probed_cpu_visible_size: Memory probed by the
>>>> driver
>>>>> +                    * that is CPU accessible. (-1 = unknown).
>>>>> +                    *
>>>>> +                    * This will always be <= @probed_size, and 
>>>>> the
>>>>> +                    * remainder(if there is any) will not be CPU
>>>>> +                    * accessible.
>>>>> +                    */
>>>>> +                   __u64 probed_cpu_visible_size;
>>>>> +           };
>>>>
>>>> Trying to implement userspace support in Vulkan for this, I have an 
>>>> additional
>>>> question about the value of probed_cpu_visible_size.
>>>>
>>>> When is it set to -1?
>>> I believe it is set to -1 if it is unknown, and/or not cpu 
>>> accessible...
>>>
>>> Cheers!
>>> ~Akeem
>>
>>
>> So what should I expect on system memory?
>
> I guess just probed_cpu_visible_size == probed_size. Or maybe we can 
> just use -1 here?
>
>>
>> What value is returned when all of probed_size is CPU visible on 
>> local memory?
>
> probed_size == probed_cpu_visible_size.


Thanks, looks good to me.

Then maybe we should update the comment to say that.

Looks like there are no cases where we'll get -1.


-Lionel


>
>>
>>
>> Thanks,
>>
>>
>> -Lionel
>>
>>
>>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>>
>>>> After that it should either be the entire lmem or something smaller.
>>>>
>>>>
>>>> -Lionel
>>>>
>>>>
>>>>> +   };
>>>>> +};
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour,
>>>>> +with added
>>>>> + * extension support using struct i915_user_extension.
>>>>> + *
>>>>> + * Note that new buffer flags should be added here, at least for the
>>>>> +stuff that
>>>>> + * is immutable. Previously we would have two ioctls, one to create
>>>>> +the object
>>>>> + * with gem_create, and another to apply various parameters, however
>>>>> +this
>>>>> + * creates some ambiguity for the params which are considered
>>>>> +immutable. Also in
>>>>> + * general we're phasing out the various SET/GET ioctls.
>>>>> + */
>>>>> +struct __drm_i915_gem_create_ext {
>>>>> +   /**
>>>>> +    * @size: Requested size for the object.
>>>>> +    *
>>>>> +    * The (page-aligned) allocated size for the object will be 
>>>>> returned.
>>>>> +    *
>>>>> +    * Note that for some devices we might have further minimum
>>>>> +    * page-size restrictions(larger than 4K), like for device 
>>>>> local-memory.
>>>>> +    * However in general the final size here should always 
>>>>> reflect any
>>>>> +    * rounding up, if for example using the
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>>> +    * extension to place the object in device local-memory.
>>>>> +    */
>>>>> +   __u64 size;
>>>>> +   /**
>>>>> +    * @handle: Returned handle for the object.
>>>>> +    *
>>>>> +    * Object handles are nonzero.
>>>>> +    */
>>>>> +   __u32 handle;
>>>>> +   /**
>>>>> +    * @flags: Optional flags.
>>>>> +    *
>>>>> +    * Supported values:
>>>>> +    *
>>>>> +    * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the
>>>> kernel that
>>>>> +    * the object will need to be accessed via the CPU.
>>>>> +    *
>>>>> +    * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE,
>>>> and
>>>>> +    * only strictly required on platforms where only some of the 
>>>>> device
>>>>> +    * memory is directly visible or mappable through the CPU, 
>>>>> like on DG2+.
>>>>> +    *
>>>>> +    * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM,
>>>> to
>>>>> +    * ensure we can always spill the allocation to system memory, 
>>>>> if we
>>>>> +    * can't place the object in the mappable part of
>>>>> +    * I915_MEMORY_CLASS_DEVICE.
>>>>> +    *
>>>>> +    * Note that since the kernel only supports flat-CCS on 
>>>>> objects that can
>>>>> +    * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore
>>>> don't
>>>>> +    * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS
>>>> together with
>>>>> +    * flat-CCS.
>>>>> +    *
>>>>> +    * Without this hint, the kernel will assume that non-mappable
>>>>> +    * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>>> that
>>>> the
>>>>> +    * kernel can still migrate the object to the mappable part, 
>>>>> as a last
>>>>> +    * resort, if userspace ever CPU faults this object, but this 
>>>>> might be
>>>>> +    * expensive, and so ideally should be avoided.
>>>>> +    */
>>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>>> +   __u32 flags;
>>>>> +   /**
>>>>> +    * @extensions: The chain of extensions to apply to this object.
>>>>> +    *
>>>>> +    * This will be useful in the future when we need to support 
>>>>> several
>>>>> +    * different extensions, and we need to apply more than one when
>>>>> +    * creating the object. See struct i915_user_extension.
>>>>> +    *
>>>>> +    * If we don't supply any extensions then we get the same old
>>>> gem_create
>>>>> +    * behaviour.
>>>>> +    *
>>>>> +    * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>>> +    * struct drm_i915_gem_create_ext_memory_regions.
>>>>> +    *
>>>>> +    * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>>> +    * struct drm_i915_gem_create_ext_protected_content.
>>>>> +    */
>>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>>> +   __u64 extensions;
>>>>> +};
>>>>> +
>>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_query_vma_info
>>>>> + *
>>>>> + * Given a vm and GTT address, lookup the corresponding vma,
>>>>> +returning its set
>>>>> + * of attributes.
>>>>> + *
>>>>> + * .. code-block:: C
>>>>> + *
>>>>> + * struct drm_i915_query_vma_info info = {};
>>>>> + * struct drm_i915_query_item item = {
>>>>> + *         .data_ptr = (uintptr_t)&info,
>>>>> + *         .query_id = DRM_I915_QUERY_VMA_INFO,
>>>>> + * };
>>>>> + * struct drm_i915_query query = {
>>>>> + *         .num_items = 1,
>>>>> + *         .items_ptr = (uintptr_t)&item,
>>>>> + * };
>>>>> + * int err;
>>>>> + *
>>>>> + * // Unlike some other types of queries, there is no need to 
>>>>> first query
>>>>> + * // the size of the data_ptr blob here, since we already know 
>>>>> ahead of
>>>>> + * // time how big this needs to be.
>>>>> + * item.length = sizeof(info);
>>>>> + *
>>>>> + * // Next we fill in the vm_id and ppGTT address of the vma we wish
>>>>> + * // to query, before then firing off the query.
>>>>> + * info.vm_id = vm_id;
>>>>> + * info.offset = gtt_address;
>>>>> + * err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>>> + * if (err || item.length < 0) ...
>>>>> + *
>>>>> + * // If all went well we can now inspect the returned attributes.
>>>>> + * if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>>> + */
>>>>> +struct __drm_i915_query_vma_info {
>>>>> +   /**
>>>>> +    * @vm_id: The given vm id that contains the vma. The id is 
>>>>> the value
>>>>> +    * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>>> +    * drm_i915_gem_vm_control.vm_id.
>>>>> +    */
>>>>> +   __u32 vm_id;
>>>>> +   /** @pad: MBZ. */
>>>>> +   __u32 pad;
>>>>> +   /**
>>>>> +    * @offset: The corresponding ppGTT address of the vma which the
>>>> kernel
>>>>> +    * will use to perform the lookup.
>>>>> +    */
>>>>> +   __u64 offset;
>>>>> +   /**
>>>>> +    * @attributes: The returned attributes for the given vma.
>>>>> +    *
>>>>> +    * Possible values:
>>>>> +    *
>>>>> +    * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages
>>>> backing the
>>>>> +    * vma are currently CPU accessible. If this is not set then 
>>>>> the vma is
>>>>> +    * currently backed by I915_MEMORY_CLASS_DEVICE memory, which
>>>> the CPU
>>>>> +    * cannot directly access(this is only possible on discrete 
>>>>> devices with
>>>>> +    * a small BAR). Attempting to MMAP and fault such an object will
>>>>> +    * require the kernel first synchronising any GPU work tied to 
>>>>> the
>>>>> +    * object, before then migrating the pages, either to the CPU 
>>>>> accessible
>>>>> +    * part of I915_MEMORY_CLASS_DEVICE, or
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>>> +    * placements permit it. See
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>>> +    *
>>>>> +    * Note that this is inherently racy.
>>>>> +    */
>>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>>> +   __u64 attributes;
>>>>> +   /** @rsvd: MBZ */
>>>>> +   __u32 rsvd[4];
>>>>> +};
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> new file mode 100644
>>>>> index 000000000000..be3d9bcdd86d
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> @@ -0,0 +1,58 @@
>>>>> +==========================
>>>>> +I915 Small BAR RFC Section
>>>>> +==========================
>>>>> +Starting from DG2 we will have resizable BAR support for device
>>>>> +local-memory(i.e I915_MEMORY_CLASS_DEVICE), but in some cases the
>>>>> +final BAR size might still be smaller than the total probed_size. In
>>>>> +such cases, only some subset of I915_MEMORY_CLASS_DEVICE will be CPU
>>>>> +accessible(for example the first 256M), while the remainder is 
>>>>> only accessible
>>>> via the GPU.
>>>>> +
>>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>>> +----------------------------------------------
>>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>>> CPU access.
>>>>> +This becomes important when placing an object in
>>>>> +I915_MEMORY_CLASS_DEVICE, where underneath the device has a small
>>>>> +BAR, meaning only some portion of it is CPU accessible. Without this
>>>>> +flag the kernel will assume that CPU access is not required, and
>>>>> +prioritize using the non-CPU visible portion of I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_gem_create_ext
>>>>> +
>>>>> +probed_cpu_visible_size attribute
>>>>> +---------------------------------
>>>>> +New struct__drm_i915_memory_region attribute which returns the total
>>>>> +size of the CPU accessible portion, for the particular region. This
>>>>> +should only be applicable for I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap
>>>>> +with the VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the
>>>>> +CPU visible portion, where the total size of the heap needs to be 
>>>>> known.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_memory_region_info
>>>>> +
>>>>> +DRM_I915_QUERY_VMA_INFO query
>>>>> +-----------------------------
>>>>> +Query the attributes of some vma. Given a vm and GTT offset, find 
>>>>> the
>>>>> +respective vma, and return its set of attributes. For now we only
>>>>> +support DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the
>>>>> +object/vma is currently placed in memory that is accessible by the
>>>>> +CPU. This should always be set on devices where the CPU
>>>>> +probed_cpu_visible_size of I915_MEMORY_CLASS_DEVICE matches the
>>>>> +probed_size. If this is not set then CPU faulting the object will likely first
>>>>> +require migrating the pages.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_query_vma_info
>>>>> +
>>>>> +Error Capture restrictions
>>>>> +--------------------------
>>>>> +With error capture we have two new restrictions:
>>>>> +
>>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>>> pages are not
>>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>>> free to skip
>>>>> +    trying to capture them.
>>>>> +
>>>>> +    2) On discrete we now reject error capture on recoverable 
>>>>> contexts. In the
>>>>> +    future the kernel may want to blit during error capture, when 
>>>>> for example
>>>>> +    something is not currently CPU accessible.
>>>>> diff --git a/Documentation/gpu/rfc/index.rst
>>>>> b/Documentation/gpu/rfc/index.rst index 91e93a705230..5a3bd3924ba6
>>>>> 100644
>>>>> --- a/Documentation/gpu/rfc/index.rst
>>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>>    .. toctree::
>>>>>
>>>>>        i915_scheduler.rst
>>>>> +
>>>>> +.. toctree::
>>>>> +
>>>>> +    i915_small_bar.rst
>>
>>


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-02  8:53     ` [Intel-gfx] " Lionel Landwerlin
@ 2022-05-03 10:22       ` Matthew Auld
  -1 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-05-03 10:22 UTC (permalink / raw)
  To: Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, Tvrtko Ursulin, Jordan Justen, dri-devel,
	Kenneth Graunke, Jon Bloomfield, Daniel Vetter, mesa-dev,
	Akeem G Abodunrin

On 02/05/2022 09:53, Lionel Landwerlin wrote:
> On 02/05/2022 10:54, Lionel Landwerlin wrote:
>> On 20/04/2022 20:13, Matthew Auld wrote:
>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>
>>> v2:
>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>    - Rework error capture interactions, including no longer needing
>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>    - Add probed_cpu_visible_size. (Lionel)
>>>
>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>> Cc: mesa-dev@lists.freedesktop.org
>>> ---
>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 +++++++++++++++++++++++
>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>   3 files changed, 252 insertions(+)
>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>> new file mode 100644
>>> index 000000000000..7bfd0cf44d35
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>> @@ -0,0 +1,190 @@
>>> +/**
>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>> known to the
>>> + * driver.
>>> + *
>>> + * Note this is using both struct drm_i915_query_item and struct 
>>> drm_i915_query.
>>> + * For this new query we are adding the new query id 
>>> DRM_I915_QUERY_MEMORY_REGIONS
>>> + * at &drm_i915_query_item.query_id.
>>> + */
>>> +struct __drm_i915_memory_region_info {
>>> +    /** @region: The class:instance pair encoding */
>>> +    struct drm_i915_gem_memory_class_instance region;
>>> +
>>> +    /** @rsvd0: MBZ */
>>> +    __u32 rsvd0;
>>> +
>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>> +    __u64 probed_size;
>>> +
>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>> unknown) */
>>> +    __u64 unallocated_size;
>>> +
>>> +    union {
>>> +        /** @rsvd1: MBZ */
>>> +        __u64 rsvd1[8];
>>> +        struct {
>>> +            /**
>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>> +             * that is CPU accessible. (-1 = unknown).
>>> +             *
>>> +             * This will be always be <= @probed_size, and the
>>> +             * remainder(if there is any) will not be CPU
>>> +             * accessible.
>>> +             */
>>> +            __u64 probed_cpu_visible_size;
>>> +        };
>>
>>
>> Trying to implement userspace support in Vulkan for this, I have an 
>> additional question about the value of probed_cpu_visible_size.
>>
>> When is it set to -1?
>>
>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>
>> Afterwards it should either be the entire lmem or something smaller.
>>
>>
>> -Lionel
> 
> 
> Other pain point of this new uAPI, previously we could query the 
> unallocated size for each heap.

unallocated_size should always give the same value as probed_size. We 
have the avail tracking, but we don't currently expose that through 
unallocated_size, due to lack of real userspace/user etc.
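
For reference, a rough sketch (untested, and assuming a header that already
carries the probed_cpu_visible_size field proposed by this RFC) of how
userspace might read probed_size and the new probed_cpu_visible_size via the
existing DRM_I915_QUERY_MEMORY_REGIONS query:

#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int query_lmem_sizes(int fd, uint64_t *probed, uint64_t *cpu_visible)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_MEMORY_REGIONS,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	struct drm_i915_query_memory_regions *regions;
	uint32_t i;
	int err;

	/* First call: the kernel reports the required blob size in item.length. */
	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
	if (err || item.length <= 0)
		return -1;

	regions = calloc(1, item.length);
	if (!regions)
		return -1;
	item.data_ptr = (uintptr_t)regions;

	/* Second call: the kernel fills in the region info. */
	err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
	if (err)
		goto out;

	for (i = 0; i < regions->num_regions; i++) {
		struct drm_i915_memory_region_info *info = &regions->regions[i];

		if (info->region.memory_class != I915_MEMORY_CLASS_DEVICE)
			continue;

		*probed = info->probed_size;
		/*
		 * Field proposed by this RFC; a kernel without the support
		 * would leave the reserved union as zero.
		 */
		*cpu_visible = info->probed_cpu_visible_size;
	}

out:
	free(regions);
	return err;
}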

> 
> Now lmem is effectively divided into 2 heaps, but unallocated_size is 
> tracking allocation from both parts of lmem.

Yeah, if we ever properly expose the unallocated_size, then we could 
also just add unallocated_cpu_visible_size.
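
Purely as an illustration (not committed uapi), that could mean growing the
reserved union from the RFC header with a second field, something like:

	union {
		/** @rsvd1: MBZ */
		__u64 rsvd1[8];
		struct {
			/* CPU visible memory probed by the driver (-1 = unknown) */
			__u64 probed_cpu_visible_size;
			/*
			 * Hypothetical follow-up: estimate of the CPU visible
			 * memory remaining (-1 = unknown). Only meaningful once
			 * unallocated_size itself reports the real avail tracking.
			 */
			__u64 unallocated_cpu_visible_size;
		};
	};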

> 
> Is adding new I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE out of question?

I don't think it's out of the question...

I guess user-space should be able to get the current flag behaviour just 
by specifying: device, system. And it does give more flexibility to allow 
something like: device, device-nm, smem.

We can also drop the probed_cpu_visible_size, which would now just be 
the probed_size with device/device-nm. And if we lack device-nm, then 
the entire thing must be CPU mappable.

One of the downsides, though, is that we can no longer easily mix object 
pages from both device + device-nm, which we could previously do when we 
didn't specify the flag. At least according to the current 
design/behaviour for @regions that would not be allowed. I guess some 
kind of new flag like ALLOC_MIXED or so? Although currently that is only 
possible with device + device-nm in ttm/i915.
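
For illustration only, a sketch of what such a placement list might look like
from userspace, reusing the existing memory-regions create extension. Note
that I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE (and its value) is invented here
purely for the example:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Not real uapi: hypothetical class for the non-mappable part of lmem. */
#define I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE 2

static int create_bo_device_nm(int fd, uint64_t size, uint32_t *handle)
{
	/* Hypothetical placement list: device, device-nm, smem. */
	struct drm_i915_gem_memory_class_instance placements[] = {
		{ .memory_class = I915_MEMORY_CLASS_DEVICE },
		{ .memory_class = I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE },
		{ .memory_class = I915_MEMORY_CLASS_SYSTEM },
	};
	struct drm_i915_gem_create_ext_memory_regions regions = {
		.base.name = I915_GEM_CREATE_EXT_MEMORY_REGIONS,
		.num_regions = 3,
		.regions = (uintptr_t)placements,
	};
	struct drm_i915_gem_create_ext create = {
		.size = size,
		.extensions = (uintptr_t)&regions,
	};
	int err;

	err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
	if (err)
		return err;

	*handle = create.handle;
	return 0;
}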

> 
> 
> -Lionel
> 
> 
>>
>>
>>> +    };
>>> +};
>>> +
>>> +/**
>>> + * struct __drm_i915_gem_create_ext - Existing gem_create behaviour, 
>>> with added
>>> + * extension support using struct i915_user_extension.
>>> + *
>>> + * Note that new buffer flags should be added here, at least for the 
>>> stuff that
>>> + * is immutable. Previously we would have two ioctls, one to create 
>>> the object
>>> + * with gem_create, and another to apply various parameters, however 
>>> this
>>> + * creates some ambiguity for the params which are considered 
>>> immutable. Also in
>>> + * general we're phasing out the various SET/GET ioctls.
>>> + */
>>> +struct __drm_i915_gem_create_ext {
>>> +    /**
>>> +     * @size: Requested size for the object.
>>> +     *
>>> +     * The (page-aligned) allocated size for the object will be 
>>> returned.
>>> +     *
>>> +     * Note that for some devices we have might have further minimum
>>> +     * page-size restrictions(larger than 4K), like for device 
>>> local-memory.
>>> +     * However in general the final size here should always reflect any
>>> +     * rounding up, if for example using the 
>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>> +     * extension to place the object in device local-memory.
>>> +     */
>>> +    __u64 size;
>>> +    /**
>>> +     * @handle: Returned handle for the object.
>>> +     *
>>> +     * Object handles are nonzero.
>>> +     */
>>> +    __u32 handle;
>>> +    /**
>>> +     * @flags: Optional flags.
>>> +     *
>>> +     * Supported values:
>>> +     *
>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>> kernel that
>>> +     * the object will need to be accessed via the CPU.
>>> +     *
>>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, and
>>> +     * only strictly required on platforms where only some of the 
>>> device
>>> +     * memory is directly visible or mappable through the CPU, like 
>>> on DG2+.
>>> +     *
>>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>>> +     * ensure we can always spill the allocation to system memory, 
>>> if we
>>> +     * can't place the object in the mappable part of
>>> +     * I915_MEMORY_CLASS_DEVICE.
>>> +     *
>>> +     * Note that since the kernel only supports flat-CCS on objects 
>>> that can
>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore don't
>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>>> +     * flat-CCS.
>>> +     *
>>> +     * Without this hint, the kernel will assume that non-mappable
>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>> that the
>>> +     * kernel can still migrate the object to the mappable part, as 
>>> a last
>>> +     * resort, if userspace ever CPU faults this object, but this 
>>> might be
>>> +     * expensive, and so ideally should be avoided.
>>> +     */
>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>> +    __u32 flags;
>>> +    /**
>>> +     * @extensions: The chain of extensions to apply to this object.
>>> +     *
>>> +     * This will be useful in the future when we need to support 
>>> several
>>> +     * different extensions, and we need to apply more than one when
>>> +     * creating the object. See struct i915_user_extension.
>>> +     *
>>> +     * If we don't supply any extensions then we get the same old 
>>> gem_create
>>> +     * behaviour.
>>> +     *
>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>> +     *
>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>> +     */
>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>> +    __u64 extensions;
>>> +};
>>> +
>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>> +
>>> +/**
>>> + * struct __drm_i915_query_vma_info
>>> + *
>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>> returning its set
>>> + * of attributes.
>>> + *
>>> + * .. code-block:: C
>>> + *
>>> + *    struct drm_i915_query_vma_info info = {};
>>> + *    struct drm_i915_query_item item = {
>>> + *        .data_ptr = (uintptr_t)&info,
>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>> + *    };
>>> + *    struct drm_i915_query query = {
>>> + *        .num_items = 1,
>>> + *        .items_ptr = (uintptr_t)&item,
>>> + *    };
>>> + *    int err;
>>> + *
>>> + *    // Unlike some other types of queries, there is no need to 
>>> first query
>>> + *    // the size of the data_ptr blob here, since we already know 
>>> ahead of
>>> + *    // time how big this needs to be.
>>> + *    item.length = sizeof(info);
>>> + *
>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we wish
>>> + *    // to query, before then firing off the query.
>>> + *    info.vm_id = vm_id;
>>> + *    info.offset = gtt_address;
>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>> + *    if (err || item.length < 0) ...
>>> + *
>>> + *    // If all went well we can now inspect the returned attributes.
>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>> + */
>>> +struct __drm_i915_query_vma_info {
>>> +    /**
>>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>>> value
>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>> +     * drm_i915_gem_vm_control.vm_id.
>>> +     */
>>> +    __u32 vm_id;
>>> +    /** @pad: MBZ. */
>>> +    __u32 pad;
>>> +    /**
>>> +     * @offset: The corresponding ppGTT address of the vma which the 
>>> kernel
>>> +     * will use to perform the lookup.
>>> +     */
>>> +    __u64 offset;
>>> +    /**
>>> +     * @attributes: The returned attributes for the given vma.
>>> +     *
>>> +     * Possible values:
>>> +     *
>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>> backing the
>>> +     * vma are currently CPU accessible. If this is not set then the 
>>> vma is
>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>> the CPU
>>> +     * cannot directly access(this is only possible on discrete 
>>> devices with
>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>> +     * require the kernel first synchronising any GPU work tied to the
>>> +     * object, before then migrating the pages, either to the CPU 
>>> accessible
>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>> I915_MEMORY_CLASS_SYSTEM, if the
>>> +     * placements permit it. See 
>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>> +     *
>>> +     * Note that this is inherently racy.
>>> +     */
>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>> +    __u64 attributes;
>>> +    /** @rsvd: MBZ */
>>> +    __u32 rsvd[4];
>>> +};
>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>> new file mode 100644
>>> index 000000000000..be3d9bcdd86d
>>> --- /dev/null
>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>> @@ -0,0 +1,58 @@
>>> +==========================
>>> +I915 Small BAR RFC Section
>>> +==========================
>>> +Starting from DG2 we will have resizable BAR support for device 
>>> local-memory(i.e
>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>> might still be
>>> +smaller than the total probed_size. In such cases, only some subset of
>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>> first 256M),
>>> +while the remainder is only accessible via the GPU.
>>> +
>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>> +----------------------------------------------
>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>> CPU access.
>>> +This becomes important when placing an object in 
>>> I915_MEMORY_CLASS_DEVICE, where
>>> +underneath the device has a small BAR, meaning only some portion of 
>>> it is CPU
>>> +accessible. Without this flag the kernel will assume that CPU access 
>>> is not
>>> +required, and prioritize using the non-CPU visible portion of
>>> +I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_gem_create_ext
>>> +
>>> +probed_cpu_visible_size attribute
>>> +---------------------------------
>>> +New struct__drm_i915_memory_region attribute which returns the total 
>>> size of the
>>> +CPU accessible portion, for the particular region. This should only be
>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>> +
>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>> with the
>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>> visible portion,
>>> +where the total size of the heap needs to be known.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_memory_region_info
>>> +
>>> +DRM_I915_QUERY_VMA_INFO query
>>> +-----------------------------
>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>> +respective vma, and return its set of attributes. For now we only 
>>> support
>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>>> +currently placed in memory that is accessible by the CPU. This 
>>> should always be
>>> +set on devices where the CPU probed_cpu_visible_size of 
>>> I915_MEMORY_CLASS_DEVICE
>>> +matches the probed_size. If this is not set then CPU faulting the 
>>> object will
>>> +likely first require migrating the pages.
>>> +
>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>> +   :functions: __drm_i915_query_vma_info
>>> +
>>> +Error Capture restrictions
>>> +--------------------------
>>> +With error capture we have two new restrictions:
>>> +
>>> +    1) Error capture is best effort on small BAR systems; if the 
>>> pages are not
>>> +    CPU accessible, at the time of capture, then the kernel is free 
>>> to skip
>>> +    trying to capture them.
>>> +
>>> +    2) On discrete we now reject error capture on recoverable 
>>> contexts. In the
>>> +    future the kernel may want to blit during error capture, when 
>>> for example
>>> +    something is not currently CPU accessible.
>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>> b/Documentation/gpu/rfc/index.rst
>>> index 91e93a705230..5a3bd3924ba6 100644
>>> --- a/Documentation/gpu/rfc/index.rst
>>> +++ b/Documentation/gpu/rfc/index.rst
>>> @@ -23,3 +23,7 @@ host such documentation:
>>>   .. toctree::
>>>         i915_scheduler.rst
>>> +
>>> +.. toctree::
>>> +
>>> +    i915_small_bar.rst
>>
>>
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-03 10:22       ` [Intel-gfx] " Matthew Auld
@ 2022-05-03 10:39         ` Lionel Landwerlin
  -1 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-03 10:39 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Tvrtko Ursulin, Jordan Justen, dri-devel,
	Kenneth Graunke, Jon Bloomfield, Daniel Vetter, mesa-dev,
	Akeem G Abodunrin

On 03/05/2022 13:22, Matthew Auld wrote:
> On 02/05/2022 09:53, Lionel Landwerlin wrote:
>> On 02/05/2022 10:54, Lionel Landwerlin wrote:
>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>    - Rework error capture interactions, including no longer needing
>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>> +++++++++++++++++++++++
>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>   3 files changed, 252 insertions(+)
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>> known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id 
>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +    /** @region: The class:instance pair encoding */
>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +    /** @rsvd0: MBZ */
>>>> +    __u32 rsvd0;
>>>> +
>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +    __u64 probed_size;
>>>> +
>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>> unknown) */
>>>> +    __u64 unallocated_size;
>>>> +
>>>> +    union {
>>>> +        /** @rsvd1: MBZ */
>>>> +        __u64 rsvd1[8];
>>>> +        struct {
>>>> +            /**
>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>> +             * that is CPU accessible. (-1 = unknown).
>>>> +             *
>>>> +             * This will be always be <= @probed_size, and the
>>>> +             * remainder(if there is any) will not be CPU
>>>> +             * accessible.
>>>> +             */
>>>> +            __u64 probed_cpu_visible_size;
>>>> +        };
>>>
>>>
>>> Trying to implement userspace support in Vulkan for this, I have an 
>>> additional question about the value of probed_cpu_visible_size.
>>>
>>> When is it set to -1?
>>>
>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>
>>> Afterwards it should either be the entire lmem or something smaller.
>>>
>>>
>>> -Lionel
>>
>>
>> Other pain point of this new uAPI, previously we could query the 
>> unallocated size for each heap.
>
> unallocated_size should always give the same value as probed_size. We 
> have the avail tracking, but we don't currently expose that through 
> unallocated_size, due to lack of real userspace/user etc.
>
>>
>> Now lmem is effectively divided into 2 heaps, but unallocated_size is 
>> tracking allocation from both parts of lmem.
>
> Yeah, if we ever properly expose the unallocated_size, then we could 
> also just add unallocated_cpu_visible_size.
>
>>
>> Is adding new I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE out of question?
>
> I don't think it's out of the question...
>
> I guess user-space should be able to get the current flag behaviour 
> just by specifying: device, system. And it does give more flexibility to 
> allow something like: device, device-nm, smem.
>
> We can also drop the probed_cpu_visible_size, which would now just be 
> the probed_size with device/device-nm. And if we lack device-nm, then 
> the entire thing must be CPU mappable.
>
> One of the downsides though, is that we can no longer easily mix 
> object pages from both device + device-nm, which we could previously 
> do when we didn't specify the flag. At least according to the current 
> design/behaviour for @regions that would not be allowed. I guess some 
> kind of new flag like ALLOC_MIXED or so? Although currently that is 
> only possible with device + device-nm in ttm/i915.


Thanks, I wasn't aware of the restrictions.

Adding unallocated_cpu_visible_size would be great.


-Lionel


>
>>
>>
>> -Lionel
>>
>>
>>>
>>>
>>>> +    };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>> behaviour, with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for 
>>>> the stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>> create the object
>>>> + * with gem_create, and another to apply various parameters, 
>>>> however this
>>>> + * creates some ambiguity for the params which are considered 
>>>> immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +    /**
>>>> +     * @size: Requested size for the object.
>>>> +     *
>>>> +     * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +     *
>>>> +     * Note that for some devices we have might have further minimum
>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>> local-memory.
>>>> +     * However in general the final size here should always 
>>>> reflect any
>>>> +     * rounding up, if for example using the 
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +     * extension to place the object in device local-memory.
>>>> +     */
>>>> +    __u64 size;
>>>> +    /**
>>>> +     * @handle: Returned handle for the object.
>>>> +     *
>>>> +     * Object handles are nonzero.
>>>> +     */
>>>> +    __u32 handle;
>>>> +    /**
>>>> +     * @flags: Optional flags.
>>>> +     *
>>>> +     * Supported values:
>>>> +     *
>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>> kernel that
>>>> +     * the object will need to be accessed via the CPU.
>>>> +     *
>>>> +     * Only valid when placing objects in 
>>>> I915_MEMORY_CLASS_DEVICE, and
>>>> +     * only strictly required on platforms where only some of the 
>>>> device
>>>> +     * memory is directly visible or mappable through the CPU, 
>>>> like on DG2+.
>>>> +     *
>>>> +     * One of the placements MUST also be 
>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>> +     * ensure we can always spill the allocation to system memory, 
>>>> if we
>>>> +     * can't place the object in the mappable part of
>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>> +     *
>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>> objects that can
>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>> don't
>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together 
>>>> with
>>>> +     * flat-CCS.
>>>> +     *
>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>> that the
>>>> +     * kernel can still migrate the object to the mappable part, 
>>>> as a last
>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +     * expensive, and so ideally should be avoided.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +    __u32 flags;
>>>> +    /**
>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>> +     *
>>>> +     * This will be useful in the future when we need to support 
>>>> several
>>>> +     * different extensions, and we need to apply more than one when
>>>> +     * creating the object. See struct i915_user_extension.
>>>> +     *
>>>> +     * If we don't supply any extensions then we get the same old 
>>>> gem_create
>>>> +     * behaviour.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +    __u64 extensions;
>>>> +};
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>> returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + *    struct drm_i915_query_vma_info info = {};
>>>> + *    struct drm_i915_query_item item = {
>>>> + *        .data_ptr = (uintptr_t)&info,
>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + *    };
>>>> + *    struct drm_i915_query query = {
>>>> + *        .num_items = 1,
>>>> + *        .items_ptr = (uintptr_t)&item,
>>>> + *    };
>>>> + *    int err;
>>>> + *
>>>> + *    // Unlike some other types of queries, there is no need to 
>>>> first query
>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + *    // time how big this needs to be.
>>>> + *    item.length = sizeof(info);
>>>> + *
>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>> wish
>>>> + *    // to query, before then firing off the query.
>>>> + *    info.vm_id = vm_id;
>>>> + *    info.offset = gtt_address;
>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + *    if (err || item.length < 0) ...
>>>> + *
>>>> + *    // If all went well we can now inspect the returned attributes.
>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +    /**
>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>> the value
>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>> +     */
>>>> +    __u32 vm_id;
>>>> +    /** @pad: MBZ. */
>>>> +    __u32 pad;
>>>> +    /**
>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>> the kernel
>>>> +     * will use to perform the lookup.
>>>> +     */
>>>> +    __u64 offset;
>>>> +    /**
>>>> +     * @attributes: The returned attributes for the given vma.
>>>> +     *
>>>> +     * Possible values:
>>>> +     *
>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>> backing the
>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>> the vma is
>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>> the CPU
>>>> +     * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>> +     * require the kernel first synchronising any GPU work tied to 
>>>> the
>>>> +     * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +     * placements permit it. See 
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +     *
>>>> +     * Note that this is inherently racy.
>>>> +     */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +    __u64 attributes;
>>>> +    /** @rsvd: MBZ */
>>>> +    __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>> local-memory(i.e
>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>> might still be
>>>> +smaller than the total probed_size. In such cases, only some 
>>>> subset of
>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>> first 256M),
>>>> +while the remainder is only accessible via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in 
>>>> I915_MEMORY_CLASS_DEVICE, where
>>>> +underneath the device has a small BAR, meaning only some portion 
>>>> of it is CPU
>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>> access is not
>>>> +required, and prioritize using the non-CPU visible portion of
>>>> +I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct__drm_i915_memory_region attribute which returns the 
>>>> total size of the
>>>> +CPU accessible portion, for the particular region. This should 
>>>> only be
>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>> with the
>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>> visible portion,
>>>> +where the total size of the heap needs to be known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only 
>>>> support
>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>> object/vma is
>>>> +currently placed in memory that is accessible by the CPU. This 
>>>> should always be
>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>> I915_MEMORY_CLASS_DEVICE
>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>> object will
>>>> +likely first require migrating the pages.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_query_vma_info
>>>> +
>>>> +Error Capture restrictions
>>>> +--------------------------
>>>> +With error capture we have two new restrictions:
>>>> +
>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>> pages are not
>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>> free to skip
>>>> +    trying to capture them.
>>>> +
>>>> +    2) On discrete we now reject error capture on recoverable 
>>>> contexts. In the
>>>> +    future the kernel may want to blit during error capture, when 
>>>> for example
>>>> +    something is not currently CPU accessible.
>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>> b/Documentation/gpu/rfc/index.rst
>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>> --- a/Documentation/gpu/rfc/index.rst
>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>   .. toctree::
>>>>         i915_scheduler.rst
>>>> +
>>>> +.. toctree::
>>>> +
>>>> +    i915_small_bar.rst
>>>
>>>
>>


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-05-03 10:39         ` Lionel Landwerlin
  0 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-03 10:39 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 03/05/2022 13:22, Matthew Auld wrote:
> On 02/05/2022 09:53, Lionel Landwerlin wrote:
>> On 02/05/2022 10:54, Lionel Landwerlin wrote:
>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>    - Rework error capture interactions, including no longer needing
>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>> +++++++++++++++++++++++
>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>   3 files changed, 252 insertions(+)
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>> known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id 
>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +    /** @region: The class:instance pair encoding */
>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +    /** @rsvd0: MBZ */
>>>> +    __u32 rsvd0;
>>>> +
>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +    __u64 probed_size;
>>>> +
>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>> unknown) */
>>>> +    __u64 unallocated_size;
>>>> +
>>>> +    union {
>>>> +        /** @rsvd1: MBZ */
>>>> +        __u64 rsvd1[8];
>>>> +        struct {
>>>> +            /**
>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>> +             * that is CPU accessible. (-1 = unknown).
>>>> +             *
>>>> +             * This will be always be <= @probed_size, and the
>>>> +             * remainder(if there is any) will not be CPU
>>>> +             * accessible.
>>>> +             */
>>>> +            __u64 probed_cpu_visible_size;
>>>> +        };
>>>
>>>
>>> Trying to implement userspace support in Vulkan for this, I have an 
>>> additional question about the value of probed_cpu_visible_size.
>>>
>>> When is it set to -1?
>>>
>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>
>>> Afterwards it should either be the entire lmem or something smaller.
>>>
>>>
>>> -Lionel
>>
>>
>> Other pain point of this new uAPI, previously we could query the 
>> unallocated size for each heap.
>
> unallocated_size should always give the same value as probed_size. We 
> have the avail tracking, but we don't currently expose that through 
> unallocated_size, due to lack of real userspace/user etc.
>
>>
>> Now lmem is effectively divided into 2 heaps, but unallocated_size is 
>> tracking allocation from both parts of lmem.
>
> Yeah, if we ever properly expose the unallocated_size, then we could 
> also just add unallocated_cpu_visible_size.
>
>>
>> Is adding new I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE out of question?
>
> I don't think it's out of the question...
>
> I guess user-space should be able to get the current flag behaviour 
> just by specifying: device, system. And it does give more flexibility to 
> allow something like: device, device-nm, smem.
>
> We can also drop the probed_cpu_visible_size, which would now just be 
> the probed_size with device/device-nm. And if we lack device-nm, then 
> the entire thing must be CPU mappable.
>
> One of the downsides though, is that we can no longer easily mix 
> object pages from both device + device-nm, which we could previously 
> do when we didn't specify the flag. At least according to the current 
> design/behaviour for @regions that would not be allowed. I guess some 
> kind of new flag like ALLOC_MIXED or so? Although currently that is 
> only possible with device + device-nm in ttm/i915.


Thanks, I wasn't aware of the restrictions.

Adding unallocated_cpu_visible_size would be great.


-Lionel


>
>>
>>
>> -Lionel
>>
>>
>>>
>>>
>>>> +    };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>> behaviour, with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for 
>>>> the stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>> create the object
>>>> + * with gem_create, and another to apply various parameters, 
>>>> however this
>>>> + * creates some ambiguity for the params which are considered 
>>>> immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +    /**
>>>> +     * @size: Requested size for the object.
>>>> +     *
>>>> +     * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +     *
>>>> +     * Note that for some devices we might have further minimum
>>>> +     * page-size restrictions (larger than 4K), like for device
>>>> local-memory.
>>>> +     * However in general the final size here should always 
>>>> reflect any
>>>> +     * rounding up, if for example using the 
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +     * extension to place the object in device local-memory.
>>>> +     */
>>>> +    __u64 size;
>>>> +    /**
>>>> +     * @handle: Returned handle for the object.
>>>> +     *
>>>> +     * Object handles are nonzero.
>>>> +     */
>>>> +    __u32 handle;
>>>> +    /**
>>>> +     * @flags: Optional flags.
>>>> +     *
>>>> +     * Supported values:
>>>> +     *
>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>> kernel that
>>>> +     * the object will need to be accessed via the CPU.
>>>> +     *
>>>> +     * Only valid when placing objects in 
>>>> I915_MEMORY_CLASS_DEVICE, and
>>>> +     * only strictly required on platforms where only some of the 
>>>> device
>>>> +     * memory is directly visible or mappable through the CPU, 
>>>> like on DG2+.
>>>> +     *
>>>> +     * One of the placements MUST also be 
>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>> +     * ensure we can always spill the allocation to system memory, 
>>>> if we
>>>> +     * can't place the object in the mappable part of
>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>> +     *
>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>> objects that can
>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>> don't
>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together 
>>>> with
>>>> +     * flat-CCS.
>>>> +     *
>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>> that the
>>>> +     * kernel can still migrate the object to the mappable part, 
>>>> as a last
>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +     * expensive, and so ideally should be avoided.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +    __u32 flags;
>>>> +    /**
>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>> +     *
>>>> +     * This will be useful in the future when we need to support 
>>>> several
>>>> +     * different extensions, and we need to apply more than one when
>>>> +     * creating the object. See struct i915_user_extension.
>>>> +     *
>>>> +     * If we don't supply any extensions then we get the same old 
>>>> gem_create
>>>> +     * behaviour.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +    __u64 extensions;
>>>> +};
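
As a usage illustration (not part of the patch; fd is assumed to be an open
i915 device fd), allocating a buffer with the proposed flag might look
roughly like the snippet below. Note the mandatory I915_MEMORY_CLASS_SYSTEM
placement, so the kernel can spill the object when the mappable part of lmem
is full:

    struct drm_i915_gem_memory_class_instance placements[] = {
            { I915_MEMORY_CLASS_DEVICE, 0 },
            { I915_MEMORY_CLASS_SYSTEM, 0 },
    };
    struct drm_i915_gem_create_ext_memory_regions regions = {
            .base = { .name = I915_GEM_CREATE_EXT_MEMORY_REGIONS },
            .num_regions = 2,
            .regions = (uintptr_t)placements,
    };
    struct drm_i915_gem_create_ext create = {
            .size = 4096,
            .flags = I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS,
            .extensions = (uintptr_t)&regions,
    };
    int err = ioctl(fd, DRM_IOCTL_I915_GEM_CREATE_EXT, &create);
    if (err) ...
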
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>> returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + *    struct drm_i915_query_vma_info info = {};
>>>> + *    struct drm_i915_query_item item = {
>>>> + *        .data_ptr = (uintptr_t)&info,
>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + *    };
>>>> + *    struct drm_i915_query query = {
>>>> + *        .num_items = 1,
>>>> + *        .items_ptr = (uintptr_t)&item,
>>>> + *    };
>>>> + *    int err;
>>>> + *
>>>> + *    // Unlike some other types of queries, there is no need to 
>>>> first query
>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + *    // time how big this needs to be.
>>>> + *    item.length = sizeof(info);
>>>> + *
>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>> wish
>>>> + *    // to query, before then firing off the query.
>>>> + *    info.vm_id = vm_id;
>>>> + *    info.offset = gtt_address;
>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + *    if (err || item.length < 0) ...
>>>> + *
>>>> + *    // If all went well we can now inspect the returned attributes.
>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +    /**
>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>> the value
>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>> +     */
>>>> +    __u32 vm_id;
>>>> +    /** @pad: MBZ. */
>>>> +    __u32 pad;
>>>> +    /**
>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>> the kernel
>>>> +     * will use to perform the lookup.
>>>> +     */
>>>> +    __u64 offset;
>>>> +    /**
>>>> +     * @attributes: The returned attributes for the given vma.
>>>> +     *
>>>> +     * Possible values:
>>>> +     *
>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>> backing the
>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>> the vma is
>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>> the CPU
>>>> +     * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>> +     * require the kernel first synchronising any GPU work tied to 
>>>> the
>>>> +     * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +     * placements permit it. See 
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +     *
>>>> +     * Note that this is inherently racy.
>>>> +     */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +    __u64 attributes;
>>>> +    /** @rsvd: MBZ */
>>>> +    __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>> local-memory(i.e
>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>> might still be
>>>> +smaller than the total probed_size. In such cases, only some 
>>>> subset of
>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>> first 256M),
>>>> +while the remainder is only accessible via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in 
>>>> I915_MEMORY_CLASS_DEVICE, where
>>>> +underneath the device has a small BAR, meaning only some portion 
>>>> of it is CPU
>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>> access is not
>>>> +required, and prioritize using the non-CPU visible portion of
>>>> +I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct __drm_i915_memory_region_info attribute which returns the
>>>> total size of the
>>>> +CPU accessible portion, for the particular region. This should 
>>>> only be
>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>> with the
>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>> visible portion,
>>>> +where the total size of the heap needs to be known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only 
>>>> support
>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>> object/vma is
>>>> +currently placed in memory that is accessible by the CPU. This 
>>>> should always be
>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>> I915_MEMORY_CLASS_DEVICE
>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>> object will
>>>> +likely first require migrating the pages.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_query_vma_info
>>>> +
>>>> +Error Capture restrictions
>>>> +--------------------------
>>>> +With error capture we have two new restrictions:
>>>> +
>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>> pages are not
>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>> free to skip
>>>> +    trying to capture them.
>>>> +
>>>> +    2) On discrete we now reject error capture on recoverable 
>>>> contexts. In the
>>>> +    future the kernel may want to blit during error capture, when 
>>>> for example
>>>> +    something is not currently CPU accessible.
>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>> b/Documentation/gpu/rfc/index.rst
>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>> --- a/Documentation/gpu/rfc/index.rst
>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>   .. toctree::
>>>>         i915_scheduler.rst
>>>> +
>>>> +.. toctree::
>>>> +
>>>> +    i915_small_bar.rst
>>>
>>>
>>


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-03 10:22       ` [Intel-gfx] " Matthew Auld
@ 2022-05-03 11:06         ` Tvrtko Ursulin
  -1 siblings, 0 replies; 50+ messages in thread
From: Tvrtko Ursulin @ 2022-05-03 11:06 UTC (permalink / raw)
  To: Matthew Auld, Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, Jordan Justen, dri-devel, Kenneth Graunke,
	Jon Bloomfield, Daniel Vetter, mesa-dev, Akeem G Abodunrin


On 03/05/2022 11:22, Matthew Auld wrote:
> On 02/05/2022 09:53, Lionel Landwerlin wrote:
>> On 02/05/2022 10:54, Lionel Landwerlin wrote:
>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>
>>>> v2:
>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>    - Rework error capture interactions, including no longer needing
>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>
>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>> Cc: mesa-dev@lists.freedesktop.org
>>>> ---
>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>> +++++++++++++++++++++++
>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>   3 files changed, 252 insertions(+)
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>> new file mode 100644
>>>> index 000000000000..7bfd0cf44d35
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>> @@ -0,0 +1,190 @@
>>>> +/**
>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>> known to the
>>>> + * driver.
>>>> + *
>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>> drm_i915_query.
>>>> + * For this new query we are adding the new query id 
>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>> + * at &drm_i915_query_item.query_id.
>>>> + */
>>>> +struct __drm_i915_memory_region_info {
>>>> +    /** @region: The class:instance pair encoding */
>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>> +
>>>> +    /** @rsvd0: MBZ */
>>>> +    __u32 rsvd0;
>>>> +
>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>> +    __u64 probed_size;
>>>> +
>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>> unknown) */
>>>> +    __u64 unallocated_size;
>>>> +
>>>> +    union {
>>>> +        /** @rsvd1: MBZ */
>>>> +        __u64 rsvd1[8];
>>>> +        struct {
>>>> +            /**
>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>> +             * that is CPU accessible. (-1 = unknown).
>>>> +             *
>>>> +             * This will always be <= @probed_size, and the
>>>> +             * remainder (if there is any) will not be CPU
>>>> +             * accessible.
>>>> +             */
>>>> +            __u64 probed_cpu_visible_size;
>>>> +        };
>>>
>>>
>>> Trying to implement userspace support in Vulkan for this, I have an 
>>> additional question about the value of probed_cpu_visible_size.
>>>
>>> When is it set to -1?
>>>
>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>
>>> After that it should either be the entire lmem or something smaller.
>>>
>>>
>>> -Lionel
>>
>>
>> Another pain point of this new uAPI: previously we could query the
>> unallocated size for each heap.
> 
> unallocated_size should always give the same value as probed_size. We 
> have the avail tracking, but we don't currently expose that through 
> unallocated_size, due to lack of real userspace/user etc.
> 
>>
>> Now lmem is effectively divided into 2 heaps, but unallocated_size is 
>> tracking allocation from both parts of lmem.
> 
> Yeah, if we ever properly expose the unallocated_size, then we could 
> also just add unallocated_cpu_visible_size.
> 
>>
>> Is adding new I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE out of question?
> 
> I don't think it's out of the question...
> 
> I guess user-space should be able to get the current flag behaviour just 
> by specifying: device, system. And it does give more flexibility to allow
> something like: device, device-nm, smem.

I was also thinking about that option, albeit with both regions under the 
existing class, just separate instances with "capability" flags differing.

Downsides I thought were a) it does not really match the underlying 
resource, which is one and not two from the backing storage POV, and b) it 
allows userspace to potentially be too restrictive with something like 
regions=device-mappable,system (even if only by an innocent mistake), 
disallowing i915 from managing the space better in cases where multiple 
clients happen to fight over it.
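
Purely as an illustration of that concern (neither a separate mappable
instance nor I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE exists in the uapi; the
names below are hypothetical), the overly restrictive placement list would
look something like:

    /* Hypothetical: lmem split into a mappable instance plus a new
     * non-mappable class. Listing only the mappable part means i915 can
     * never move the object out of the contended mappable window. */
    struct drm_i915_gem_memory_class_instance placements[] = {
            { I915_MEMORY_CLASS_DEVICE, 0 },  /* mappable lmem only */
            { I915_MEMORY_CLASS_SYSTEM, 0 },
            /* I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE deliberately left out */
    };
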

The last part is going back to what I was commenting on earlier, where I 
thought migrating objects which had cpu-access set to any part of device 
memory should be allowed. (When the mappable region is oversubscribed.)

Regards,

Tvrtko

> We can also drop the probed_cpu_visible_size, which would now just be 
> the probed_size with device/device-nm. And if we lack device-nm, then 
> the entire thing must be CPU mappable.
> 
> One of the downsides though, is that we can no longer easily mix object 
> pages from both device + device-nm, which we could previously do when we 
> didn't specify the flag. At least according to the current 
> design/behaviour for @regions that would not be allowed. I guess some 
> kind of new flag like ALLOC_MIXED or so? Although currently that is only 
> possible with device + device-nm in ttm/i915.
> 
>>
>>
>> -Lionel
>>
>>
>>>
>>>
>>>> +    };
>>>> +};
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>> behaviour, with added
>>>> + * extension support using struct i915_user_extension.
>>>> + *
>>>> + * Note that new buffer flags should be added here, at least for 
>>>> the stuff that
>>>> + * is immutable. Previously we would have two ioctls, one to create 
>>>> the object
>>>> + * with gem_create, and another to apply various parameters, 
>>>> however this
>>>> + * creates some ambiguity for the params which are considered 
>>>> immutable. Also in
>>>> + * general we're phasing out the various SET/GET ioctls.
>>>> + */
>>>> +struct __drm_i915_gem_create_ext {
>>>> +    /**
>>>> +     * @size: Requested size for the object.
>>>> +     *
>>>> +     * The (page-aligned) allocated size for the object will be 
>>>> returned.
>>>> +     *
>>>> +     * Note that for some devices we might have further minimum
>>>> +     * page-size restrictions (larger than 4K), like for device
>>>> local-memory.
>>>> +     * However in general the final size here should always reflect 
>>>> any
>>>> +     * rounding up, if for example using the 
>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>> +     * extension to place the object in device local-memory.
>>>> +     */
>>>> +    __u64 size;
>>>> +    /**
>>>> +     * @handle: Returned handle for the object.
>>>> +     *
>>>> +     * Object handles are nonzero.
>>>> +     */
>>>> +    __u32 handle;
>>>> +    /**
>>>> +     * @flags: Optional flags.
>>>> +     *
>>>> +     * Supported values:
>>>> +     *
>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>> kernel that
>>>> +     * the object will need to be accessed via the CPU.
>>>> +     *
>>>> +     * Only valid when placing objects in I915_MEMORY_CLASS_DEVICE, 
>>>> and
>>>> +     * only strictly required on platforms where only some of the 
>>>> device
>>>> +     * memory is directly visible or mappable through the CPU, like 
>>>> on DG2+.
>>>> +     *
>>>> +     * One of the placements MUST also be I915_MEMORY_CLASS_SYSTEM, to
>>>> +     * ensure we can always spill the allocation to system memory, 
>>>> if we
>>>> +     * can't place the object in the mappable part of
>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>> +     *
>>>> +     * Note that since the kernel only supports flat-CCS on objects 
>>>> that can
>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>> don't
>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together with
>>>> +     * flat-CCS.
>>>> +     *
>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>> that the
>>>> +     * kernel can still migrate the object to the mappable part, as 
>>>> a last
>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>> might be
>>>> +     * expensive, and so ideally should be avoided.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>> +    __u32 flags;
>>>> +    /**
>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>> +     *
>>>> +     * This will be useful in the future when we need to support 
>>>> several
>>>> +     * different extensions, and we need to apply more than one when
>>>> +     * creating the object. See struct i915_user_extension.
>>>> +     *
>>>> +     * If we don't supply any extensions then we get the same old 
>>>> gem_create
>>>> +     * behaviour.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>> +     *
>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>> +     */
>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>> +    __u64 extensions;
>>>> +};
>>>> +
>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>> +
>>>> +/**
>>>> + * struct __drm_i915_query_vma_info
>>>> + *
>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>> returning its set
>>>> + * of attributes.
>>>> + *
>>>> + * .. code-block:: C
>>>> + *
>>>> + *    struct drm_i915_query_vma_info info = {};
>>>> + *    struct drm_i915_query_item item = {
>>>> + *        .data_ptr = (uintptr_t)&info,
>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>> + *    };
>>>> + *    struct drm_i915_query query = {
>>>> + *        .num_items = 1,
>>>> + *        .items_ptr = (uintptr_t)&item,
>>>> + *    };
>>>> + *    int err;
>>>> + *
>>>> + *    // Unlike some other types of queries, there is no need to 
>>>> first query
>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>> ahead of
>>>> + *    // time how big this needs to be.
>>>> + *    item.length = sizeof(info);
>>>> + *
>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>> wish
>>>> + *    // to query, before then firing off the query.
>>>> + *    info.vm_id = vm_id;
>>>> + *    info.offset = gtt_address;
>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>> + *    if (err || item.length < 0) ...
>>>> + *
>>>> + *    // If all went well we can now inspect the returned attributes.
>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>> + */
>>>> +struct __drm_i915_query_vma_info {
>>>> +    /**
>>>> +     * @vm_id: The given vm id that contains the vma. The id is the 
>>>> value
>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>> +     */
>>>> +    __u32 vm_id;
>>>> +    /** @pad: MBZ. */
>>>> +    __u32 pad;
>>>> +    /**
>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>> the kernel
>>>> +     * will use to perform the lookup.
>>>> +     */
>>>> +    __u64 offset;
>>>> +    /**
>>>> +     * @attributes: The returned attributes for the given vma.
>>>> +     *
>>>> +     * Possible values:
>>>> +     *
>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>> backing the
>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>> the vma is
>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>> the CPU
>>>> +     * cannot directly access(this is only possible on discrete 
>>>> devices with
>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>> +     * require the kernel first synchronising any GPU work tied to the
>>>> +     * object, before then migrating the pages, either to the CPU 
>>>> accessible
>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>> +     * placements permit it. See 
>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>> +     *
>>>> +     * Note that this is inherently racy.
>>>> +     */
>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>> +    __u64 attributes;
>>>> +    /** @rsvd: MBZ */
>>>> +    __u32 rsvd[4];
>>>> +};
>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> new file mode 100644
>>>> index 000000000000..be3d9bcdd86d
>>>> --- /dev/null
>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>> @@ -0,0 +1,58 @@
>>>> +==========================
>>>> +I915 Small BAR RFC Section
>>>> +==========================
>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>> local-memory(i.e
>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>> might still be
>>>> +smaller than the total probed_size. In such cases, only some subset of
>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>> first 256M),
>>>> +while the remainder is only accessible via the GPU.
>>>> +
>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>> +----------------------------------------------
>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>> CPU access.
>>>> +This becomes important when placing an object in 
>>>> I915_MEMORY_CLASS_DEVICE, where
>>>> +underneath the device has a small BAR, meaning only some portion of 
>>>> it is CPU
>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>> access is not
>>>> +required, and prioritize using the non-CPU visible portion of
>>>> +I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_gem_create_ext
>>>> +
>>>> +probed_cpu_visible_size attribute
>>>> +---------------------------------
>>>> +New struct __drm_i915_memory_region_info attribute which returns the
>>>> total size of the
>>>> +CPU accessible portion, for the particular region. This should only be
>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>> +
>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>> with the
>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>> visible portion,
>>>> +where the total size of the heap needs to be known.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_memory_region_info
>>>> +
>>>> +DRM_I915_QUERY_VMA_INFO query
>>>> +-----------------------------
>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>> +respective vma, and return its set of attributes. For now we only 
>>>> support
>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the object/vma is
>>>> +currently placed in memory that is accessible by the CPU. This 
>>>> should always be
>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>> I915_MEMORY_CLASS_DEVICE
>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>> object will
>>>> +likely first require migrating the pages.
>>>> +
>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>> +   :functions: __drm_i915_query_vma_info
>>>> +
>>>> +Error Capture restrictions
>>>> +--------------------------
>>>> +With error capture we have two new restrictions:
>>>> +
>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>> pages are not
>>>> +    CPU accessible, at the time of capture, then the kernel is free 
>>>> to skip
>>>> +    trying to capture them.
>>>> +
>>>> +    2) On discrete we now reject error capture on recoverable 
>>>> contexts. In the
>>>> +    future the kernel may want to blit during error capture, when 
>>>> for example
>>>> +    something is not currently CPU accessible.
>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>> b/Documentation/gpu/rfc/index.rst
>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>> --- a/Documentation/gpu/rfc/index.rst
>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>   .. toctree::
>>>>         i915_scheduler.rst
>>>> +
>>>> +.. toctree::
>>>> +
>>>> +    i915_small_bar.rst
>>>
>>>
>>

^ permalink raw reply	[flat|nested] 50+ messages in thread


* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-03 10:39         ` [Intel-gfx] " Lionel Landwerlin
@ 2022-05-03 14:27           ` Matthew Auld
  -1 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-05-03 14:27 UTC (permalink / raw)
  To: Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, Tvrtko Ursulin, Jordan Justen, dri-devel,
	Kenneth Graunke, Jon Bloomfield, Daniel Vetter, mesa-dev,
	Akeem G Abodunrin

On 03/05/2022 11:39, Lionel Landwerlin wrote:
> On 03/05/2022 13:22, Matthew Auld wrote:
>> On 02/05/2022 09:53, Lionel Landwerlin wrote:
>>> On 02/05/2022 10:54, Lionel Landwerlin wrote:
>>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>>
>>>>> v2:
>>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>>    - Rework error capture interactions, including no longer needing
>>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>>
>>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>>> Cc: mesa-dev@lists.freedesktop.org
>>>>> ---
>>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>>> +++++++++++++++++++++++
>>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>>   3 files changed, 252 insertions(+)
>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>>
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> new file mode 100644
>>>>> index 000000000000..7bfd0cf44d35
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> @@ -0,0 +1,190 @@
>>>>> +/**
>>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>>> known to the
>>>>> + * driver.
>>>>> + *
>>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>>> drm_i915_query.
>>>>> + * For this new query we are adding the new query id 
>>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>>> + * at &drm_i915_query_item.query_id.
>>>>> + */
>>>>> +struct __drm_i915_memory_region_info {
>>>>> +    /** @region: The class:instance pair encoding */
>>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>>> +
>>>>> +    /** @rsvd0: MBZ */
>>>>> +    __u32 rsvd0;
>>>>> +
>>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>>> +    __u64 probed_size;
>>>>> +
>>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>>> unknown) */
>>>>> +    __u64 unallocated_size;
>>>>> +
>>>>> +    union {
>>>>> +        /** @rsvd1: MBZ */
>>>>> +        __u64 rsvd1[8];
>>>>> +        struct {
>>>>> +            /**
>>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>>> +             * that is CPU accessible. (-1 = unknown).
>>>>> +             *
>>>>> +             * This will always be <= @probed_size, and the
>>>>> +             * remainder (if there is any) will not be CPU
>>>>> +             * accessible.
>>>>> +             */
>>>>> +            __u64 probed_cpu_visible_size;
>>>>> +        };
>>>>
>>>>
>>>> Trying to implement userspace support in Vulkan for this, I have an 
>>>> additional question about the value of probed_cpu_visible_size.
>>>>
>>>> When is it set to -1?
>>>>
>>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>>
>>>> After that it should either be the entire lmem or something smaller.
>>>>
>>>>
>>>> -Lionel
>>>
>>>
>>> Another pain point of this new uAPI: previously we could query the
>>> unallocated size for each heap.
>>
>> unallocated_size should always give the same value as probed_size. We 
>> have the avail tracking, but we don't currently expose that through 
>> unallocated_size, due to lack of real userspace/user etc.
>>
>>>
>>> Now lmem is effectively divided into 2 heaps, but unallocated_size is 
>>> tracking allocation from both parts of lmem.
>>
>> Yeah, if we ever properly expose the unallocated_size, then we could 
>> also just add unallocated_cpu_visible_size.
>>
>>>
>>> Is adding new I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE out of question?
>>
>> I don't think it's out of the question...
>>
>> I guess user-space should be able to get the current flag behaviour 
>> just by specifying: device, system. And it does give more flexibility to
>> allow something like: device, device-nm, smem.
>>
>> We can also drop the probed_cpu_visible_size, which would now just be 
>> the probed_size with device/device-nm. And if we lack device-nm, then 
>> the entire thing must be CPU mappable.
>>
>> One of the downsides though, is that we can no longer easily mix 
>> object pages from both device + device-nm, which we could previously 
>> do when we didn't specify the flag. At least according to the current 
>> design/behaviour for @regions that would not be allowed. I guess some 
>> kind of new flag like ALLOC_MIXED or so? Although currently that is 
>> only possible with device + device-nm in ttm/i915.
> 
> 
> Thanks, I wasn't aware of the restrictions.
> 
> Adding unallocated_cpu_visible_size would be great.

So do we want this in the next version? i.e. do we already have a current 
real use case in mind for unallocated_size, where probed_size is not good 
enough?
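
If it does get added, presumably it would just slot in next to the existing
field inside the reserved space, along the lines of (hypothetical, not in
this patch):

    struct {
            __u64 probed_cpu_visible_size;
            /* Estimate of CPU visible memory remaining (-1 = unknown). */
            __u64 unallocated_cpu_visible_size;
    };
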

> 
> 
> -Lionel
> 
> 
>>
>>>
>>>
>>> -Lionel
>>>
>>>
>>>>
>>>>
>>>>> +    };
>>>>> +};
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>>> behaviour, with added
>>>>> + * extension support using struct i915_user_extension.
>>>>> + *
>>>>> + * Note that new buffer flags should be added here, at least for 
>>>>> the stuff that
>>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>>> create the object
>>>>> + * with gem_create, and another to apply various parameters, 
>>>>> however this
>>>>> + * creates some ambiguity for the params which are considered 
>>>>> immutable. Also in
>>>>> + * general we're phasing out the various SET/GET ioctls.
>>>>> + */
>>>>> +struct __drm_i915_gem_create_ext {
>>>>> +    /**
>>>>> +     * @size: Requested size for the object.
>>>>> +     *
>>>>> +     * The (page-aligned) allocated size for the object will be 
>>>>> returned.
>>>>> +     *
>>>>> +     * Note that for some devices we might have further minimum
>>>>> +     * page-size restrictions (larger than 4K), like for device
>>>>> local-memory.
>>>>> +     * However in general the final size here should always 
>>>>> reflect any
>>>>> +     * rounding up, if for example using the 
>>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>>> +     * extension to place the object in device local-memory.
>>>>> +     */
>>>>> +    __u64 size;
>>>>> +    /**
>>>>> +     * @handle: Returned handle for the object.
>>>>> +     *
>>>>> +     * Object handles are nonzero.
>>>>> +     */
>>>>> +    __u32 handle;
>>>>> +    /**
>>>>> +     * @flags: Optional flags.
>>>>> +     *
>>>>> +     * Supported values:
>>>>> +     *
>>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>>> kernel that
>>>>> +     * the object will need to be accessed via the CPU.
>>>>> +     *
>>>>> +     * Only valid when placing objects in 
>>>>> I915_MEMORY_CLASS_DEVICE, and
>>>>> +     * only strictly required on platforms where only some of the 
>>>>> device
>>>>> +     * memory is directly visible or mappable through the CPU, 
>>>>> like on DG2+.
>>>>> +     *
>>>>> +     * One of the placements MUST also be 
>>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>>> +     * ensure we can always spill the allocation to system memory, 
>>>>> if we
>>>>> +     * can't place the object in the mappable part of
>>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>>> +     *
>>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>>> objects that can
>>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>>> don't
>>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together 
>>>>> with
>>>>> +     * flat-CCS.
>>>>> +     *
>>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>>> that the
>>>>> +     * kernel can still migrate the object to the mappable part, 
>>>>> as a last
>>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>>> might be
>>>>> +     * expensive, and so ideally should be avoided.
>>>>> +     */
>>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>>> +    __u32 flags;
>>>>> +    /**
>>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>>> +     *
>>>>> +     * This will be useful in the future when we need to support 
>>>>> several
>>>>> +     * different extensions, and we need to apply more than one when
>>>>> +     * creating the object. See struct i915_user_extension.
>>>>> +     *
>>>>> +     * If we don't supply any extensions then we get the same old 
>>>>> gem_create
>>>>> +     * behaviour.
>>>>> +     *
>>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>>> +     *
>>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>>> +     */
>>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>>> +    __u64 extensions;
>>>>> +};
>>>>> +
>>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_query_vma_info
>>>>> + *
>>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>>> returning its set
>>>>> + * of attributes.
>>>>> + *
>>>>> + * .. code-block:: C
>>>>> + *
>>>>> + *    struct drm_i915_query_vma_info info = {};
>>>>> + *    struct drm_i915_query_item item = {
>>>>> + *        .data_ptr = (uintptr_t)&info,
>>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>>> + *    };
>>>>> + *    struct drm_i915_query query = {
>>>>> + *        .num_items = 1,
>>>>> + *        .items_ptr = (uintptr_t)&item,
>>>>> + *    };
>>>>> + *    int err;
>>>>> + *
>>>>> + *    // Unlike some other types of queries, there is no need to 
>>>>> first query
>>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>>> ahead of
>>>>> + *    // time how big this needs to be.
>>>>> + *    item.length = sizeof(info);
>>>>> + *
>>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>>> wish
>>>>> + *    // to query, before then firing off the query.
>>>>> + *    info.vm_id = vm_id;
>>>>> + *    info.offset = gtt_address;
>>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>>> + *    if (err || item.length < 0) ...
>>>>> + *
>>>>> + *    // If all went well we can now inspect the returned attributes.
>>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>>> + */
>>>>> +struct __drm_i915_query_vma_info {
>>>>> +    /**
>>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>>> the value
>>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>>> +     */
>>>>> +    __u32 vm_id;
>>>>> +    /** @pad: MBZ. */
>>>>> +    __u32 pad;
>>>>> +    /**
>>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>>> the kernel
>>>>> +     * will use to perform the lookup.
>>>>> +     */
>>>>> +    __u64 offset;
>>>>> +    /**
>>>>> +     * @attributes: The returned attributes for the given vma.
>>>>> +     *
>>>>> +     * Possible values:
>>>>> +     *
>>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>>> backing the
>>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>>> the vma is
>>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>>> the CPU
>>>>> +     * cannot directly access(this is only possible on discrete 
>>>>> devices with
>>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>>> +     * require the kernel first synchronising any GPU work tied to 
>>>>> the
>>>>> +     * object, before then migrating the pages, either to the CPU 
>>>>> accessible
>>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>>> +     * placements permit it. See 
>>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>>> +     *
>>>>> +     * Note that this is inherently racy.
>>>>> +     */
>>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>>> +    __u64 attributes;
>>>>> +    /** @rsvd: MBZ */
>>>>> +    __u32 rsvd[4];
>>>>> +};
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> new file mode 100644
>>>>> index 000000000000..be3d9bcdd86d
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> @@ -0,0 +1,58 @@
>>>>> +==========================
>>>>> +I915 Small BAR RFC Section
>>>>> +==========================
>>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>>> local-memory(i.e
>>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>>> might still be
>>>>> +smaller than the total probed_size. In such cases, only some 
>>>>> subset of
>>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>>> first 256M),
>>>>> +while the remainder is only accessible via the GPU.
>>>>> +
>>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>>> +----------------------------------------------
>>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>>> CPU access.
>>>>> +This becomes important when placing an object in 
>>>>> I915_MEMORY_CLASS_DEVICE, where
>>>>> +underneath the device has a small BAR, meaning only some portion 
>>>>> of it is CPU
>>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>>> access is not
>>>>> +required, and prioritize using the non-CPU visible portion of
>>>>> +I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_gem_create_ext
>>>>> +
>>>>> +probed_cpu_visible_size attribute
>>>>> +---------------------------------
>>>>> +New struct __drm_i915_memory_region_info attribute which returns the 
>>>>> total size of the
>>>>> +CPU accessible portion, for the particular region. This should 
>>>>> only be
>>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>>> with the
>>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>>> visible portion,
>>>>> +where the total size of the heap needs to be known.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_memory_region_info
>>>>> +
>>>>> +DRM_I915_QUERY_VMA_INFO query
>>>>> +-----------------------------
>>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>>> +respective vma, and return its set of attributes. For now we only 
>>>>> support
>>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>>> object/vma is
>>>>> +currently placed in memory that is accessible by the CPU. This 
>>>>> should always be
>>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>>> I915_MEMORY_CLASS_DEVICE
>>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>>> object will
>>>>> +likely first require migrating the pages.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_query_vma_info
>>>>> +
>>>>> +Error Capture restrictions
>>>>> +--------------------------
>>>>> +With error capture we have two new restrictions:
>>>>> +
>>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>>> pages are not
>>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>>> free to skip
>>>>> +    trying to capture them.
>>>>> +
>>>>> +    2) On discrete we now reject error capture on recoverable 
>>>>> contexts. In the
>>>>> +    future the kernel may want to blit during error capture, when 
>>>>> for example
>>>>> +    something is not currently CPU accessible.
>>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>>> b/Documentation/gpu/rfc/index.rst
>>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>>> --- a/Documentation/gpu/rfc/index.rst
>>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>>   .. toctree::
>>>>>         i915_scheduler.rst
>>>>> +
>>>>> +.. toctree::
>>>>> +
>>>>> +    i915_small_bar.rst
>>>>
>>>>
>>>
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-05-03 14:27           ` Matthew Auld
  0 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-05-03 14:27 UTC (permalink / raw)
  To: Lionel Landwerlin, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 03/05/2022 11:39, Lionel Landwerlin wrote:
> On 03/05/2022 13:22, Matthew Auld wrote:
>> On 02/05/2022 09:53, Lionel Landwerlin wrote:
>>> On 02/05/2022 10:54, Lionel Landwerlin wrote:
>>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>>
>>>>> v2:
>>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>>    - Rework error capture interactions, including no longer needing
>>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>>
>>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>>> Cc: mesa-dev@lists.freedesktop.org
>>>>> ---
>>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>>> +++++++++++++++++++++++
>>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>>   3 files changed, 252 insertions(+)
>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>>
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> new file mode 100644
>>>>> index 000000000000..7bfd0cf44d35
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>>> @@ -0,0 +1,190 @@
>>>>> +/**
>>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>>> known to the
>>>>> + * driver.
>>>>> + *
>>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>>> drm_i915_query.
>>>>> + * For this new query we are adding the new query id 
>>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>>> + * at &drm_i915_query_item.query_id.
>>>>> + */
>>>>> +struct __drm_i915_memory_region_info {
>>>>> +    /** @region: The class:instance pair encoding */
>>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>>> +
>>>>> +    /** @rsvd0: MBZ */
>>>>> +    __u32 rsvd0;
>>>>> +
>>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>>> +    __u64 probed_size;
>>>>> +
>>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>>> unknown) */
>>>>> +    __u64 unallocated_size;
>>>>> +
>>>>> +    union {
>>>>> +        /** @rsvd1: MBZ */
>>>>> +        __u64 rsvd1[8];
>>>>> +        struct {
>>>>> +            /**
>>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>>> +             * that is CPU accessible. (-1 = unknown).
>>>>> +             *
>>>>> +             * This will be always be <= @probed_size, and the
>>>>> +             * remainder(if there is any) will not be CPU
>>>>> +             * accessible.
>>>>> +             */
>>>>> +            __u64 probed_cpu_visible_size;
>>>>> +        };
>>>>
>>>>
>>>> Trying to implement userspace support in Vulkan for this, I have an 
>>>> additional question about the value of probed_cpu_visible_size.
>>>>
>>>> When is it set to -1?
>>>>
>>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>>
>>>> After that, it should either be the entire lmem or something smaller.
>>>>
>>>>
>>>> -Lionel
>>>
>>>
>>> Other pain point of this new uAPI, previously we could query the 
>>> unallocated size for each heap.
>>
>> unallocated_size should always give the same value as probed_size. We 
>> have the avail tracking, but we don't currently expose that through 
>> unallocated_size, due to lack of real userspace/user etc.
>>
>>>
>>> Now lmem is effectively divided into 2 heaps, but unallocated_size is 
>>> tracking allocation from both parts of lmem.
>>
>> Yeah, if we ever properly expose the unallocated_size, then we could 
>> also just add unallocated_cpu_visible_size.
>>
>>>
>>> Is adding new I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE out of question?
>>
>> I don't think it's out of the question...
>>
>> I guess user-space should be able to get the current flag behaviour 
>> just by specifying: device, system. And it does give more flexibility to 
>> allow something like: device, device-nm, smem.
>>
>> We can also drop the probed_cpu_visible_size, which would now just be 
>> the probed_size with device/device-nm. And if we lack device-nm, then 
>> the entire thing must be CPU mappable.
>>
>> One of the downsides though, is that we can no longer easily mix 
>> object pages from both device + device-nm, which we could previously 
>> do when we didn't specify the flag. At least according to the current 
>> design/behaviour for @regions that would not be allowed. I guess some 
>> kind of new flag like ALLOC_MIXED or so? Although currently that is 
>> only possible with device + device-nm in ttm/i915.
> 
> 
> Thanks, I wasn't aware of the restrictions.
> 
> Adding unallocated_cpu_visible_size would be great.

So do we want this in the next version? i.e. do we already have a current 
real use case in mind for unallocated_size where probed_size is not good 
enough?
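
If we do add it, I'd picture it slotting into the same union as
probed_cpu_visible_size, roughly like the below. Only a sketch to anchor the
discussion, though, not final uapi, and the field name is just an assumption
for now:

		struct {
			__u64 probed_cpu_visible_size;

			/*
			 * Hypothetical addition: estimate of the CPU visible
			 * memory remaining (-1 = unknown). Would presumably
			 * always be <= @unallocated_size.
			 */
			__u64 unallocated_cpu_visible_size;
		};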

> 
> 
> -Lionel
> 
> 
>>
>>>
>>>
>>> -Lionel
>>>
>>>
>>>>
>>>>
>>>>> +    };
>>>>> +};
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>>> behaviour, with added
>>>>> + * extension support using struct i915_user_extension.
>>>>> + *
>>>>> + * Note that new buffer flags should be added here, at least for 
>>>>> the stuff that
>>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>>> create the object
>>>>> + * with gem_create, and another to apply various parameters, 
>>>>> however this
>>>>> + * creates some ambiguity for the params which are considered 
>>>>> immutable. Also in
>>>>> + * general we're phasing out the various SET/GET ioctls.
>>>>> + */
>>>>> +struct __drm_i915_gem_create_ext {
>>>>> +    /**
>>>>> +     * @size: Requested size for the object.
>>>>> +     *
>>>>> +     * The (page-aligned) allocated size for the object will be 
>>>>> returned.
>>>>> +     *
>>>>> +     * Note that for some devices we have might have further minimum
>>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>>> local-memory.
>>>>> +     * However in general the final size here should always 
>>>>> reflect any
>>>>> +     * rounding up, if for example using the 
>>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>>> +     * extension to place the object in device local-memory.
>>>>> +     */
>>>>> +    __u64 size;
>>>>> +    /**
>>>>> +     * @handle: Returned handle for the object.
>>>>> +     *
>>>>> +     * Object handles are nonzero.
>>>>> +     */
>>>>> +    __u32 handle;
>>>>> +    /**
>>>>> +     * @flags: Optional flags.
>>>>> +     *
>>>>> +     * Supported values:
>>>>> +     *
>>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>>> kernel that
>>>>> +     * the object will need to be accessed via the CPU.
>>>>> +     *
>>>>> +     * Only valid when placing objects in 
>>>>> I915_MEMORY_CLASS_DEVICE, and
>>>>> +     * only strictly required on platforms where only some of the 
>>>>> device
>>>>> +     * memory is directly visible or mappable through the CPU, 
>>>>> like on DG2+.
>>>>> +     *
>>>>> +     * One of the placements MUST also be 
>>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>>> +     * ensure we can always spill the allocation to system memory, 
>>>>> if we
>>>>> +     * can't place the object in the mappable part of
>>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>>> +     *
>>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>>> objects that can
>>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>>> don't
>>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together 
>>>>> with
>>>>> +     * flat-CCS.
>>>>> +     *
>>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. Note 
>>>>> that the
>>>>> +     * kernel can still migrate the object to the mappable part, 
>>>>> as a last
>>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>>> might be
>>>>> +     * expensive, and so ideally should be avoided.
>>>>> +     */
>>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>>> +    __u32 flags;
>>>>> +    /**
>>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>>> +     *
>>>>> +     * This will be useful in the future when we need to support 
>>>>> several
>>>>> +     * different extensions, and we need to apply more than one when
>>>>> +     * creating the object. See struct i915_user_extension.
>>>>> +     *
>>>>> +     * If we don't supply any extensions then we get the same old 
>>>>> gem_create
>>>>> +     * behaviour.
>>>>> +     *
>>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>>> +     *
>>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>>> +     */
>>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>>> +    __u64 extensions;
>>>>> +};
>>>>> +
>>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>>> +
>>>>> +/**
>>>>> + * struct __drm_i915_query_vma_info
>>>>> + *
>>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>>> returning its set
>>>>> + * of attributes.
>>>>> + *
>>>>> + * .. code-block:: C
>>>>> + *
>>>>> + *    struct drm_i915_query_vma_info info = {};
>>>>> + *    struct drm_i915_query_item item = {
>>>>> + *        .data_ptr = (uintptr_t)&info,
>>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>>> + *    };
>>>>> + *    struct drm_i915_query query = {
>>>>> + *        .num_items = 1,
>>>>> + *        .items_ptr = (uintptr_t)&item,
>>>>> + *    };
>>>>> + *    int err;
>>>>> + *
>>>>> + *    // Unlike some other types of queries, there is no need to 
>>>>> first query
>>>>> + *    // the size of the data_ptr blob here, since we already know 
>>>>> ahead of
>>>>> + *    // time how big this needs to be.
>>>>> + *    item.length = sizeof(info);
>>>>> + *
>>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma we 
>>>>> wish
>>>>> + *    // to query, before then firing off the query.
>>>>> + *    info.vm_id = vm_id;
>>>>> + *    info.offset = gtt_address;
>>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>>> + *    if (err || item.length < 0) ...
>>>>> + *
>>>>> + *    // If all went well we can now inspect the returned attributes.
>>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>>> + */
>>>>> +struct __drm_i915_query_vma_info {
>>>>> +    /**
>>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>>> the value
>>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>>> +     */
>>>>> +    __u32 vm_id;
>>>>> +    /** @pad: MBZ. */
>>>>> +    __u32 pad;
>>>>> +    /**
>>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>>> the kernel
>>>>> +     * will use to perform the lookup.
>>>>> +     */
>>>>> +    __u64 offset;
>>>>> +    /**
>>>>> +     * @attributes: The returned attributes for the given vma.
>>>>> +     *
>>>>> +     * Possible values:
>>>>> +     *
>>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>>> backing the
>>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>>> the vma is
>>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>>> the CPU
>>>>> +     * cannot directly access(this is only possible on discrete 
>>>>> devices with
>>>>> +     * a small BAR). Attempting to MMAP and fault such an object will
>>>>> +     * require the kernel first synchronising any GPU work tied to 
>>>>> the
>>>>> +     * object, before then migrating the pages, either to the CPU 
>>>>> accessible
>>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>>> +     * placements permit it. See 
>>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>>> +     *
>>>>> +     * Note that this is inherently racy.
>>>>> +     */
>>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>>> +    __u64 attributes;
>>>>> +    /** @rsvd: MBZ */
>>>>> +    __u32 rsvd[4];
>>>>> +};
>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> new file mode 100644
>>>>> index 000000000000..be3d9bcdd86d
>>>>> --- /dev/null
>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>> @@ -0,0 +1,58 @@
>>>>> +==========================
>>>>> +I915 Small BAR RFC Section
>>>>> +==========================
>>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>>> local-memory(i.e
>>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>>> might still be
>>>>> +smaller than the total probed_size. In such cases, only some 
>>>>> subset of
>>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>>> first 256M),
>>>>> +while the remainder is only accessible via the GPU.
>>>>> +
>>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>>> +----------------------------------------------
>>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>>> CPU access.
>>>>> +This becomes important when placing an object in 
>>>>> I915_MEMORY_CLASS_DEVICE, where
>>>>> +underneath the device has a small BAR, meaning only some portion 
>>>>> of it is CPU
>>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>>> access is not
>>>>> +required, and prioritize using the non-CPU visible portion of
>>>>> +I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_gem_create_ext
>>>>> +
>>>>> +probed_cpu_visible_size attribute
>>>>> +---------------------------------
>>>>> +New struct __drm_i915_memory_region_info attribute which returns the 
>>>>> total size of the
>>>>> +CPU accessible portion, for the particular region. This should 
>>>>> only be
>>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>>> +
>>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>>> with the
>>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>>> visible portion,
>>>>> +where the total size of the heap needs to be known.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_memory_region_info
>>>>> +
>>>>> +DRM_I915_QUERY_VMA_INFO query
>>>>> +-----------------------------
>>>>> +Query the attributes of some vma. Given a vm and GTT offset, find the
>>>>> +respective vma, and return its set of attributes. For now we only 
>>>>> support
>>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>>> object/vma is
>>>>> +currently placed in memory that is accessible by the CPU. This 
>>>>> should always be
>>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>>> I915_MEMORY_CLASS_DEVICE
>>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>>> object will
>>>>> +likely first require migrating the pages.
>>>>> +
>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>> +   :functions: __drm_i915_query_vma_info
>>>>> +
>>>>> +Error Capture restrictions
>>>>> +--------------------------
>>>>> +With error capture we have two new restrictions:
>>>>> +
>>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>>> pages are not
>>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>>> free to skip
>>>>> +    trying to capture them.
>>>>> +
>>>>> +    2) On discrete we now reject error capture on recoverable 
>>>>> contexts. In the
>>>>> +    future the kernel may want to blit during error capture, when 
>>>>> for example
>>>>> +    something is not currently CPU accessible.
>>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>>> b/Documentation/gpu/rfc/index.rst
>>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>>> --- a/Documentation/gpu/rfc/index.rst
>>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>>   .. toctree::
>>>>>         i915_scheduler.rst
>>>>> +
>>>>> +.. toctree::
>>>>> +
>>>>> +    i915_small_bar.rst
>>>>
>>>>
>>>
> 

^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-05-03 14:27           ` [Intel-gfx] " Matthew Auld
@ 2022-05-03 14:37             ` Lionel Landwerlin
  -1 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-03 14:37 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, Tvrtko Ursulin, Jordan Justen, dri-devel,
	Kenneth Graunke, Jon Bloomfield, Daniel Vetter, mesa-dev,
	Akeem G Abodunrin

On 03/05/2022 17:27, Matthew Auld wrote:
> On 03/05/2022 11:39, Lionel Landwerlin wrote:
>> On 03/05/2022 13:22, Matthew Auld wrote:
>>> On 02/05/2022 09:53, Lionel Landwerlin wrote:
>>>> On 02/05/2022 10:54, Lionel Landwerlin wrote:
>>>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>>>
>>>>>> v2:
>>>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>>>    - Rework error capture interactions, including no longer needing
>>>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>>>
>>>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>>>> Cc: mesa-dev@lists.freedesktop.org
>>>>>> ---
>>>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>>>> +++++++++++++++++++++++
>>>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>>>   3 files changed, 252 insertions(+)
>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>>>
>>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>>>> new file mode 100644
>>>>>> index 000000000000..7bfd0cf44d35
>>>>>> --- /dev/null
>>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>>>> @@ -0,0 +1,190 @@
>>>>>> +/**
>>>>>> + * struct __drm_i915_memory_region_info - Describes one region 
>>>>>> as known to the
>>>>>> + * driver.
>>>>>> + *
>>>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>>>> drm_i915_query.
>>>>>> + * For this new query we are adding the new query id 
>>>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>>>> + * at &drm_i915_query_item.query_id.
>>>>>> + */
>>>>>> +struct __drm_i915_memory_region_info {
>>>>>> +    /** @region: The class:instance pair encoding */
>>>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>>>> +
>>>>>> +    /** @rsvd0: MBZ */
>>>>>> +    __u32 rsvd0;
>>>>>> +
>>>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>>>> +    __u64 probed_size;
>>>>>> +
>>>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>>>> unknown) */
>>>>>> +    __u64 unallocated_size;
>>>>>> +
>>>>>> +    union {
>>>>>> +        /** @rsvd1: MBZ */
>>>>>> +        __u64 rsvd1[8];
>>>>>> +        struct {
>>>>>> +            /**
>>>>>> +             * @probed_cpu_visible_size: Memory probed by the 
>>>>>> driver
>>>>>> +             * that is CPU accessible. (-1 = unknown).
>>>>>> +             *
>>>>>> +             * This will be always be <= @probed_size, and the
>>>>>> +             * remainder(if there is any) will not be CPU
>>>>>> +             * accessible.
>>>>>> +             */
>>>>>> +            __u64 probed_cpu_visible_size;
>>>>>> +        };
>>>>>
>>>>>
>>>>> Trying to implement userspace support in Vulkan for this, I have 
>>>>> an additional question about the value of probed_cpu_visible_size.
>>>>>
>>>>> When is it set to -1?
>>>>>
>>>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>>>
>>>>> After that, it should either be the entire lmem or something smaller.
>>>>>
>>>>>
>>>>> -Lionel
>>>>
>>>>
>>>> Other pain point of this new uAPI, previously we could query the 
>>>> unallocated size for each heap.
>>>
>>> unallocated_size should always give the same value as probed_size. 
>>> We have the avail tracking, but we don't currently expose that 
>>> through unallocated_size, due to lack of real userspace/user etc.
>>>
>>>>
>>>> Now lmem is effectively divided into 2 heaps, but unallocated_size 
>>>> is tracking allocation from both parts of lmem.
>>>
>>> Yeah, if we ever properly expose the unallocated_size, then we could 
>>> also just add unallocated_cpu_visible_size.
>>>
>>>>
>>>> Is adding new I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE out of question?
>>>
>>> I don't think it's out of the question...
>>>
>>> I guess user-space should be able to get the current flag behaviour 
>>> just by specifying: device, system. And it does give more flexibility 
>>> to allow something like: device, device-nm, smem.
>>>
>>> We can also drop the probed_cpu_visible_size, which would now just 
>>> be the probed_size with device/device-nm. And if we lack device-nm, 
>>> then the entire thing must be CPU mappable.
>>>
>>> One of the downsides though, is that we can no longer easily mix 
>>> object pages from both device + device-nm, which we could previously 
>>> do when we didn't specify the flag. At least according to the 
>>> current design/behaviour for @regions that would not be allowed. I 
>>> guess some kind of new flag like ALLOC_MIXED or so? Although 
>>> currently that is only possible with device + device-nm in ttm/i915.
>>
>>
>> Thanks, I wasn't aware of the restrictions.
>>
>> Adding unallocated_cpu_visible_size would be great.
>
> So do we want this in the next version? i.e. do we already have a current 
> real use case in mind for unallocated_size where probed_size is not 
> good enough?


Yeah, in the next iteration.

We're using unallocated_size to implement VK_EXT_memory_budget and since 
I'm going to expose lmem mappable/unmappable as 2 different heaps on 
Vulkan, I would use that there too.
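
To make the intended use concrete, this is roughly what the heap setup on the
Vulkan driver side would look like, assuming the RFC keeps the
probed_cpu_visible_size name and grows the hypothetical
unallocated_cpu_visible_size discussed above (so purely a sketch against
assumed field names, not final uapi):

struct lmem_heaps {
	__u64 mappable_size;     /* VkMemoryHeap::size for the HOST_VISIBLE heap */
	__u64 mappable_budget;   /* VK_EXT_memory_budget heapBudget for it */
	__u64 unmappable_size;   /* VkMemoryHeap::size for the device-only heap */
	__u64 unmappable_budget;
};

static void fill_lmem_heaps(const struct __drm_i915_memory_region_info *lmem,
			    struct lmem_heaps *out)
{
	/* Split lmem into the CPU mappable part and the remainder. */
	out->mappable_size = lmem->probed_cpu_visible_size;
	out->unmappable_size = lmem->probed_size - lmem->probed_cpu_visible_size;

	/* Budget is whatever is still unallocated in each half of lmem. */
	out->mappable_budget = lmem->unallocated_cpu_visible_size;
	out->unmappable_budget = lmem->unallocated_size -
				 lmem->unallocated_cpu_visible_size;
}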


-Lionel


>
>>
>>
>> -Lionel
>>
>>
>>>
>>>>
>>>>
>>>> -Lionel
>>>>
>>>>
>>>>>
>>>>>
>>>>>> +    };
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>>>> behaviour, with added
>>>>>> + * extension support using struct i915_user_extension.
>>>>>> + *
>>>>>> + * Note that new buffer flags should be added here, at least for 
>>>>>> the stuff that
>>>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>>>> create the object
>>>>>> + * with gem_create, and another to apply various parameters, 
>>>>>> however this
>>>>>> + * creates some ambiguity for the params which are considered 
>>>>>> immutable. Also in
>>>>>> + * general we're phasing out the various SET/GET ioctls.
>>>>>> + */
>>>>>> +struct __drm_i915_gem_create_ext {
>>>>>> +    /**
>>>>>> +     * @size: Requested size for the object.
>>>>>> +     *
>>>>>> +     * The (page-aligned) allocated size for the object will be 
>>>>>> returned.
>>>>>> +     *
>>>>>> +     * Note that for some devices we have might have further 
>>>>>> minimum
>>>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>>>> local-memory.
>>>>>> +     * However in general the final size here should always 
>>>>>> reflect any
>>>>>> +     * rounding up, if for example using the 
>>>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>>>> +     * extension to place the object in device local-memory.
>>>>>> +     */
>>>>>> +    __u64 size;
>>>>>> +    /**
>>>>>> +     * @handle: Returned handle for the object.
>>>>>> +     *
>>>>>> +     * Object handles are nonzero.
>>>>>> +     */
>>>>>> +    __u32 handle;
>>>>>> +    /**
>>>>>> +     * @flags: Optional flags.
>>>>>> +     *
>>>>>> +     * Supported values:
>>>>>> +     *
>>>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>>>> kernel that
>>>>>> +     * the object will need to be accessed via the CPU.
>>>>>> +     *
>>>>>> +     * Only valid when placing objects in 
>>>>>> I915_MEMORY_CLASS_DEVICE, and
>>>>>> +     * only strictly required on platforms where only some of 
>>>>>> the device
>>>>>> +     * memory is directly visible or mappable through the CPU, 
>>>>>> like on DG2+.
>>>>>> +     *
>>>>>> +     * One of the placements MUST also be 
>>>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>>>> +     * ensure we can always spill the allocation to system 
>>>>>> memory, if we
>>>>>> +     * can't place the object in the mappable part of
>>>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>>>> +     *
>>>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>>>> objects that can
>>>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we 
>>>>>> therefore don't
>>>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS 
>>>>>> together with
>>>>>> +     * flat-CCS.
>>>>>> +     *
>>>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. 
>>>>>> Note that the
>>>>>> +     * kernel can still migrate the object to the mappable part, 
>>>>>> as a last
>>>>>> +     * resort, if userspace ever CPU faults this object, but 
>>>>>> this might be
>>>>>> +     * expensive, and so ideally should be avoided.
>>>>>> +     */
>>>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>>>> +    __u32 flags;
>>>>>> +    /**
>>>>>> +     * @extensions: The chain of extensions to apply to this 
>>>>>> object.
>>>>>> +     *
>>>>>> +     * This will be useful in the future when we need to support 
>>>>>> several
>>>>>> +     * different extensions, and we need to apply more than one 
>>>>>> when
>>>>>> +     * creating the object. See struct i915_user_extension.
>>>>>> +     *
>>>>>> +     * If we don't supply any extensions then we get the same 
>>>>>> old gem_create
>>>>>> +     * behaviour.
>>>>>> +     *
>>>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>>>> +     *
>>>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>>>> +     */
>>>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>>>> +    __u64 extensions;
>>>>>> +};
>>>>>> +
>>>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>>>> +
>>>>>> +/**
>>>>>> + * struct __drm_i915_query_vma_info
>>>>>> + *
>>>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>>>> returning its set
>>>>>> + * of attributes.
>>>>>> + *
>>>>>> + * .. code-block:: C
>>>>>> + *
>>>>>> + *    struct drm_i915_query_vma_info info = {};
>>>>>> + *    struct drm_i915_query_item item = {
>>>>>> + *        .data_ptr = (uintptr_t)&info,
>>>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>>>> + *    };
>>>>>> + *    struct drm_i915_query query = {
>>>>>> + *        .num_items = 1,
>>>>>> + *        .items_ptr = (uintptr_t)&item,
>>>>>> + *    };
>>>>>> + *    int err;
>>>>>> + *
>>>>>> + *    // Unlike some other types of queries, there is no need to 
>>>>>> first query
>>>>>> + *    // the size of the data_ptr blob here, since we already 
>>>>>> know ahead of
>>>>>> + *    // time how big this needs to be.
>>>>>> + *    item.length = sizeof(info);
>>>>>> + *
>>>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma 
>>>>>> we wish
>>>>>> + *    // to query, before then firing off the query.
>>>>>> + *    info.vm_id = vm_id;
>>>>>> + *    info.offset = gtt_address;
>>>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>>>> + *    if (err || item.length < 0) ...
>>>>>> + *
>>>>>> + *    // If all went well we can now inspect the returned 
>>>>>> attributes.
>>>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) 
>>>>>> ...
>>>>>> + */
>>>>>> +struct __drm_i915_query_vma_info {
>>>>>> +    /**
>>>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>>>> the value
>>>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>>>> +     */
>>>>>> +    __u32 vm_id;
>>>>>> +    /** @pad: MBZ. */
>>>>>> +    __u32 pad;
>>>>>> +    /**
>>>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>>>> the kernel
>>>>>> +     * will use to perform the lookup.
>>>>>> +     */
>>>>>> +    __u64 offset;
>>>>>> +    /**
>>>>>> +     * @attributes: The returned attributes for the given vma.
>>>>>> +     *
>>>>>> +     * Possible values:
>>>>>> +     *
>>>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>>>> backing the
>>>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>>>> the vma is
>>>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, 
>>>>>> which the CPU
>>>>>> +     * cannot directly access(this is only possible on discrete 
>>>>>> devices with
>>>>>> +     * a small BAR). Attempting to MMAP and fault such an object 
>>>>>> will
>>>>>> +     * require the kernel first synchronising any GPU work tied 
>>>>>> to the
>>>>>> +     * object, before then migrating the pages, either to the 
>>>>>> CPU accessible
>>>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>>>> +     * placements permit it. See 
>>>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>>>> +     *
>>>>>> +     * Note that this is inherently racy.
>>>>>> +     */
>>>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>>>> +    __u64 attributes;
>>>>>> +    /** @rsvd: MBZ */
>>>>>> +    __u32 rsvd[4];
>>>>>> +};
>>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>>> new file mode 100644
>>>>>> index 000000000000..be3d9bcdd86d
>>>>>> --- /dev/null
>>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>>> @@ -0,0 +1,58 @@
>>>>>> +==========================
>>>>>> +I915 Small BAR RFC Section
>>>>>> +==========================
>>>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>>>> local-memory(i.e
>>>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>>>> might still be
>>>>>> +smaller than the total probed_size. In such cases, only some 
>>>>>> subset of
>>>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>>>> first 256M),
>>>>>> +while the remainder is only accessible via the GPU.
>>>>>> +
>>>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>>>> +----------------------------------------------
>>>>>> +New gem_create_ext flag to tell the kernel that a BO will 
>>>>>> require CPU access.
>>>>>> +This becomes important when placing an object in 
>>>>>> I915_MEMORY_CLASS_DEVICE, where
>>>>>> +underneath the device has a small BAR, meaning only some portion 
>>>>>> of it is CPU
>>>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>>>> access is not
>>>>>> +required, and prioritize using the non-CPU visible portion of
>>>>>> +I915_MEMORY_CLASS_DEVICE.
>>>>>> +
>>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>>> +   :functions: __drm_i915_gem_create_ext
>>>>>> +
>>>>>> +probed_cpu_visible_size attribute
>>>>>> +---------------------------------
>>>>>> +New struct __drm_i915_memory_region_info attribute which returns the 
>>>>>> total size of the
>>>>>> +CPU accessible portion, for the particular region. This should 
>>>>>> only be
>>>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>>>> +
>>>>>> +Vulkan will need this as part of creating a separate 
>>>>>> VkMemoryHeap with the
>>>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>>>> visible portion,
>>>>>> +where the total size of the heap needs to be known.
>>>>>> +
>>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>>> +   :functions: __drm_i915_memory_region_info
>>>>>> +
>>>>>> +DRM_I915_QUERY_VMA_INFO query
>>>>>> +-----------------------------
>>>>>> +Query the attributes of some vma. Given a vm and GTT offset, 
>>>>>> find the
>>>>>> +respective vma, and return its set of attributes. For now we 
>>>>>> only support
>>>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>>>> object/vma is
>>>>>> +currently placed in memory that is accessible by the CPU. This 
>>>>>> should always be
>>>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>>>> I915_MEMORY_CLASS_DEVICE
>>>>>> +matches the probed_size. If this is not set then CPU faulting 
>>>>>> the object will
>>>>>> +likely first require migrating the pages.
>>>>>> +
>>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>>> +   :functions: __drm_i915_query_vma_info
>>>>>> +
>>>>>> +Error Capture restrictions
>>>>>> +--------------------------
>>>>>> +With error capture we have two new restrictions:
>>>>>> +
>>>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>>>> pages are not
>>>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>>>> free to skip
>>>>>> +    trying to capture them.
>>>>>> +
>>>>>> +    2) On discrete we now reject error capture on recoverable 
>>>>>> contexts. In the
>>>>>> +    future the kernel may want to blit during error capture, 
>>>>>> when for example
>>>>>> +    something is not currently CPU accessible.
>>>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>>>> b/Documentation/gpu/rfc/index.rst
>>>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>>>> --- a/Documentation/gpu/rfc/index.rst
>>>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>>>   .. toctree::
>>>>>>         i915_scheduler.rst
>>>>>> +
>>>>>> +.. toctree::
>>>>>> +
>>>>>> +    i915_small_bar.rst
>>>>>
>>>>>
>>>>
>>


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
@ 2022-05-03 14:37             ` Lionel Landwerlin
  0 siblings, 0 replies; 50+ messages in thread
From: Lionel Landwerlin @ 2022-05-03 14:37 UTC (permalink / raw)
  To: Matthew Auld, intel-gfx
  Cc: Thomas Hellström, dri-devel, Kenneth Graunke, Daniel Vetter,
	mesa-dev

On 03/05/2022 17:27, Matthew Auld wrote:
> On 03/05/2022 11:39, Lionel Landwerlin wrote:
>> On 03/05/2022 13:22, Matthew Auld wrote:
>>> On 02/05/2022 09:53, Lionel Landwerlin wrote:
>>>> On 02/05/2022 10:54, Lionel Landwerlin wrote:
>>>>> On 20/04/2022 20:13, Matthew Auld wrote:
>>>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>>>
>>>>>> v2:
>>>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>>>    - Rework error capture interactions, including no longer needing
>>>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>>>
>>>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>>>> Cc: mesa-dev@lists.freedesktop.org
>>>>>> ---
>>>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>>>> +++++++++++++++++++++++
>>>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>>>   3 files changed, 252 insertions(+)
>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>>>
>>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>>>> new file mode 100644
>>>>>> index 000000000000..7bfd0cf44d35
>>>>>> --- /dev/null
>>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>>>> @@ -0,0 +1,190 @@
>>>>>> +/**
>>>>>> + * struct __drm_i915_memory_region_info - Describes one region 
>>>>>> as known to the
>>>>>> + * driver.
>>>>>> + *
>>>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>>>> drm_i915_query.
>>>>>> + * For this new query we are adding the new query id 
>>>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>>>> + * at &drm_i915_query_item.query_id.
>>>>>> + */
>>>>>> +struct __drm_i915_memory_region_info {
>>>>>> +    /** @region: The class:instance pair encoding */
>>>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>>>> +
>>>>>> +    /** @rsvd0: MBZ */
>>>>>> +    __u32 rsvd0;
>>>>>> +
>>>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>>>> +    __u64 probed_size;
>>>>>> +
>>>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>>>> unknown) */
>>>>>> +    __u64 unallocated_size;
>>>>>> +
>>>>>> +    union {
>>>>>> +        /** @rsvd1: MBZ */
>>>>>> +        __u64 rsvd1[8];
>>>>>> +        struct {
>>>>>> +            /**
>>>>>> +             * @probed_cpu_visible_size: Memory probed by the 
>>>>>> driver
>>>>>> +             * that is CPU accessible. (-1 = unknown).
>>>>>> +             *
>>>>>> +             * This will be always be <= @probed_size, and the
>>>>>> +             * remainder(if there is any) will not be CPU
>>>>>> +             * accessible.
>>>>>> +             */
>>>>>> +            __u64 probed_cpu_visible_size;
>>>>>> +        };
>>>>>
>>>>>
>>>>> Trying to implement userspace support in Vulkan for this, I have 
>>>>> an additional question about the value of probed_cpu_visible_size.
>>>>>
>>>>> When is it set to -1?
>>>>>
>>>>> I'm guessing before there is support for this value it'll be 0 (MBZ).
>>>>>
>>>>> After that, it should either be the entire lmem or something smaller.
>>>>>
>>>>>
>>>>> -Lionel
>>>>
>>>>
>>>> Other pain point of this new uAPI, previously we could query the 
>>>> unallocated size for each heap.
>>>
>>> unallocated_size should always give the same value as probed_size. 
>>> We have the avail tracking, but we don't currently expose that 
>>> through unallocated_size, due to lack of real userspace/user etc.
>>>
>>>>
>>>> Now lmem is effectively divided into 2 heaps, but unallocated_size 
>>>> is tracking allocation from both parts of lmem.
>>>
>>> Yeah, if we ever properly expose the unallocated_size, then we could 
>>> also just add unallocated_cpu_visible_size.
>>>
>>>>
>>>> Is adding new I915_MEMORY_CLASS_DEVICE_NON_MAPPABLE out of question?
>>>
>>> I don't think it's out of the question...
>>>
>>> I guess user-space should be able to get the current flag behaviour 
>>> just by specifying: device, system. And it does give more flexibility 
>>> to allow something like: device, device-nm, smem.
>>>
>>> We can also drop the probed_cpu_visible_size, which would now just 
>>> be the probed_size with device/device-nm. And if we lack device-nm, 
>>> then the entire thing must be CPU mappable.
>>>
>>> One of the downsides though, is that we can no longer easily mix 
>>> object pages from both device + device-nm, which we could previously 
>>> do when we didn't specify the flag. At least according to the 
>>> current design/behaviour for @regions that would not be allowed. I 
>>> guess some kind of new flag like ALLOC_MIXED or so? Although 
>>> currently that is only possible with device + device-nm in ttm/i915.
>>
>>
>> Thanks, I wasn't aware of the restrictions.
>>
>> Adding unallocated_cpu_visible_size would be great.
>
> So do we want this in the next version? i.e. do we already have a current 
> real use case in mind for unallocated_size where probed_size is not 
> good enough?


Yeah, in the next iteration.

We're using unallocated_size to implement VK_EXT_memory_budget and since 
I'm going to expose lmem mappable/unmappable as 2 different heaps on 
Vulkan, I would use that there too.


-Lionel


>
>>
>>
>> -Lionel
>>
>>
>>>
>>>>
>>>>
>>>> -Lionel
>>>>
>>>>
>>>>>
>>>>>
>>>>>> +    };
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>>>> behaviour, with added
>>>>>> + * extension support using struct i915_user_extension.
>>>>>> + *
>>>>>> + * Note that new buffer flags should be added here, at least for 
>>>>>> the stuff that
>>>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>>>> create the object
>>>>>> + * with gem_create, and another to apply various parameters, 
>>>>>> however this
>>>>>> + * creates some ambiguity for the params which are considered 
>>>>>> immutable. Also in
>>>>>> + * general we're phasing out the various SET/GET ioctls.
>>>>>> + */
>>>>>> +struct __drm_i915_gem_create_ext {
>>>>>> +    /**
>>>>>> +     * @size: Requested size for the object.
>>>>>> +     *
>>>>>> +     * The (page-aligned) allocated size for the object will be 
>>>>>> returned.
>>>>>> +     *
>>>>>> +     * Note that for some devices we have might have further 
>>>>>> minimum
>>>>>> +     * page-size restrictions(larger than 4K), like for device 
>>>>>> local-memory.
>>>>>> +     * However in general the final size here should always 
>>>>>> reflect any
>>>>>> +     * rounding up, if for example using the 
>>>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>>>> +     * extension to place the object in device local-memory.
>>>>>> +     */
>>>>>> +    __u64 size;
>>>>>> +    /**
>>>>>> +     * @handle: Returned handle for the object.
>>>>>> +     *
>>>>>> +     * Object handles are nonzero.
>>>>>> +     */
>>>>>> +    __u32 handle;
>>>>>> +    /**
>>>>>> +     * @flags: Optional flags.
>>>>>> +     *
>>>>>> +     * Supported values:
>>>>>> +     *
>>>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>>>> kernel that
>>>>>> +     * the object will need to be accessed via the CPU.
>>>>>> +     *
>>>>>> +     * Only valid when placing objects in 
>>>>>> I915_MEMORY_CLASS_DEVICE, and
>>>>>> +     * only strictly required on platforms where only some of 
>>>>>> the device
>>>>>> +     * memory is directly visible or mappable through the CPU, 
>>>>>> like on DG2+.
>>>>>> +     *
>>>>>> +     * One of the placements MUST also be 
>>>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>>>> +     * ensure we can always spill the allocation to system 
>>>>>> memory, if we
>>>>>> +     * can't place the object in the mappable part of
>>>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>>>> +     *
>>>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>>>> objects that can
>>>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we 
>>>>>> therefore don't
>>>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS 
>>>>>> together with
>>>>>> +     * flat-CCS.
>>>>>> +     *
>>>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. 
>>>>>> Note that the
>>>>>> +     * kernel can still migrate the object to the mappable part, 
>>>>>> as a last
>>>>>> +     * resort, if userspace ever CPU faults this object, but 
>>>>>> this might be
>>>>>> +     * expensive, and so ideally should be avoided.
>>>>>> +     */
>>>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>>>> +    __u32 flags;
>>>>>> +    /**
>>>>>> +     * @extensions: The chain of extensions to apply to this 
>>>>>> object.
>>>>>> +     *
>>>>>> +     * This will be useful in the future when we need to support 
>>>>>> several
>>>>>> +     * different extensions, and we need to apply more than one 
>>>>>> when
>>>>>> +     * creating the object. See struct i915_user_extension.
>>>>>> +     *
>>>>>> +     * If we don't supply any extensions then we get the same 
>>>>>> old gem_create
>>>>>> +     * behaviour.
>>>>>> +     *
>>>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>>>> +     *
>>>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>>>> +     */
>>>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>>>> +    __u64 extensions;
>>>>>> +};
>>>>>> +
>>>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>>>> +
>>>>>> +/**
>>>>>> + * struct __drm_i915_query_vma_info
>>>>>> + *
>>>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>>>> returning its set
>>>>>> + * of attributes.
>>>>>> + *
>>>>>> + * .. code-block:: C
>>>>>> + *
>>>>>> + *    struct drm_i915_query_vma_info info = {};
>>>>>> + *    struct drm_i915_query_item item = {
>>>>>> + *        .data_ptr = (uintptr_t)&info,
>>>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>>>> + *    };
>>>>>> + *    struct drm_i915_query query = {
>>>>>> + *        .num_items = 1,
>>>>>> + *        .items_ptr = (uintptr_t)&item,
>>>>>> + *    };
>>>>>> + *    int err;
>>>>>> + *
>>>>>> + *    // Unlike some other types of queries, there is no need to 
>>>>>> first query
>>>>>> + *    // the size of the data_ptr blob here, since we already 
>>>>>> know ahead of
>>>>>> + *    // time how big this needs to be.
>>>>>> + *    item.length = sizeof(info);
>>>>>> + *
>>>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma 
>>>>>> we wish
>>>>>> + *    // to query, before then firing off the query.
>>>>>> + *    info.vm_id = vm_id;
>>>>>> + *    info.offset = gtt_address;
>>>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>>>> + *    if (err || item.length < 0) ...
>>>>>> + *
>>>>>> + *    // If all went well we can now inspect the returned 
>>>>>> attributes.
>>>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) 
>>>>>> ...
>>>>>> + */
>>>>>> +struct __drm_i915_query_vma_info {
>>>>>> +    /**
>>>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>>>> the value
>>>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>>>> +     */
>>>>>> +    __u32 vm_id;
>>>>>> +    /** @pad: MBZ. */
>>>>>> +    __u32 pad;
>>>>>> +    /**
>>>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>>>> the kernel
>>>>>> +     * will use to perform the lookup.
>>>>>> +     */
>>>>>> +    __u64 offset;
>>>>>> +    /**
>>>>>> +     * @attributes: The returned attributes for the given vma.
>>>>>> +     *
>>>>>> +     * Possible values:
>>>>>> +     *
>>>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>>>> backing the
>>>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>>>> the vma is
>>>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, 
>>>>>> which the CPU
>>>>>> +     * cannot directly access(this is only possible on discrete 
>>>>>> devices with
>>>>>> +     * a small BAR). Attempting to MMAP and fault such an object 
>>>>>> will
>>>>>> +     * require the kernel first synchronising any GPU work tied 
>>>>>> to the
>>>>>> +     * object, before then migrating the pages, either to the 
>>>>>> CPU accessible
>>>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>>>> +     * placements permit it. See 
>>>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>>>> +     *
>>>>>> +     * Note that this is inherently racy.
>>>>>> +     */
>>>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>>>> +    __u64 attributes;
>>>>>> +    /** @rsvd: MBZ */
>>>>>> +    __u32 rsvd[4];
>>>>>> +};
>>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>>> new file mode 100644
>>>>>> index 000000000000..be3d9bcdd86d
>>>>>> --- /dev/null
>>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>>> @@ -0,0 +1,58 @@
>>>>>> +==========================
>>>>>> +I915 Small BAR RFC Section
>>>>>> +==========================
>>>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>>>> local-memory(i.e
>>>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>>>> might still be
>>>>>> +smaller than the total probed_size. In such cases, only some 
>>>>>> subset of
>>>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible(for example the 
>>>>>> first 256M),
>>>>>> +while the remainder is only accessible via the GPU.
>>>>>> +
>>>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>>>> +----------------------------------------------
>>>>>> +New gem_create_ext flag to tell the kernel that a BO will 
>>>>>> require CPU access.
>>>>>> +This becomes important when placing an object in 
>>>>>> I915_MEMORY_CLASS_DEVICE, where
>>>>>> +underneath the device has a small BAR, meaning only some portion 
>>>>>> of it is CPU
>>>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>>>> access is not
>>>>>> +required, and prioritize using the non-CPU visible portion of
>>>>>> +I915_MEMORY_CLASS_DEVICE.
>>>>>> +
>>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>>> +   :functions: __drm_i915_gem_create_ext
>>>>>> +
>>>>>> +probed_cpu_visible_size attribute
>>>>>> +---------------------------------
>>>>>> +New struct __drm_i915_memory_region_info attribute which returns the 
>>>>>> total size of the
>>>>>> +CPU accessible portion, for the particular region. This should 
>>>>>> only be
>>>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>>>> +
>>>>>> +Vulkan will need this as part of creating a separate 
>>>>>> VkMemoryHeap with the
>>>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>>>> visible portion,
>>>>>> +where the total size of the heap needs to be known.
>>>>>> +
>>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>>> +   :functions: __drm_i915_memory_region_info
>>>>>> +
>>>>>> +DRM_I915_QUERY_VMA_INFO query
>>>>>> +-----------------------------
>>>>>> +Query the attributes of some vma. Given a vm and GTT offset, 
>>>>>> find the
>>>>>> +respective vma, and return its set of attributes. For now we 
>>>>>> only support
>>>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>>>> object/vma is
>>>>>> +currently placed in memory that is accessible by the CPU. This 
>>>>>> should always be
>>>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>>>> I915_MEMORY_CLASS_DEVICE
>>>>>> +matches the probed_size. If this is not set then CPU faulting 
>>>>>> the object will
>>>>>> +likely first require migrating the pages.
>>>>>> +
>>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>>> +   :functions: __drm_i915_query_vma_info
>>>>>> +
>>>>>> +Error Capture restrictions
>>>>>> +--------------------------
>>>>>> +With error capture we have two new restrictions:
>>>>>> +
>>>>>> +    1) Error capture is best effort on small BAR systems; if the 
>>>>>> pages are not
>>>>>> +    CPU accessible, at the time of capture, then the kernel is 
>>>>>> free to skip
>>>>>> +    trying to capture them.
>>>>>> +
>>>>>> +    2) On discrete we now reject error capture on recoverable 
>>>>>> contexts. In the
>>>>>> +    future the kernel may want to blit during error capture, 
>>>>>> when for example
>>>>>> +    something is not currently CPU accessible.
>>>>>> diff --git a/Documentation/gpu/rfc/index.rst 
>>>>>> b/Documentation/gpu/rfc/index.rst
>>>>>> index 91e93a705230..5a3bd3924ba6 100644
>>>>>> --- a/Documentation/gpu/rfc/index.rst
>>>>>> +++ b/Documentation/gpu/rfc/index.rst
>>>>>> @@ -23,3 +23,7 @@ host such documentation:
>>>>>>   .. toctree::
>>>>>>         i915_scheduler.rst
>>>>>> +
>>>>>> +.. toctree::
>>>>>> +
>>>>>> +    i915_small_bar.rst
>>>>>
>>>>>
>>>>
>>


^ permalink raw reply	[flat|nested] 50+ messages in thread

* Re: [Intel-gfx] [PATCH v2] drm/doc: add rfc section for small BAR uapi
  2022-04-28 11:11         ` Tvrtko Ursulin
@ 2022-05-03 14:40           ` Matthew Auld
  0 siblings, 0 replies; 50+ messages in thread
From: Matthew Auld @ 2022-05-03 14:40 UTC (permalink / raw)
  To: Tvrtko Ursulin, intel-gfx, Bloomfield, Jon
  Cc: Thomas Hellström, Kenneth Graunke, mesa-dev, dri-devel,
	Daniel Vetter

On 28/04/2022 12:11, Tvrtko Ursulin wrote:
> 
> On 28/04/2022 11:25, Matthew Auld wrote:
>> On 28/04/2022 09:55, Tvrtko Ursulin wrote:
>>>
>>> On 27/04/2022 18:36, Matthew Auld wrote:
>>>> On 27/04/2022 09:36, Tvrtko Ursulin wrote:
>>>>>
>>>>> On 20/04/2022 18:13, Matthew Auld wrote:
>>>>>> Add an entry for the new uapi needed for small BAR on DG2+.
>>>>>>
>>>>>> v2:
>>>>>>    - Some spelling fixes and other small tweaks. (Akeem & Thomas)
>>>>>>    - Rework error capture interactions, including no longer needing
>>>>>>      NEEDS_CPU_ACCESS for objects marked for capture. (Thomas)
>>>>>>    - Add probed_cpu_visible_size. (Lionel)
>>>>>>
>>>>>> Signed-off-by: Matthew Auld <matthew.auld@intel.com>
>>>>>> Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
>>>>>> Cc: Lionel Landwerlin <lionel.g.landwerlin@intel.com>
>>>>>> Cc: Jon Bloomfield <jon.bloomfield@intel.com>
>>>>>> Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
>>>>>> Cc: Jordan Justen <jordan.l.justen@intel.com>
>>>>>> Cc: Kenneth Graunke <kenneth@whitecape.org>
>>>>>> Cc: Akeem G Abodunrin <akeem.g.abodunrin@intel.com>
>>>>>> Cc: mesa-dev@lists.freedesktop.org
>>>>>> ---
>>>>>>   Documentation/gpu/rfc/i915_small_bar.h   | 190 
>>>>>> +++++++++++++++++++++++
>>>>>>   Documentation/gpu/rfc/i915_small_bar.rst |  58 +++++++
>>>>>>   Documentation/gpu/rfc/index.rst          |   4 +
>>>>>>   3 files changed, 252 insertions(+)
>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.h
>>>>>>   create mode 100644 Documentation/gpu/rfc/i915_small_bar.rst
>>>>>>
>>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.h 
>>>>>> b/Documentation/gpu/rfc/i915_small_bar.h
>>>>>> new file mode 100644
>>>>>> index 000000000000..7bfd0cf44d35
>>>>>> --- /dev/null
>>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.h
>>>>>> @@ -0,0 +1,190 @@
>>>>>> +/**
>>>>>> + * struct __drm_i915_memory_region_info - Describes one region as 
>>>>>> known to the
>>>>>> + * driver.
>>>>>> + *
>>>>>> + * Note this is using both struct drm_i915_query_item and struct 
>>>>>> drm_i915_query.
>>>>>> + * For this new query we are adding the new query id 
>>>>>> DRM_I915_QUERY_MEMORY_REGIONS
>>>>>> + * at &drm_i915_query_item.query_id.
>>>>>> + */
>>>>>> +struct __drm_i915_memory_region_info {
>>>>>> +    /** @region: The class:instance pair encoding */
>>>>>> +    struct drm_i915_gem_memory_class_instance region;
>>>>>> +
>>>>>> +    /** @rsvd0: MBZ */
>>>>>> +    __u32 rsvd0;
>>>>>> +
>>>>>> +    /** @probed_size: Memory probed by the driver (-1 = unknown) */
>>>>>> +    __u64 probed_size;
>>>>>> +
>>>>>> +    /** @unallocated_size: Estimate of memory remaining (-1 = 
>>>>>> unknown) */
>>>>>> +    __u64 unallocated_size;
>>>>>> +
>>>>>> +    union {
>>>>>> +        /** @rsvd1: MBZ */
>>>>>> +        __u64 rsvd1[8];
>>>>>> +        struct {
>>>>>> +            /**
>>>>>> +             * @probed_cpu_visible_size: Memory probed by the driver
>>>>>> +             * that is CPU accessible. (-1 = unknown).
>>>>>> +             *
>>>>>> +             * This will always be <= @probed_size, and the
>>>>>> +             * remainder (if there is any) will not be CPU
>>>>>> +             * accessible.
>>>>>> +             */
>>>>>> +            __u64 probed_cpu_visible_size;
>>>>>
>>>>> Would unallocated_cpu_visible_size be useful, to follow the total 
>>>>> unallocated_size?
>>>>
>>>> Makes sense. But I don't think unallocated_size has actually been 
>>>> properly wired up yet. It still just gives the same value as 
>>>> probed_size. IIRC for unallocated_size we still need a real 
>>>> user/usecase/umd, before wiring that up for real with the existing 
>>>> avail tracking. Once we have that we can also add 
>>>> unallocated_cpu_visible_size.
>>>
>>> So this does nothing at the moment:
>>>
>>>   info.unallocated_size = mr->avail;
>>>
>>> Right, it is set to "mem->avail = mem->total;" at region init time 
>>> and I indeed can't find it ever getting modified. Okay.
>>>
>>>>> Btw, have we ever considered whether unallocated_size should 
>>>>> require CAP_SYS_ADMIN/PERFMON or something?
>>>>
>>>> Not sure. But just in case we do add it for real at some point, why 
>>>> the added restriction?
>>>
>>> To avoid a side channel, albeit perhaps a very weak one. For engine 
>>> utilization we require CAP_SYS_PERFMON, but that is implied by the 
>>> perf core API. It's open for discussion. I guess it may make sense to 
>>> limit it also because it is questionable the field(s) are even useful.
>>>
>>>>
>>>>>
>>>>>> +        };
>>>>>> +    };
>>>>>> +};
>>>>>> +
>>>>>> +/**
>>>>>> + * struct __drm_i915_gem_create_ext - Existing gem_create 
>>>>>> behaviour, with added
>>>>>> + * extension support using struct i915_user_extension.
>>>>>> + *
>>>>>> + * Note that new buffer flags should be added here, at least for 
>>>>>> the stuff that
>>>>>> + * is immutable. Previously we would have two ioctls, one to 
>>>>>> create the object
>>>>>> + * with gem_create, and another to apply various parameters, 
>>>>>> however this
>>>>>> + * creates some ambiguity for the params which are considered 
>>>>>> immutable. Also in
>>>>>> + * general we're phasing out the various SET/GET ioctls.
>>>>>> + */
>>>>>> +struct __drm_i915_gem_create_ext {
>>>>>> +    /**
>>>>>> +     * @size: Requested size for the object.
>>>>>> +     *
>>>>>> +     * The (page-aligned) allocated size for the object will be 
>>>>>> returned.
>>>>>> +     *
>>>>>> +     * Note that for some devices we might have further minimum
>>>>>> +     * page-size restrictions (larger than 4K), like for device 
>>>>>> local-memory.
>>>>>> +     * However in general the final size here should always 
>>>>>> reflect any
>>>>>> +     * rounding up, if for example using the 
>>>>>> I915_GEM_CREATE_EXT_MEMORY_REGIONS
>>>>>> +     * extension to place the object in device local-memory.
>>>>>> +     */
>>>>>> +    __u64 size;
>>>>>> +    /**
>>>>>> +     * @handle: Returned handle for the object.
>>>>>> +     *
>>>>>> +     * Object handles are nonzero.
>>>>>> +     */
>>>>>> +    __u32 handle;
>>>>>> +    /**
>>>>>> +     * @flags: Optional flags.
>>>>>> +     *
>>>>>> +     * Supported values:
>>>>>> +     *
>>>>>> +     * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS - Signal to the 
>>>>>> kernel that
>>>>>> +     * the object will need to be accessed via the CPU.
>>>>>> +     *
>>>>>> +     * Only valid when placing objects in 
>>>>>> I915_MEMORY_CLASS_DEVICE, and
>>>>>> +     * only strictly required on platforms where only some of the 
>>>>>> device
>>>>>> +     * memory is directly visible or mappable through the CPU, 
>>>>>> like on DG2+.
>>>>>> +     *
>>>>>> +     * One of the placements MUST also be 
>>>>>> I915_MEMORY_CLASS_SYSTEM, to
>>>>>> +     * ensure we can always spill the allocation to system memory, 
>>>>>> if we
>>>>>> +     * can't place the object in the mappable part of
>>>>>> +     * I915_MEMORY_CLASS_DEVICE.
>>>>>> +     *
>>>>>> +     * Note that since the kernel only supports flat-CCS on 
>>>>>> objects that can
>>>>>> +     * *only* be placed in I915_MEMORY_CLASS_DEVICE, we therefore 
>>>>>> don't
>>>>>> +     * support I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS together 
>>>>>> with
>>>>>> +     * flat-CCS.
>>>>>> +     *
>>>>>> +     * Without this hint, the kernel will assume that non-mappable
>>>>>> +     * I915_MEMORY_CLASS_DEVICE is preferred for this object. 
>>>>>> Note that the
>>>>>> +     * kernel can still migrate the object to the mappable part, 
>>>>>> as a last
>>>>>> +     * resort, if userspace ever CPU faults this object, but this 
>>>>>> might be
>>>>>> +     * expensive, and so ideally should be avoided.
>>>>>> +     */
>>>>>
>>>>> So "needs_cpu_access" flag could almost be viewed as a sub-region 
>>>>> placement priority? What I mean is this:
>>>>>
>>>>> 1)
>>>>> placements=device,system flags=
>>>>>
>>>>> This results in placement priorities: device, device_cpu_mappable, 
>>>>> system.
>>>>
>>>> Yup.
>>>>
>>>>>
>>>>> 2)
>>>>> placements=device,system flags=needs_cpu_access
>>>>>
>>>>> This results in placement priorities: device_cpu_mappable, device, 
>>>>> system.
>>>>
>>>> Here it would only be: device_cpu_mappable, system. We would 
>>>> completely ignore "device" in this case.
>>>>
>>>>>
>>>>> Is this correct?
>>>>>
>>>>> The benefit of the flag is that i915 can place the object in the 
>>>>> right place from the start instead of on the first CPU access? Is 
>>>>> that worth it or is there more to it?
>>>>
>>>> Yeah, the object will only be placed somewhere that is also CPU 
>>>> mappable, with the flag set.
>>>
>>> Hm, wouldn't it be more efficient to be able to migrate it over to 
>>> non-mappable in cases when mappable is over-subscribed?
>>
>> Not sure. As an alternative strategy, I guess that might be 
>> interesting, and if userspace wants something like that we can always 
>> add a new flag I guess? It's a toss up whether just using system 
>> memory is better/worse than incurring an extra move at fault time?
> 
> For me it doesn't make sense to allow BOs without the CPU-mappable flag 
> to use the mappable section (albeit as 2nd priority) while not allowing 
> the CPU-mappable ones to temporarily go anywhere in lmem.
> 
>  From i915 side it needs to know the allowed regions (for this argument 
> I speak of mappable / non-mappable as separate regions, even if uapi 
> does not expose them as separate memory regions). i915 does not know in 
> advance the exact usage pattern.
> 
> In case of multiple clients, one might touch a buffer from the CPU once 
> and then render with GPU many times. Another client might touch from CPU 
> a lot more. With mappable space contention this would cause the buffer 
> from the first client to constantly get migrated between smem and lmem, 
> while in reality it could have been migrated to non mappable lmem and 
> used by the GPU without any problems.
> 
> As some sort of diagram:
> 
>      Client A BO     Client B BO
>      -----------     -----------
>      CPU access
>                      CPU access (Client A BO "evicted" to smem)
>      GPU access      GPU access
>      GPU access      CPU access
>      GPU access      GPU access
> 
> If we assume there is only space for one BO in mappable, allowing 
> non-mappable placement allows Client A to be unaffected by Client B 
> activity. While with the current proposal it needlessly takes a hit on 
> every, or every other GPU access.
> 
> So I think, unless there is a fundamental reason to disallow it that I 
> am missing, not limiting the implied placements when the CPU-access 
> flag is given is beneficial to the flexibility of the migration 
> decisions i915 can make.
> 

Jon/Daniel, any thoughts/inputs here?


> Regards,
> 
> Tvrtko
> 
>>
>>>
>>>>>> +#define I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS (1 << 0)
>>>>>> +    __u32 flags;
>>>>>> +    /**
>>>>>> +     * @extensions: The chain of extensions to apply to this object.
>>>>>> +     *
>>>>>> +     * This will be useful in the future when we need to support 
>>>>>> several
>>>>>> +     * different extensions, and we need to apply more than one when
>>>>>> +     * creating the object. See struct i915_user_extension.
>>>>>> +     *
>>>>>> +     * If we don't supply any extensions then we get the same old 
>>>>>> gem_create
>>>>>> +     * behaviour.
>>>>>> +     *
>>>>>> +     * For I915_GEM_CREATE_EXT_MEMORY_REGIONS usage see
>>>>>> +     * struct drm_i915_gem_create_ext_memory_regions.
>>>>>> +     *
>>>>>> +     * For I915_GEM_CREATE_EXT_PROTECTED_CONTENT usage see
>>>>>> +     * struct drm_i915_gem_create_ext_protected_content.
>>>>>> +     */
>>>>>> +#define I915_GEM_CREATE_EXT_MEMORY_REGIONS 0
>>>>>> +#define I915_GEM_CREATE_EXT_PROTECTED_CONTENT 1
>>>>>> +    __u64 extensions;
>>>>>> +};
>>>>>> +
>>>>>> +#define DRM_I915_QUERY_VMA_INFO    5
>>>>>> +
>>>>>> +/**
>>>>>> + * struct __drm_i915_query_vma_info
>>>>>> + *
>>>>>> + * Given a vm and GTT address, lookup the corresponding vma, 
>>>>>> returning its set
>>>>>> + * of attributes.
>>>>>> + *
>>>>>> + * .. code-block:: C
>>>>>> + *
>>>>>> + *    struct drm_i915_query_vma_info info = {};
>>>>>> + *    struct drm_i915_query_item item = {
>>>>>> + *        .data_ptr = (uintptr_t)&info,
>>>>>> + *        .query_id = DRM_I915_QUERY_VMA_INFO,
>>>>>> + *    };
>>>>>> + *    struct drm_i915_query query = {
>>>>>> + *        .num_items = 1,
>>>>>> + *        .items_ptr = (uintptr_t)&item,
>>>>>> + *    };
>>>>>> + *    int err;
>>>>>> + *
>>>>>> + *    // Unlike some other types of queries, there is no need to 
>>>>>> first query
>>>>>> + *    // the size of the data_ptr blob here, since we already 
>>>>>> know ahead of
>>>>>> + *    // time how big this needs to be.
>>>>>> + *    item.length = sizeof(info);
>>>>>> + *
>>>>>> + *    // Next we fill in the vm_id and ppGTT address of the vma 
>>>>>> we wish
>>>>>> + *    // to query, before then firing off the query.
>>>>>> + *    info.vm_id = vm_id;
>>>>>> + *    info.offset = gtt_address;
>>>>>> + *    err = ioctl(fd, DRM_IOCTL_I915_QUERY, &query);
>>>>>> + *    if (err || item.length < 0) ...
>>>>>> + *
>>>>>> + *    // If all went well we can now inspect the returned 
>>>>>> attributes.
>>>>>> + *    if (info.attributes & DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE) ...
>>>>>> + */
>>>>>> +struct __drm_i915_query_vma_info {
>>>>>> +    /**
>>>>>> +     * @vm_id: The given vm id that contains the vma. The id is 
>>>>>> the value
>>>>>> +     * returned by the DRM_I915_GEM_VM_CREATE. See struct
>>>>>> +     * drm_i915_gem_vm_control.vm_id.
>>>>>> +     */
>>>>>> +    __u32 vm_id;
>>>>>> +    /** @pad: MBZ. */
>>>>>> +    __u32 pad;
>>>>>> +    /**
>>>>>> +     * @offset: The corresponding ppGTT address of the vma which 
>>>>>> the kernel
>>>>>> +     * will use to perform the lookup.
>>>>>> +     */
>>>>>> +    __u64 offset;
>>>>>> +    /**
>>>>>> +     * @attributes: The returned attributes for the given vma.
>>>>>> +     *
>>>>>> +     * Possible values:
>>>>>> +     *
>>>>>> +     * DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE - Set if the pages 
>>>>>> backing the
>>>>>> +     * vma are currently CPU accessible. If this is not set then 
>>>>>> the vma is
>>>>>> +     * currently backed by I915_MEMORY_CLASS_DEVICE memory, which 
>>>>>> the CPU
>>>>>> +     * cannot directly access (this is only possible on discrete 
>>>>>> devices with
>>>>>> +     * a small BAR). Attempting to MMAP and fault such an object 
>>>>>> will
>>>>>> +     * require the kernel first synchronising any GPU work tied 
>>>>>> to the
>>>>>> +     * object, before then migrating the pages, either to the CPU 
>>>>>> accessible
>>>>>> +     * part of I915_MEMORY_CLASS_DEVICE, or 
>>>>>> I915_MEMORY_CLASS_SYSTEM, if the
>>>>>> +     * placements permit it. See 
>>>>>> I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS.
>>>>>> +     *
>>>>>> +     * Note that this is inherently racy.
>>>>>> +     */
>>>>>> +#define DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE (1<<0)
>>>>>> +    __u64 attributes;
>>>>>> +    /** @rsvd: MBZ */
>>>>>> +    __u32 rsvd[4];
>>>>>> +};
>>>>>> diff --git a/Documentation/gpu/rfc/i915_small_bar.rst 
>>>>>> b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>>> new file mode 100644
>>>>>> index 000000000000..be3d9bcdd86d
>>>>>> --- /dev/null
>>>>>> +++ b/Documentation/gpu/rfc/i915_small_bar.rst
>>>>>> @@ -0,0 +1,58 @@
>>>>>> +==========================
>>>>>> +I915 Small BAR RFC Section
>>>>>> +==========================
>>>>>> +Starting from DG2 we will have resizable BAR support for device 
>>>>>> local-memory (i.e.
>>>>>> +I915_MEMORY_CLASS_DEVICE), but in some cases the final BAR size 
>>>>>> might still be
>>>>>> +smaller than the total probed_size. In such cases, only some 
>>>>>> subset of
>>>>>> +I915_MEMORY_CLASS_DEVICE will be CPU accessible (for example the 
>>>>>> first 256M),
>>>>>> +while the remainder is only accessible via the GPU.
>>>>>> +
>>>>>> +I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS flag
>>>>>> +----------------------------------------------
>>>>>> +New gem_create_ext flag to tell the kernel that a BO will require 
>>>>>> CPU access.
>>>>>> +This becomes important when placing an object in 
>>>>>> I915_MEMORY_CLASS_DEVICE, where
>>>>>> +underneath the device has a small BAR, meaning only some portion 
>>>>>> of it is CPU
>>>>>> +accessible. Without this flag the kernel will assume that CPU 
>>>>>> access is not
>>>>>> +required, and prioritize using the non-CPU visible portion of
>>>>>> +I915_MEMORY_CLASS_DEVICE.
>>>>>> +
>>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>>> +   :functions: __drm_i915_gem_create_ext
>>>>>> +
>>>>>> +probed_cpu_visible_size attribute
>>>>>> +---------------------------------
>>>>>> +New struct __drm_i915_memory_region_info attribute which returns the 
>>>>>> total size of the
>>>>>> +CPU accessible portion, for the particular region. This should 
>>>>>> only be
>>>>>> +applicable for I915_MEMORY_CLASS_DEVICE.
>>>>>> +
>>>>>> +Vulkan will need this as part of creating a separate VkMemoryHeap 
>>>>>> with the
>>>>>> +VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT set, to represent the CPU 
>>>>>> visible portion,
>>>>>> +where the total size of the heap needs to be known.
>>>>>> +
>>>>>> +.. kernel-doc:: Documentation/gpu/rfc/i915_small_bar.h
>>>>>> +   :functions: __drm_i915_memory_region_info
>>>>>> +
>>>>>> +DRM_I915_QUERY_VMA_INFO query
>>>>>> +-----------------------------
>>>>>> +Query the attributes of some vma. Given a vm and GTT offset, find 
>>>>>> the
>>>>>> +respective vma, and return its set of attributes. For now we only 
>>>>>> support
>>>>>> +DRM_I915_QUERY_VMA_INFO_CPU_VISIBLE, which is set if the 
>>>>>> object/vma is
>>>>>> +currently placed in memory that is accessible by the CPU. This 
>>>>>> should always be
>>>>>> +set on devices where the CPU probed_cpu_visible_size of 
>>>>>> I915_MEMORY_CLASS_DEVICE
>>>>>> +matches the probed_size. If this is not set then CPU faulting the 
>>>>>> object will
>>>>>> +likely first require migrating the pages.
>>>>>
>>>>> I think there should be justification for the new query documented 
>>>>> as well. (Why on top of what.)
>>>>
>>>> Yeah, I'm wondering now if we can just drop this part of the uapi, 
>>>> for now at least, and focus on landing the new flag stuff first.
>>>>
>>>>>
>>>>> Without it I personally can't immediately understand why there is 
>>>>> a disconnect between the object-based and VMA-based API. Userspace 
>>>>> has to do some intervening operation, like execbuf now or vm bind 
>>>>> in the future, to make this query usable after object creation. So 
>>>>> the question is why it wouldn't already know which placements it 
>>>>> allowed, and hence whether i915 would auto-migrate this particular 
>>>>> object or not. No? Or in other words, why this wouldn't be an 
>>>>> object-based query, since the question it is answering is about 
>>>>> the object backing store and not the VMA.
>>>>
>>>> Yeah, just using the object handle or so I guess would also work. 
>>>> Thanks for the comments.
>>>
>>> I saw other folks have said the same so omitting for now sounds good 
>>> to me indeed.
>>>
>>> Regards,
>>>
>>> Tvrtko


end of thread, other threads:[~2022-05-03 14:40 UTC | newest]

Thread overview: 50+ messages
2022-04-20 17:13 [PATCH v2] drm/doc: add rfc section for small BAR uapi Matthew Auld
2022-04-20 17:13 ` [Intel-gfx] " Matthew Auld
2022-04-20 20:47 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for " Patchwork
2022-04-20 21:14 ` [Intel-gfx] ✗ Fi.CI.BAT: failure " Patchwork
2022-04-27  6:35 ` [PATCH v2] " Lionel Landwerlin
2022-04-27  6:35   ` [Intel-gfx] " Lionel Landwerlin
2022-04-27  6:48   ` Lionel Landwerlin
2022-04-27  6:48     ` [Intel-gfx] " Lionel Landwerlin
2022-04-27  6:55     ` Christian König
2022-04-27  6:55       ` [Intel-gfx] " Christian König
2022-04-27 15:02       ` Matthew Auld
2022-04-27 15:02         ` [Intel-gfx] " Matthew Auld
2022-04-27 15:04         ` Christian König
2022-04-27 15:04           ` [Intel-gfx] " Christian König
2022-04-27 15:38       ` Daniel Vetter
2022-04-27 15:38         ` [Intel-gfx] " Daniel Vetter
2022-04-27 15:18     ` Matthew Auld
2022-04-27 15:18       ` [Intel-gfx] " Matthew Auld
2022-04-27 15:37       ` Lionel Landwerlin
2022-04-27 15:37         ` [Intel-gfx] " Lionel Landwerlin
2022-04-27  8:36 ` Tvrtko Ursulin
2022-04-27 17:36   ` Matthew Auld
2022-04-28  8:55     ` Tvrtko Ursulin
2022-04-28 10:25       ` Matthew Auld
2022-04-28 11:11         ` Tvrtko Ursulin
2022-05-03 14:40           ` Matthew Auld
2022-05-02  7:54 ` Lionel Landwerlin
2022-05-02  7:54   ` [Intel-gfx] " Lionel Landwerlin
2022-05-02  8:53   ` Lionel Landwerlin
2022-05-02  8:53     ` [Intel-gfx] " Lionel Landwerlin
2022-05-03 10:22     ` Matthew Auld
2022-05-03 10:22       ` [Intel-gfx] " Matthew Auld
2022-05-03 10:39       ` Lionel Landwerlin
2022-05-03 10:39         ` [Intel-gfx] " Lionel Landwerlin
2022-05-03 14:27         ` Matthew Auld
2022-05-03 14:27           ` [Intel-gfx] " Matthew Auld
2022-05-03 14:37           ` Lionel Landwerlin
2022-05-03 14:37             ` [Intel-gfx] " Lionel Landwerlin
2022-05-03 11:06       ` Tvrtko Ursulin
2022-05-03 11:06         ` [Intel-gfx] " Tvrtko Ursulin
2022-05-02 17:58   ` Abodunrin, Akeem G
2022-05-02 17:58     ` [Intel-gfx] " Abodunrin, Akeem G
2022-05-02 18:03     ` Lionel Landwerlin
2022-05-02 18:03       ` [Intel-gfx] " Lionel Landwerlin
2022-05-03  9:07       ` Matthew Auld
2022-05-03  9:07         ` [Intel-gfx] " Matthew Auld
2022-05-03  9:15         ` Lionel Landwerlin
2022-05-03  9:15           ` [Intel-gfx] " Lionel Landwerlin
2022-05-03  9:01   ` Matthew Auld
2022-05-03  9:01     ` [Intel-gfx] " Matthew Auld
