From: Matthew Auld <matthew.auld@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [Intel-gfx] [RFC 38/60] drm/i915/dg1: Introduce dmabuf mmap to LMEM
Date: Fri, 10 Jul 2020 12:57:35 +0100
Message-ID: <20200710115757.290984-39-matthew.auld@intel.com>
In-Reply-To: <20200710115757.290984-1-matthew.auld@intel.com>

From: "Michael J. Ruhl" <michael.j.ruhl@intel.com>

The i915 GEM dmabuf mmap interface assumes all BOs are shmem backed.
When the BO is backed by LMEM, that assumption no longer holds.

Extend the dmabuf mmap interface to LMEM objects by adding the
appropriate VMA faulting mechanism, and update dmabuf to allow
LMEM-backed BOs by leveraging the gem_mman path.
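
For illustration only (not part of this patch), here is a minimal
userspace sketch of the path this enables: mmap()ing a dma-buf fd
exported from an i915 BO, which with this change behaves the same
whether the BO is shmem or LMEM backed. The dmabuf_fd and the helper
name touch_dmabuf are assumptions for the example; the fd would come
from a prior DRM_IOCTL_PRIME_HANDLE_TO_FD on an i915 GEM handle.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

/* Map and touch an exported dma-buf; size must not exceed the BO size. */
static int touch_dmabuf(int dmabuf_fd, size_t size)
{
	void *ptr;

	/* PROT_WRITE requires the exported object to not be read-only. */
	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   dmabuf_fd, 0);
	if (ptr == MAP_FAILED) {
		perror("mmap(dmabuf)");
		return -1;
	}

	/* For an LMEM BO this faults through the new WC mmap path. */
	memset(ptr, 0xa5, size);
	munmap(ptr, size);
	return 0;
}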

Cc: Brian Welty <brian.welty@intel.com>
Signed-off-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c    |  58 ++++++++--
 drivers/gpu/drm/i915/gem/i915_gem_mman.c      | 103 ++++++++++--------
 drivers/gpu/drm/i915/gem/i915_gem_mman.h      |   8 ++
 .../drm/i915/gem/selftests/i915_gem_mman.c    |  12 +-
 4 files changed, 119 insertions(+), 62 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 2679380159fc..5824e56751ac 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -9,6 +9,8 @@
 #include <linux/dma-resv.h>
 
 #include "i915_drv.h"
+#include "i915_gem_lmem.h"
+#include "i915_gem_mman.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
 
@@ -97,7 +99,41 @@ static void i915_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
 	i915_gem_object_unpin_map(obj);
 }
 
-static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *vma)
+/**
+ * i915_gem_dmabuf_update_vma - Set up VMA information for exported LMEM
+ * objects
+ * @obj: valid LMEM object
+ * @vma: valid vma
+ *
+ * NOTE: on success, the final _object_put() will be done by the VMA
+ * vm_close() callback.
+ */
+static int i915_gem_dmabuf_update_vma(struct drm_i915_gem_object *obj,
+				      struct vm_area_struct *vma)
+{
+	struct i915_mmap_offset *mmo;
+	int err;
+
+	i915_gem_object_get(obj);
+	mmo = i915_gem_mmap_offset_attach(obj, I915_MMAP_TYPE_WC, NULL);
+	if (IS_ERR(mmo)) {
+		err = PTR_ERR(mmo);
+		goto out;
+	}
+
+	err = i915_gem_update_vma_info(mmo, vma);
+	if (err)
+		goto out;
+
+	return 0;
+
+out:
+	i915_gem_object_put(obj);
+	return err;
+}
+
+static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf,
+				struct vm_area_struct *vma)
 {
 	struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
 	int ret;
@@ -105,17 +141,21 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
 	if (obj->base.size < vma->vm_end - vma->vm_start)
 		return -EINVAL;
 
-	if (!obj->base.filp)
-		return -ENODEV;
+	/* shmem */
+	if (obj->base.filp) {
+		ret = call_mmap(obj->base.filp, vma);
+		if (ret)
+			return ret;
+		fput(vma->vm_file);
+		vma->vm_file = get_file(obj->base.filp);
 
-	ret = call_mmap(obj->base.filp, vma);
-	if (ret)
-		return ret;
+		return 0;
+	}
 
-	fput(vma->vm_file);
-	vma->vm_file = get_file(obj->base.filp);
+	if (i915_gem_object_is_lmem(obj))
+		return i915_gem_dmabuf_update_vma(obj, vma);
 
-	return 0;
+	return -ENODEV;
 }
 
 static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b23368529a40..38c8874e10d2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -568,10 +568,10 @@ insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
 	return mmo;
 }
 
-static struct i915_mmap_offset *
-mmap_offset_attach(struct drm_i915_gem_object *obj,
-		   enum i915_mmap_type mmap_type,
-		   struct drm_file *file)
+struct i915_mmap_offset *
+i915_gem_mmap_offset_attach(struct drm_i915_gem_object *obj,
+			    enum i915_mmap_type mmap_type,
+			    struct drm_file *file)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct i915_mmap_offset *mmo;
@@ -645,7 +645,7 @@ __assign_mmap_offset(struct drm_file *file,
 		goto out;
 	}
 
-	mmo = mmap_offset_attach(obj, mmap_type, file);
+	mmo = i915_gem_mmap_offset_attach(obj, mmap_type, file);
 	if (IS_ERR(mmo)) {
 		err = PTR_ERR(mmo);
 		goto out;
@@ -816,56 +816,21 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
 	return file;
 }
 
-/*
- * This overcomes the limitation in drm_gem_mmap's assignment of a
- * drm_gem_object as the vma->vm_private_data. Since we need to
- * be able to resolve multiple mmap offsets which could be tied
- * to a single gem object.
- */
-int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+int i915_gem_update_vma_info(struct i915_mmap_offset *mmo,
+			     struct vm_area_struct *vma)
 {
-	struct drm_vma_offset_node *node;
-	struct drm_file *priv = filp->private_data;
-	struct drm_device *dev = priv->minor->dev;
-	struct drm_i915_gem_object *obj = NULL;
-	struct i915_mmap_offset *mmo = NULL;
 	struct file *anon;
 
-	if (drm_dev_is_unplugged(dev))
-		return -ENODEV;
-
-	rcu_read_lock();
-	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
-	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
-						  vma->vm_pgoff,
-						  vma_pages(vma));
-	if (node && drm_vma_node_is_allowed(node, priv)) {
-		/*
-		 * Skip 0-refcnted objects as it is in the process of being
-		 * destroyed and will be invalid when the vma manager lock
-		 * is released.
-		 */
-		mmo = container_of(node, struct i915_mmap_offset, vma_node);
-		obj = i915_gem_object_get_rcu(mmo->obj);
-	}
-	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
-	rcu_read_unlock();
-	if (!obj)
-		return node ? -EACCES : -EINVAL;
-
-	if (i915_gem_object_is_readonly(obj)) {
-		if (vma->vm_flags & VM_WRITE) {
-			i915_gem_object_put(obj);
+	if (i915_gem_object_is_readonly(mmo->obj)) {
+		if (vma->vm_flags & VM_WRITE)
 			return -EINVAL;
-		}
+
 		vma->vm_flags &= ~VM_MAYWRITE;
 	}
 
-	anon = mmap_singleton(to_i915(dev));
-	if (IS_ERR(anon)) {
-		i915_gem_object_put(obj);
+	anon = mmap_singleton(to_i915(mmo->obj->base.dev));
+	if (IS_ERR(anon))
 		return PTR_ERR(anon);
-	}
 
 	vma->vm_flags |= VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
 	vma->vm_private_data = mmo;
@@ -910,6 +875,50 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 	return 0;
 }
 
+/*
+ * This overcomes the limitation in drm_gem_mmap's assignment of a
+ * drm_gem_object as the vma->vm_private_data. Since we need to
+ * be able to resolve multiple mmap offsets which could be tied
+ * to a single gem object.
+ */
+int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	struct drm_vma_offset_node *node;
+	struct drm_file *priv = filp->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	struct drm_i915_gem_object *obj = NULL;
+	struct i915_mmap_offset *mmo = NULL;
+	int err;
+
+	if (drm_dev_is_unplugged(dev))
+		return -ENODEV;
+
+	rcu_read_lock();
+	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
+	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
+						  vma->vm_pgoff,
+						  vma_pages(vma));
+	if (node && drm_vma_node_is_allowed(node, priv)) {
+		/*
+		 * Skip 0-refcnted objects as it is in the process of being
+		 * destroyed and will be invalid when the vma manager lock
+		 * is released.
+		 */
+		mmo = container_of(node, struct i915_mmap_offset, vma_node);
+		obj = i915_gem_object_get_rcu(mmo->obj);
+	}
+	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+	rcu_read_unlock();
+	if (!obj)
+		return node ? -EACCES : -EINVAL;
+
+	err = i915_gem_update_vma_info(mmo, vma);
+	if (err)
+		i915_gem_object_put(obj);
+
+	return err;
+}
+
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
 #include "selftests/i915_gem_mman.c"
 #endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.h b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
index efee9e0d2508..8da5f0d6f46f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.h
@@ -10,6 +10,8 @@
 #include <linux/mm_types.h>
 #include <linux/types.h>
 
+#include "gem/i915_gem_object_types.h"
+
 struct drm_device;
 struct drm_file;
 struct drm_i915_gem_object;
@@ -29,4 +31,10 @@ void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
 
 void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
 
+struct i915_mmap_offset *
+i915_gem_mmap_offset_attach(struct drm_i915_gem_object *obj,
+			    enum i915_mmap_type mmap_type,
+			    struct drm_file *file);
+int i915_gem_update_vma_info(struct i915_mmap_offset *mmo,
+			     struct vm_area_struct *vma);
 #endif
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 9c7402ce5bf9..4343f4ccc9b6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -572,7 +572,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
 	if (IS_ERR(obj))
 		return false;
 
-	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
+	mmo = i915_gem_mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
 	i915_gem_object_put(obj);
 
 	return PTR_ERR_OR_ZERO(mmo) == expected;
@@ -675,7 +675,7 @@ static int igt_mmap_offset_exhaustion(void *arg)
 		goto out;
 	}
 
-	mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
+	mmo = i915_gem_mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
 	if (IS_ERR(mmo)) {
 		pr_err("Unable to insert object into reclaimed hole\n");
 		err = PTR_ERR(mmo);
@@ -850,7 +850,7 @@ static int __igt_mmap(struct drm_i915_private *i915,
 	if (err)
 		return err;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
+	mmo = i915_gem_mmap_offset_attach(obj, type, NULL);
 	if (IS_ERR(mmo))
 		return PTR_ERR(mmo);
 
@@ -988,7 +988,7 @@ static int __igt_mmap_access(struct drm_i915_private *i915,
 	if (!can_mmap(obj, type) || !can_access(obj))
 		return 0;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
+	mmo = i915_gem_mmap_offset_attach(obj, type, NULL);
 	if (IS_ERR(mmo))
 		return PTR_ERR(mmo);
 
@@ -1101,7 +1101,7 @@ static int __igt_mmap_gpu(struct drm_i915_private *i915,
 	if (err)
 		return err;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
+	mmo = i915_gem_mmap_offset_attach(obj, type, NULL);
 	if (IS_ERR(mmo))
 		return PTR_ERR(mmo);
 
@@ -1267,7 +1267,7 @@ static int __igt_mmap_revoke(struct drm_i915_private *i915,
 	if (!can_mmap(obj, type))
 		return 0;
 
-	mmo = mmap_offset_attach(obj, type, NULL);
+	mmo = i915_gem_mmap_offset_attach(obj, type, NULL);
 	if (IS_ERR(mmo))
 		return PTR_ERR(mmo);
 
-- 
2.26.2
