From: Matthew Auld <matthew.auld@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH v2 08/22] drm/i915/lmem: support kernel mapping
Date: Thu,  3 Oct 2019 20:24:30 +0100
Message-ID: <20191003192444.10113-9-matthew.auld@intel.com>
In-Reply-To: <20191003192444.10113-1-matthew.auld@intel.com>

From: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>

We can create LMEM objects, but we also need to support mapping them
into the kernel address space for internal use.
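
For illustration only (this sketch is not part of the patch, and the
variable names are placeholders), an internal user could then do
something like the following, noting that only WC mappings are
supported and that, for now, the object must be allocated contiguous:

	struct drm_i915_gem_object *obj;
	void *vaddr;

	obj = i915_gem_object_create_lmem(i915, size,
					  I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* LMEM only supports WC; asking for a WB mapping will fail */
	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
	if (IS_ERR(vaddr)) {
		i915_gem_object_put(obj);
		return PTR_ERR(vaddr);
	}

	/* ... CPU reads/writes of the object contents via vaddr ... */

	i915_gem_object_unpin_map(obj);
	i915_gem_object_put(obj);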

Signed-off-by: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Signed-off-by: Steve Hampson <steven.t.hampson@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/i915/gem/i915_gem_lmem.c      |  36 ++++++
 drivers/gpu/drm/i915/gem/i915_gem_lmem.h      |   8 ++
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   9 +-
 drivers/gpu/drm/i915/gem/i915_gem_pages.c     |  22 +++-
 .../drm/i915/selftests/intel_memory_region.c  | 118 ++++++++++++++++++
 5 files changed, 185 insertions(+), 8 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
index 26a23304df32..1a045858b3b2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c
@@ -9,11 +9,47 @@
 #include "i915_drv.h"
 
 const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = {
+	.flags = I915_GEM_OBJECT_HAS_IOMEM,
+
 	.get_pages = i915_gem_object_get_pages_buddy,
 	.put_pages = i915_gem_object_put_pages_buddy,
 	.release = i915_gem_object_release_memory_region,
 };
 
+/* XXX: Time to vfunc your life up? */
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+					       unsigned long n)
+{
+	resource_size_t offset;
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+
+	return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE);
+}
+
+void __iomem *i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+						      unsigned long n)
+{
+	resource_size_t offset;
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+
+	return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset);
+}
+
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+					  unsigned long n,
+					  unsigned long size)
+{
+	resource_size_t offset;
+
+	GEM_BUG_ON(!(obj->flags & I915_BO_ALLOC_CONTIGUOUS));
+
+	offset = i915_gem_object_get_dma_address(obj, n);
+
+	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
+}
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 {
 	struct intel_memory_region *region = obj->mm.region;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
index ebc15fe24f58..31a6462bdbb6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h
@@ -13,6 +13,14 @@ struct drm_i915_gem_object;
 
 extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
 
+void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
+					  unsigned long n, unsigned long size);
+void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj,
+					       unsigned long n);
+void __iomem *
+i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj,
+					unsigned long n);
+
 bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 9a8579b67357..b75a14a61d24 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -30,10 +30,11 @@ struct i915_lut_handle {
 struct drm_i915_gem_object_ops {
 	unsigned int flags;
 #define I915_GEM_OBJECT_HAS_STRUCT_PAGE	BIT(0)
-#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(1)
-#define I915_GEM_OBJECT_IS_PROXY	BIT(2)
-#define I915_GEM_OBJECT_NO_GGTT		BIT(3)
-#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(4)
+#define I915_GEM_OBJECT_HAS_IOMEM	BIT(1)
+#define I915_GEM_OBJECT_IS_SHRINKABLE	BIT(2)
+#define I915_GEM_OBJECT_IS_PROXY	BIT(3)
+#define I915_GEM_OBJECT_NO_GGTT		BIT(4)
+#define I915_GEM_OBJECT_ASYNC_CANCEL	BIT(5)
 
 	/* Interface between the GEM object and its backing storage.
 	 * get_pages() is called once prior to the use of the associated set
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index b0ec0959c13f..cf7f5a3cb210 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -7,6 +7,7 @@
 #include "i915_drv.h"
 #include "i915_gem_object.h"
 #include "i915_scatterlist.h"
+#include "i915_gem_lmem.h"
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 				 struct sg_table *pages,
@@ -172,7 +173,9 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 		void *ptr;
 
 		ptr = page_mask_bits(obj->mm.mapping);
-		if (is_vmalloc_addr(ptr))
+		if (i915_gem_object_is_lmem(obj))
+			io_mapping_unmap(ptr);
+		else if (is_vmalloc_addr(ptr))
 			vunmap(ptr);
 		else
 			kunmap(kmap_to_page(ptr));
@@ -231,7 +234,7 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
 }
 
 /* The 'mapping' part of i915_gem_object_pin_map() below */
-static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
+static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 				 enum i915_map_type type)
 {
 	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
@@ -244,6 +247,13 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
 	pgprot_t pgprot;
 	void *addr;
 
+	if (i915_gem_object_is_lmem(obj)) {
+		if (type != I915_MAP_WC)
+			return NULL;
+
+		return i915_gem_object_lmem_io_map(obj, 0, obj->base.size);
+	}
+
 	/* A single page can always be kmapped */
 	if (n_pages == 1 && type == I915_MAP_WB)
 		return kmap(sg_page(sgt->sgl));
@@ -285,11 +295,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			      enum i915_map_type type)
 {
 	enum i915_map_type has_type;
+	unsigned int flags;
 	bool pinned;
 	void *ptr;
 	int err;
 
-	if (unlikely(!i915_gem_object_has_struct_page(obj)))
+	flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM;
+	if (!i915_gem_object_type_has(obj, flags))
 		return ERR_PTR(-ENXIO);
 
 	err = mutex_lock_interruptible(&obj->mm.lock);
@@ -321,7 +333,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 			goto err_unpin;
 		}
 
-		if (is_vmalloc_addr(ptr))
+		if (i915_gem_object_is_lmem(obj))
+			io_mapping_unmap(ptr);
+		else if (is_vmalloc_addr(ptr))
 			vunmap(ptr);
 		else
 			kunmap(kmap_to_page(ptr));
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 9692dd448b8c..1ad755e75e57 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -13,8 +13,10 @@
 
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
+#include "gem/i915_gem_object_blt.h"
 #include "gem/selftests/mock_context.h"
 #include "gt/intel_gt.h"
+#include "selftests/igt_flush_test.h"
 #include "selftests/i915_random.h"
 
 static void close_objects(struct intel_memory_region *mem,
@@ -274,6 +276,121 @@ static int igt_lmem_create(void *arg)
 	return err;
 }
 
+static int igt_lmem_write_cpu(void *arg)
+{
+	struct drm_i915_private *i915 = arg;
+	struct drm_i915_gem_object *obj;
+	I915_RND_STATE(prng);
+	IGT_TIMEOUT(end_time);
+	u32 bytes[] = {
+		0, /* rng placeholder */
+		sizeof(u32),
+		sizeof(u64),
+		64, /* cl */
+		PAGE_SIZE,
+		PAGE_SIZE - sizeof(u32),
+		PAGE_SIZE - sizeof(u64),
+		PAGE_SIZE - 64,
+	};
+	u32 *vaddr;
+	u32 val;
+	u32 sz;
+	u32 i;
+	int *order;
+	int count;
+	int err;
+
+	if (!HAS_ENGINE(i915, BCS0))
+		return 0;
+
+	sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE);
+	sz = max_t(u32, 2 * PAGE_SIZE, sz);
+
+	obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC);
+	if (IS_ERR(vaddr)) {
+		err = PTR_ERR(vaddr);
+		goto out_put;
+	}
+
+	val = prandom_u32_state(&prng);
+
+	/* Put the pages into a known state -- do so from the gpu for added fun */
+	err = i915_gem_object_fill_blt(obj, i915->engine[BCS0]->kernel_context,
+				       val);
+	if (err)
+		goto out_unpin;
+
+	i915_gem_object_lock(obj);
+	err = i915_gem_object_set_to_wc_domain(obj, true);
+	i915_gem_object_unlock(obj);
+	if (err)
+		goto out_unpin;
+
+	count = ARRAY_SIZE(bytes);
+	order = i915_random_order(count * count, &prng);
+	if (!order) {
+		err = -ENOMEM;
+		goto out_unpin;
+	}
+
+	/* We want to throw in a random width/align */
+	bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32),
+				     sizeof(u32));
+
+	i = 0;
+	do {
+		u32 offset;
+		u32 align;
+		u32 dword;
+		u32 size;
+
+		size = bytes[order[i] % count];
+		i = (i + 1) % (count * count);
+
+		align = bytes[order[i] % count];
+		i = (i + 1) % (count * count);
+
+		align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align));
+
+		offset = igt_random_offset(&prng, 0, obj->base.size,
+					   size, align);
+
+		val = prandom_u32_state(&prng);
+		memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf,
+			 size / sizeof(u32));
+
+		/*
+		 * Sample random dw -- don't waste precious time reading every
+		 * single dw.
+		 */
+		dword = igt_random_offset(&prng, offset,
+					  offset + size,
+					  sizeof(u32), sizeof(u32));
+		dword /= sizeof(u32);
+		if (vaddr[dword] != (val ^ 0xdeadbeaf)) {
+			pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n",
+			       __func__, dword, vaddr[dword], val ^ 0xdeadbeaf,
+			       size, align, offset);
+			err = -EINVAL;
+			break;
+		}
+	} while (!__igt_timeout(end_time, NULL));
+
+out_unpin:
+	i915_gem_object_unpin_map(obj);
+out_put:
+	i915_gem_object_put(obj);
+
+	if (igt_flush_test(i915, I915_WAIT_LOCKED))
+		err = -EIO;
+
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
@@ -309,6 +426,7 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_lmem_create),
+		SUBTEST(igt_lmem_write_cpu),
 	};
 	int err;
 
-- 
2.20.1
