From: Matthew Auld <matthew.auld@intel.com>
To: intel-gfx@lists.freedesktop.org
Subject: [PATCH v2 03/22] drm/i915/region: support contiguous allocations
Date: Thu,  3 Oct 2019 20:24:25 +0100	[thread overview]
Message-ID: <20191003192444.10113-4-matthew.auld@intel.com> (raw)
In-Reply-To: <20191003192444.10113-1-matthew.auld@intel.com>

Some kernel-internal objects may need to be allocated as a contiguous
block. Also, thinking ahead, the various kernel io_mapping interfaces
seem to expect it, although this is purely a limitation in the kernel
API, so perhaps something to be improved.
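
As a rough usage sketch (mirroring how the new selftests below drive
the flag; not part of the change itself), a kernel-internal user
wanting a physically contiguous object in a given region would do
something like:

	obj = i915_gem_object_create_region(mem, size,
					    I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* The backing store is allocated on pin; with the contiguous
	 * flag it comes back as a single buddy block. */
	err = i915_gem_object_pin_pages(obj);
	if (err) {
		i915_gem_object_put(obj);
		return err;
	}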

Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Abdiel Janulgue <abdiel.janulgue@linux.intel.com>
Cc: Michael J Ruhl <michael.j.ruhl@intel.com>
---
 .../gpu/drm/i915/gem/i915_gem_object_types.h  |   4 +
 drivers/gpu/drm/i915/gem/i915_gem_region.c    |  15 +-
 drivers/gpu/drm/i915/gem/i915_gem_region.h    |   3 +-
 .../gpu/drm/i915/gem/selftests/huge_pages.c   |  63 ++++---
 drivers/gpu/drm/i915/intel_memory_region.c    |   9 +-
 drivers/gpu/drm/i915/intel_memory_region.h    |   3 +-
 .../drm/i915/selftests/intel_memory_region.c  | 165 ++++++++++++++++++
 drivers/gpu/drm/i915/selftests/mock_region.c  |   2 +-
 8 files changed, 229 insertions(+), 35 deletions(-)

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index cff527e79661..2960aa0c79f4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -118,6 +118,10 @@ struct drm_i915_gem_object {
 
 	I915_SELFTEST_DECLARE(struct list_head st_link);
 
+	unsigned long flags;
+#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
+#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS)
+
 	/*
 	 * Is the object to be mapped as read-only to the GPU
 	 * Only honoured if hardware has relevant pte bit
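
Note that I915_BO_ALLOC_FLAGS doubles as a validity mask:
i915_gem_object_create_region() rejects any bit outside it with a
GEM_BUG_ON(), as seen further down. A sketch of how the mask would
grow as later patches add flags (the volatile-objects patch is next in
this series; the second flag name here is only indicative):

	#define I915_BO_ALLOC_CONTIGUOUS BIT(0)
	#define I915_BO_ALLOC_VOLATILE   BIT(1) /* added by a later patch */
	#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | \
				     I915_BO_ALLOC_VOLATILE)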
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c
index 5fc6e4540f82..04cb9f72945e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c
@@ -23,10 +23,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 {
 	struct intel_memory_region *mem = obj->mm.region;
 	struct list_head *blocks = &obj->mm.blocks;
-	unsigned int flags = I915_ALLOC_MIN_PAGE_SIZE;
 	resource_size_t size = obj->base.size;
 	resource_size_t prev_end;
 	struct i915_buddy_block *block;
+	unsigned int flags;
 	struct sg_table *st;
 	struct scatterlist *sg;
 	unsigned int sg_page_sizes;
@@ -42,6 +42,10 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 		return -ENOMEM;
 	}
 
+	flags = I915_ALLOC_MIN_PAGE_SIZE;
+	if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
+		flags |= I915_ALLOC_CONTIGUOUS;
+
 	ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks);
 	if (ret)
 		goto err_free_sg;
@@ -56,7 +60,8 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 	list_for_each_entry(block, blocks, link) {
 		u64 block_size, offset;
 
-		block_size = i915_buddy_block_size(&mem->mm, block);
+		block_size = min_t(u64, size,
+				   i915_buddy_block_size(&mem->mm, block));
 		offset = i915_buddy_block_offset(block);
 
 		GEM_BUG_ON(overflows_type(block_size, sg->length));
@@ -98,10 +103,12 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
 }
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem)
+					struct intel_memory_region *mem,
+					unsigned long flags)
 {
 	INIT_LIST_HEAD(&obj->mm.blocks);
 	obj->mm.region = intel_memory_region_get(mem);
+	obj->flags = flags;
 }
 
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj)
@@ -116,6 +123,8 @@ i915_gem_object_create_region(struct intel_memory_region *mem,
 {
 	struct drm_i915_gem_object *obj;
 
+	GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS);
+
 	if (!mem)
 		return ERR_PTR(-ENODEV);
 
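The min_t() clamp above is what keeps the contiguous case honest: the
region code rounds the allocation up to a power of two, so the single
block handed back can be larger than obj->base.size, and the sg entry
must not report that padding. A worked case with assumed numbers:

	96K object, 4K chunk size:
	  roundup_pow_of_two(96K) = 128K   -> backed by one 128K block
	  sg length = min(96K, 128K) = 96K -> the 32K of internal
	                                      fragmentation stays
	                                      invisible to the object

The igt_mock_contiguous selftest added below exercises this with a
randomized, page-aligned object size.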
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h
index ebddc86d78f7..f2ff6f8bff74 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_region.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h
@@ -17,7 +17,8 @@ void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj,
 				     struct sg_table *pages);
 
 void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj,
-					struct intel_memory_region *mem);
+					struct intel_memory_region *mem,
+					unsigned long flags);
 void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj);
 
 struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 1313c7f93ef8..2549c233465c 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -451,6 +451,7 @@ static int igt_mock_exhaust_device_supported_pages(void *arg)
 
 static int igt_mock_memory_region_huge_pages(void *arg)
 {
+	const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS };
 	struct i915_ppgtt *ppgtt = arg;
 	struct drm_i915_private *i915 = ppgtt->vm.i915;
 	unsigned long supported = INTEL_INFO(i915)->page_sizes;
@@ -469,41 +470,47 @@ static int igt_mock_memory_region_huge_pages(void *arg)
 	for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
 		unsigned int page_size = BIT(bit);
 		resource_size_t phys;
+		int i;
 
-		obj = i915_gem_object_create_region(mem, page_size, 0);
-		if (IS_ERR(obj)) {
-			err = PTR_ERR(obj);
-			goto out_region;
-		}
+		for (i = 0; i < ARRAY_SIZE(flags); ++i) {
+			obj = i915_gem_object_create_region(mem, page_size,
+							    flags[i]);
+			if (IS_ERR(obj)) {
+				err = PTR_ERR(obj);
+				goto out_region;
+			}
 
-		vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
-		if (IS_ERR(vma)) {
-			err = PTR_ERR(vma);
-			goto out_put;
-		}
+			vma = i915_vma_instance(obj, &ppgtt->vm, NULL);
+			if (IS_ERR(vma)) {
+				err = PTR_ERR(vma);
+				goto out_put;
+			}
 
-		err = i915_vma_pin(vma, 0, 0, PIN_USER);
-		if (err)
-			goto out_close;
+			err = i915_vma_pin(vma, 0, 0, PIN_USER);
+			if (err)
+				goto out_close;
 
-		phys = i915_gem_object_get_dma_address(obj, 0);
-		if (!IS_ALIGNED(phys, page_size)) {
-			pr_err("memory region misaligned(%pa)\n", &phys);
-			err = -EINVAL;
-			goto out_close;
-		}
+			phys = i915_gem_object_get_dma_address(obj, 0);
+			if (!IS_ALIGNED(phys, page_size)) {
+				pr_err("memory region misaligned(%pa) flags=%u\n",
+				       &phys, flags[i]);
+				err = -EINVAL;
+				goto out_close;
+			}
 
-		if (vma->page_sizes.gtt != page_size) {
-			pr_err("page_sizes.gtt=%u, expected=%u\n",
-			       vma->page_sizes.gtt, page_size);
-			err = -EINVAL;
-			goto out_unpin;
-		}
+			if (vma->page_sizes.gtt != page_size) {
+				pr_err("page_sizes.gtt=%u, expected=%u flags=%u\n",
+				       vma->page_sizes.gtt, page_size, flags[i]);
+				err = -EINVAL;
+				goto out_unpin;
+			}
 
-		i915_vma_unpin(vma);
-		i915_vma_close(vma);
+			i915_vma_unpin(vma);
+			i915_vma_close(vma);
 
-		i915_gem_object_put(obj);
+			__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+			i915_gem_object_put(obj);
+		}
 	}
 
 	goto out_region;
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index 2ef67c397fca..98006618e871 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -47,8 +47,8 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 				      unsigned int flags,
 				      struct list_head *blocks)
 {
-	unsigned long n_pages = size >> ilog2(mem->mm.chunk_size);
 	unsigned int min_order = 0;
+	unsigned long n_pages;
 
 	GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
 	GEM_BUG_ON(!list_empty(blocks));
@@ -58,6 +58,13 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
 			    ilog2(mem->mm.chunk_size);
 	}
 
+	if (flags & I915_ALLOC_CONTIGUOUS) {
+		size = roundup_pow_of_two(size);
+		min_order = ilog2(size) - ilog2(mem->mm.chunk_size);
+	}
+
+	n_pages = size >> ilog2(mem->mm.chunk_size);
+
 	mutex_lock(&mem->mm_lock);
 
 	do {
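
A minimal sketch of the order computation above (the helper itself is
illustrative, not part of the patch), with a worked case in the
comments:

	/*
	 * For a contiguous allocation the buddy allocator must return
	 * a single block, so the minimum order has to cover the whole
	 * (power-of-two rounded) size. E.g. size = 96K, chunk_size = 4K:
	 *   roundup_pow_of_two(96K) = 128K
	 *   min_order = ilog2(128K) - ilog2(4K) = 17 - 12 = 5
	 * i.e. one order-5 block of 32 contiguous 4K chunks.
	 */
	static unsigned int contiguous_min_order(u64 size, u64 chunk_size)
	{
		return ilog2(roundup_pow_of_two(size)) - ilog2(chunk_size);
	}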
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index eb304a3ed6ad..05f289953c8b 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -18,7 +18,8 @@ struct drm_i915_gem_object;
 struct intel_memory_region;
 struct sg_table;
 
-#define I915_ALLOC_MIN_PAGE_SIZE   BIT(0)
+#define I915_ALLOC_MIN_PAGE_SIZE  BIT(0)
+#define I915_ALLOC_CONTIGUOUS     BIT(1)
 
 struct intel_memory_region_ops {
 	unsigned int flags;
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 42dad6067c47..5eefbeaa674a 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -13,6 +13,7 @@
 
 #include "gem/i915_gem_region.h"
 #include "gem/selftests/mock_context.h"
+#include "selftests/i915_random.h"
 
 static void close_objects(struct intel_memory_region *mem,
 			  struct list_head *objects)
@@ -87,10 +88,174 @@ static int igt_mock_fill(void *arg)
 	return err;
 }
 
+static struct drm_i915_gem_object *
+igt_object_create(struct intel_memory_region *mem,
+		  struct list_head *objects,
+		  u64 size,
+		  unsigned int flags)
+{
+	struct drm_i915_gem_object *obj;
+	int err;
+
+	obj = i915_gem_object_create_region(mem, size, flags);
+	if (IS_ERR(obj))
+		return obj;
+
+	err = i915_gem_object_pin_pages(obj);
+	if (err)
+		goto put;
+
+	list_add(&obj->st_link, objects);
+	return obj;
+
+put:
+	i915_gem_object_put(obj);
+	return ERR_PTR(err);
+}
+
+static void igt_object_release(struct drm_i915_gem_object *obj)
+{
+	i915_gem_object_unpin_pages(obj);
+	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
+	i915_gem_object_put(obj);
+	list_del(&obj->st_link);
+}
+
+static int igt_mock_contiguous(void *arg)
+{
+	struct intel_memory_region *mem = arg;
+	struct drm_i915_gem_object *obj;
+	unsigned long n_objects;
+	LIST_HEAD(objects);
+	LIST_HEAD(holes);
+	I915_RND_STATE(prng);
+	resource_size_t target;
+	resource_size_t total;
+	resource_size_t min;
+	int err = 0;
+
+	total = resource_size(&mem->region);
+
+	/* Min size */
+	obj = igt_object_create(mem, &objects, mem->mm.chunk_size,
+				I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("%s min object spans multiple sg entries\n", __func__);
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	igt_object_release(obj);
+
+	/* Max size */
+	obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("%s max object spans multiple sg entries\n", __func__);
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	igt_object_release(obj);
+
+	/* Internal fragmentation should not bleed into the object size */
+	target = round_up(prandom_u32_state(&prng) % total, PAGE_SIZE);
+	target = max_t(u64, PAGE_SIZE, target);
+
+	obj = igt_object_create(mem, &objects, target,
+				I915_BO_ALLOC_CONTIGUOUS);
+	if (IS_ERR(obj))
+		return PTR_ERR(obj);
+
+	if (obj->base.size != target) {
+		pr_err("%s obj->base.size(%llx) != target(%llx)\n", __func__,
+		       (u64)obj->base.size, (u64)target);
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	if (obj->mm.pages->nents != 1) {
+		pr_err("%s object spans multiple sg entries\n", __func__);
+		err = -EINVAL;
+		goto err_close_objects;
+	}
+
+	igt_object_release(obj);
+
+	/*
+	 * Try to fragment the address space, such that half of it is free, but
+	 * the max contiguous block size is SZ_64K.
+	 */
+
+	target = SZ_64K;
+	n_objects = div64_u64(total, target);
+
+	while (n_objects--) {
+		struct list_head *list;
+
+		if (n_objects % 2)
+			list = &holes;
+		else
+			list = &objects;
+
+		obj = igt_object_create(mem, list, target,
+					I915_BO_ALLOC_CONTIGUOUS);
+		if (IS_ERR(obj)) {
+			err = PTR_ERR(obj);
+			goto err_close_objects;
+		}
+	}
+
+	close_objects(mem, &holes);
+
+	min = target;
+	target = total >> 1;
+
+	/* Make sure we can still allocate all the fragmented space */
+	obj = igt_object_create(mem, &objects, target, 0);
+	if (IS_ERR(obj)) {
+		err = PTR_ERR(obj);
+		goto err_close_objects;
+	}
+
+	igt_object_release(obj);
+
+	/*
+	 * Even though we have enough free space, we don't have a big enough
+	 * contiguous block. Make sure that holds true.
+	 */
+
+	do {
+		bool should_fail = target > min;
+
+		obj = igt_object_create(mem, &objects, target,
+					I915_BO_ALLOC_CONTIGUOUS);
+		if (should_fail != IS_ERR(obj)) {
+			pr_err("%s target allocation(%llx) mismatch\n",
+			       __func__, (u64)target);
+			err = -EINVAL;
+			goto err_close_objects;
+		}
+
+		target >>= 1;
+	} while (target >= mem->mm.chunk_size);
+
+err_close_objects:
+	list_splice_tail(&holes, &objects);
+	close_objects(mem, &objects);
+	return err;
+}
+
 int intel_memory_region_mock_selftests(void)
 {
 	static const struct i915_subtest tests[] = {
 		SUBTEST(igt_mock_fill),
+		SUBTEST(igt_mock_contiguous),
 	};
 	struct intel_memory_region *mem;
 	struct drm_i915_private *i915;
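
The fragmentation phase of igt_mock_contiguous is worth spelling out:
the test fills the whole region with 64K contiguous objects, then
frees every other one, leaving the region half free but striped
(illustrative layout, assuming the mock region starts out empty):

	|obj|hole|obj|hole|obj|hole| ... |obj|hole|

so no free extent is larger than 64K. A non-contiguous allocation of
half the region must still succeed by gathering the scattered holes,
while the final loop walks the contiguous target down from half the
region size and checks that requests above 64K fail and those at or
below it succeed.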
diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c
index 0e9a575ede3b..7b0c99ddc2d5 100644
--- a/drivers/gpu/drm/i915/selftests/mock_region.c
+++ b/drivers/gpu/drm/i915/selftests/mock_region.c
@@ -36,7 +36,7 @@ mock_object_create(struct intel_memory_region *mem,
 
 	i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
 
-	i915_gem_object_init_memory_region(obj, mem);
+	i915_gem_object_init_memory_region(obj, mem, flags);
 
 	return obj;
 }
-- 
2.20.1


Thread overview: 38+ messages
2019-10-03 19:24 [PATCH v2 00/22] LMEM basics Matthew Auld
2019-10-03 19:24 ` [PATCH v2 01/22] drm/i915/stolen: make the object creation interface consistent Matthew Auld
2019-10-03 19:29   ` Chris Wilson
2019-10-03 19:24 ` [PATCH v2 02/22] drm/i915: introduce intel_memory_region Matthew Auld
2019-10-03 19:24 ` Matthew Auld [this message]
2019-10-03 19:24 ` [PATCH v2 04/22] drm/i915/region: support volatile objects Matthew Auld
2019-10-03 19:40   ` Chris Wilson
2019-10-03 19:24 ` [PATCH v2 05/22] drm/i915: Add memory region information to device_info Matthew Auld
2019-10-03 19:24 ` [PATCH v2 06/22] drm/i915: support creating LMEM objects Matthew Auld
2019-10-03 19:46   ` Chris Wilson
2019-10-03 19:24 ` [PATCH v2 07/22] drm/i915: setup io-mapping for LMEM Matthew Auld
2019-10-03 19:24 ` [PATCH v2 08/22] drm/i915/lmem: support kernel mapping Matthew Auld
2019-10-03 19:48   ` Chris Wilson
2019-10-03 19:24 ` [PATCH v2 09/22] drm/i915/selftests: add write-dword test for LMEM Matthew Auld
2019-10-03 19:24 ` [PATCH v2 10/22] drm/i915/selftests: extend coverage to include LMEM huge-pages Matthew Auld
2019-10-03 19:24 ` [PATCH v2 11/22] drm/i915: enumerate and init each supported region Matthew Auld
2019-10-03 19:24 ` [PATCH v2 12/22] drm/i915: treat shmem as a region Matthew Auld
2019-10-03 19:24 ` [PATCH v2 13/22] drm/i915: treat stolen " Matthew Auld
2019-10-03 19:43   ` Tang, CQ
2019-10-03 19:24 ` [PATCH v2 14/22] drm/i915: define HAS_MAPPABLE_APERTURE Matthew Auld
2019-10-03 19:53   ` Chris Wilson
2019-10-03 19:24 ` [PATCH v2 15/22] drm/i915: do not map aperture if it is not available Matthew Auld
2019-10-03 19:24 ` [PATCH v2 16/22] drm/i915: set num_fence_regs to 0 if there is no aperture Matthew Auld
2019-10-03 19:24 ` [PATCH v2 17/22] drm/i915: error capture with no ggtt slot Matthew Auld
2019-10-03 19:24 ` [PATCH v2 18/22] drm/i915: Don't try to place HWS in non-existing mappable region Matthew Auld
2019-10-03 19:24 ` [PATCH v2 19/22] drm/i915: don't allocate the ring in stolen if we lack aperture Matthew Auld
2019-10-03 19:37   ` Tang, CQ
2019-10-03 19:55   ` Chris Wilson
2019-10-03 19:24 ` [PATCH v2 20/22] drm/i915/selftests: fallback to using the gpu to trash stolen Matthew Auld
2019-10-03 20:02   ` Chris Wilson
2019-10-03 19:24 ` [PATCH v2 21/22] drm/i915/selftests: check for missing aperture Matthew Auld
2019-10-03 19:24 ` [PATCH v2 22/22] HAX drm/i915: add the fake lmem region Matthew Auld
2019-10-03 22:11 ` ✗ Fi.CI.CHECKPATCH: warning for LMEM basics (rev2) Patchwork
2019-10-03 22:21 ` ✗ Fi.CI.SPARSE: " Patchwork
2019-10-03 22:34 ` ✓ Fi.CI.BAT: success " Patchwork
2019-10-04 11:34 ` ✗ Fi.CI.IGT: failure " Patchwork
2019-10-04 12:06   ` Kai Vehmanen
2019-10-04 12:08     ` Chris Wilson
