From: Chris Wilson <chris@chris-wilson.co.uk>
To: dri-devel@lists.freedesktop.org
Cc: intel-gfx@lists.freedesktop.org
Subject: [PATCH v4 14/38] drm: kselftest for drm_mm and eviction
Date: Thu, 22 Dec 2016 08:36:17 +0000
Message-ID: <20161222083641.2691-15-chris@chris-wilson.co.uk>
In-Reply-To: <20161222083641.2691-1-chris@chris-wilson.co.uk>

Check that we can add arbitrary blocks to the eviction scanner and that
it finds the first minimal hole matching our request.
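
For reference, the eviction protocol being exercised is the usual drm_mm
scan dance. Below is a minimal sketch using only the interfaces that
appear in the diff; struct my_obj, evict_and_insert() and the surrounding
driver glue are illustrative, and includes, locking and error handling
are elided:

struct my_obj {
	struct drm_mm_node node;
	struct list_head link;
};

static int evict_and_insert(struct drm_mm *mm,
			    struct my_obj *objs, unsigned int count,
			    struct drm_mm_node *new_node,
			    u64 size, unsigned int alignment)
{
	LIST_HEAD(evict_list);
	struct my_obj *obj, *on;
	unsigned int i;

	/* 1. Describe the hole we need. */
	drm_mm_init_scan(mm, size, alignment, 0 /* color */);

	/* 2. Feed candidates (e.g. in LRU order) until a hole is found. */
	for (i = 0; i < count; i++) {
		obj = &objs[i];
		list_add(&obj->link, &evict_list);
		if (drm_mm_scan_add_block(&obj->node))
			break;
	}

	/*
	 * 3. Every block added to the scan must be removed again, in
	 *    reverse order of addition (list_add() puts the newest entry
	 *    first, so a forward walk does exactly that). Only blocks for
	 *    which drm_mm_scan_remove_block() returns true are needed to
	 *    open up the hole.
	 */
	list_for_each_entry_safe(obj, on, &evict_list, link) {
		if (!drm_mm_scan_remove_block(&obj->node))
			list_del(&obj->link);
	}
	if (list_empty(&evict_list))
		return -ENOSPC;

	/* 4. Evict the chosen blocks, then claim the hole they leave. */
	list_for_each_entry(obj, &evict_list, link)
		drm_mm_remove_node(&obj->node);

	return drm_mm_insert_node_generic(mm, new_node, size, alignment, 0,
					  DRM_MM_SEARCH_DEFAULT,
					  DRM_MM_CREATE_DEFAULT);
}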

v2: Refactor out some common eviction code for reuse by later eviction
selftests (see the sketch below).
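
The common code in question is the evict_nodes() helper added below. As a
rough illustration of the intended reuse, a later range-restricted
eviction selftest could drive it like this; a sketch only, assuming the
existing drm_mm_init_scan_with_range() and
drm_mm_insert_node_in_range_generic() interfaces, with the function name
and the range_start/range_end parameters purely illustrative:

static int evict_something_in_range(struct drm_mm *mm,
				    struct evict_node *nodes,
				    unsigned int *order, unsigned int count,
				    u64 range_start, u64 range_end,
				    unsigned int size, unsigned int alignment,
				    const struct insert_mode *mode)
{
	LIST_HEAD(evict_list);
	struct evict_node *e;
	struct drm_mm_node tmp = {};
	int err;

	/* Restrict the scan to [range_start, range_end) rather than the whole mm. */
	drm_mm_init_scan_with_range(mm, size, alignment, 0,
				    range_start, range_end);
	if (!evict_nodes(mm, nodes, order, count, &evict_list))
		return -EINVAL;

	/* The hole opened up must satisfy the original, range-restricted request. */
	err = drm_mm_insert_node_in_range_generic(mm, &tmp, size, alignment, 0,
						  range_start, range_end,
						  mode->search_flags,
						  mode->create_flags);
	if (err)
		return err;

	drm_mm_remove_node(&tmp);

	/* Put the evicted nodes back so the next pass starts from a full mm. */
	list_for_each_entry(e, &evict_list, link) {
		err = drm_mm_reserve_node(mm, &e->node);
		if (err)
			return err;
	}

	return 0;
}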

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
---
 drivers/gpu/drm/selftests/drm_mm_selftests.h |   1 +
 drivers/gpu/drm/selftests/test-drm_mm.c      | 337 +++++++++++++++++++++++++++
 2 files changed, 338 insertions(+)

diff --git a/drivers/gpu/drm/selftests/drm_mm_selftests.h b/drivers/gpu/drm/selftests/drm_mm_selftests.h
index a7a3763f8b20..a31b4458c7eb 100644
--- a/drivers/gpu/drm/selftests/drm_mm_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_mm_selftests.h
@@ -15,3 +15,4 @@ selftest(insert_range, igt_insert_range)
 selftest(align, igt_align)
 selftest(align32, igt_align32)
 selftest(align64, igt_align64)
+selftest(evict, igt_evict)
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 53ba268e07d9..065e2c72845b 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -36,6 +36,10 @@ static const struct insert_mode {
 	[TOPDOWN] = { "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
 	[BEST] = { "best", DRM_MM_SEARCH_BEST, DRM_MM_CREATE_DEFAULT },
 	{}
+}, evict_modes[] = {
+	{ "default", DRM_MM_SEARCH_DEFAULT, DRM_MM_CREATE_DEFAULT },
+	{ "top-down", DRM_MM_SEARCH_BELOW, DRM_MM_CREATE_TOP },
+	{}
 };
 
 static int igt_sanitycheck(void *ignored)
@@ -1110,6 +1114,339 @@ static int igt_align64(void *ignored)
 	return igt_align_pot(64);
 }
 
+static void show_scan(const struct drm_mm *scan)
+{
+	pr_info("scan: hit [%llx, %llx], size=%lld, align=%d, color=%ld\n",
+		scan->scan_hit_start, scan->scan_hit_end,
+		scan->scan_size, scan->scan_alignment, scan->scan_color);
+}
+
+static void show_holes(const struct drm_mm *mm, int count)
+{
+	u64 hole_start, hole_end;
+	struct drm_mm_node *hole;
+
+	drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+		struct drm_mm_node *next = list_next_entry(hole, node_list);
+		const char *node1 = NULL, *node2 = NULL;
+
+		if (hole->allocated)
+			node1 = kasprintf(GFP_KERNEL,
+					  "[%llx + %lld, color=%ld], ",
+					  hole->start, hole->size, hole->color);
+
+		if (next->allocated)
+			node2 = kasprintf(GFP_KERNEL,
+					  ", [%llx + %lld, color=%ld]",
+					  next->start, next->size, next->color);
+
+		pr_info("%sHole [%llx - %llx, size %lld]%s\n",
+			node1,
+			hole_start, hole_end, hole_end - hole_start,
+			node2);
+
+		kfree(node2);
+		kfree(node1);
+
+		if (!--count)
+			break;
+	}
+}
+
+struct evict_node {
+	struct drm_mm_node node;
+	struct list_head link;
+};
+
+static bool evict_nodes(struct drm_mm *mm,
+			struct evict_node *nodes,
+			unsigned int *order,
+			unsigned int count,
+			struct list_head *evict_list)
+{
+	struct evict_node *e, *en;
+	unsigned int i;
+
+	for (i = 0; i < count; i++) {
+		e = &nodes[order ? order[i] : i];
+		list_add(&e->link, evict_list);
+		if (drm_mm_scan_add_block(&e->node))
+			break;
+	}
+	list_for_each_entry_safe(e, en, evict_list, link) {
+		if (!drm_mm_scan_remove_block(&e->node))
+			list_del(&e->link);
+	}
+	if (list_empty(evict_list)) {
+		pr_err("Failed to find eviction: size=%lld [avail=%d], align=%d (color=%lu)\n",
+		       mm->scan_size, count,
+		       mm->scan_alignment,
+		       mm->scan_color);
+		return false;
+	}
+
+	list_for_each_entry(e, evict_list, link)
+		drm_mm_remove_node(&e->node);
+
+	return true;
+}
+
+static bool evict_nothing(struct drm_mm *mm,
+			  unsigned int total_size,
+			  struct evict_node *nodes)
+{
+	LIST_HEAD(evict_list);
+	struct evict_node *e;
+	struct drm_mm_node *node;
+	unsigned int n;
+
+	drm_mm_init_scan(mm, 1, 0, 0);
+	for (n = 0; n < total_size; n++) {
+		e = &nodes[n];
+		list_add(&e->link, &evict_list);
+		drm_mm_scan_add_block(&e->node);
+	}
+	list_for_each_entry(e, &evict_list, link)
+		drm_mm_scan_remove_block(&e->node);
+
+	for (n = 0; n < total_size; n++) {
+		e = &nodes[n];
+
+		if (!drm_mm_node_allocated(&e->node)) {
+			pr_err("node[%d] no longer allocated!\n", n);
+			return false;
+		}
+
+		e->link.next = NULL;
+	}
+
+	drm_mm_for_each_node(node, mm) {
+		e = container_of(node, typeof(*e), node);
+		e->link.next = &e->link;
+	}
+
+	for (n = 0; n < total_size; n++) {
+		e = &nodes[n];
+
+		if (!e->link.next) {
+			pr_err("node[%d] no longer connected!\n", n);
+			return false;
+		}
+	}
+
+	return assert_continuous(mm, nodes[0].node.size);
+}
+
+static bool evict_everything(struct drm_mm *mm,
+			     unsigned int total_size,
+			     struct evict_node *nodes)
+{
+	LIST_HEAD(evict_list);
+	struct evict_node *e, *en;
+	unsigned int n;
+	int err;
+
+	drm_mm_init_scan(mm, total_size, 0, 0);
+	for (n = 0; n < total_size; n++) {
+		e = &nodes[n];
+		list_add(&e->link, &evict_list);
+		drm_mm_scan_add_block(&e->node);
+	}
+	list_for_each_entry_safe(e, en, &evict_list, link) {
+		if (!drm_mm_scan_remove_block(&e->node)) {
+			pr_err("Node %lld not marked for eviction!\n",
+			       e->node.start);
+			list_del(&e->link);
+		}
+	}
+
+	list_for_each_entry(e, &evict_list, link)
+		drm_mm_remove_node(&e->node);
+
+	if (!assert_one_hole(mm, 0, total_size))
+		return false;
+
+	list_for_each_entry(e, &evict_list, link) {
+		err = drm_mm_reserve_node(mm, &e->node);
+		if (err) {
+			pr_err("Failed to reinsert node after eviction: start=%llx\n",
+			       e->node.start);
+			return false;
+		}
+	}
+
+	return assert_continuous(mm, nodes[0].node.size);
+}
+
+static int evict_something(struct drm_mm *mm,
+			   struct evict_node *nodes,
+			   unsigned int *order,
+			   unsigned int count,
+			   unsigned int size,
+			   unsigned int alignment,
+			   const struct insert_mode *mode)
+{
+	LIST_HEAD(evict_list);
+	struct evict_node *e;
+	struct drm_mm_node tmp;
+	int err;
+
+	drm_mm_init_scan(mm, size, alignment, 0);
+	if (!evict_nodes(mm,
+			 nodes, order, count,
+			 &evict_list))
+		return -EINVAL;
+
+	memset(&tmp, 0, sizeof(tmp));
+	err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
+					 mode->search_flags,
+					 mode->create_flags);
+	if (err) {
+		pr_err("Failed to insert into eviction hole: size=%d, align=%d\n",
+		       size, alignment);
+		show_scan(mm);
+		show_holes(mm, 3);
+		return err;
+	}
+
+	if (!assert_node(&tmp, mm, size, alignment, 0) || tmp.hole_follows) {
+		pr_err("Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
+		       tmp.size, size,
+		       alignment, misalignment(&tmp, alignment),
+		       tmp.start, tmp.hole_follows);
+		err = -EINVAL;
+	}
+
+	drm_mm_remove_node(&tmp);
+	if (err)
+		return err;
+
+	list_for_each_entry(e, &evict_list, link) {
+		err = drm_mm_reserve_node(mm, &e->node);
+		if (err) {
+			pr_err("Failed to reinsert node after eviction: start=%llx\n",
+			       e->node.start);
+			return err;
+		}
+	}
+
+	if (!assert_continuous(mm, nodes[0].node.size)) {
+		pr_err("range is no longer continuous\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int igt_evict(void *ignored)
+{
+	DRM_RND_STATE(prng, random_seed);
+	const unsigned int size = 8192;
+	const struct insert_mode *mode;
+	struct drm_mm mm;
+	struct evict_node *nodes;
+	struct drm_mm_node *node, *next;
+	unsigned int *order, n;
+	int ret, err;
+
+	/* Here we populate a full drm_mm and then try and insert a new node
+	 * by evicting other nodes in a random order. The drm_mm_scan should
+	 * pick the first matching hole it finds from the random list. We
+	 * repeat that for different allocation strategies, alignments and
+	 * sizes to try and stress the hole finder.
+	 */
+
+	ret = -ENOMEM;
+	nodes = vzalloc(size * sizeof(*nodes));
+	if (!nodes)
+		goto err;
+
+	order = drm_random_order(size, &prng);
+	if (!order)
+		goto err_nodes;
+
+	ret = -EINVAL;
+	drm_mm_init(&mm, 0, size);
+	for (n = 0; n < size; n++) {
+		err = drm_mm_insert_node(&mm, &nodes[n].node, 1, 0,
+					 DRM_MM_SEARCH_DEFAULT);
+		if (err) {
+			pr_err("insert failed, step %d\n", n);
+			ret = err;
+			goto out;
+		}
+	}
+
+	/* First check that using the scanner doesn't break the mm */
+	if (!evict_nothing(&mm, size, nodes)) {
+		pr_err("evict_nothing() failed\n");
+		goto out;
+	}
+	if (!evict_everything(&mm, size, nodes)) {
+		pr_err("evict_everything() failed\n");
+		goto out;
+	}
+
+	for (mode = evict_modes; mode->name; mode++) {
+		for (n = 1; n <= size; n <<= 1) {
+			drm_random_reorder(order, size, &prng);
+			err = evict_something(&mm,
+					      nodes, order, size,
+					      n, 1,
+					      mode);
+			if (err) {
+				pr_err("%s evict_something(size=%u) failed\n",
+				       mode->name, n);
+				ret = err;
+				goto out;
+			}
+		}
+
+		for (n = 1; n < size; n <<= 1) {
+			drm_random_reorder(order, size, &prng);
+			err = evict_something(&mm,
+					      nodes, order, size,
+					      size/2, n,
+					      mode);
+			if (err) {
+				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
+				       mode->name, size/2, n);
+				ret = err;
+				goto out;
+			}
+		}
+
+		for_each_prime_number_from(n, 1, min(size, max_prime)) {
+			unsigned int nsize = (size - n + 1) / 2;
+
+			DRM_MM_BUG_ON(!nsize);
+
+			drm_random_reorder(order, size, &prng);
+			err = evict_something(&mm,
+					      nodes, order, size,
+					      nsize, n,
+					      mode);
+			if (err) {
+				pr_err("%s evict_something(size=%u, alignment=%u) failed\n",
+				       mode->name, nsize, n);
+				ret = err;
+				goto out;
+			}
+		}
+	}
+
+	ret = 0;
+out:
+	drm_mm_for_each_node_safe(node, next, &mm)
+		drm_mm_remove_node(node);
+	drm_mm_takedown(&mm);
+	kfree(order);
+err_nodes:
+	vfree(nodes);
+err:
+	return ret;
+}
+
 #include "drm_selftest.c"
 
 static int __init test_drm_mm_init(void)
-- 
2.11.0
