All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Zbigniew Kempczyński" <zbigniew.kempczynski@intel.com>
To: igt-dev@lists.freedesktop.org
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Subject: [igt-dev] [PATCH i-g-t v12 17/31] tests/api_intel_allocator: Simple allocator test suite
Date: Tue,  5 Jan 2021 09:10:34 +0100	[thread overview]
Message-ID: <20210105081048.14389-18-zbigniew.kempczynski@intel.com> (raw)
In-Reply-To: <20210105081048.14389-1-zbigniew.kempczynski@intel.com>

From: Dominik Grzegorzek <dominik.grzegorzek@intel.com>

We want to verify the allocator works as expected, so try to exercise it thoroughly.

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/i915/api_intel_allocator.c | 589 +++++++++++++++++++++++++++++++
 tests/meson.build                |   1 +
 2 files changed, 590 insertions(+)
 create mode 100644 tests/i915/api_intel_allocator.c

diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
new file mode 100644
index 000000000..84366e70d
--- /dev/null
+++ b/tests/i915/api_intel_allocator.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <stdatomic.h>
+#include "i915/gem.h"
+#include "igt.h"
+#include "igt_aux.h"
+#include "intel_allocator.h"
+
+#define OBJ_SIZE 1024
+
+/* Minimal descriptor used by the tests to track what the allocator returned. */
+struct test_obj {
+	uint32_t handle;	/* pseudo GEM handle, see gem_handle_gen() */
+	uint64_t offset;	/* offset handed out by the allocator */
+	uint64_t size;		/* requested object size in bytes */
+};
+
+/* Monotonic counter for unique pseudo handles, shared by all threads. */
+static _Atomic(uint32_t) next_handle;
+
+/*
+ * Returns a process-unique handle; atomic_fetch_add makes it safe to call
+ * from multiple threads in parallel (used by the parallel-one subtests).
+ */
+static inline uint32_t gem_handle_gen(void)
+{
+	return atomic_fetch_add(&next_handle, 1);
+}
+
+/*
+ * Smoke test of the public alloc/free API on the simple allocator:
+ * allocating the same handle twice must be idempotent, is_allocated must
+ * track the allocation state, and freeing must succeed exactly once.
+ */
+static void alloc_simple(int fd)
+{
+	uint64_t ialh;
+	uint64_t offset0, offset1;
+	bool is_allocated, freed;
+
+	ialh = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+
+	/* Same handle, same size/alignment -> same offset expected. */
+	offset0 = intel_allocator_alloc(ialh, 1, 0x1000, 0x1000);
+	offset1 = intel_allocator_alloc(ialh, 1, 0x1000, 0x1000);
+	igt_assert(offset0 == offset1);
+
+	is_allocated = intel_allocator_is_allocated(ialh, 1, 0x1000, offset0);
+	igt_assert(is_allocated);
+
+	freed = intel_allocator_free(ialh, 1);
+	igt_assert(freed);
+
+	/* After freeing, the range must no longer be reported as allocated. */
+	is_allocated = intel_allocator_is_allocated(ialh, 1, 0x1000, offset0);
+	igt_assert(!is_allocated);
+
+	/* Double free of the same handle must be rejected. */
+	freed = intel_allocator_free(ialh, 1);
+	igt_assert(!freed);
+
+	intel_allocator_close(ialh);
+}
+
+/*
+ * Exercises reserve/unreserve on the simple allocator: a range at the start
+ * of the address space can be reserved once, is then reported as reserved,
+ * cannot be reserved a second time, and is gone after unreserve.
+ */
+static void reserve_simple(int fd)
+{
+	uint64_t ialh;
+	uint64_t start;
+	bool reserved, unreserved;
+
+	ialh = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	/* Only the range start is needed; the end is ignored (NULL). */
+	intel_allocator_get_address_range(ialh, &start, NULL);
+
+	reserved = intel_allocator_reserve(ialh, 0, 0x1000, start);
+	igt_assert(reserved);
+
+	reserved = intel_allocator_is_reserved(ialh, 0x1000, start);
+	igt_assert(reserved);
+
+	/* Reserving an already reserved range must fail. */
+	reserved = intel_allocator_reserve(ialh, 0, 0x1000, start);
+	igt_assert(!reserved);
+
+	unreserved = intel_allocator_unreserve(ialh, 0, 0x1000, start);
+	igt_assert(unreserved);
+
+	reserved = intel_allocator_is_reserved(ialh, 0x1000, start);
+	igt_assert(!reserved);
+
+	intel_allocator_close(ialh);
+}
+
+/*
+ * Verifies that purging a batchbuffer (reset with the purge flag) keeps the
+ * allocator state intact: re-adding the same buf, after invalidating its
+ * cached address, must land at the very same offset as before.
+ */
+static void purge_bb(int fd)
+{
+	struct buf_ops *bops;
+	struct intel_buf *buf;
+	struct intel_bb *ibb;
+	uint64_t offset0, offset1;
+
+	bops = buf_ops_create(fd);
+	buf = intel_buf_create(bops, 512, 512, 32, 0, I915_TILING_NONE,
+			       I915_COMPRESSION_NONE);
+	ibb = intel_bb_create(fd, 4096);
+	intel_bb_set_debug(ibb, true);
+
+	intel_bb_add_intel_buf(ibb, buf, false);
+	offset0 = buf->addr.offset;
+
+	/* Purge the bb and forget the address cached on the buf. */
+	intel_bb_reset(ibb, true);
+	buf->addr.offset = INTEL_BUF_INVALID_ADDRESS;
+
+	intel_bb_add_intel_buf(ibb, buf, false);
+	offset1 = buf->addr.offset;
+
+	igt_assert(offset0 == offset1);
+
+	intel_buf_destroy(buf);
+	intel_bb_destroy(ibb);
+	buf_ops_destroy(bops);
+}
+
+/*
+ * Exercises the reserve/unreserve ops directly on the allocator vtable:
+ * an overlapping range cannot be reserved twice, a range occupied by an
+ * allocation cannot be reserved until the allocation is freed, and after
+ * unreserving everything the allocator must report itself empty.
+ *
+ * NOTE(review): this pokes struct intel_allocator internals (bypassing the
+ * handle-based API) — presumably intentional for white-box coverage.
+ */
+static void reserve(int fd, uint8_t type)
+{
+	struct intel_allocator *ial;
+	struct test_obj obj;
+
+	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+
+	igt_assert(ial->reserve(ial, 0, 0x40000, 0x800000));
+	/* try reserve once again; overlaps the range above so it must fail */
+	igt_assert_eq(ial->reserve(ial, 0, 0x40040, 0x700000), false);
+
+	obj.handle = gem_handle_gen();
+	obj.size = OBJ_SIZE;
+	obj.offset = ial->alloc(ial, obj.handle, obj.size, 0);
+
+	/* The object's range is in use, so reserving it must fail... */
+	igt_assert_eq(ial->reserve(ial, 0, obj.offset,
+				obj.offset + obj.size), false);
+	ial->free(ial, obj.handle);
+	/* ...and succeed once the object has been freed. */
+	igt_assert_eq(ial->reserve(ial, 0, obj.offset,
+				obj.offset + obj.size), true);
+
+	ial->unreserve(ial, 0, obj.offset, obj.offset + obj.size);
+	ial->unreserve(ial, 0, 0x40000, 0x800000);
+	igt_assert(ial->reserve(ial, 0, 0x40040, 0x700000));
+	ial->unreserve(ial, 0, 0x40040, 0x700000);
+
+	igt_assert(ial->is_empty(ial));
+
+	intel_allocator_close(to_user_pointer(ial));
+}
+
+/*
+ * Returns true when the two objects' address ranges intersect.
+ *
+ * Two half-open ranges [begin, end) overlap iff each one begins before the
+ * other ends. The previous condition compared end1 against end2 and wrongly
+ * reported identical (fully overlapping) ranges as non-overlapping, so the
+ * overlap check in basic_alloc() could never catch a duplicated offset.
+ */
+static bool overlaps(struct test_obj *buf1, struct test_obj *buf2)
+{
+	uint64_t begin1 = buf1->offset;
+	uint64_t end1 = buf1->offset + buf1->size;
+	uint64_t begin2 = buf2->offset;
+	uint64_t end2 = buf2->offset + buf2->size;
+
+	return begin1 < end2 && begin2 < end1;
+}
+
+/*
+ * Allocates @cnt objects with 4 KiB alignment, verifies no two returned
+ * ranges overlap (skipped for the stateless random allocator), then frees
+ * everything and checks the allocator reports itself empty.
+ */
+static void basic_alloc(int fd, int cnt, uint8_t type)
+{
+	struct test_obj *obj;
+	struct intel_allocator *ial;
+	int i, j;
+
+	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+	obj = malloc(sizeof(struct test_obj) * cnt);
+	igt_assert(obj);
+
+	for (i = 0; i < cnt; i++) {
+		igt_progress("allocating objects: ", i, cnt);
+		obj[i].handle = gem_handle_gen();
+		obj[i].size = OBJ_SIZE;
+		obj[i].offset = ial->alloc(ial, obj[i].handle,
+					   obj[i].size, 4096);
+		igt_assert_eq(obj[i].offset % 4096, 0);
+	}
+
+	for (i = 0; i < cnt; i++) {
+		igt_progress("check overlapping: ", i, cnt);
+
+		/* Random allocator is stateless, overlaps are expected. */
+		if (type == INTEL_ALLOCATOR_RANDOM)
+			continue;
+
+		for (j = 0; j < cnt; j++) {
+			if (j == i)
+				continue;
+
+			igt_assert(!overlaps(&obj[i], &obj[j]));
+		}
+	}
+
+	for (i = 0; i < cnt; i++) {
+		igt_progress("freeing objects: ", i, cnt);
+		ial->free(ial, obj[i].handle);
+	}
+
+	igt_assert(ial->is_empty(ial));
+
+	free(obj);
+	intel_allocator_close(to_user_pointer(ial));
+}
+
+/*
+ * Checks offset reuse: re-allocating an existing handle returns the same
+ * offset, a freed hole is handed to the next allocation of the same size,
+ * and a re-allocated handle whose hole was taken gets a new offset.
+ */
+static void reuse(int fd, uint8_t type)
+{
+	struct test_obj obj[128], tmp;
+	struct intel_allocator *ial;
+	uint64_t prev_offset;
+	int i;
+
+	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+
+	for (i = 0; i < 128; i++) {
+		obj[i].handle = gem_handle_gen();
+		obj[i].size = OBJ_SIZE;
+		obj[i].offset = ial->alloc(ial, obj[i].handle,
+					   obj[i].size, 0x40);
+	}
+
+	/* check simple reuse */
+	for (i = 0; i < 128; i++) {
+		prev_offset = obj[i].offset;
+		obj[i].offset = ial->alloc(ial, obj[i].handle, obj[i].size, 0);
+		igt_assert(prev_offset == obj[i].offset);
+	}
+	/* continue with the last object (i == 127) */
+	i--;
+
+	/* free bo previously allocated */
+	ial->free(ial, obj[i].handle);
+	/* alloc different buffer to fill freed hole */
+	tmp.handle = gem_handle_gen();
+	tmp.offset = ial->alloc(ial, tmp.handle, OBJ_SIZE, 0);
+	igt_assert(prev_offset == tmp.offset);
+
+	/* hole is taken by tmp, so obj[i] must land somewhere else */
+	obj[i].offset = ial->alloc(ial, obj[i].handle, obj[i].size, 0);
+	igt_assert(prev_offset != obj[i].offset);
+	ial->free(ial, tmp.handle);
+
+	for (i = 0; i < 128; i++)
+		ial->free(ial, obj[i].handle);
+
+	igt_assert(ial->is_empty(ial));
+
+	intel_allocator_close(to_user_pointer(ial));
+}
+
+/* Per-thread arguments for the parallel alloc/free workers. */
+struct ial_thread_args {
+	struct intel_allocator *ial;	/* shared allocator, guarded by ial->mutex */
+	pthread_t thread;
+	uint32_t *handles;	/* shared array, disjoint slots per thread */
+	uint64_t *offsets;	/* shared array, disjoint slots per thread */
+	uint32_t count;		/* total number of objects across all threads */
+	int threads;		/* number of worker threads (stride) */
+	int idx;		/* this worker's index, 0 <= idx < threads */
+};
+
+/*
+ * Worker: allocates every threads-th object starting at its own index, with
+ * a random power-of-two alignment (2..2^20). The allocator call (and the
+ * random() feeding its alignment argument) is serialized by ial->mutex;
+ * each thread writes disjoint handles[]/offsets[] slots, so no other
+ * synchronization is needed.
+ */
+static void *alloc_bo_in_thread(void *arg)
+{
+	struct ial_thread_args *a = arg;
+	int i;
+
+	for (i = a->idx; i < a->count; i += a->threads) {
+		a->handles[i] = gem_handle_gen();
+		pthread_mutex_lock(&a->ial->mutex);
+		a->offsets[i] = a->ial->alloc(a->ial, a->handles[i], OBJ_SIZE,
+					      1UL << ((random() % 20) + 1));
+		pthread_mutex_unlock(&a->ial->mutex);
+	}
+
+	return NULL;
+}
+
+/*
+ * Worker: frees every threads-th object, starting at (idx + 1) % threads so
+ * each thread frees the stripe allocated by a *different* thread. Across all
+ * workers every index is still freed exactly once.
+ */
+static void *free_bo_in_thread(void *arg)
+{
+	struct ial_thread_args *a = arg;
+	int i;
+
+	for (i = (a->idx + 1) % a->threads; i < a->count; i += a->threads) {
+		pthread_mutex_lock(&a->ial->mutex);
+		a->ial->free(a->ial, a->handles[i]);
+		pthread_mutex_unlock(&a->ial->mutex);
+	}
+
+	return NULL;
+}
+
+#define THREADS 6
+
+/*
+ * Spawns THREADS workers allocating 4096 objects in parallel on one
+ * allocator, verifies the results are reproducible for stateful allocators
+ * (same handle -> same offset), frees everything in parallel, then checks
+ * every previously used offset can be reserved, i.e. is really free again.
+ */
+static void parallel_one(int fd, uint8_t type)
+{
+	struct intel_allocator *ial;
+	struct ial_thread_args a[THREADS];
+	uint32_t *handles;
+	uint64_t *offsets;
+	int count, i;
+
+	srandom(0xdeadbeef);
+	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+	count = 1UL << 12;
+
+	handles = malloc(sizeof(uint32_t) * count);
+	offsets = calloc(1, sizeof(uint64_t) * count);
+	igt_assert(handles && offsets);
+
+	for (i = 0; i < THREADS; i++) {
+		a[i].ial = ial;
+		a[i].handles = handles;
+		a[i].offsets = offsets;
+		a[i].count = count;
+		a[i].threads = THREADS;
+		a[i].idx = i;
+		pthread_create(&a[i].thread, NULL, alloc_bo_in_thread, &a[i]);
+	}
+
+	for (i = 0; i < THREADS; i++)
+		pthread_join(a[i].thread, NULL);
+
+	/* Check all objects are allocated (same handle -> same offset). */
+	for (i = 0; i < count; i++) {
+		/*
+		 * Random allocator doesn't keep state and always returns a
+		 * different offset, so skip the reproducibility check for it.
+		 */
+		if (type == INTEL_ALLOCATOR_RANDOM)
+			break;
+
+		/*
+		 * Compare full 64-bit values; plain igt_assert_eq() would
+		 * truncate the offsets to int.
+		 */
+		igt_assert_eq_u64(offsets[i],
+				  ial->alloc(ial, handles[i], OBJ_SIZE, 0));
+	}
+
+	for (i = 0; i < THREADS; i++)
+		pthread_create(&a[i].thread, NULL, free_bo_in_thread, &a[i]);
+
+	for (i = 0; i < THREADS; i++)
+		pthread_join(a[i].thread, NULL);
+
+	/* Check all offsets where objects were are free (reservable). */
+	for (i = 0; i < count; i++) {
+		if (type == INTEL_ALLOCATOR_RANDOM)
+			break;
+
+		igt_assert(ial->reserve(ial, 0, offsets[i], offsets[i] + 1));
+	}
+
+	free(handles);
+	free(offsets);
+
+	intel_allocator_close(to_user_pointer(ial));
+}
+
+/* Number of GEM objects allocated per iteration.
+ * NOTE(review): currently 1 — presumably kept small on purpose; confirm
+ * whether a larger group was intended for more stress. */
+#define SIMPLE_GROUP_ALLOCS 1
+/*
+ * One alloc/free round on a simple allocator for a random context (0 or 1):
+ * creates real GEM objects of random size (4-16 KiB), allocates offsets for
+ * them, then frees both the allocator entries and the GEM handles.
+ * Safe to call from forked children and threads (multiprocess mode).
+ */
+static void __simple_allocs(int fd)
+{
+	uint32_t handles[SIMPLE_GROUP_ALLOCS];
+	uint64_t ahnd;
+	uint32_t ctx;
+	int i;
+
+	ctx = rand() % 2;
+	ahnd = intel_allocator_open(fd, ctx, INTEL_ALLOCATOR_SIMPLE);
+
+	for (i = 0; i < SIMPLE_GROUP_ALLOCS; i++) {
+		uint32_t size;
+
+		size = (rand() % 4 + 1) * 0x1000;
+		handles[i] = gem_create(fd, size);
+		intel_allocator_alloc(ahnd, handles[i], size, 0x1000);
+	}
+
+	for (i = 0; i < SIMPLE_GROUP_ALLOCS; i++) {
+		igt_assert_f(intel_allocator_free(ahnd, handles[i]) == 1,
+			     "Error freeing handle: %u\n", handles[i]);
+		gem_close(fd, handles[i]);
+	}
+
+	intel_allocator_close(ahnd);
+}
+
+/*
+ * Minimal multiprocess check: a single forked child performs one
+ * alloc/free round against the allocator running in multiprocess mode.
+ */
+static void fork_simple_once(int fd)
+{
+	intel_allocator_multiprocess_start();
+
+	igt_fork(child, 1)
+		__simple_allocs(fd);
+
+	igt_waitchildren();
+
+	intel_allocator_multiprocess_stop();
+}
+
+/* Duration in seconds of each stress loop. */
+#define SIMPLE_TIMEOUT 5
+/* Thread body: hammers __simple_allocs() until the timeout expires. */
+static void *__fork_simple_thread(void *data)
+{
+	/* fd is smuggled through the void * argument */
+	int fd = (int) (long) data;
+
+	igt_until_timeout(SIMPLE_TIMEOUT) {
+		__simple_allocs(fd);
+	}
+
+	return NULL;
+}
+
+/*
+ * Stress the allocator in multiprocess mode: two threads in the parent plus
+ * eight forked children all run alloc/free rounds for SIMPLE_TIMEOUT
+ * seconds. With @two_level_inception each child additionally spawns two
+ * threads of its own (the thread0/thread1 variables are per-process copies
+ * after fork, so reusing them in the child is safe). Finally verifies both
+ * long-lived allocators close empty.
+ */
+static void fork_simple_stress(int fd, bool two_level_inception)
+{
+	pthread_t thread0, thread1;
+	uint64_t ahnd0, ahnd1;
+	bool are_empty;
+
+	intel_allocator_multiprocess_start();
+
+	/* Keep allocators for ctx 0/1 open for the whole stress run. */
+	ahnd0 = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd1 = intel_allocator_open(fd, 1, INTEL_ALLOCATOR_SIMPLE);
+
+	pthread_create(&thread0, NULL, __fork_simple_thread, (void *) (long) fd);
+	pthread_create(&thread1, NULL, __fork_simple_thread, (void *) (long) fd);
+
+	igt_fork(child, 8) {
+		if (two_level_inception) {
+			pthread_create(&thread0, NULL, __fork_simple_thread,
+				       (void *) (long) fd);
+			pthread_create(&thread1, NULL, __fork_simple_thread,
+				       (void *) (long) fd);
+		}
+
+		igt_until_timeout(SIMPLE_TIMEOUT) {
+			__simple_allocs(fd);
+		}
+
+		if (two_level_inception) {
+			pthread_join(thread0, NULL);
+			pthread_join(thread1, NULL);
+		}
+	}
+	igt_waitchildren();
+
+	pthread_join(thread0, NULL);
+	pthread_join(thread1, NULL);
+
+	are_empty = intel_allocator_close(ahnd0);
+	are_empty &= intel_allocator_close(ahnd1);
+
+	intel_allocator_multiprocess_stop();
+
+	igt_assert_f(are_empty, "Allocators were not emptied\n");
+}
+
+/*
+ * Checks allocator handle identity across drm fds: two different fds get
+ * distinct allocator handles, while opening twice on the same fd (same ctx)
+ * returns the same handle.
+ */
+static void __reopen_allocs(int fd1, int fd2)
+{
+	uint64_t ahnd0, ahnd1, ahnd2;
+
+	ahnd0 = intel_allocator_open(fd1, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd1 = intel_allocator_open(fd2, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd2 = intel_allocator_open(fd2, 0, INTEL_ALLOCATOR_SIMPLE);
+	igt_assert(ahnd0 != ahnd1);
+	igt_assert(ahnd1 == ahnd2);
+
+	/* ahnd1/ahnd2 are refcounted opens of the same allocator. */
+	intel_allocator_close(ahnd0);
+	intel_allocator_close(ahnd1);
+	intel_allocator_close(ahnd2);
+}
+
+/* Single-process variant of the reopen check, using a reopened drm fd. */
+static void reopen(int fd)
+{
+	int fd2;
+
+	igt_require_gem(fd);
+
+	fd2 = gem_reopen_driver(fd);
+
+	__reopen_allocs(fd, fd2);
+
+	close(fd2);
+}
+
+/* Duration in seconds of the forked reopen loops. */
+#define REOPEN_TIMEOUT 3
+/*
+ * Multiprocess variant of reopen: parent and one forked child concurrently
+ * loop over __reopen_allocs() on the same pair of fds for REOPEN_TIMEOUT
+ * seconds while the allocator runs in multiprocess mode.
+ */
+static void reopen_fork(int fd)
+{
+	int fd2;
+
+	igt_require_gem(fd);
+
+	intel_allocator_multiprocess_start();
+
+	fd2 = gem_reopen_driver(fd);
+
+	igt_fork(child, 1) {
+		igt_until_timeout(REOPEN_TIMEOUT)
+			__reopen_allocs(fd, fd2);
+	}
+	igt_until_timeout(REOPEN_TIMEOUT)
+		__reopen_allocs(fd, fd2);
+
+	igt_waitchildren();
+
+	close(fd2);
+
+	intel_allocator_multiprocess_stop();
+}
+
+/* Allocator backends exercised by the dynamic subtests; NULL-terminated. */
+struct allocators {
+	const char *name;	/* subtest name prefix */
+	uint8_t type;		/* INTEL_ALLOCATOR_* backend id */
+} als[] = {
+	{"simple", INTEL_ALLOCATOR_SIMPLE},
+	{"random", INTEL_ALLOCATOR_RANDOM},
+	{NULL, 0},
+};
+
+igt_main
+{
+	int fd;
+	struct allocators *a;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+		/* Start pseudo handles at 1; 0 could be mistaken for "none". */
+		atomic_init(&next_handle, 1);
+		srandom(0xdeadbeef);
+	}
+
+	igt_subtest_f("alloc-simple")
+		alloc_simple(fd);
+
+	igt_subtest_f("reserve-simple")
+		reserve_simple(fd);
+
+	igt_subtest_f("purge-bb")
+		purge_bb(fd);
+
+	/* NOTE(review): "print" just runs a tiny basic_alloc (4 objects);
+	 * presumably meant to exercise debug printout — confirm the name. */
+	igt_subtest_f("print")
+		basic_alloc(fd, 1UL << 2, INTEL_ALLOCATOR_RANDOM);
+
+	igt_subtest_f("reuse")
+		reuse(fd, INTEL_ALLOCATOR_SIMPLE);
+
+	igt_subtest_f("reserve")
+		reserve(fd, INTEL_ALLOCATOR_SIMPLE);
+
+	/* Per-backend dynamic subtests. */
+	for (a = als; a->name; a++) {
+		igt_subtest_with_dynamic_f("%s-allocator", a->name) {
+			igt_dynamic("basic")
+				basic_alloc(fd, 1UL << 8, a->type);
+
+			igt_dynamic("parallel-one")
+				parallel_one(fd, a->type);
+
+			/* reuse/reserve need a stateful backend. */
+			if (a->type != INTEL_ALLOCATOR_RANDOM) {
+				igt_dynamic("reuse")
+					reuse(fd, a->type);
+
+				igt_dynamic("reserve")
+					reserve(fd, a->type);
+			}
+		}
+	}
+
+	igt_subtest_f("fork-simple-once")
+		fork_simple_once(fd);
+
+	igt_subtest_f("fork-simple-stress")
+		fork_simple_stress(fd, false);
+
+	/* Same stress while a helper process keeps signalling us. */
+	igt_subtest_f("fork-simple-stress-signal") {
+		igt_fork_signal_helper();
+		fork_simple_stress(fd, false);
+		igt_stop_signal_helper();
+	}
+
+	igt_subtest_f("two-level-inception")
+		fork_simple_stress(fd, true);
+
+	igt_subtest_f("two-level-inception-interruptible") {
+		igt_fork_signal_helper();
+		fork_simple_stress(fd, true);
+		igt_stop_signal_helper();
+	}
+
+	igt_subtest_f("reopen")
+		reopen(fd);
+
+	igt_subtest_f("reopen-fork")
+		reopen_fork(fd);
+
+	igt_fixture
+		close(fd);
+}
diff --git a/tests/meson.build b/tests/meson.build
index a628b4d28..bdd86ef91 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -110,6 +110,7 @@ test_progs = [
 ]
 
 i915_progs = [
+	'api_intel_allocator',
 	'api_intel_bb',
 	'gen3_mixed_blits',
 	'gen3_render_linear_blits',
-- 
2.26.0

_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev

  parent reply	other threads:[~2021-01-05  8:11 UTC|newest]

Thread overview: 34+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-01-05  8:10 [igt-dev] [PATCH i-g-t v12 00/31] Introduce IGT allocator Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 01/31] lib/igt_list: igt_hlist implementation Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 02/31] lib/igt_map: Introduce igt_map Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 03/31] lib/igt_core: Track child process pid and tid Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 04/31] lib/intel_allocator_simple: Add simple allocator Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 05/31] lib/intel_allocator_random: Add random allocator Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 06/31] lib/intel_allocator: Add intel_allocator core Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 07/31] lib/intel_allocator: Try to stop smoothly instead of deinit Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 08/31] lib/intel_allocator_msgchannel: Scale to 4k of parallel clients Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 09/31] lib/intel_bufops: Removes handle from allocator, change size Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 10/31] lib/intel_bufops: Add init with handle and size function Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 11/31] lib/intel_batchbuffer: Integrate intel_bb with allocator Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 12/31] lib/intel_batchbuffer: Add tracking intel_buf to intel_bb Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 13/31] lib/intel_aux_pgtable: Get addresses for aux table from an allocator Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 14/31] lib/igt_fb: Initialize intel_buf with same size as fb Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 15/31] tests/api_intel_bb: Modify test to verify intel_bb with allocator Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 16/31] tests/api_intel_bb: Add compressed->compressed copy Zbigniew Kempczyński
2021-01-05  8:10 ` Zbigniew Kempczyński [this message]
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 18/31] tests/gem|kms: Remove intel_bb from fixture Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 19/31] tests/gem_mmap_offset: Use intel_buf wrapper code instead direct Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 20/31] tests/gem_ppgtt: Adopt test to use intel_bb with allocator Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 21/31] tests/gem_render_copy_redux: Adopt to use with intel_bb and allocator Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 22/31] tests/perf.c: Remove buffer from batch Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 23/31] tests/gem_linear_blits: Use intel allocator Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 24/31] lib/ioctl_wrappers: Add gem_has_relocations() check Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 25/31] lib/intel_batchbuffer: Use relocations in intel-bb up to gen12 Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 26/31] tests/api_intel_*: Adopt to use relocations as default " Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 27/31] tests/gem_ppgtt: Migrate memory check out of render blits Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 28/31] tests/api_intel_bb: Remove check-canonical test Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 29/31] tests/api_intel_bb: Use allocator in delta-check test Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 30/31] lib/intel_allocator: Separate allocator multiprocess start Zbigniew Kempczyński
2021-01-05  8:10 ` [igt-dev] [PATCH i-g-t v12 31/31] tests/api_intel_allocator: Prepare to run with sanitizer Zbigniew Kempczyński
2021-01-05  9:10 ` [igt-dev] ✓ Fi.CI.BAT: success for Introduce IGT allocator (rev13) Patchwork
2021-01-05 10:10 ` [igt-dev] ✓ Fi.CI.IGT: " Patchwork

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20210105081048.14389-18-zbigniew.kempczynski@intel.com \
    --to=zbigniew.kempczynski@intel.com \
    --cc=chris@chris-wilson.co.uk \
    --cc=igt-dev@lists.freedesktop.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.