From: "Zbigniew Kempczyński" <zbigniew.kempczynski@intel.com>
To: igt-dev@lists.freedesktop.org
Subject: [igt-dev] [PATCH i-g-t 26/35] tests/api_intel_allocator: Simple allocator test suite
Date: Tue, 16 Feb 2021 12:39:58 +0100	[thread overview]
Message-ID: <20210216114007.122175-27-zbigniew.kempczynski@intel.com> (raw)
In-Reply-To: <20210216114007.122175-1-zbigniew.kempczynski@intel.com>

From: Dominik Grzegorzek <dominik.grzegorzek@intel.com>

We want to verify that the allocator works as expected. Exercise its API
and try to stress it.
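
The core flow each subtest exercises is roughly the following (a
simplified sketch using only the helpers called below; fd, handle, size
and alignment stand in for concrete values used by the tests):

	uint64_t ahnd, offset;

	ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
	offset = intel_allocator_alloc(ahnd, handle, size, alignment);
	igt_assert(intel_allocator_is_allocated(ahnd, handle, size, offset));
	igt_assert(intel_allocator_free(ahnd, handle));
	intel_allocator_close(ahnd);

The fork-based subtests additionally wrap this sequence in
intel_allocator_multiprocess_start()/intel_allocator_multiprocess_stop().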

Signed-off-by: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
Signed-off-by: Dominik Grzegorzek <dominik.grzegorzek@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/i915/api_intel_allocator.c | 538 +++++++++++++++++++++++++++++++
 tests/meson.build                |   1 +
 2 files changed, 539 insertions(+)
 create mode 100644 tests/i915/api_intel_allocator.c

diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
new file mode 100644
index 000000000..650c2ff5e
--- /dev/null
+++ b/tests/i915/api_intel_allocator.c
@@ -0,0 +1,538 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <stdatomic.h>
+#include "i915/gem.h"
+#include "igt.h"
+#include "igt_aux.h"
+#include "intel_allocator.h"
+
+#define OBJ_SIZE 1024
+
+struct test_obj {
+	uint32_t handle;
+	uint64_t offset;
+	uint64_t size;
+};
+
+static _Atomic(uint32_t) next_handle;
+
+static inline uint32_t gem_handle_gen(void)
+{
+	return atomic_fetch_add(&next_handle, 1);
+}
+
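+/* Verify alloc/is_allocated/free semantics for a single handle on the simple allocator. */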
+static void alloc_simple(int fd)
+{
+	uint64_t ialh;
+	uint64_t offset0, offset1;
+	bool is_allocated, freed;
+
+	ialh = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+
+	offset0 = intel_allocator_alloc(ialh, 1, 0x1000, 0x1000);
+	offset1 = intel_allocator_alloc(ialh, 1, 0x1000, 0x1000);
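+	/* Allocating the same handle again must return the same offset. */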
+	igt_assert(offset0 == offset1);
+
+	is_allocated = intel_allocator_is_allocated(ialh, 1, 0x1000, offset0);
+	igt_assert(is_allocated);
+
+	freed = intel_allocator_free(ialh, 1);
+	igt_assert(freed);
+
+	is_allocated = intel_allocator_is_allocated(ialh, 1, 0x1000, offset0);
+	igt_assert(!is_allocated);
+
+	freed = intel_allocator_free(ialh, 1);
+	igt_assert(!freed);
+
+	intel_allocator_close(ialh);
+}
+
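+/* Reserve, query and unreserve a single block at the start of the address range. */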
+static void reserve_simple(int fd)
+{
+	uint64_t ialh;
+	uint64_t start;
+	bool reserved, unreserved;
+
+	ialh = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	intel_allocator_get_address_range(ialh, &start, NULL);
+
+	reserved = intel_allocator_reserve(ialh, 0, 0x1000, start);
+	igt_assert(reserved);
+
+	reserved = intel_allocator_is_reserved(ialh, 0x1000, start);
+	igt_assert(reserved);
+
+	reserved = intel_allocator_reserve(ialh, 0, 0x1000, start);
+	igt_assert(!reserved);
+
+	unreserved = intel_allocator_unreserve(ialh, 0, 0x1000, start);
+	igt_assert(unreserved);
+
+	reserved = intel_allocator_is_reserved(ialh, 0x1000, start);
+	igt_assert(!reserved);
+
+	intel_allocator_close(ialh);
+}
+
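+/*
+ * Drive the allocator vtable directly: reservations must not overlap
+ * each other or any currently allocated range.
+ */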
+static void reserve(int fd, uint8_t type)
+{
+	struct intel_allocator *ial;
+	struct test_obj obj;
+
+	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+
+	igt_assert(ial->reserve(ial, 0, 0x40000, 0x800000));
+	/* try to reserve an overlapping range once again */
+	igt_assert_eq(ial->reserve(ial, 0, 0x40040, 0x700000), false);
+
+	obj.handle = gem_handle_gen();
+	obj.size = OBJ_SIZE;
+	obj.offset = ial->alloc(ial, obj.handle, obj.size, 0);
+
+	igt_assert_eq(ial->reserve(ial, 0, obj.offset,
+				obj.offset + obj.size), false);
+	ial->free(ial, obj.handle);
+	igt_assert_eq(ial->reserve(ial, 0, obj.offset,
+				obj.offset + obj.size), true);
+
+	ial->unreserve(ial, 0, obj.offset, obj.offset + obj.size);
+	ial->unreserve(ial, 0, 0x40000, 0x800000);
+	igt_assert(ial->reserve(ial, 0, 0x40040, 0x700000));
+	ial->unreserve(ial, 0, 0x40040, 0x700000);
+
+	igt_assert(ial->is_empty(ial));
+
+	intel_allocator_close(to_user_pointer(ial));
+}
+
+static bool overlaps(struct test_obj *buf1, struct test_obj *buf2)
+{
+	uint64_t begin1 = buf1->offset;
+	uint64_t end1 = buf1->offset + buf1->size;
+	uint64_t begin2 = buf2->offset;
+	uint64_t end2 = buf2->offset + buf2->size;
+
+	return begin1 < end2 && begin2 < end1;
+}
+
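+/*
+ * Allocate 'cnt' page-aligned objects, check alignment and (for
+ * non-random allocators) pairwise non-overlap, then free them all and
+ * expect an empty allocator.
+ */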
+static void basic_alloc(int fd, int cnt, uint8_t type)
+{
+	struct test_obj *obj;
+	struct intel_allocator *ial;
+	int i, j;
+
+	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+	obj = malloc(sizeof(struct test_obj) * cnt);
+
+	for (i = 0; i < cnt; i++) {
+		igt_progress("allocating objects: ", i, cnt);
+		obj[i].handle = gem_handle_gen();
+		obj[i].size = OBJ_SIZE;
+		obj[i].offset = ial->alloc(ial, obj[i].handle,
+					   obj[i].size, 4096);
+		igt_assert_eq(obj[i].offset % 4096, 0);
+	}
+
+	for (i = 0; i < cnt; i++) {
+		igt_progress("check overlapping: ", i, cnt);
+
+		if (type == INTEL_ALLOCATOR_RANDOM)
+			continue;
+
+		for (j = 0; j < cnt; j++) {
+			if (j == i)
+				continue;
+
+			igt_assert(!overlaps(&obj[i], &obj[j]));
+		}
+	}
+
+	for (i = 0; i < cnt; i++) {
+		igt_progress("freeing objects: ", i, cnt);
+		ial->free(ial, obj[i].handle);
+	}
+
+	igt_assert(ial->is_empty(ial));
+
+	free(obj);
+	intel_allocator_close(to_user_pointer(ial));
+}
+
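+/*
+ * Re-allocating a handle must return its previous offset; a freed hole
+ * must be reusable by a different handle.
+ */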
+static void reuse(int fd, uint8_t type)
+{
+	struct test_obj obj[128], tmp;
+	struct intel_allocator *ial;
+	uint64_t prev_offset;
+	int i;
+
+	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+
+	for (i = 0; i < 128; i++) {
+		obj[i].handle = gem_handle_gen();
+		obj[i].size = OBJ_SIZE;
+		obj[i].offset = ial->alloc(ial, obj[i].handle,
+					   obj[i].size, 0x40);
+	}
+
+	/* check simple reuse */
+	for (i = 0; i < 128; i++) {
+		prev_offset = obj[i].offset;
+		obj[i].offset = ial->alloc(ial, obj[i].handle, obj[i].size, 0);
+		igt_assert(prev_offset == obj[i].offset);
+	}
+	i--;
+
+	/* free the bo previously allocated */
+	ial->free(ial, obj[i].handle);
+	/* allocate a different buffer to fill the freed hole */
+	tmp.handle = gem_handle_gen();
+	tmp.offset = ial->alloc(ial, tmp.handle, OBJ_SIZE, 0);
+	igt_assert(prev_offset == tmp.offset);
+
+	obj[i].offset = ial->alloc(ial, obj[i].handle, obj[i].size, 0);
+	igt_assert(prev_offset != obj[i].offset);
+	ial->free(ial, tmp.handle);
+
+	for (i = 0; i < 128; i++)
+		ial->free(ial, obj[i].handle);
+
+	igt_assert(ial->is_empty(ial));
+
+	intel_allocator_close(to_user_pointer(ial));
+}
+
+struct ial_thread_args {
+	struct intel_allocator *ial;
+	pthread_t thread;
+	uint32_t *handles;
+	uint64_t *offsets;
+	uint32_t count;
+	int threads;
+	int idx;
+};
+
+static void *alloc_bo_in_thread(void *arg)
+{
+	struct ial_thread_args *a = arg;
+	int i;
+
+	for (i = a->idx; i < a->count; i += a->threads) {
+		a->handles[i] = gem_handle_gen();
+		pthread_mutex_lock(&a->ial->mutex);
+		a->offsets[i] = a->ial->alloc(a->ial, a->handles[i], OBJ_SIZE,
+					      1UL << ((random() % 20) + 1));
+		pthread_mutex_unlock(&a->ial->mutex);
+	}
+
+	return NULL;
+}
+
+static void *free_bo_in_thread(void *arg)
+{
+	struct ial_thread_args *a = arg;
+	int i;
+
+	for (i = (a->idx + 1) % a->threads; i < a->count; i += a->threads) {
+		pthread_mutex_lock(&a->ial->mutex);
+		a->ial->free(a->ial, a->handles[i]);
+		pthread_mutex_unlock(&a->ial->mutex);
+	}
+
+	return NULL;
+}
+
+#define THREADS 6
+
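+/*
+ * Allocate and free from several threads sharing one allocator, then
+ * verify (for non-random allocators) that offsets are stable and are
+ * released afterwards.
+ */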
+static void parallel_one(int fd, uint8_t type)
+{
+	struct intel_allocator *ial;
+	struct ial_thread_args a[THREADS];
+	uint32_t *handles;
+	uint64_t *offsets;
+	int count, i;
+
+	srandom(0xdeadbeef);
+	ial = from_user_pointer(intel_allocator_open(fd, 0, type));
+	count = 1UL << 12;
+
+	handles = malloc(sizeof(uint32_t) * count);
+	offsets = calloc(count, sizeof(uint64_t));
+
+	for (i = 0; i < THREADS; i++) {
+		a[i].ial = ial;
+		a[i].handles = handles;
+		a[i].offsets = offsets;
+		a[i].count = count;
+		a[i].threads = THREADS;
+		a[i].idx = i;
+		pthread_create(&a[i].thread, NULL, alloc_bo_in_thread, &a[i]);
+	}
+
+	for (i = 0; i < THREADS; i++)
+		pthread_join(a[i].thread, NULL);
+
+	/* Check that all objects are still allocated at their previous offsets */
+	for (i = 0; i < count; i++) {
+		/* The random allocator keeps no state and always returns a different offset */
+		if (type == INTEL_ALLOCATOR_RANDOM)
+			break;
+
+		igt_assert_eq(offsets[i],
+			      ial->alloc(ial, handles[i], OBJ_SIZE, 0));
+	}
+
+	for (i = 0; i < THREADS; i++)
+		pthread_create(&a[i].thread, NULL, free_bo_in_thread, &a[i]);
+
+	for (i = 0; i < THREADS; i++)
+		pthread_join(a[i].thread, NULL);
+
+	/* Check that the offsets previously occupied by the objects are now free */
+	for (i = 0; i < count; i++) {
+		if (type == INTEL_ALLOCATOR_RANDOM)
+			break;
+
+		igt_assert(ial->reserve(ial, 0, offsets[i], offsets[i] + 1));
+	}
+
+	free(handles);
+	free(offsets);
+
+	intel_allocator_close(to_user_pointer(ial));
+}
+
+#define SIMPLE_GROUP_ALLOCS 8
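+/* One round used by the fork tests: open, alloc a few GEM objects, free, close. */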
+static void __simple_allocs(int fd)
+{
+	uint32_t handles[SIMPLE_GROUP_ALLOCS];
+	uint64_t ahnd;
+	uint32_t ctx;
+	int i;
+
+	ctx = rand() % 2;
+	ahnd = intel_allocator_open(fd, ctx, INTEL_ALLOCATOR_SIMPLE);
+
+	for (i = 0; i < SIMPLE_GROUP_ALLOCS; i++) {
+		uint32_t size;
+
+		size = (rand() % 4 + 1) * 0x1000;
+		handles[i] = gem_create(fd, size);
+		intel_allocator_alloc(ahnd, handles[i], size, 0x1000);
+	}
+
+	for (i = 0; i < SIMPLE_GROUP_ALLOCS; i++) {
+		igt_assert_f(intel_allocator_free(ahnd, handles[i]) == 1,
+			     "Error freeing handle: %u\n", handles[i]);
+		gem_close(fd, handles[i]);
+	}
+
+	intel_allocator_close(ahnd);
+}
+
+static void fork_simple_once(int fd)
+{
+	intel_allocator_multiprocess_start();
+
+	igt_fork(child, 1)
+		__simple_allocs(fd);
+
+	igt_waitchildren();
+
+	intel_allocator_multiprocess_stop();
+}
+
+#define SIMPLE_TIMEOUT 5
+static void *__fork_simple_thread(void *data)
+{
+	int fd = (int) (long) data;
+
+	igt_until_timeout(SIMPLE_TIMEOUT) {
+		__simple_allocs(fd);
+	}
+
+	return NULL;
+}
+
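+/*
+ * Stress the allocator in multiprocess mode from threads and forked
+ * children running __simple_allocs() concurrently.
+ */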
+static void fork_simple_stress(int fd, bool two_level_inception)
+{
+	pthread_t thread0, thread1;
+	uint64_t ahnd0, ahnd1;
+	bool are_empty;
+
+	intel_allocator_multiprocess_start();
+
+	ahnd0 = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd1 = intel_allocator_open(fd, 1, INTEL_ALLOCATOR_SIMPLE);
+
+	pthread_create(&thread0, NULL, __fork_simple_thread, (void *) (long) fd);
+	pthread_create(&thread1, NULL, __fork_simple_thread, (void *) (long) fd);
+
+	igt_fork(child, 8) {
+		if (two_level_inception) {
+			pthread_create(&thread0, NULL, __fork_simple_thread,
+				       (void *) (long) fd);
+			pthread_create(&thread1, NULL, __fork_simple_thread,
+				       (void *) (long) fd);
+		}
+
+		igt_until_timeout(SIMPLE_TIMEOUT) {
+			__simple_allocs(fd);
+		}
+
+		if (two_level_inception) {
+			pthread_join(thread0, NULL);
+			pthread_join(thread1, NULL);
+		}
+	}
+	igt_waitchildren();
+
+	pthread_join(thread0, NULL);
+	pthread_join(thread1, NULL);
+
+	are_empty = intel_allocator_close(ahnd0);
+	are_empty &= intel_allocator_close(ahnd1);
+
+	intel_allocator_multiprocess_stop();
+
+	igt_assert_f(are_empty, "Allocators were not emptied\n");
+}
+
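+/*
+ * Different drm fds must get different allocator handles, while reopening
+ * for the same fd must return the same handle.
+ */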
+static void __reopen_allocs(int fd1, int fd2)
+{
+	uint64_t ahnd0, ahnd1, ahnd2;
+
+	ahnd0 = intel_allocator_open(fd1, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd1 = intel_allocator_open(fd2, 0, INTEL_ALLOCATOR_SIMPLE);
+	ahnd2 = intel_allocator_open(fd2, 0, INTEL_ALLOCATOR_SIMPLE);
+	igt_assert(ahnd0 != ahnd1);
+	igt_assert(ahnd1 == ahnd2);
+
+	intel_allocator_close(ahnd0);
+	intel_allocator_close(ahnd1);
+	intel_allocator_close(ahnd2);
+}
+
+static void reopen(int fd)
+{
+	int fd2;
+
+	igt_require_gem(fd);
+
+	fd2 = gem_reopen_driver(fd);
+
+	__reopen_allocs(fd, fd2);
+
+	close(fd2);
+}
+
+#define REOPEN_TIMEOUT 3
+static void reopen_fork(int fd)
+{
+	int fd2;
+
+	igt_require_gem(fd);
+
+	intel_allocator_multiprocess_start();
+
+	fd2 = gem_reopen_driver(fd);
+
+	igt_fork(child, 1) {
+		igt_until_timeout(REOPEN_TIMEOUT)
+			__reopen_allocs(fd, fd2);
+	}
+	igt_until_timeout(REOPEN_TIMEOUT)
+		__reopen_allocs(fd, fd2);
+
+	igt_waitchildren();
+
+	close(fd2);
+
+	intel_allocator_multiprocess_stop();
+}
+
+struct allocators {
+	const char *name;
+	uint8_t type;
+} als[] = {
+	{"simple", INTEL_ALLOCATOR_SIMPLE},
+	{"random", INTEL_ALLOCATOR_RANDOM},
+	{NULL, 0},
+};
+
+igt_main
+{
+	int fd;
+	struct allocators *a;
+
+	igt_fixture {
+		fd = drm_open_driver(DRIVER_INTEL);
+		atomic_init(&next_handle, 1);
+		srandom(0xdeadbeef);
+	}
+
+	igt_subtest_f("alloc-simple")
+		alloc_simple(fd);
+
+	igt_subtest_f("reserve-simple")
+		reserve_simple(fd);
+
+	igt_subtest_f("print")
+		basic_alloc(fd, 1UL << 2, INTEL_ALLOCATOR_RANDOM);
+
+	igt_subtest_f("reuse")
+		reuse(fd, INTEL_ALLOCATOR_SIMPLE);
+
+	igt_subtest_f("reserve")
+		reserve(fd, INTEL_ALLOCATOR_SIMPLE);
+
+	for (a = als; a->name; a++) {
+		igt_subtest_with_dynamic_f("%s-allocator", a->name) {
+			igt_dynamic("basic")
+				basic_alloc(fd, 1UL << 8, a->type);
+
+			igt_dynamic("parallel-one")
+				parallel_one(fd, a->type);
+
+			if (a->type != INTEL_ALLOCATOR_RANDOM) {
+				igt_dynamic("reuse")
+					reuse(fd, a->type);
+
+				igt_dynamic("reserve")
+					reserve(fd, a->type);
+			}
+		}
+	}
+
+	igt_subtest_f("fork-simple-once")
+		fork_simple_once(fd);
+
+	igt_subtest_f("fork-simple-stress")
+		fork_simple_stress(fd, false);
+
+	igt_subtest_f("fork-simple-stress-signal") {
+		igt_fork_signal_helper();
+		fork_simple_stress(fd, false);
+		igt_stop_signal_helper();
+	}
+
+	igt_subtest_f("two-level-inception")
+		fork_simple_stress(fd, true);
+
+	igt_subtest_f("two-level-inception-interruptible") {
+		igt_fork_signal_helper();
+		fork_simple_stress(fd, true);
+		igt_stop_signal_helper();
+	}
+
+	igt_subtest_f("reopen")
+		reopen(fd);
+
+	igt_subtest_f("reopen-fork")
+		reopen_fork(fd);
+
+	igt_fixture
+		close(fd);
+}
diff --git a/tests/meson.build b/tests/meson.build
index 825e01833..061691903 100644
--- a/tests/meson.build
+++ b/tests/meson.build
@@ -111,6 +111,7 @@ test_progs = [
 ]
 
 i915_progs = [
+	'api_intel_allocator',
 	'api_intel_bb',
 	'gen3_mixed_blits',
 	'gen3_render_linear_blits',
-- 
2.26.0
