* [igt-dev] [PATCH i-g-t 2/4] lib/igt_bst: Add a BST interface and an AVL implementation
2021-07-20 14:12 [igt-dev] [PATCH i-g-t v2 0/4] Implement the BST allocator Andrzej Turko
2021-07-20 14:12 ` [igt-dev] [PATCH i-g-t 1/4] lib/intel_allocator: Fix argument names in declarations Andrzej Turko
@ 2021-07-20 14:12 ` Andrzej Turko
2021-07-20 14:12 ` [igt-dev] [PATCH i-g-t 3/4] lib/intel_allocator_bst: Implement the allocator with a BST Andrzej Turko
` (3 subsequent siblings)
5 siblings, 0 replies; 9+ messages in thread
From: Andrzej Turko @ 2021-07-20 14:12 UTC (permalink / raw)
To: igt-dev
This adds an efficient binary search tree implementation
in order to support an allocator with best-fit strategy.
Signed-off-by: Andrzej Turko <andrzej.turko@linux.intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
lib/igt_bst.c | 157 +++++++++++
lib/igt_bst.h | 69 +++++
lib/igt_bst_avl.c | 651 ++++++++++++++++++++++++++++++++++++++++++++++
lib/meson.build | 2 +
4 files changed, 879 insertions(+)
create mode 100644 lib/igt_bst.c
create mode 100644 lib/igt_bst.h
create mode 100644 lib/igt_bst_avl.c
diff --git a/lib/igt_bst.c b/lib/igt_bst.c
new file mode 100644
index 000000000..a24859213
--- /dev/null
+++ b/lib/igt_bst.c
@@ -0,0 +1,157 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "igt_bst.h"
+
+
+/**
+ * igt_bst_insert
+ * @bst: igt_bst pointer
+ * @value: a pointer value being inserted
+ * @key: the key under which the value should be stored
+ *
+ * Adds a new (key, value) element to the bst by delegating
+ * to the implementation behind the interface.
+ *
+ * Returns: a pointer to the node created for the element.
+ */
+void *igt_bst_insert(struct igt_bst *bst, void *value, uint64_t key)
+{
+ void *node = bst->insert(bst, value, key);
+
+ return node;
+}
+
+/**
+ * igt_bst_erase
+ * @bst: igt_bst pointer
+ * @nodeptr: a pointer to a node in the bst
+ *
+ * Removes the indicated node from the tree.
+ *
+ * Returns: the value that was stored in the erased node.
+ */
+void *igt_bst_erase(struct igt_bst *bst, void *nodeptr)
+{
+ void *value = bst->erase(bst, nodeptr);
+
+ return value;
+}
+
+/**
+ * igt_bst_query_successor
+ * @bst: igt_bst pointer
+ * @key: an arbitrary key
+ *
+ * Looks up, among the elements currently stored in the bst,
+ * the one whose key is the smallest key not below @key.
+ *
+ * Returns: the value of that element, or NULL when every key
+ * in the tree is smaller than the @key parameter.
+ */
+void *igt_bst_query_successor(struct igt_bst *bst, uint64_t key)
+{
+ void *value = bst->query_successor(bst, key);
+
+ return value;
+}
+
+/**
+ * igt_bst_query_predecessor
+ * Similar to igt_bst_query_successor, but looks for an
+ * element with the greatest key not exceeding the parameter.
+ */
+void *igt_bst_query_predecessor(struct igt_bst *bst, uint64_t key)
+{
+ return bst->query_predecessor(bst, key);
+}
+
+/**
+ * igt_bst_pop_successor
+ * Like igt_bst_query_successor, but the found element is
+ * also removed from the bst.
+ */
+void *igt_bst_pop_successor(struct igt_bst *bst, uint64_t key)
+{
+ void *value = bst->pop_successor(bst, key);
+
+ return value;
+}
+
+/**
+ * igt_bst_pop_predecessor
+ * Like igt_bst_query_predecessor, but the found element is
+ * also removed from the bst.
+ */
+void *igt_bst_pop_predecessor(struct igt_bst *bst, uint64_t key)
+{
+ void *value = bst->pop_predecessor(bst, key);
+
+ return value;
+}
+
+/* Check whether the bst currently holds any elements. */
+bool igt_bst_empty(struct igt_bst *bst)
+{
+ bool empty = bst->bst_empty(bst);
+
+ return empty;
+}
+
+/* Retrieve the value of the bst element identified by @nodeptr. */
+void *igt_bst_get_value(struct igt_bst *bst, void *nodeptr)
+{
+ void *value = bst->get_value(nodeptr);
+
+ return value;
+}
+
+/* retrieve the key from the element in the bst given by the pointer to its node */
+uint64_t igt_bst_get_key(struct igt_bst *bst, void *nodeptr)
+{
+ return bst->get_key(nodeptr);
+}
+
+/**
+ * igt_bst_leftmost_entry
+ * @bst: igt_bst pointer
+ *
+ * Looks up the element of the bst holding the smallest key.
+ *
+ * Returns: a pointer to that node, or NULL when the bst
+ * is empty.
+ */
+void *igt_bst_leftmost_entry(struct igt_bst *bst)
+{
+ void *node = bst->leftmost_entry(bst);
+
+ return node;
+}
+
+/**
+ * igt_bst_rightmost_entry
+ * See igt_bst_leftmost_entry; this one finds the element
+ * holding the greatest key.
+ */
+void *igt_bst_rightmost_entry(struct igt_bst *bst)
+{
+ void *node = bst->rightmost_entry(bst);
+
+ return node;
+}
+
+/**
+ * igt_bst_next_entry
+ * @bst: igt_bst pointer
+ * @nodeptr: a pointer to a node in the bst
+ *
+ * Finds the next element in the bst in the
+ * nondecreasing order of keys.
+ * Can be used to iterate over all elements.
+ *
+ * Returns: a pointer to the next node in the bst,
+ * NULL if nodeptr points to the last node.
+ */
+void *igt_bst_next_entry(struct igt_bst *bst, void *nodeptr)
+{
+ return bst->next_entry(nodeptr);
+}
+
+/**
+ * igt_bst_prev_entry
+ * Like igt_bst_next_entry, but walks to the previous node.
+ */
+void *igt_bst_prev_entry(struct igt_bst *bst, void *nodeptr)
+{
+ void *prev = bst->prev_entry(nodeptr);
+
+ return prev;
+}
+
+/* Release the bst together with its implementation-private data. */
+void igt_bst_free(struct igt_bst *bst)
+{
+ bst->bst_free(bst);
+}
diff --git a/lib/igt_bst.h b/lib/igt_bst.h
new file mode 100644
index 000000000..f54fca50c
--- /dev/null
+++ b/lib/igt_bst.h
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#ifndef __IGT_BST_H
+#define __IGT_BST_H
+
+#include <stdint.h>
+#include <stdbool.h>
+
+/**
+ * SECTION: igt_bst
+ * @short_description: a binary search tree interface
+ * @title: IGT Bst
+ * @include: igt_bst.h
+ *
+ * This interface operates on binary search trees
+ * which store void pointers ordered by uint64_t keys.
+ */
+
+struct igt_bst {
+
+ /* private structure of the BST */
+ void *priv;
+
+ /* insert a value under a key; returns the new node */
+ void* (*insert)(struct igt_bst *bst, void *value, uint64_t key);
+ /* remove a node; returns the value it stored */
+ void* (*erase)(struct igt_bst *bst, void *nodeptr);
+ /* find-and-remove the element closest to a key from above/below */
+ void* (*pop_successor)(struct igt_bst *bst, uint64_t key);
+ void* (*pop_predecessor)(struct igt_bst *bst, uint64_t key);
+ /* find (without removing) the element closest to a key from above/below */
+ void* (*query_successor)(struct igt_bst *bst, uint64_t key);
+ void* (*query_predecessor)(struct igt_bst *bst, uint64_t key);
+ bool (*bst_empty)(struct igt_bst *bst);
+ /* accessors for a node pointer returned by insert/iteration */
+ void* (*get_value)(void *nodeptr);
+ uint64_t (*get_key)(void *nodeptr);
+ /* extreme elements and in-order iteration */
+ void* (*leftmost_entry)(struct igt_bst *bst);
+ void* (*rightmost_entry)(struct igt_bst *bst);
+ void* (*next_entry)(void *nodeptr);
+ void* (*prev_entry)(void *nodeptr);
+ void (*bst_free)(struct igt_bst *bst);
+ /* consistency self-check; returns the number of elements */
+ int (*validate)(struct igt_bst *bst);
+};
+
+/* constructors */
+struct igt_bst *igt_bst_avl_create(void);
+
+/* iterate over all elements of @bst in nondecreasing order of keys */
+#define igt_bst_for_each_node(bst, nodeptr) \
+ for ((nodeptr) = (bst)->leftmost_entry(bst); (nodeptr); (nodeptr) = (bst)->next_entry(nodeptr))
+
+/* iterate over all elements of @bst in nonincreasing order of keys */
+#define igt_bst_for_each_node_rev(bst, nodeptr) \
+ for ((nodeptr) = (bst)->rightmost_entry(bst); (nodeptr); (nodeptr) = (bst)->prev_entry(nodeptr))
+
+
+void *igt_bst_insert(struct igt_bst *bst, void *value, uint64_t key);
+void *igt_bst_erase(struct igt_bst *bst, void *nodeptr);
+void *igt_bst_query_successor(struct igt_bst *bst, uint64_t key);
+void *igt_bst_query_predecessor(struct igt_bst *bst, uint64_t key);
+void *igt_bst_pop_successor(struct igt_bst *bst, uint64_t key);
+void *igt_bst_pop_predecessor(struct igt_bst *bst, uint64_t key);
+bool igt_bst_empty(struct igt_bst *bst);
+void *igt_bst_get_value(struct igt_bst *bst, void *nodeptr);
+uint64_t igt_bst_get_key(struct igt_bst *bst, void *nodeptr);
+void *igt_bst_leftmost_entry(struct igt_bst *bst);
+void *igt_bst_rightmost_entry(struct igt_bst *bst);
+void *igt_bst_next_entry(struct igt_bst *bst, void *nodeptr);
+void *igt_bst_prev_entry(struct igt_bst *bst, void *nodeptr);
+void igt_bst_free(struct igt_bst *bst);
+
+#endif /* __IGT_BST_H */
diff --git a/lib/igt_bst_avl.c b/lib/igt_bst_avl.c
new file mode 100644
index 000000000..7245a798d
--- /dev/null
+++ b/lib/igt_bst_avl.c
@@ -0,0 +1,651 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include "igt.h"
+#include "igt_bst.h"
+
+//#define AVLDBG
+#ifdef AVLDBG
+#define avl_debug(...) fprintf(stderr, __VA_ARGS__)
+#define avl_assert igt_assert
+#define avl_assert_f igt_assert_f
+#else
+#define avl_debug(...) {}
+#define avl_assert(...) {}
+#define avl_assert_f(...) {}
+#endif
+
+/* A single vertex of the AVL tree. */
+struct igt_avl_node {
+ uint64_t key;
+
+ struct igt_avl_node *left;
+ struct igt_avl_node *right;
+ struct igt_avl_node *parent;
+
+ /* height of the subtree rooted here; 1 for a leaf */
+ int32_t height;
+
+ /* the caller's payload stored under @key */
+ void *value;
+};
+
+/* Private state of the AVL bst: just the root (NULL when empty). */
+struct igt_avl {
+ struct igt_avl_node *root;
+};
+
+/*
+ * Recursively verify the subtree rooted at @node: parent links,
+ * BST ordering of keys, cached heights and the AVL balance bound.
+ *
+ * Returns: the number of nodes in the subtree.
+ */
+static int __traverse_validate(struct igt_avl_node *node)
+{
+ int32_t node_balance, left_h, right_h;
+ int n = 1;
+
+ igt_assert(node);
+
+ left_h = 0;
+ right_h = 0;
+
+ avl_debug("at 0x%llx : < key %llu, value 0x%llx >\nleft 0x%llx ; right 0x%llx\n",
+ (unsigned long long)node,
+ (unsigned long long)node->key,
+ (unsigned long long)node->value,
+ (unsigned long long)node->left,
+ (unsigned long long)node->right);
+
+ if (node->left) {
+ igt_assert(node->left->parent == node);
+ igt_assert_f(node->key >= node->left->key,
+ "The order of keys is not preserved in the AVL tree");
+ left_h = node->left->height;
+ n += __traverse_validate(node->left);
+ }
+
+ if (node->right) {
+ igt_assert(node->right->parent == node);
+ igt_assert_f(node->right->key >= node->key,
+ "The order of keys is not preserved in the AVL tree");
+ right_h = node->right->height;
+ n += __traverse_validate(node->right);
+ }
+
+ /* the AVL invariant: subtree heights differ by at most one */
+ node_balance = left_h - right_h;
+ igt_assert(node_balance <= 1 && node_balance >= -1);
+ igt_assert(node->height == (left_h > right_h ? left_h : right_h) + 1);
+
+ return n;
+}
+
+/* Check the AVL invariants of the whole tree; returns the node count. */
+static int avl_validate(struct igt_bst *bst)
+{
+ struct igt_avl *avl = bst->priv;
+
+ avl_debug("Tree traversal:\n");
+
+ /* an empty tree */
+ if (!avl->root)
+ return 0;
+
+ igt_assert(avl->root->parent == NULL);
+
+ return __traverse_validate(avl->root);
+}
+
+/*
+ * Walk from @node up to the root, restoring the AVL invariant
+ * (|height(left) - height(right)| <= 1) at every visited vertex
+ * with at most two rotations per level, and refreshing the cached
+ * heights. The climb stops early once a vertex's height is
+ * unchanged, because then no ancestor can have gone out of balance.
+ */
+static inline void __avl_fix_balance(struct igt_avl *avl, struct igt_avl_node *node)
+{
+ struct igt_avl_node *left, *right, *l_left, *l_right, *r_left, *r_right;
+ int32_t left_h, right_h, l_left_h, l_right_h, r_left_h, r_right_h;
+ int32_t balance, node_h;
+
+ while (node) {
+
+ node_h = node->height;
+ left = node->left;
+ left_h = (left ? left->height : 0);
+ right = node->right;
+ right_h = (right ? right->height : 0);
+ balance = left_h - right_h;
+
+ avl_debug("Correcting a node with key %llu at 0x%llx (%llu)\n",
+ (unsigned long long)node->key,
+ (unsigned long long)node,
+ *((unsigned long long *)node->value));
+ avl_debug("Heights: %d [%d %d] : balance %d\n",
+ node_h, left_h, right_h, balance);
+
+ if (balance > 1) {
+ avl_assert(balance == 2);
+ avl_assert(left);
+
+ /* 1st case: left son's subtree is too high */
+ l_left = left->left;
+ l_left_h = (l_left ? l_left->height : 0);
+ l_right = left->right;
+ l_right_h = (l_right ? l_right->height : 0);
+
+ if (l_right_h > l_left_h) {
+ avl_assert(l_right_h == right_h + 1);
+ avl_assert(l_left_h == right_h);
+ avl_assert(l_right);
+
+ /*
+ * left son's (left's) right son (l_right) is too high,
+ * rotate the left son and its right son
+ *
+ * N N N
+ * / / /
+ * L (rotate) LR (rename) L
+ * / \ -----> / \ -----> / \
+ * LL LR L LRR LL LR
+ * / \ / \
+ * LRL LRR LL LRL
+ *
+ */
+
+ left->right = l_right->left;
+ if (l_right->left)
+ l_right->left->parent = left;
+ l_right->parent = node;
+ node->left = l_right;
+ l_right->left = left;
+ left->parent = l_right;
+
+ /* rename the pointers to match the post-rotation picture */
+ l_left = left;
+ left = l_right;
+ l_right = l_right->right;
+
+ l_left_h = --l_left->height;
+ l_right_h = (l_right ? l_right->height : 0);
+ left_h = ++left->height;
+ }
+
+ avl_assert(left);
+ avl_assert(left_h == right_h + 2);
+ avl_assert(l_left_h == right_h + 1);
+
+ /*
+ * rotate the node and its left son
+ *
+ * | | |
+ * N (rotate) L (rename) N
+ * / \ -----> / \ -----> / \
+ * L R LL N ... ...
+ * / \ / \
+ * LL LR LR R
+ *
+ */
+
+ node->left = l_right;
+ if (l_right)
+ l_right->parent = node;
+ left->parent = node->parent;
+ if (node->parent) {
+ if (node->parent->left == node)
+ node->parent->left = left;
+ else
+ node->parent->right = left;
+
+ } else {
+ avl->root = left;
+ }
+ node->parent = left;
+ left->right = node;
+
+ node->height = (l_right_h > right_h ? l_right_h : right_h) + 1;
+ left->height = (node->height > l_left_h ? node->height : l_left_h) + 1;
+
+ node = left;
+ }
+
+
+ if (balance < -1) {
+ avl_assert(balance == -2);
+ avl_assert(right);
+
+ /* 2nd case: right son's subtree is too high */
+ r_left = right->left;
+ r_left_h = (r_left ? r_left->height : 0);
+ r_right = right->right;
+ r_right_h = (r_right ? r_right->height : 0);
+
+ if (r_left_h > r_right_h) {
+ avl_assert(r_left_h == left_h + 1);
+ avl_assert(r_right_h == left_h);
+ avl_assert(r_left);
+
+ /*
+ * right son's (right's) left son (r_left) is too high,
+ * rotate the right son and its left son.
+ *
+ * N N N
+ * \ \ \
+ * R (rotate) RL (rename) R
+ * / \ -----> / \ -----> / \
+ * RL RR RLL R RL RR
+ * / \ / \
+ * RLL RLR RLR RR
+ *
+ */
+
+ right->left = r_left->right;
+ if (r_left->right)
+ r_left->right->parent = right;
+ r_left->parent = node;
+ node->right = r_left;
+ r_left->right = right;
+ right->parent = r_left;
+
+ /* rename the pointers to match the post-rotation picture */
+ r_right = right;
+ right = r_left;
+ r_left = r_left->left;
+
+ r_right_h = --r_right->height;
+ r_left_h = (r_left ? r_left->height : 0);
+ right_h = ++right->height;
+ }
+
+ avl_assert(right);
+ avl_assert(right_h == left_h + 2);
+ avl_assert(r_right_h == left_h + 1);
+
+ /*
+ * rotate the node and its right son
+ *
+ * | | |
+ * N (rotate) R (rename) N
+ * / \ -----> / \ -----> / \
+ * L R N RR ... ...
+ * / \ / \
+ * RL RR L RL
+ *
+ */
+
+ node->right = r_left;
+ if (r_left)
+ r_left->parent = node;
+ right->parent = node->parent;
+ if (node->parent) {
+ if (node->parent->left == node)
+ node->parent->left = right;
+ else
+ node->parent->right = right;
+
+ } else {
+ avl->root = right;
+ }
+ node->parent = right;
+ right->left = node;
+
+ node->height = (r_left_h > left_h ? r_left_h : left_h) + 1;
+ right->height = (node->height > r_right_h ? node->height : r_right_h) + 1;
+
+ node = right;
+ }
+
+ if (balance >= -1 && balance <= 1)
+ node->height = (left_h > right_h ? left_h : right_h) + 1;
+
+ avl_assert(node->height - node_h >= -1 && node->height - node_h <= 1);
+
+ /* height unchanged: the ancestors keep their balance, stop early */
+ if (node->height == node_h)
+ break;
+
+ node = node->parent;
+ }
+}
+
+/*
+ * avl_insert:
+ * Insert @value under @key into the AVL tree behind @bst.
+ * Duplicate keys are allowed; on an equal key the new node
+ * descends into the right subtree.
+ *
+ * Returns: a pointer to the newly allocated node.
+ */
+static void *avl_insert(struct igt_bst *bst, void *value, uint64_t key)
+{
+ struct igt_avl *avl = bst->priv;
+ struct igt_avl_node *node, *parent;
+
+ /* standard BST descent to find the attachment point */
+ node = avl->root;
+ parent = NULL;
+ while (node) {
+ parent = node;
+ if (key < node->key)
+ node = node->left;
+ else
+ node = node->right;
+ }
+
+ /* sizeof *node stays correct even if the node type is renamed */
+ node = malloc(sizeof(*node));
+ igt_assert(node);
+ node->key = key;
+ node->parent = parent;
+ node->left = NULL;
+ node->right = NULL;
+ node->height = 1;
+ node->value = value;
+ if (parent) {
+ if (key < parent->key)
+ parent->left = node;
+ else
+ parent->right = node;
+
+ /* the new leaf may have changed heights up the path */
+ __avl_fix_balance(avl, parent);
+ } else {
+ avl->root = node;
+ }
+
+ return node;
+}
+
+/*
+ * Unlink @del_pos (a vertex with at most one child) from the tree
+ * and, when @node differs from @del_pos, splice @del_pos into the
+ * position previously held by @node. Frees @node and rebalances
+ * upwards from the detachment point.
+ */
+static inline void
+__avl_erase_node(struct igt_avl *avl, struct igt_avl_node *node,
+ struct igt_avl_node *del_pos)
+{
+ struct igt_avl_node *parent, *son;
+
+ avl_assert(node);
+ avl_assert(del_pos);
+ avl_assert(!del_pos->left || !del_pos->right);
+ son = (del_pos->left ? del_pos->left : del_pos->right);
+ parent = del_pos->parent;
+
+ avl_debug("Erasing node 0x%llx with position 0x%llx\n",
+ (unsigned long long)node, (unsigned long long)del_pos);
+
+ if (node != del_pos) {
+ /* move a node from the deleted position
+ * to place of the deleted node
+ */
+
+ del_pos->left = node->left;
+ if (del_pos->left)
+ del_pos->left->parent = del_pos;
+ del_pos->right = node->right;
+ if (del_pos->right)
+ del_pos->right->parent = del_pos;
+
+ del_pos->parent = node->parent;
+ if (node->parent) {
+ if (node->parent->left == node)
+ node->parent->left = del_pos;
+ else
+ node->parent->right = del_pos;
+
+ } else {
+ avl->root = del_pos;
+ }
+
+ del_pos->height = node->height;
+
+ /* del_pos was a direct child of node and has now taken
+ * node's place, so rebalancing must start from del_pos
+ */
+ if (parent == node)
+ parent = del_pos;
+
+ }
+
+ if (son)
+ son->parent = parent;
+
+ if (parent) {
+ if (parent->left == del_pos)
+ parent->left = son;
+ else
+ parent->right = son;
+
+ __avl_fix_balance(avl, parent);
+ } else {
+ avl->root = son;
+ }
+
+ avl_assert(del_pos->parent != del_pos);
+
+ free(node);
+}
+
+/*
+ * avl_erase:
+ * Remove the node given by @nodeptr from the tree.
+ *
+ * Returns: the value that was stored in the removed node.
+ */
+static void *avl_erase(struct igt_bst *bst, void *nodeptr)
+{
+ struct igt_avl_node *target, *repl;
+ void *value;
+
+ igt_assert(nodeptr);
+ target = (struct igt_avl_node *) nodeptr;
+ value = target->value;
+
+ /* Replace the node with the rightmost node of its left subtree
+ * (its in-order predecessor); without a left subtree the node
+ * itself is the deletion position.
+ */
+ repl = target->left;
+ if (!repl) {
+ __avl_erase_node(bst->priv, target, target);
+ return value;
+ }
+
+ while (repl->right)
+ repl = repl->right;
+
+ __avl_erase_node(bst->priv, target, repl);
+
+ return value;
+}
+
+/*
+ * avl_pop_successor:
+ * Remove the element with the smallest key >= @key and return its
+ * value, or NULL when every key in the tree is smaller than @key.
+ */
+static void *avl_pop_successor(struct igt_bst *bst, uint64_t key)
+{
+ struct igt_avl *avl = bst->priv;
+ struct igt_avl_node *node, *succ, *parent;
+ void *value;
+
+ node = avl->root;
+ parent = NULL;
+ succ = NULL;
+ while (node) {
+ parent = node;
+ if (node->key >= key) {
+ succ = node;
+ node = node->left;
+ } else {
+ node = node->right;
+ }
+ }
+
+ if (!succ)
+ return NULL;
+
+ value = succ->value;
+ /* After the last update of succ the search went left and then
+ * only right (all later keys are < key), so parent ended up as
+ * the rightmost node of succ's left subtree - or as succ itself
+ * when that subtree is empty. Either way it is a valid
+ * single-child deletion position for __avl_erase_node().
+ */
+ __avl_erase_node(avl, succ, parent);
+
+ return value;
+}
+
+/*
+ * avl_pop_predecessor:
+ * Remove the element with the greatest key <= @key and return its
+ * value, or NULL when every key in the tree is greater than @key.
+ */
+static void *avl_pop_predecessor(struct igt_bst *bst, uint64_t key)
+{
+ struct igt_avl *avl = bst->priv;
+ struct igt_avl_node *node, *parent, *pred;
+ void *value;
+
+ node = avl->root;
+ parent = NULL;
+ pred = NULL;
+ while (node) {
+ parent = node;
+ if (node->key <= key) {
+ pred = node;
+ node = node->right;
+ } else {
+ node = node->left;
+ }
+ }
+
+ if (!pred)
+ return NULL;
+
+ value = pred->value;
+ /* Mirroring avl_pop_successor: parent is the leftmost node of
+ * pred's right subtree, or pred itself when that subtree is
+ * empty - a valid single-child deletion position.
+ */
+ __avl_erase_node(avl, pred, parent);
+
+ return value;
+}
+
+/*
+ * avl_query_successor:
+ * Return the value of the element whose key is the smallest key
+ * that is >= @key, or NULL when no such element exists.
+ */
+static void *avl_query_successor(struct igt_bst *bst, uint64_t key)
+{
+ struct igt_avl *avl = bst->priv;
+ struct igt_avl_node *cur = avl->root;
+ void *best = NULL;
+
+ while (cur) {
+ if (cur->key < key) {
+ cur = cur->right;
+ } else {
+ best = cur->value;
+ cur = cur->left;
+ }
+ }
+
+ return best;
+}
+
+/*
+ * avl_query_predecessor:
+ * Return the value of the element whose key is the greatest key
+ * that is <= @key, or NULL when no such element exists.
+ */
+static void *avl_query_predecessor(struct igt_bst *bst, uint64_t key)
+{
+ struct igt_avl *avl = bst->priv;
+ struct igt_avl_node *cur = avl->root;
+ void *best = NULL;
+
+ while (cur) {
+ if (cur->key > key) {
+ cur = cur->left;
+ } else {
+ best = cur->value;
+ cur = cur->right;
+ }
+ }
+
+ return best;
+}
+
+/* True when the tree holds no elements. */
+static bool avl_bst_empty(struct igt_bst *bst)
+{
+ struct igt_avl *avl = bst->priv;
+
+ return !avl->root;
+}
+
+
+/* Extract the stored value from an AVL node pointer. */
+static void *avl_get_value(void *nodeptr)
+{
+ struct igt_avl_node *node = nodeptr;
+
+ return node->value;
+}
+
+/* Extract the key from an AVL node pointer. */
+static uint64_t avl_get_key(void *nodeptr)
+{
+ struct igt_avl_node *node = nodeptr;
+
+ return node->key;
+}
+
+/* Return the node with the smallest key, NULL for an empty tree. */
+static void *avl_leftmost_entry(struct igt_bst *bst)
+{
+ struct igt_avl *avl = bst->priv;
+ struct igt_avl_node *node = avl->root;
+
+ while (node && node->left)
+ node = node->left;
+
+ return node;
+}
+
+/* Return the node with the greatest key, NULL for an empty tree. */
+static void *avl_rightmost_entry(struct igt_bst *bst)
+{
+ struct igt_avl *avl = bst->priv;
+ struct igt_avl_node *node = avl->root;
+
+ while (node && node->right)
+ node = node->right;
+
+ return node;
+}
+
+/* In-order successor of @nodeptr, NULL if it is the last node. */
+static void *avl_next_entry(void *nodeptr)
+{
+ struct igt_avl_node *cur;
+
+ igt_assert(nodeptr);
+ cur = nodeptr;
+
+ /* with a right subtree the successor is its leftmost node */
+ if (cur->right) {
+ for (cur = cur->right; cur->left; cur = cur->left)
+ ;
+ return cur;
+ }
+
+ /* otherwise climb until we arrive from a left child */
+ for (; cur->parent; cur = cur->parent)
+ if (cur->parent->left == cur)
+ return cur->parent;
+
+ return NULL;
+}
+
+/* In-order predecessor of @nodeptr, NULL if it is the first node. */
+static void *avl_prev_entry(void *nodeptr)
+{
+ struct igt_avl_node *cur;
+
+ igt_assert(nodeptr);
+ cur = nodeptr;
+
+ /* with a left subtree the predecessor is its rightmost node */
+ if (cur->left) {
+ for (cur = cur->left; cur->right; cur = cur->right)
+ ;
+ return cur;
+ }
+
+ /* otherwise climb until we arrive from a right child */
+ for (; cur->parent; cur = cur->parent)
+ if (cur->parent->right == cur)
+ return cur->parent;
+
+ return NULL;
+}
+
+/* Free the whole subtree rooted at @node: left subtrees recursively,
+ * the right spine iteratively.
+ */
+static void __avl_free(struct igt_avl_node *node)
+{
+ avl_assert(node);
+
+ while (node) {
+ struct igt_avl_node *right = node->right;
+
+ if (node->left)
+ __avl_free(node->left);
+
+ free(node);
+ node = right;
+ }
+}
+
+/* Destroy the AVL-backed bst: all nodes plus the private struct. */
+static void avl_bst_free(struct igt_bst *bst)
+{
+ struct igt_avl *priv = bst->priv;
+
+ if (priv->root)
+ __avl_free(priv->root);
+
+ free(priv);
+}
+
+/* Construct an igt_bst interface backed by an empty AVL tree. */
+struct igt_bst *igt_bst_avl_create(void)
+{
+ struct igt_avl *avl;
+ struct igt_bst *bst;
+
+ avl = malloc(sizeof(struct igt_avl));
+ bst = malloc(sizeof(struct igt_bst));
+ igt_assert(avl);
+ igt_assert(bst);
+
+ avl->root = NULL;
+
+ *bst = (struct igt_bst) {
+ .priv = avl,
+ .insert = avl_insert,
+ .erase = avl_erase,
+ .pop_successor = avl_pop_successor,
+ .pop_predecessor = avl_pop_predecessor,
+ .query_successor = avl_query_successor,
+ .query_predecessor = avl_query_predecessor,
+ .bst_empty = avl_bst_empty,
+ .get_value = avl_get_value,
+ .get_key = avl_get_key,
+ .leftmost_entry = avl_leftmost_entry,
+ .rightmost_entry = avl_rightmost_entry,
+ .next_entry = avl_next_entry,
+ .prev_entry = avl_prev_entry,
+ .bst_free = avl_bst_free,
+ .validate = avl_validate,
+ };
+
+ return bst;
+}
diff --git a/lib/meson.build b/lib/meson.build
index 67d405129..a1aa0ee12 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -11,6 +11,8 @@ lib_sources = [
'i915/gem_mman.c',
'i915/gem_vm.c',
'i915/intel_memory_region.c',
+ 'igt_bst.c',
+ 'igt_bst_avl.c',
'igt_collection.c',
'igt_color_encoding.c',
'igt_debugfs.c',
--
2.25.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [igt-dev] [PATCH i-g-t 3/4] lib/intel_allocator_bst: Implement the allocator with a BST
2021-07-20 14:12 [igt-dev] [PATCH i-g-t v2 0/4] Implement the BST allocator Andrzej Turko
2021-07-20 14:12 ` [igt-dev] [PATCH i-g-t 1/4] lib/intel_allocator: Fix argument names in declarations Andrzej Turko
2021-07-20 14:12 ` [igt-dev] [PATCH i-g-t 2/4] lib/igt_bst: Add a BST interface and an AVL implementation Andrzej Turko
@ 2021-07-20 14:12 ` Andrzej Turko
2021-07-20 14:12 ` [igt-dev] [PATCH i-g-t 4/4] tests/i915_api_intel_allocator: Add the BST allocator Andrzej Turko
` (2 subsequent siblings)
5 siblings, 0 replies; 9+ messages in thread
From: Andrzej Turko @ 2021-07-20 14:12 UTC (permalink / raw)
To: igt-dev
This implements an alternative to the simple allocator.
The BST allocator follows best-fit strategy in order to
use the address space more effectively.
The use of a binary search tree speeds up finding free
blocks of memory (compared to a list).
Signed-off-by: Andrzej Turko <andrzej.turko@linux.intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
lib/intel_allocator.c | 7 +
lib/intel_allocator.h | 1 +
lib/intel_allocator_bst.c | 672 ++++++++++++++++++++++++++++++++++++++
lib/meson.build | 1 +
4 files changed, 681 insertions(+)
create mode 100644 lib/intel_allocator_bst.c
diff --git a/lib/intel_allocator.c b/lib/intel_allocator.c
index 133176ed4..2ef487a05 100644
--- a/lib/intel_allocator.c
+++ b/lib/intel_allocator.c
@@ -71,6 +71,9 @@ intel_allocator_random_create(int fd, uint64_t start, uint64_t end);
struct intel_allocator *
intel_allocator_simple_create(int fd, uint64_t start, uint64_t end,
enum allocator_strategy strategy);
+struct intel_allocator *
+intel_allocator_bst_create(int fd, uint64_t start, uint64_t end,
+ enum allocator_strategy strategy);
/*
* Instead of trying to find first empty handle just get new one. Assuming
@@ -311,6 +314,10 @@ static struct intel_allocator *intel_allocator_create(int fd,
ial = intel_allocator_simple_create(fd, start, end,
allocator_strategy);
break;
+ case INTEL_ALLOCATOR_BST:
+ ial = intel_allocator_bst_create(fd, start, end,
+ allocator_strategy);
+ break;
default:
igt_assert_f(ial, "Allocator type %d not implemented\n",
allocator_type);
diff --git a/lib/intel_allocator.h b/lib/intel_allocator.h
index 7d9d01123..93ecbc3e4 100644
--- a/lib/intel_allocator.h
+++ b/lib/intel_allocator.h
@@ -211,6 +211,7 @@ void intel_allocator_print(uint64_t allocator_handle);
#define INTEL_ALLOCATOR_RELOC 1
#define INTEL_ALLOCATOR_RANDOM 2
#define INTEL_ALLOCATOR_SIMPLE 3
+#define INTEL_ALLOCATOR_BST 4
#define GEN8_GTT_ADDRESS_WIDTH 48
diff --git a/lib/intel_allocator_bst.c b/lib/intel_allocator_bst.c
new file mode 100644
index 000000000..c9c97e65e
--- /dev/null
+++ b/lib/intel_allocator_bst.c
@@ -0,0 +1,672 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include "igt.h"
+#include "igt_map.h"
+#include "igt_bst.h"
+#include "intel_allocator.h"
+
+/* The addresses of allocated objects will be
+ * strictly smaller than this value (the upper
+ * end of the address range cannot exceed it).
+ * This is to ensure there are no overflows
+ * and prevent the existence of a hole or
+ * block of size 2^64.
+ *
+ * Thanks to this -1 can be returned from alloc
+ * to indicate that allocator has failed to find
+ * enough space for the object.
+ *
+ * IALB_MAX_ADDR can be set to any positive
+ * integer smaller than 2^64.
+ */
+#define IALB_MAX_ADDR (1ull<<63)
+
+#define RESERVED 4096
+
+/* Avoid compilation warning */
+struct intel_allocator *
+intel_allocator_bst_create(int fd, uint64_t start, uint64_t end,
+ enum allocator_strategy strategy);
+
+struct intel_allocator_bst {
+ /* allocated objects, looked up by handle */
+ struct igt_map *objects;
+ /* reserved areas, looked up by offset */
+ struct igt_map *reserved;
+ /* free holes indexed two ways: by size and by offset */
+ struct igt_bst *by_size;
+ struct igt_bst *by_offset;
+
+ uint64_t start;
+ uint64_t end;
+
+ /* statistics */
+ uint64_t total_size;
+ uint64_t allocated_size;
+ uint64_t allocated_objects;
+ uint64_t reserved_size;
+ uint64_t reserved_areas;
+};
+
+/* A contiguous run of free address space. */
+struct intel_allocator_bst_vma_hole {
+
+ /* pointer to the node in the by_size bst */
+ void *size_ptr;
+
+ /* pointer to the node in the by_offset bst */
+ void *offset_ptr;
+
+ uint64_t size;
+ uint64_t offset;
+};
+
+/* Bookkeeping entry for one allocated object. */
+struct intel_allocator_record {
+ uint32_t handle;
+ uint64_t offset;
+ uint64_t size;
+};
+
+/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
+#define GOLDEN_RATIO_PRIME_32 0x9e370001UL
+
+/* 2^63 + 2^61 - 2^57 + 2^54 - 2^51 - 2^18 + 1 */
+#define GOLDEN_RATIO_PRIME_64 0x9e37fffffffc0001ULL
+
+/* Multiplicative hash of a 32-bit handle, for igt_map. */
+static inline uint32_t hash_handles(const void *val)
+{
+ const uint32_t *handle = val;
+
+ return *handle * GOLDEN_RATIO_PRIME_32;
+}
+
+/* Equality of two 32-bit handles, for igt_map. */
+static int equal_handles(const void *a, const void *b)
+{
+ return *(const uint32_t *) a == *(const uint32_t *) b;
+}
+
+/* Multiplicative hash of a 64-bit offset, for igt_map. */
+static inline uint32_t hash_offsets(const void *val)
+{
+ const uint64_t *offset = val;
+
+ /* High bits are more random, so use them. */
+ return (*offset * GOLDEN_RATIO_PRIME_64) >> 32;
+}
+
+/* Equality of two 64-bit offsets, for igt_map. */
+static int equal_offsets(const void *a, const void *b)
+{
+ return *(const uint64_t *) a == *(const uint64_t *) b;
+}
+
+/* igt_map destructor callback: frees the record stored in an entry. */
+static void map_entry_free_func(struct igt_map_entry *entry)
+{
+ free(entry->data);
+}
+
+/*
+ * Consistency check of the hole bookkeeping: both search trees must
+ * hold the same number of holes, every hole must be indexed under
+ * its current offset and size, and no two holes may touch or
+ * overlap (adjacent holes are always merged on free).
+ */
+static void holes_bst_validate(struct intel_allocator_bst *ialb)
+{
+ struct igt_bst *offset_bst, *size_bst;
+ struct intel_allocator_bst_vma_hole *hole;
+ void *nodeptr;
+ int by_offset_size, by_size_size;
+ uint64_t prev_offset;
+
+ offset_bst = ialb->by_offset;
+ size_bst = ialb->by_size;
+ by_offset_size = offset_bst->validate(offset_bst);
+ by_size_size = size_bst->validate(size_bst);
+ igt_assert(by_offset_size == by_size_size);
+
+ prev_offset = IALB_MAX_ADDR;
+ igt_bst_for_each_node_rev(offset_bst, nodeptr) {
+ hole = igt_bst_get_value(offset_bst, nodeptr);
+ igt_assert(hole);
+ /* Make sure the hole is stored under correct keys in
+ * the balanced search trees.
+ */
+ igt_assert(hole->offset == igt_bst_get_key(offset_bst, hole->offset_ptr));
+ igt_assert(hole->size == igt_bst_get_key(size_bst, hole->size_ptr));
+
+ igt_assert(hole->offset + hole->size > hole->offset);
+ /* Make sure no pair of holes is adjacent. */
+ igt_assert(prev_offset > hole->offset + hole->size);
+
+ prev_offset = hole->offset;
+ }
+}
+
+/*
+ * Carve [offset, offset + size) out of @hole, which must fully
+ * contain that range. Depending on which ends of the hole survive
+ * this splits the hole in two, moves it, shrinks it, or removes it
+ * entirely, keeping the by_offset and by_size trees in sync.
+ */
+static void __intel_allocator_bst_alloc(struct intel_allocator_bst *ialb,
+ struct intel_allocator_bst_vma_hole *hole,
+ uint64_t offset, uint64_t size)
+{
+ struct igt_bst *offset_bst, *size_bst;
+ struct intel_allocator_bst_vma_hole *newhole;
+ uint64_t lower, higher;
+
+ igt_assert(hole->offset <= offset);
+ igt_assert(hole->offset + hole->size >= offset + size);
+
+ offset_bst = ialb->by_offset;
+ size_bst = ialb->by_size;
+
+ /* free space remaining below and above the allocated range */
+ lower = offset - hole->offset;
+ higher = hole->size - size - lower;
+
+ if (lower > 0 && higher > 0) {
+ /* There are leftovers on both lower and higher addresses.
+ * Create a hole for higher addresses from scratch and
+ * one for lower addresses by modifying the existing hole.
+ */
+
+ newhole = malloc(sizeof(struct intel_allocator_bst_vma_hole));
+ igt_assert(newhole);
+
+ newhole->offset = offset + size;
+ newhole->size = higher;
+
+ newhole->offset_ptr = igt_bst_insert(offset_bst, newhole, newhole->offset);
+ newhole->size_ptr = igt_bst_insert(size_bst, newhole, newhole->size);
+
+ hole->size = lower;
+ igt_bst_erase(size_bst, hole->size_ptr);
+ hole->size_ptr = igt_bst_insert(size_bst, hole, hole->size);
+
+ } else if (higher > 0) {
+ /* There is no free address space in this hole left
+ * below the allocated object. Only a portion of
+ * higher addresses is still free, so just adjust the
+ * existing hole accordingly (change offset and size).
+ */
+
+ hole->offset = offset + size;
+ hole->size = higher;
+
+ igt_bst_erase(offset_bst, hole->offset_ptr);
+ igt_bst_erase(size_bst, hole->size_ptr);
+
+ hole->offset_ptr = igt_bst_insert(offset_bst, hole, hole->offset);
+ hole->size_ptr = igt_bst_insert(size_bst, hole, hole->size);
+
+ } else if (lower > 0) {
+ /* The allocated block is aligned to the end of the
+ * hole and the lower addresses are still free,
+ * so shrink the existing hole (change size,
+ * but not the offset).
+ */
+
+ hole->size = lower;
+ igt_bst_erase(size_bst, hole->size_ptr);
+ hole->size_ptr = igt_bst_insert(size_bst, hole, hole->size);
+
+ } else {
+ /* There are no leftovers, just erase the hole. */
+
+ igt_bst_erase(offset_bst, hole->offset_ptr);
+ igt_bst_erase(size_bst, hole->size_ptr);
+ free(hole);
+ }
+}
+
+/*
+ * Return [offset, offset + size) to the free pool, coalescing with
+ * any hole that ends exactly at @offset and/or begins exactly at
+ * the end of the freed range, so that no two holes are ever
+ * adjacent in the address space.
+ */
+static void __intel_allocator_bst_add_hole(struct intel_allocator_bst *ialb,
+ uint64_t offset, uint64_t size)
+{
+ struct igt_bst *offset_bst, *size_bst;
+ struct intel_allocator_bst_vma_hole *hole, *nxt, *prv;
+
+ offset_bst = ialb->by_offset;
+ size_bst = ialb->by_size;
+
+ /* closest hole below; keep it only if it touches the new range */
+ prv = igt_bst_query_predecessor(offset_bst, offset);
+ if (prv && prv->offset + prv->size < offset)
+ prv = NULL;
+
+ /* closest hole above; keep it only if it touches the new range */
+ nxt = igt_bst_query_successor(offset_bst, offset);
+ if (nxt && nxt->offset > offset + size)
+ nxt = NULL;
+
+ if (nxt && prv) {
+ /* There are holes adjacent to the new one
+ * both below and above it in the address space.
+ * Merge them all into a single hole:
+ * erase the upper and increase size of the
+ * lower one.
+ */
+ prv->size += size + nxt->size;
+
+ igt_bst_erase(offset_bst, nxt->offset_ptr);
+ igt_bst_erase(size_bst, nxt->size_ptr);
+ free(nxt);
+
+ igt_bst_erase(size_bst, prv->size_ptr);
+ prv->size_ptr = igt_bst_insert(size_bst, prv, prv->size);
+
+ } else if (nxt) {
+ /* The only adjacent hole is above the new one,
+ * so increase its size and update its offset.
+ */
+ nxt->size += size;
+ nxt->offset = offset;
+
+ igt_bst_erase(offset_bst, nxt->offset_ptr);
+ igt_bst_erase(size_bst, nxt->size_ptr);
+
+ nxt->offset_ptr = igt_bst_insert(offset_bst, nxt, nxt->offset);
+ nxt->size_ptr = igt_bst_insert(size_bst, nxt, nxt->size);
+
+ } else if (prv) {
+ /* The only adjacent hole is below the new one,
+ * just increase its size.
+ */
+ prv->size += size;
+ igt_bst_erase(size_bst, prv->size_ptr);
+ prv->size_ptr = igt_bst_insert(size_bst, prv, prv->size);
+
+ } else {
+ /* There are no holes neighbouring the new one,
+ * so just create a new hole.
+ */
+ hole = malloc(sizeof(struct intel_allocator_bst_vma_hole));
+ igt_assert(hole);
+
+ hole->offset = offset;
+ hole->size = size;
+ hole->offset_ptr = igt_bst_insert(offset_bst, hole, offset);
+ hole->size_ptr = igt_bst_insert(size_bst, hole, size);
+
+ }
+}
+
+
+/* Report the address range managed by this allocator; either
+ * output pointer may be NULL when the caller is not interested.
+ */
+static void intel_allocator_bst_get_address_range(struct intel_allocator *ial,
+ uint64_t *startp, uint64_t *endp)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+
+ if (startp)
+ *startp = ialb->start;
+
+ if (endp)
+ *endp = ialb->end;
+}
+
+/*
+ * Allocate an aligned block of @size bytes for @handle using a best-fit
+ * search over the size tree. Re-allocating an existing handle returns its
+ * current offset (after verifying size/alignment still match). Returns
+ * ALLOC_INVALID_ADDRESS when no suitable hole exists.
+ */
+static uint64_t intel_allocator_bst_alloc(struct intel_allocator *ial, uint32_t handle,
+ uint64_t size, uint64_t alignment,
+ enum allocator_strategy strategy)
+{
+ struct intel_allocator_bst *ialb = ial->priv;
+ struct intel_allocator_record *record;
+ struct intel_allocator_bst_vma_hole *hole;
+ uint64_t misalignment, offset;
+
+ igt_assert(size > 0);
+ /* Treat "no alignment" as byte alignment. */
+ alignment = (alignment > 0 ? alignment : 1);
+
+ record = igt_map_search(ialb->objects, &handle);
+
+ if (record) {
+ /* This block has already been allocated,
+ * check that it satisfies the requirements.
+ */
+
+ igt_assert(size == record->size);
+ igt_assert(record->offset % alignment == 0);
+
+ return record->offset;
+ }
+
+ /* Look for a slightly bigger hole to make sure
+ * the block can be aligned properly.
+ * NOTE(review): size + alignment - 1 can wrap for very large
+ * alignments — confirm callers keep alignment bounded.
+ */
+ hole = igt_bst_query_successor(ialb->by_size,
+ size + alignment - 1);
+
+ /* Look for a smaller hole, maybe proper alignment
+ * will still be possible.
+ */
+ if (!hole)
+ hole = igt_bst_query_successor(ialb->by_size, size);
+
+ if (!hole) {
+ igt_warn("Failed to allocate: not enough memory\n");
+ return ALLOC_INVALID_ADDRESS;
+ }
+
+ if (strategy == ALLOC_STRATEGY_NONE)
+ strategy = ial->strategy;
+
+ switch (strategy) {
+ case ALLOC_STRATEGY_LOW_TO_HIGH:
+ /* Round the hole's start up to the next multiple of
+ * alignment; misalignment is the padding needed below it.
+ */
+ misalignment = alignment - (hole->offset % alignment);
+ misalignment = misalignment == alignment ? 0 : misalignment;
+ offset = hole->offset + misalignment;
+
+ /* Padding plus the block itself must still fit. */
+ if (misalignment + size > hole->size) {
+ igt_warn("Failed to allocate: not enough memory\n");
+ return ALLOC_INVALID_ADDRESS;
+ }
+ break;
+ case ALLOC_STRATEGY_HIGH_TO_LOW:
+ /* Place the block at the top of the hole, then round its
+ * start down to an alignment boundary.
+ */
+ offset = hole->offset + hole->size - size;
+ offset = offset - offset % alignment;
+
+ /* Rounding down may push the block below the hole. */
+ if (offset < hole->offset) {
+ igt_warn("Failed to allocate: not enough memory\n");
+ return ALLOC_INVALID_ADDRESS;
+ }
+ break;
+ default:
+ /* Unreachable for the supported strategies. */
+ igt_assert_f(0, "Unsupported strategy.");
+ }
+
+ /* Carve [offset, offset + size) out of the chosen hole. */
+ __intel_allocator_bst_alloc(ialb, hole, offset, size);
+
+ record = malloc(sizeof(struct intel_allocator_record));
+ igt_assert(record);
+ record->handle = handle;
+ record->offset = offset;
+ record->size = size;
+
+ igt_map_insert(ialb->objects, &record->handle, record);
+ ialb->allocated_objects++;
+ ialb->allocated_size += size;
+
+ return offset;
+}
+
+/* Check whether @handle is currently allocated with exactly the given
+ * size and offset; any mismatch (or an unknown handle) yields false.
+ */
+static bool intel_allocator_bst_is_allocated(struct intel_allocator *ial, uint32_t handle,
+					     uint64_t size, uint64_t offset)
+{
+	const struct intel_allocator_bst *ialb = ial->priv;
+	const struct intel_allocator_record *rec;
+
+	rec = igt_map_search(ialb->objects, &handle);
+	if (!rec)
+		return false;
+
+	return rec->handle == handle && rec->size == size &&
+	       rec->offset == offset;
+}
+
+/* Release the block owned by @handle: its address range goes back into
+ * the hole trees and the record is removed from the object map.
+ * Returns false if the handle is unknown.
+ */
+static bool intel_allocator_bst_free(struct intel_allocator *ial, uint32_t handle)
+{
+	struct intel_allocator_bst *ialb = ial->priv;
+	struct intel_allocator_record *rec = igt_map_search(ialb->objects, &handle);
+
+	if (!rec)
+		return false;
+
+	__intel_allocator_bst_add_hole(ialb, rec->offset, rec->size);
+	ialb->allocated_size -= rec->size;
+	ialb->allocated_objects--;
+	igt_map_remove(ialb->objects, &handle, map_entry_free_func);
+
+	return true;
+}
+
+/*
+ * Reserve the exact range [start, end) for @handle. The range must lie
+ * within the allocator's address space and be fully contained in a
+ * single free hole; otherwise the reservation fails and false is
+ * returned. On success the range is recorded in the reserved map,
+ * keyed by its start offset.
+ */
+static bool intel_allocator_bst_reserve(struct intel_allocator *ial, uint32_t handle,
+					uint64_t start, uint64_t end)
+{
+	struct intel_allocator_bst *ialb = ial->priv;
+	struct intel_allocator_bst_vma_hole *hole;
+	struct intel_allocator_record *record;
+
+	igt_assert(start < end);
+	igt_assert(start >= ialb->start);
+	igt_assert(end <= ialb->end);
+
+	/* The hole covering 'start' is the one with the greatest offset
+	 * not exceeding 'start'.
+	 */
+	hole = igt_bst_query_predecessor(ialb->by_offset, start);
+	if (!hole)
+		return false;
+
+	igt_assert(hole->offset <= start);
+
+	/* The whole [start, end) range must fit inside this one hole. */
+	if (hole->offset + hole->size < end)
+		return false;
+
+	__intel_allocator_bst_alloc(ialb, hole, start, end - start);
+
+	/* Fix: allocate a record, not a vma_hole — the previous
+	 * sizeof(struct intel_allocator_bst_vma_hole) could under-allocate
+	 * if the two structs ever diverge in size.
+	 */
+	record = malloc(sizeof(*record));
+	igt_assert(record);
+	record->offset = start;
+	record->size = end - start;
+	record->handle = handle;
+	igt_map_insert(ialb->reserved, &record->offset, record);
+	ialb->reserved_areas++;
+	ialb->reserved_size += end - start;
+
+	return true;
+}
+
+/* Drop the reservation [start, end) held by @handle. The caller's
+ * description must exactly match the stored record (existence, size,
+ * offset and handle are each validated, with a warning naming the
+ * first mismatch) before the range is returned to the hole trees.
+ */
+static bool intel_allocator_bst_unreserve(struct intel_allocator *ial, uint32_t handle,
+					  uint64_t start, uint64_t end)
+{
+	struct intel_allocator_bst *ialb = ial->priv;
+	struct intel_allocator_record *rec = igt_map_search(ialb->reserved, &start);
+	const uint64_t length = end - start;
+
+	if (!rec) {
+		igt_warn("Only a reserved block can be unreserved.\n");
+		return false;
+	}
+
+	if (rec->size != length) {
+		igt_warn("Size of the block does not match the passed value.\n");
+		return false;
+	}
+
+	if (rec->offset != start) {
+		igt_warn("Offset of the block does not match the passed value.\n");
+		return false;
+	}
+
+	if (rec->handle != handle) {
+		igt_warn("Handle %u does not match the passed handle %u.\n",
+			 rec->handle, handle);
+		return false;
+	}
+
+	__intel_allocator_bst_add_hole(ialb, start, length);
+	ialb->reserved_areas--;
+	ialb->reserved_size -= rec->size;
+	igt_map_remove(ialb->reserved, &start, map_entry_free_func);
+
+	return true;
+}
+
+/* True iff [start, end) exactly matches an existing reservation
+ * (looked up by its start offset).
+ */
+static bool intel_allocator_bst_is_reserved(struct intel_allocator *ial,
+					    uint64_t start, uint64_t end)
+{
+	const struct intel_allocator_bst *ialb = ial->priv;
+	const struct intel_allocator_record *rec;
+
+	igt_assert(start < end);
+
+	rec = igt_map_search(ialb->reserved, &start);
+	return rec && rec->offset == start && rec->size == end - start;
+}
+
+/* Tear down the allocator: free every hole (each hole is linked into
+ * both trees, so it is freed exactly once via the offset tree), then
+ * the trees themselves, both maps and finally the allocator structs.
+ */
+static void intel_allocator_bst_destroy(struct intel_allocator *ial)
+{
+	struct intel_allocator_bst *ialb = ial->priv;
+	struct igt_bst *by_size = ialb->by_size;
+	struct igt_bst *by_offset = ialb->by_offset;
+	void *nodeptr;
+
+	igt_bst_for_each_node(by_offset, nodeptr)
+		free(by_offset->get_value(nodeptr));
+
+	igt_bst_free(by_size);
+	igt_bst_free(by_offset);
+	free(by_offset);
+	free(by_size);
+
+	igt_map_destroy(ialb->objects, map_entry_free_func);
+	igt_map_destroy(ialb->reserved, map_entry_free_func);
+
+	free(ialb);
+	free(ial);
+}
+
+/* Empty means no live allocations and no reservations remain. */
+static bool intel_allocator_bst_is_empty(struct intel_allocator *ial)
+{
+	const struct intel_allocator_bst *ialb = ial->priv;
+
+	return !ialb->allocated_objects && !ialb->reserved_areas;
+}
+
+/*
+ * Print the allocator state: the managed range, optionally (@full) a
+ * dump of every hole (by offset and by size), every allocated object
+ * and every reserved area, and finally a free/allocated/reserved
+ * summary. Also validates the hole trees.
+ */
+static void intel_allocator_bst_print(struct intel_allocator *ial, bool full)
+{
+	struct intel_allocator_bst *ialb = ial->priv;
+	struct intel_allocator_record *record;
+	struct intel_allocator_bst_vma_hole *hole;
+	struct igt_bst *offset_bst, *size_bst;
+	struct igt_map_entry *entry;
+	void *nodeptr;
+	uint64_t total_free;
+
+	igt_info("intel_allocator_bst <ial: %p, fd: %d> on "
+		 "[0x%"PRIx64" : 0x%"PRIx64"]\n", ial,
+		 ial->fd, ialb->start, ialb->end);
+
+	total_free = 0;
+	if (full) {
+		offset_bst = ialb->by_offset;
+		size_bst = ialb->by_size;
+
+		igt_info("holes by offset:\n");
+		igt_bst_for_each_node(offset_bst, nodeptr) {
+			hole = igt_bst_get_value(offset_bst, nodeptr);
+			igt_info("offset = %"PRIu64" (0x%"PRIx64") "
+				 "size = %"PRIu64" (0x%"PRIx64")\n",
+				 hole->offset, hole->offset, hole->size,
+				 hole->size);
+			total_free += hole->size;
+		}
+
+		igt_info("holes by size:\n");
+		igt_bst_for_each_node(size_bst, nodeptr) {
+			hole = igt_bst_get_value(size_bst, nodeptr);
+			igt_info("offset = %"PRIu64" (0x%"PRIx64") "
+				 "size = %"PRIu64" (0x%"PRIx64")\n",
+				 hole->offset, hole->offset, hole->size,
+				 hole->size);
+		}
+
+		/* Fix: handle is uint32_t, print with %u; the hex offset
+		 * now has its closing parenthesis.
+		 */
+		igt_info("allocated objects:\n");
+		igt_map_foreach(ialb->objects, entry) {
+			record = entry->data;
+			igt_info("handle = %u, offset = %"PRIu64" "
+				 "(0x%"PRIx64"), size = %"PRIu64" (0x%"PRIx64")\n",
+				 record->handle, record->offset, record->offset,
+				 record->size, record->size);
+		}
+
+		igt_info("reserved areas:\n");
+		igt_map_foreach(ialb->reserved, entry) {
+			record = entry->data;
+			igt_info("handle = %u, offset = %"PRIu64" "
+				 "(0x%"PRIx64"), size = %"PRIu64" (0x%"PRIx64")\n",
+				 record->handle, record->offset, record->offset,
+				 record->size, record->size);
+		}
+
+	} else {
+		offset_bst = ialb->by_offset;
+
+		/* Fix: the return value was discarded and 'hole' was then
+		 * read uninitialized; assign it before dereferencing.
+		 */
+		igt_bst_for_each_node(offset_bst, nodeptr) {
+			hole = igt_bst_get_value(offset_bst, nodeptr);
+			total_free += hole->size;
+		}
+	}
+
+	/* Fix: the trailing arguments now match the format string
+	 * (areas, size, size); reserved_areas was previously passed twice
+	 * in place of reserved_size.
+	 */
+	igt_info("free space: %"PRIu64"B (0x%"PRIx64") (%.2f%% full)\n"
+		 "allocated objects: %"PRIu64", size: %"PRIu64
+		 " (%"PRIx64")\n"
+		 "reserved areas: %"PRIu64", size: %"PRIu64
+		 " (%"PRIx64")\n", total_free, total_free,
+		 ((double) (ialb->total_size - total_free) /
+		  (double) ialb->total_size) * 100.0,
+		 ialb->allocated_objects, ialb->allocated_size,
+		 ialb->allocated_size, ialb->reserved_areas,
+		 ialb->reserved_size, ialb->reserved_size);
+
+	holes_bst_validate(ialb);
+}
+
+/*
+ * Create a BST-backed allocator managing [start, end) with the given
+ * default strategy. Aborts (igt_assert) on allocation failure or an
+ * invalid range. The returned allocator is freed by its destroy() op.
+ */
+struct intel_allocator *
+intel_allocator_bst_create(int fd, uint64_t start, uint64_t end,
+			   enum allocator_strategy strategy)
+{
+	struct intel_allocator *ial;
+	struct intel_allocator_bst *ialb;
+
+	igt_debug("Using BST allocator\n");
+
+	ialb = malloc(sizeof(struct intel_allocator_bst));
+	igt_assert(ialb);
+	ial = malloc(sizeof(struct intel_allocator));
+	igt_assert(ial);
+
+	/* Allocated objects are stored by 4-byte-long handles. */
+	ialb->objects = igt_map_create(hash_handles, equal_handles);
+	/* Reserved objects are stored by 8-byte-long offsets. */
+	ialb->reserved = igt_map_create(hash_offsets, equal_offsets);
+	igt_assert(ialb->objects && ialb->reserved);
+
+	ialb->by_offset = igt_bst_avl_create();
+	ialb->by_size = igt_bst_avl_create();
+	/* Fix: check the tree allocations like the maps above. */
+	igt_assert(ialb->by_offset && ialb->by_size);
+
+	igt_assert(start < end);
+	igt_assert(end < IALB_MAX_ADDR);
+	ialb->start = start;
+	ialb->end = end;
+
+	/* Initially the whole managed range is one free hole. */
+	ialb->total_size = ialb->end - ialb->start;
+	ialb->allocated_size = 0;
+	ialb->allocated_objects = 0;
+	ialb->reserved_size = 0;
+	ialb->reserved_areas = 0;
+	__intel_allocator_bst_add_hole(ialb, ialb->start, ialb->total_size);
+
+	ial->fd = fd;
+	ial->type = INTEL_ALLOCATOR_BST;
+	ial->strategy = strategy;
+	ial->refcount = 0;
+	ial->priv = ialb;
+	ial->get_address_range = intel_allocator_bst_get_address_range;
+	ial->alloc = intel_allocator_bst_alloc;
+	ial->is_allocated = intel_allocator_bst_is_allocated;
+	ial->free = intel_allocator_bst_free;
+	ial->reserve = intel_allocator_bst_reserve;
+	ial->unreserve = intel_allocator_bst_unreserve;
+	ial->is_reserved = intel_allocator_bst_is_reserved;
+	ial->destroy = intel_allocator_bst_destroy;
+	ial->is_empty = intel_allocator_bst_is_empty;
+	ial->print = intel_allocator_bst_print;
+
+	return ial;
+}
diff --git a/lib/meson.build b/lib/meson.build
index a1aa0ee12..62c8dbf20 100644
--- a/lib/meson.build
+++ b/lib/meson.build
@@ -38,6 +38,7 @@ lib_sources = [
'igt_x86.c',
'instdone.c',
'intel_allocator.c',
+ 'intel_allocator_bst.c',
'intel_allocator_msgchannel.c',
'intel_allocator_random.c',
'intel_allocator_reloc.c',
--
2.25.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [igt-dev] [PATCH i-g-t 4/4] tests/i915_api_intel_allocator: Add the BST allocator
2021-07-20 14:12 [igt-dev] [PATCH i-g-t v2 0/4] Implement the BST allocator Andrzej Turko
` (2 preceding siblings ...)
2021-07-20 14:12 ` [igt-dev] [PATCH i-g-t 3/4] lib/intel_allocator_bst: Implement the allocator with a BST Andrzej Turko
@ 2021-07-20 14:12 ` Andrzej Turko
2021-07-20 15:21 ` [igt-dev] ✓ Fi.CI.BAT: success for Implement the BST allocator (rev2) Patchwork
2021-07-20 16:31 ` [igt-dev] ✗ Fi.CI.IGT: failure " Patchwork
5 siblings, 0 replies; 9+ messages in thread
From: Andrzej Turko @ 2021-07-20 14:12 UTC (permalink / raw)
To: igt-dev
Include the BST allocator in the allocator test.
Signed-off-by: Andrzej Turko <andrzej.turko@linux.intel.com>
Cc: Zbigniew Kempczyński <zbigniew.kempczynski@intel.com>
---
tests/i915/api_intel_allocator.c | 55 ++++++++++++++++++++------------
1 file changed, 35 insertions(+), 20 deletions(-)
diff --git a/tests/i915/api_intel_allocator.c b/tests/i915/api_intel_allocator.c
index 4b74317ed..45e62cd59 100644
--- a/tests/i915/api_intel_allocator.c
+++ b/tests/i915/api_intel_allocator.c
@@ -25,13 +25,13 @@ static inline uint32_t gem_handle_gen(void)
return atomic_fetch_add(&next_handle, 1);
}
-static void alloc_simple(int fd)
+static void alloc_sanity_check(int fd, uint8_t type)
{
uint64_t ahnd;
uint64_t offset0, offset1, size = 0x1000, align = 0x1000, start, end;
bool is_allocated, freed;
- ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+ ahnd = intel_allocator_open(fd, 0, type);
offset0 = intel_allocator_alloc(ahnd, 1, size, align);
offset1 = intel_allocator_alloc(ahnd, 1, size, align);
@@ -67,12 +67,12 @@ static void alloc_simple(int fd)
igt_assert_eq(intel_allocator_close(ahnd), true);
}
-static void reserve_simple(int fd)
+static void reserve_sanity_check(int fd, uint8_t type)
{
uint64_t ahnd, start, size = 0x1000;
bool reserved, unreserved;
- ahnd = intel_allocator_open(fd, 0, INTEL_ALLOCATOR_SIMPLE);
+ ahnd = intel_allocator_open(fd, 0, type);
intel_allocator_get_address_range(ahnd, &start, NULL);
reserved = intel_allocator_reserve(ahnd, 0, size, start);
@@ -194,17 +194,19 @@ static void reuse(int fd, uint8_t type)
}
i--;
- /* free previously allocated bo */
- intel_allocator_free(ahnd, obj[i].handle);
- /* alloc different buffer to fill freed hole */
- tmp.handle = gem_handle_gen();
- tmp.offset = intel_allocator_alloc(ahnd, tmp.handle, OBJ_SIZE, 0);
- igt_assert(prev_offset == tmp.offset);
+ if (type == INTEL_ALLOCATOR_SIMPLE) {
+ /* free previously allocated bo */
+ intel_allocator_free(ahnd, obj[i].handle);
+ /* alloc different buffer to fill freed hole */
+ tmp.handle = gem_handle_gen();
+ tmp.offset = intel_allocator_alloc(ahnd, tmp.handle, OBJ_SIZE, 0);
+ igt_assert(prev_offset == tmp.offset);
- obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
- obj[i].size, 0);
- igt_assert(prev_offset != obj[i].offset);
- intel_allocator_free(ahnd, tmp.handle);
+ obj[i].offset = intel_allocator_alloc(ahnd, obj[i].handle,
+ obj[i].size, 0);
+ igt_assert(prev_offset != obj[i].offset);
+ intel_allocator_free(ahnd, tmp.handle);
+ }
for (i = 0; i < 128; i++)
intel_allocator_free(ahnd, obj[i].handle);
@@ -665,6 +667,7 @@ struct allocators {
{"simple", INTEL_ALLOCATOR_SIMPLE},
{"reloc", INTEL_ALLOCATOR_RELOC},
{"random", INTEL_ALLOCATOR_RANDOM},
+ {"bst", INTEL_ALLOCATOR_BST},
{NULL, 0},
};
@@ -679,18 +682,30 @@ igt_main
srandom(0xdeadbeef);
}
- igt_subtest_f("alloc-simple")
- alloc_simple(fd);
+ igt_subtest_f("alloc-sanity-check-simple")
+ alloc_sanity_check(fd, INTEL_ALLOCATOR_SIMPLE);
- igt_subtest_f("reserve-simple")
- reserve_simple(fd);
+ igt_subtest_f("alloc-sanity-check-bst")
+ alloc_sanity_check(fd, INTEL_ALLOCATOR_BST);
+
+ igt_subtest_f("reserve-sanity-check-simple")
+ reserve_sanity_check(fd, INTEL_ALLOCATOR_SIMPLE);
+
+ igt_subtest_f("reserve-sanity-check-bst")
+ reserve_sanity_check(fd, INTEL_ALLOCATOR_BST);
- igt_subtest_f("reuse")
+ igt_subtest_f("reuse-simple")
reuse(fd, INTEL_ALLOCATOR_SIMPLE);
- igt_subtest_f("reserve")
+ igt_subtest_f("reuse-bst")
+ reuse(fd, INTEL_ALLOCATOR_BST);
+
+ igt_subtest_f("reserve-simple")
reserve(fd, INTEL_ALLOCATOR_SIMPLE);
+ igt_subtest_f("reserve-bst")
+ reserve(fd, INTEL_ALLOCATOR_BST);
+
for (a = als; a->name; a++) {
igt_subtest_with_dynamic_f("%s-allocator", a->name) {
igt_dynamic("basic")
--
2.25.1
_______________________________________________
igt-dev mailing list
igt-dev@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/igt-dev
^ permalink raw reply related [flat|nested] 9+ messages in thread