All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
@ 2023-08-21 10:14 ` Arunpravin Paneer Selvam
  0 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-21 10:14 UTC (permalink / raw)
  To: dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Arunpravin Paneer Selvam, christian.koenig,
	matthew.auld

Currently, contiguous requests are implemented such that
the size is rounded up to the next power of 2 and the
corresponding order block is picked from the freelist.

In addition to the older method, the new method will round down
the size to the previous power of 2 and pick the corresponding
order block from the freelist. For the remaining size, we traverse
the tree and try to allocate either from the freelist block's buddy
or from the peer block. If the remaining size from the peer/buddy
block is not free, we pick the next freelist block and repeat
the same method.

Moved the contiguous/alignment size computation and the trim
function to the drm buddy manager.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
---
 drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
 include/drm/drm_buddy.h     |   6 +-
 2 files changed, 248 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 7098f125b54a..220f60c08a03 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct drm_buddy *mm,
 	return __alloc_range(mm, &dfs, start, size, blocks);
 }
 
+static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
+					       u64 size,
+					       u64 min_block_size,
+					       struct drm_buddy_block *block,
+					       struct list_head *blocks)
+{
+	struct drm_buddy_block *buddy, *parent = NULL;
+	u64 start, offset = 0;
+	LIST_HEAD(dfs);
+	int err;
+
+	if (!block)
+		return -EINVAL;
+
+	buddy = __get_buddy(block);
+	if (!buddy)
+		return -ENOSPC;
+
+	if (drm_buddy_block_is_allocated(buddy))
+		return -ENOSPC;
+
+	parent = block->parent;
+	if (!parent)
+		return -ENOSPC;
+
+	if (block->parent->right == block) {
+		u64 remaining;
+
+		/* Compute the leftover size for allocation */
+		remaining = max((size - drm_buddy_block_size(mm, buddy)),
+				min_block_size);
+		if (!IS_ALIGNED(remaining, min_block_size))
+			remaining = round_up(remaining, min_block_size);
+
+		/* Check if remaining size is greater than buddy block size */
+		if (drm_buddy_block_size(mm, buddy) < remaining)
+			return -ENOSPC;
+
+		offset = drm_buddy_block_size(mm, buddy) - remaining;
+	}
+
+	list_add(&parent->tmp_link, &dfs);
+	start = drm_buddy_block_offset(parent) + offset;
+
+	err = __alloc_range(mm, &dfs, start, size, blocks);
+	if (err)
+		return -ENOSPC;
+
+	return 0;
+}
+
+static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
+					      u64 size,
+					      u64 min_block_size,
+					      struct drm_buddy_block *block,
+					      struct list_head *blocks)
+{
+	struct drm_buddy_block *first, *peer, *tmp;
+	struct drm_buddy_block *parent = NULL;
+	u64 start, offset = 0;
+	unsigned int order;
+	LIST_HEAD(dfs);
+	int err;
+
+	if (!block)
+		return -EINVAL;
+
+	order = drm_buddy_block_order(block);
+	/* Add freelist block to dfs list */
+	list_add(&block->tmp_link, &dfs);
+
+	tmp = block;
+	parent = block->parent;
+	while (parent) {
+		if (block->parent->left == block) {
+			if (parent->left != tmp) {
+				peer = parent->left;
+				break;
+			}
+		} else {
+			if (parent->right != tmp) {
+				peer = parent->right;
+				break;
+			}
+		}
+
+		tmp = parent;
+		parent = tmp->parent;
+	}
+
+	if (!parent)
+		return -ENOSPC;
+
+	do {
+		if (drm_buddy_block_is_allocated(peer))
+			return -ENOSPC;
+		/* Exit loop if peer block order is equal to block order */
+		if (drm_buddy_block_order(peer) == order)
+			break;
+
+		if (drm_buddy_block_is_split(peer)) {
+			/* Traverse down to the block order level */
+			if (block->parent->left == block)
+				peer = peer->right;
+			else
+				peer = peer->left;
+		} else {
+			break;
+		}
+	} while (1);
+
+	if (block->parent->left == block) {
+		u64 remaining;
+
+		/* Compute the leftover size for allocation */
+		remaining = max((size - drm_buddy_block_size(mm, block)),
+				min_block_size);
+		if (!IS_ALIGNED(remaining, min_block_size))
+			remaining = round_up(remaining, min_block_size);
+
+		/* Check if remaining size is greater than peer block size */
+		if (drm_buddy_block_size(mm, peer) < remaining)
+			return -ENOSPC;
+
+		offset = drm_buddy_block_size(mm, peer) - remaining;
+		/* Add left peer block to dfs list */
+		list_add(&peer->tmp_link, &dfs);
+	} else {
+		/* Add right peer block to dfs list */
+		list_add_tail(&peer->tmp_link, &dfs);
+	}
+
+	first = list_first_entry_or_null(&dfs,
+					 struct drm_buddy_block,
+					 tmp_link);
+	if (!first)
+		return -EINVAL;
+
+	start = drm_buddy_block_offset(first) + offset;
+	err = __alloc_range(mm, &dfs, start, size, blocks);
+	if (err)
+		return -ENOSPC;
+
+	return 0;
+}
+
+static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
+					       u64 size,
+					       u64 min_block_size,
+					       struct list_head *blocks)
+{
+	struct drm_buddy_block *block;
+	struct list_head *list;
+	unsigned long pages;
+	unsigned int order;
+	u64 modify_size;
+	int err;
+
+	modify_size = rounddown_pow_of_two(size);
+	pages = modify_size >> ilog2(mm->chunk_size);
+	order = fls(pages) - 1;
+	if (order == 0)
+		return -ENOSPC;
+
+	list = &mm->free_list[order];
+	if (list_empty(list))
+		return -ENOSPC;
+
+	list_for_each_entry_reverse(block, list, link) {
+		/* Allocate contiguous blocks from the buddy */
+		err = __alloc_contiguous_block_from_buddy(mm,
+							  size,
+							  min_block_size,
+							  block,
+							  blocks);
+		if (!err)
+			return 0;
+
+		/* Allocate contiguous blocks from tree traversal method */
+		err = __alloc_contiguous_block_from_peer(mm,
+							 size,
+							 min_block_size,
+							 block,
+							 blocks);
+		if (!err)
+			return 0;
+	}
+
+	return -ENOSPC;
+}
+
 /**
  * drm_buddy_block_trim - free unused pages
  *
@@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
  * @start: start of the allowed range for this block
  * @end: end of the allowed range for this block
  * @size: size of the allocation
- * @min_page_size: alignment of the allocation
+ * @min_block_size: alignment of the allocation
  * @blocks: output list head to add allocated blocks
  * @flags: DRM_BUDDY_*_ALLOCATION flags
  *
@@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
  */
 int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 			   u64 start, u64 end, u64 size,
-			   u64 min_page_size,
+			   u64 min_block_size,
 			   struct list_head *blocks,
 			   unsigned long flags)
 {
 	struct drm_buddy_block *block = NULL;
+	u64 original_size, original_min_size;
 	unsigned int min_order, order;
-	unsigned long pages;
 	LIST_HEAD(allocated);
+	unsigned long pages;
 	int err;
 
 	if (size < mm->chunk_size)
 		return -EINVAL;
 
-	if (min_page_size < mm->chunk_size)
+	if (min_block_size < mm->chunk_size)
 		return -EINVAL;
 
-	if (!is_power_of_2(min_page_size))
+	if (!is_power_of_2(min_block_size))
 		return -EINVAL;
 
 	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
@@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 	if (start + size == end)
 		return __drm_buddy_alloc_range(mm, start, size, blocks);
 
-	if (!IS_ALIGNED(size, min_page_size))
-		return -EINVAL;
+	original_size = size;
+	original_min_size = min_block_size;
+
+	/* Roundup the size to power of 2 */
+	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
+		size = roundup_pow_of_two(size);
+		min_block_size = size;
+	/* Align size value to min_block_size */
+	} else if (!IS_ALIGNED(size, min_block_size)) {
+		size = round_up(size, min_block_size);
+	}
 
 	pages = size >> ilog2(mm->chunk_size);
 	order = fls(pages) - 1;
-	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
+	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
 
 	do {
 		order = min(order, (unsigned int)fls(pages) - 1);
@@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 				break;
 
 			if (order-- == min_order) {
+				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
+				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
+					/*
+					 * Try contiguous block allocation through
+					 * tree traversal method
+					 */
+					return __drm_buddy_alloc_contiguous_blocks(mm,
+										   original_size,
+										   original_min_size,
+										   blocks);
+
 				err = -ENOSPC;
 				goto err_free;
 			}
@@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 			break;
 	} while (1);
 
+	/* Trim the allocated block to the required size */
+	if (original_size != size) {
+		struct list_head *trim_list;
+		LIST_HEAD(temp);
+		u64 trim_size;
+
+		trim_list = &allocated;
+		trim_size = original_size;
+
+		if (!list_is_singular(&allocated)) {
+			block = list_last_entry(&allocated, typeof(*block), link);
+			list_move(&block->link, &temp);
+			trim_list = &temp;
+			trim_size = drm_buddy_block_size(mm, block) -
+				(size - original_size);
+		}
+
+		drm_buddy_block_trim(mm,
+				     trim_size,
+				     trim_list);
+
+		if (!list_empty(&temp))
+			list_splice_tail(trim_list, &allocated);
+	}
+
 	list_splice_tail(&allocated, blocks);
 	return 0;
 
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 572077ff8ae7..a5b39fc01003 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -22,8 +22,9 @@
 	start__ >= max__ || size__ > max__ - start__; \
 })
 
-#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
-#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
+#define DRM_BUDDY_RANGE_ALLOCATION		BIT(0)
+#define DRM_BUDDY_TOPDOWN_ALLOCATION		BIT(1)
+#define DRM_BUDDY_CONTIGUOUS_ALLOCATION		BIT(2)
 
 struct drm_buddy_block {
 #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
@@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
 void drm_buddy_block_print(struct drm_buddy *mm,
 			   struct drm_buddy_block *block,
 			   struct drm_printer *p);
-
 #endif
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [Intel-gfx] [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
@ 2023-08-21 10:14 ` Arunpravin Paneer Selvam
  0 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-21 10:14 UTC (permalink / raw)
  To: dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Arunpravin Paneer Selvam, christian.koenig,
	matthew.auld

Currently, contiguous requests are implemented such that
the size is rounded up to the next power of 2 and the
corresponding order block is picked from the freelist.

In addition to the older method, the new method will round down
the size to the previous power of 2 and pick the corresponding
order block from the freelist. For the remaining size, we traverse
the tree and try to allocate either from the freelist block's buddy
or from the peer block. If the remaining size from the peer/buddy
block is not free, we pick the next freelist block and repeat
the same method.

Moved the contiguous/alignment size computation and the trim
function to the drm buddy manager.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
---
 drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
 include/drm/drm_buddy.h     |   6 +-
 2 files changed, 248 insertions(+), 11 deletions(-)

diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 7098f125b54a..220f60c08a03 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct drm_buddy *mm,
 	return __alloc_range(mm, &dfs, start, size, blocks);
 }
 
+static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
+					       u64 size,
+					       u64 min_block_size,
+					       struct drm_buddy_block *block,
+					       struct list_head *blocks)
+{
+	struct drm_buddy_block *buddy, *parent = NULL;
+	u64 start, offset = 0;
+	LIST_HEAD(dfs);
+	int err;
+
+	if (!block)
+		return -EINVAL;
+
+	buddy = __get_buddy(block);
+	if (!buddy)
+		return -ENOSPC;
+
+	if (drm_buddy_block_is_allocated(buddy))
+		return -ENOSPC;
+
+	parent = block->parent;
+	if (!parent)
+		return -ENOSPC;
+
+	if (block->parent->right == block) {
+		u64 remaining;
+
+		/* Compute the leftover size for allocation */
+		remaining = max((size - drm_buddy_block_size(mm, buddy)),
+				min_block_size);
+		if (!IS_ALIGNED(remaining, min_block_size))
+			remaining = round_up(remaining, min_block_size);
+
+		/* Check if remaining size is greater than buddy block size */
+		if (drm_buddy_block_size(mm, buddy) < remaining)
+			return -ENOSPC;
+
+		offset = drm_buddy_block_size(mm, buddy) - remaining;
+	}
+
+	list_add(&parent->tmp_link, &dfs);
+	start = drm_buddy_block_offset(parent) + offset;
+
+	err = __alloc_range(mm, &dfs, start, size, blocks);
+	if (err)
+		return -ENOSPC;
+
+	return 0;
+}
+
+static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
+					      u64 size,
+					      u64 min_block_size,
+					      struct drm_buddy_block *block,
+					      struct list_head *blocks)
+{
+	struct drm_buddy_block *first, *peer, *tmp;
+	struct drm_buddy_block *parent = NULL;
+	u64 start, offset = 0;
+	unsigned int order;
+	LIST_HEAD(dfs);
+	int err;
+
+	if (!block)
+		return -EINVAL;
+
+	order = drm_buddy_block_order(block);
+	/* Add freelist block to dfs list */
+	list_add(&block->tmp_link, &dfs);
+
+	tmp = block;
+	parent = block->parent;
+	while (parent) {
+		if (block->parent->left == block) {
+			if (parent->left != tmp) {
+				peer = parent->left;
+				break;
+			}
+		} else {
+			if (parent->right != tmp) {
+				peer = parent->right;
+				break;
+			}
+		}
+
+		tmp = parent;
+		parent = tmp->parent;
+	}
+
+	if (!parent)
+		return -ENOSPC;
+
+	do {
+		if (drm_buddy_block_is_allocated(peer))
+			return -ENOSPC;
+		/* Exit loop if peer block order is equal to block order */
+		if (drm_buddy_block_order(peer) == order)
+			break;
+
+		if (drm_buddy_block_is_split(peer)) {
+			/* Traverse down to the block order level */
+			if (block->parent->left == block)
+				peer = peer->right;
+			else
+				peer = peer->left;
+		} else {
+			break;
+		}
+	} while (1);
+
+	if (block->parent->left == block) {
+		u64 remaining;
+
+		/* Compute the leftover size for allocation */
+		remaining = max((size - drm_buddy_block_size(mm, block)),
+				min_block_size);
+		if (!IS_ALIGNED(remaining, min_block_size))
+			remaining = round_up(remaining, min_block_size);
+
+		/* Check if remaining size is greater than peer block size */
+		if (drm_buddy_block_size(mm, peer) < remaining)
+			return -ENOSPC;
+
+		offset = drm_buddy_block_size(mm, peer) - remaining;
+		/* Add left peer block to dfs list */
+		list_add(&peer->tmp_link, &dfs);
+	} else {
+		/* Add right peer block to dfs list */
+		list_add_tail(&peer->tmp_link, &dfs);
+	}
+
+	first = list_first_entry_or_null(&dfs,
+					 struct drm_buddy_block,
+					 tmp_link);
+	if (!first)
+		return -EINVAL;
+
+	start = drm_buddy_block_offset(first) + offset;
+	err = __alloc_range(mm, &dfs, start, size, blocks);
+	if (err)
+		return -ENOSPC;
+
+	return 0;
+}
+
+static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
+					       u64 size,
+					       u64 min_block_size,
+					       struct list_head *blocks)
+{
+	struct drm_buddy_block *block;
+	struct list_head *list;
+	unsigned long pages;
+	unsigned int order;
+	u64 modify_size;
+	int err;
+
+	modify_size = rounddown_pow_of_two(size);
+	pages = modify_size >> ilog2(mm->chunk_size);
+	order = fls(pages) - 1;
+	if (order == 0)
+		return -ENOSPC;
+
+	list = &mm->free_list[order];
+	if (list_empty(list))
+		return -ENOSPC;
+
+	list_for_each_entry_reverse(block, list, link) {
+		/* Allocate contiguous blocks from the buddy */
+		err = __alloc_contiguous_block_from_buddy(mm,
+							  size,
+							  min_block_size,
+							  block,
+							  blocks);
+		if (!err)
+			return 0;
+
+		/* Allocate contiguous blocks from tree traversal method */
+		err = __alloc_contiguous_block_from_peer(mm,
+							 size,
+							 min_block_size,
+							 block,
+							 blocks);
+		if (!err)
+			return 0;
+	}
+
+	return -ENOSPC;
+}
+
 /**
  * drm_buddy_block_trim - free unused pages
  *
@@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
  * @start: start of the allowed range for this block
  * @end: end of the allowed range for this block
  * @size: size of the allocation
- * @min_page_size: alignment of the allocation
+ * @min_block_size: alignment of the allocation
  * @blocks: output list head to add allocated blocks
  * @flags: DRM_BUDDY_*_ALLOCATION flags
  *
@@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
  */
 int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 			   u64 start, u64 end, u64 size,
-			   u64 min_page_size,
+			   u64 min_block_size,
 			   struct list_head *blocks,
 			   unsigned long flags)
 {
 	struct drm_buddy_block *block = NULL;
+	u64 original_size, original_min_size;
 	unsigned int min_order, order;
-	unsigned long pages;
 	LIST_HEAD(allocated);
+	unsigned long pages;
 	int err;
 
 	if (size < mm->chunk_size)
 		return -EINVAL;
 
-	if (min_page_size < mm->chunk_size)
+	if (min_block_size < mm->chunk_size)
 		return -EINVAL;
 
-	if (!is_power_of_2(min_page_size))
+	if (!is_power_of_2(min_block_size))
 		return -EINVAL;
 
 	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
@@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 	if (start + size == end)
 		return __drm_buddy_alloc_range(mm, start, size, blocks);
 
-	if (!IS_ALIGNED(size, min_page_size))
-		return -EINVAL;
+	original_size = size;
+	original_min_size = min_block_size;
+
+	/* Roundup the size to power of 2 */
+	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
+		size = roundup_pow_of_two(size);
+		min_block_size = size;
+	/* Align size value to min_block_size */
+	} else if (!IS_ALIGNED(size, min_block_size)) {
+		size = round_up(size, min_block_size);
+	}
 
 	pages = size >> ilog2(mm->chunk_size);
 	order = fls(pages) - 1;
-	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
+	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
 
 	do {
 		order = min(order, (unsigned int)fls(pages) - 1);
@@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 				break;
 
 			if (order-- == min_order) {
+				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
+				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
+					/*
+					 * Try contiguous block allocation through
+					 * tree traversal method
+					 */
+					return __drm_buddy_alloc_contiguous_blocks(mm,
+										   original_size,
+										   original_min_size,
+										   blocks);
+
 				err = -ENOSPC;
 				goto err_free;
 			}
@@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 			break;
 	} while (1);
 
+	/* Trim the allocated block to the required size */
+	if (original_size != size) {
+		struct list_head *trim_list;
+		LIST_HEAD(temp);
+		u64 trim_size;
+
+		trim_list = &allocated;
+		trim_size = original_size;
+
+		if (!list_is_singular(&allocated)) {
+			block = list_last_entry(&allocated, typeof(*block), link);
+			list_move(&block->link, &temp);
+			trim_list = &temp;
+			trim_size = drm_buddy_block_size(mm, block) -
+				(size - original_size);
+		}
+
+		drm_buddy_block_trim(mm,
+				     trim_size,
+				     trim_list);
+
+		if (!list_empty(&temp))
+			list_splice_tail(trim_list, &allocated);
+	}
+
 	list_splice_tail(&allocated, blocks);
 	return 0;
 
diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
index 572077ff8ae7..a5b39fc01003 100644
--- a/include/drm/drm_buddy.h
+++ b/include/drm/drm_buddy.h
@@ -22,8 +22,9 @@
 	start__ >= max__ || size__ > max__ - start__; \
 })
 
-#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
-#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
+#define DRM_BUDDY_RANGE_ALLOCATION		BIT(0)
+#define DRM_BUDDY_TOPDOWN_ALLOCATION		BIT(1)
+#define DRM_BUDDY_CONTIGUOUS_ALLOCATION		BIT(2)
 
 struct drm_buddy_block {
 #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
@@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
 void drm_buddy_block_print(struct drm_buddy *mm,
 			   struct drm_buddy_block *block,
 			   struct drm_printer *p);
-
 #endif
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH 2/3] drm/amdgpu: Remove the contiguous computation and trim
  2023-08-21 10:14 ` [Intel-gfx] " Arunpravin Paneer Selvam
@ 2023-08-21 10:14   ` Arunpravin Paneer Selvam
  -1 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-21 10:14 UTC (permalink / raw)
  To: dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Arunpravin Paneer Selvam, christian.koenig,
	matthew.auld

As we have implemented a new method for contiguous allocation
which requires the actual size and the actual min_block_size, we
have moved the roundup and alignment size computation to the buddy
allocator. This way GPU drivers pass the required size and
alignment to the buddy allocator, and the rest of the operations
are taken care of by the drm_buddy_alloc_blocks() function.

We have moved the trim function call into the drm_buddy_alloc_blocks()
function, as we don't have the rounded-up or aligned size in our driver.

Now we have all in one place and it will be easy to manage in
buddy allocator.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 58 ++------------------
 1 file changed, 4 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index c7085a747b03..18f58efc9dc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -424,9 +424,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       const struct ttm_place *place,
 			       struct ttm_resource **res)
 {
-	u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
+	u64 vis_usage = 0, max_bytes, min_block_size;
 	struct amdgpu_vram_mgr_resource *vres;
 	u64 size, remaining_size, lpfn, fpfn;
 	struct drm_buddy *mm = &mgr->mm;
@@ -474,6 +474,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
 
+	if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+		vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
 	if (fpfn || lpfn != mgr->mm.size)
 		/* Allocate blocks in desired range */
 		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
@@ -496,25 +499,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 				!(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
 			min_block_size = (u64)pages_per_block << PAGE_SHIFT;
 
-		cur_size = size;
-
-		if (fpfn + size != (u64)place->lpfn << PAGE_SHIFT) {
-			/*
-			 * Except for actual range allocation, modify the size and
-			 * min_block_size conforming to continuous flag enablement
-			 */
-			if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-				size = roundup_pow_of_two(size);
-				min_block_size = size;
-			/*
-			 * Modify the size value if size is not
-			 * aligned with min_block_size
-			 */
-			} else if (!IS_ALIGNED(size, min_block_size)) {
-				size = round_up(size, min_block_size);
-			}
-		}
-
 		r = drm_buddy_alloc_blocks(mm, fpfn,
 					   lpfn,
 					   size,
@@ -531,40 +515,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	}
 	mutex_unlock(&mgr->lock);
 
-	if (cur_size != size) {
-		struct drm_buddy_block *block;
-		struct list_head *trim_list;
-		u64 original_size;
-		LIST_HEAD(temp);
-
-		trim_list = &vres->blocks;
-		original_size = (u64)vres->base.size;
-
-		/*
-		 * If size value is rounded up to min_block_size, trim the last
-		 * block to the required size
-		 */
-		if (!list_is_singular(&vres->blocks)) {
-			block = list_last_entry(&vres->blocks, typeof(*block), link);
-			list_move_tail(&block->link, &temp);
-			trim_list = &temp;
-			/*
-			 * Compute the original_size value by subtracting the
-			 * last block size with (aligned size - original size)
-			 */
-			original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
-		}
-
-		mutex_lock(&mgr->lock);
-		drm_buddy_block_trim(mm,
-				     original_size,
-				     trim_list);
-		mutex_unlock(&mgr->lock);
-
-		if (!list_empty(&temp))
-			list_splice_tail(trim_list, &vres->blocks);
-	}
-
 	vres->base.start = 0;
 	list_for_each_entry(block, &vres->blocks, link) {
 		unsigned long start;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [Intel-gfx] [PATCH 2/3] drm/amdgpu: Remove the contiguous computation and trim
@ 2023-08-21 10:14   ` Arunpravin Paneer Selvam
  0 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-21 10:14 UTC (permalink / raw)
  To: dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Arunpravin Paneer Selvam, christian.koenig,
	matthew.auld

As we have implemented a new method for contiguous allocation
which requires the actual size and the actual min_block_size, we
have moved the roundup and alignment size computation to the buddy
allocator. This way GPU drivers pass the required size and
alignment to the buddy allocator, and the rest of the operations
are taken care of by the drm_buddy_alloc_blocks() function.

We have moved the trim function call into the drm_buddy_alloc_blocks()
function, as we don't have the rounded-up or aligned size in our driver.

Now we have all in one place and it will be easy to manage in
buddy allocator.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 58 ++------------------
 1 file changed, 4 insertions(+), 54 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index c7085a747b03..18f58efc9dc7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -424,9 +424,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			       const struct ttm_place *place,
 			       struct ttm_resource **res)
 {
-	u64 vis_usage = 0, max_bytes, cur_size, min_block_size;
 	struct amdgpu_vram_mgr *mgr = to_vram_mgr(man);
 	struct amdgpu_device *adev = to_amdgpu_device(mgr);
+	u64 vis_usage = 0, max_bytes, min_block_size;
 	struct amdgpu_vram_mgr_resource *vres;
 	u64 size, remaining_size, lpfn, fpfn;
 	struct drm_buddy *mm = &mgr->mm;
@@ -474,6 +474,9 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
 
+	if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+		vres->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
 	if (fpfn || lpfn != mgr->mm.size)
 		/* Allocate blocks in desired range */
 		vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
@@ -496,25 +499,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 				!(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
 			min_block_size = (u64)pages_per_block << PAGE_SHIFT;
 
-		cur_size = size;
-
-		if (fpfn + size != (u64)place->lpfn << PAGE_SHIFT) {
-			/*
-			 * Except for actual range allocation, modify the size and
-			 * min_block_size conforming to continuous flag enablement
-			 */
-			if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-				size = roundup_pow_of_two(size);
-				min_block_size = size;
-			/*
-			 * Modify the size value if size is not
-			 * aligned with min_block_size
-			 */
-			} else if (!IS_ALIGNED(size, min_block_size)) {
-				size = round_up(size, min_block_size);
-			}
-		}
-
 		r = drm_buddy_alloc_blocks(mm, fpfn,
 					   lpfn,
 					   size,
@@ -531,40 +515,6 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	}
 	mutex_unlock(&mgr->lock);
 
-	if (cur_size != size) {
-		struct drm_buddy_block *block;
-		struct list_head *trim_list;
-		u64 original_size;
-		LIST_HEAD(temp);
-
-		trim_list = &vres->blocks;
-		original_size = (u64)vres->base.size;
-
-		/*
-		 * If size value is rounded up to min_block_size, trim the last
-		 * block to the required size
-		 */
-		if (!list_is_singular(&vres->blocks)) {
-			block = list_last_entry(&vres->blocks, typeof(*block), link);
-			list_move_tail(&block->link, &temp);
-			trim_list = &temp;
-			/*
-			 * Compute the original_size value by subtracting the
-			 * last block size with (aligned size - original size)
-			 */
-			original_size = amdgpu_vram_mgr_block_size(block) - (size - cur_size);
-		}
-
-		mutex_lock(&mgr->lock);
-		drm_buddy_block_trim(mm,
-				     original_size,
-				     trim_list);
-		mutex_unlock(&mgr->lock);
-
-		if (!list_empty(&temp))
-			list_splice_tail(trim_list, &vres->blocks);
-	}
-
 	vres->base.start = 0;
 	list_for_each_entry(block, &vres->blocks, link) {
 		unsigned long start;
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [PATCH 3/3] drm/i915: Remove the contiguous computation and trim
  2023-08-21 10:14 ` [Intel-gfx] " Arunpravin Paneer Selvam
@ 2023-08-21 10:14   ` Arunpravin Paneer Selvam
  -1 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-21 10:14 UTC (permalink / raw)
  To: dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Arunpravin Paneer Selvam, christian.koenig,
	matthew.auld

As we have implemented a new method for contiguous allocation
which requires the actual size and the actual min_block_size, we
have moved the roundup and alignment size computation to the buddy
allocator. This way GPU drivers pass the required size and
alignment to the buddy allocator, and the rest of the operations
are taken care of by the drm_buddy_alloc_blocks() function.

We have moved the trim function call into the drm_buddy_alloc_blocks()
function, as we don't have the rounded-up or aligned size in our driver.

Now we have all in one place and it will be easy to manage in
buddy allocator.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
---
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 23 +++----------------
 1 file changed, 3 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index a1bc804cfa15..0d735d5c2b35 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -59,6 +59,9 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
 
+	if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+		bman_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
 	if (place->fpfn || lpfn != man->size)
 		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
 
@@ -72,18 +75,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	GEM_BUG_ON(min_page_size < mm->chunk_size);
 	GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
 
-	if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
-	    place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-		unsigned long pages;
-
-		size = roundup_pow_of_two(size);
-		min_page_size = size;
-
-		pages = size >> ilog2(mm->chunk_size);
-		if (pages > lpfn)
-			lpfn = pages;
-	}
-
 	if (size > lpfn << PAGE_SHIFT) {
 		err = -E2BIG;
 		goto err_free_res;
@@ -107,14 +98,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	if (unlikely(err))
 		goto err_free_blocks;
 
-	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-		u64 original_size = (u64)bman_res->base.size;
-
-		drm_buddy_block_trim(mm,
-				     original_size,
-				     &bman_res->blocks);
-	}
-
 	if (lpfn <= bman->visible_size) {
 		bman_res->used_visible_size = PFN_UP(bman_res->base.size);
 	} else {
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [Intel-gfx] [PATCH 3/3] drm/i915: Remove the contiguous computation and trim
@ 2023-08-21 10:14   ` Arunpravin Paneer Selvam
  0 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-21 10:14 UTC (permalink / raw)
  To: dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Arunpravin Paneer Selvam, christian.koenig,
	matthew.auld

As we have implemented a new method for contiguous allocation
which requires the actual size and the actual min_block_size, we
have moved the roundup and alignment size computation to the buddy
allocator. This way GPU drivers pass the required size and
alignment to the buddy allocator, and the rest of the operations
are taken care of by the drm_buddy_alloc_blocks() function.

We have moved the trim function call to the drm_buddy_alloc_blocks()
function as we don't have the rounded-up or aligned size in our driver.

Now we have everything in one place, and it will be easy to manage
in the buddy allocator.

Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
---
 drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 23 +++----------------
 1 file changed, 3 insertions(+), 20 deletions(-)

diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
index a1bc804cfa15..0d735d5c2b35 100644
--- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
+++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c
@@ -59,6 +59,9 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	if (place->flags & TTM_PL_FLAG_TOPDOWN)
 		bman_res->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION;
 
+	if (place->flags & TTM_PL_FLAG_CONTIGUOUS)
+		bman_res->flags |= DRM_BUDDY_CONTIGUOUS_ALLOCATION;
+
 	if (place->fpfn || lpfn != man->size)
 		bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION;
 
@@ -72,18 +75,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	GEM_BUG_ON(min_page_size < mm->chunk_size);
 	GEM_BUG_ON(!IS_ALIGNED(size, min_page_size));
 
-	if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn &&
-	    place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-		unsigned long pages;
-
-		size = roundup_pow_of_two(size);
-		min_page_size = size;
-
-		pages = size >> ilog2(mm->chunk_size);
-		if (pages > lpfn)
-			lpfn = pages;
-	}
-
 	if (size > lpfn << PAGE_SHIFT) {
 		err = -E2BIG;
 		goto err_free_res;
@@ -107,14 +98,6 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man,
 	if (unlikely(err))
 		goto err_free_blocks;
 
-	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-		u64 original_size = (u64)bman_res->base.size;
-
-		drm_buddy_block_trim(mm,
-				     original_size,
-				     &bman_res->blocks);
-	}
-
 	if (lpfn <= bman->visible_size) {
 		bman_res->used_visible_size = PFN_UP(bman_res->base.size);
 	} else {
-- 
2.25.1


^ permalink raw reply related	[flat|nested] 20+ messages in thread

* [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/3] drm/buddy: Fix contiguous memory allocation issues
  2023-08-21 10:14 ` [Intel-gfx] " Arunpravin Paneer Selvam
                   ` (2 preceding siblings ...)
  (?)
@ 2023-08-21 10:44 ` Patchwork
  -1 siblings, 0 replies; 20+ messages in thread
From: Patchwork @ 2023-08-21 10:44 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/3] drm/buddy: Fix contiguous memory allocation issues
URL   : https://patchwork.freedesktop.org/series/122693/
State : warning

== Summary ==

Error: dim checkpatch failed
/home/kbuild/linux/maintainer-tools/dim: line 50: /home/kbuild/.dimrc: No such file or directory



^ permalink raw reply	[flat|nested] 20+ messages in thread

* [Intel-gfx] ✗ Fi.CI.SPARSE: warning for series starting with [1/3] drm/buddy: Fix contiguous memory allocation issues
  2023-08-21 10:14 ` [Intel-gfx] " Arunpravin Paneer Selvam
                   ` (3 preceding siblings ...)
  (?)
@ 2023-08-21 10:44 ` Patchwork
  -1 siblings, 0 replies; 20+ messages in thread
From: Patchwork @ 2023-08-21 10:44 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam; +Cc: intel-gfx

== Series Details ==

Series: series starting with [1/3] drm/buddy: Fix contiguous memory allocation issues
URL   : https://patchwork.freedesktop.org/series/122693/
State : warning

== Summary ==

Error: dim sparse failed
/home/kbuild/linux/maintainer-tools/dim: line 50: /home/kbuild/.dimrc: No such file or directory



^ permalink raw reply	[flat|nested] 20+ messages in thread

* [Intel-gfx] ✓ Fi.CI.BAT: success for series starting with [1/3] drm/buddy: Fix contiguous memory allocation issues
  2023-08-21 10:14 ` [Intel-gfx] " Arunpravin Paneer Selvam
                   ` (4 preceding siblings ...)
  (?)
@ 2023-08-21 10:58 ` Patchwork
  -1 siblings, 0 replies; 20+ messages in thread
From: Patchwork @ 2023-08-21 10:58 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 6993 bytes --]

== Series Details ==

Series: series starting with [1/3] drm/buddy: Fix contiguous memory allocation issues
URL   : https://patchwork.freedesktop.org/series/122693/
State : success

== Summary ==

CI Bug Log - changes from CI_DRM_13538 -> Patchwork_122693v1
====================================================

Summary
-------

  **SUCCESS**

  No regressions found.

  External URL: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/index.html

Participating hosts (38 -> 38)
------------------------------

  Additional (1): fi-kbl-soraka 
  Missing    (1): fi-snb-2520m 

Known issues
------------

  Here are the changes found in Patchwork_122693v1 that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@core_auth@basic-auth:
    - bat-adlp-11:        NOTRUN -> [ABORT][1] ([i915#8011])
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/bat-adlp-11/igt@core_auth@basic-auth.html

  * igt@gem_busy@busy@all-engines:
    - bat-mtlp-8:         [PASS][2] -> [DMESG-FAIL][3] ([i915#9121])
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/bat-mtlp-8/igt@gem_busy@busy@all-engines.html
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/bat-mtlp-8/igt@gem_busy@busy@all-engines.html

  * igt@gem_huc_copy@huc-copy:
    - fi-kbl-soraka:      NOTRUN -> [SKIP][4] ([fdo#109271] / [i915#2190])
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/fi-kbl-soraka/igt@gem_huc_copy@huc-copy.html

  * igt@gem_lmem_swapping@basic:
    - fi-kbl-soraka:      NOTRUN -> [SKIP][5] ([fdo#109271] / [i915#4613]) +3 similar issues
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/fi-kbl-soraka/igt@gem_lmem_swapping@basic.html

  * igt@i915_selftest@live@gt_heartbeat:
    - fi-apl-guc:         [PASS][6] -> [DMESG-FAIL][7] ([i915#5334])
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/fi-apl-guc/igt@i915_selftest@live@gt_heartbeat.html
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/fi-apl-guc/igt@i915_selftest@live@gt_heartbeat.html

  * igt@i915_selftest@live@gt_pm:
    - fi-kbl-soraka:      NOTRUN -> [DMESG-FAIL][8] ([i915#1886] / [i915#7913])
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/fi-kbl-soraka/igt@i915_selftest@live@gt_pm.html

  * igt@i915_selftest@live@requests:
    - bat-mtlp-8:         [PASS][9] -> [ABORT][10] ([i915#7982])
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/bat-mtlp-8/igt@i915_selftest@live@requests.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/bat-mtlp-8/igt@i915_selftest@live@requests.html

  * igt@i915_suspend@basic-s3-without-i915:
    - bat-rpls-1:         NOTRUN -> [ABORT][11] ([i915#6687] / [i915#7978] / [i915#8668])
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/bat-rpls-1/igt@i915_suspend@basic-s3-without-i915.html

  * igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic:
    - fi-kbl-soraka:      NOTRUN -> [SKIP][12] ([fdo#109271]) +8 similar issues
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/fi-kbl-soraka/igt@kms_cursor_legacy@basic-busy-flip-before-cursor-atomic.html

  
#### Possible fixes ####

  * igt@i915_selftest@live@hangcheck:
    - bat-rpls-1:         [ABORT][13] ([i915#7677]) -> [PASS][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/bat-rpls-1/igt@i915_selftest@live@hangcheck.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/bat-rpls-1/igt@i915_selftest@live@hangcheck.html

  * igt@i915_selftest@live@mman:
    - bat-rpls-2:         [TIMEOUT][15] ([i915#6794] / [i915#7392]) -> [PASS][16]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/bat-rpls-2/igt@i915_selftest@live@mman.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/bat-rpls-2/igt@i915_selftest@live@mman.html

  * igt@i915_suspend@basic-s2idle-without-i915:
    - bat-rpls-2:         [WARN][17] ([i915#8747]) -> [PASS][18]
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/bat-rpls-2/igt@i915_suspend@basic-s2idle-without-i915.html
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/bat-rpls-2/igt@i915_suspend@basic-s2idle-without-i915.html

  
#### Warnings ####

  * igt@i915_module_load@load:
    - bat-adlp-11:        [ABORT][19] ([i915#4423]) -> [DMESG-WARN][20] ([i915#4423])
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/bat-adlp-11/igt@i915_module_load@load.html
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/bat-adlp-11/igt@i915_module_load@load.html

  * igt@kms_psr@primary_page_flip:
    - bat-rplp-1:         [SKIP][21] ([i915#1072]) -> [ABORT][22] ([i915#8442] / [i915#8668] / [i915#8860])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/bat-rplp-1/igt@kms_psr@primary_page_flip.html
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/bat-rplp-1/igt@kms_psr@primary_page_flip.html

  
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [i915#1072]: https://gitlab.freedesktop.org/drm/intel/issues/1072
  [i915#1886]: https://gitlab.freedesktop.org/drm/intel/issues/1886
  [i915#2190]: https://gitlab.freedesktop.org/drm/intel/issues/2190
  [i915#4423]: https://gitlab.freedesktop.org/drm/intel/issues/4423
  [i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
  [i915#5334]: https://gitlab.freedesktop.org/drm/intel/issues/5334
  [i915#6687]: https://gitlab.freedesktop.org/drm/intel/issues/6687
  [i915#6794]: https://gitlab.freedesktop.org/drm/intel/issues/6794
  [i915#7392]: https://gitlab.freedesktop.org/drm/intel/issues/7392
  [i915#7677]: https://gitlab.freedesktop.org/drm/intel/issues/7677
  [i915#7913]: https://gitlab.freedesktop.org/drm/intel/issues/7913
  [i915#7978]: https://gitlab.freedesktop.org/drm/intel/issues/7978
  [i915#7982]: https://gitlab.freedesktop.org/drm/intel/issues/7982
  [i915#8011]: https://gitlab.freedesktop.org/drm/intel/issues/8011
  [i915#8442]: https://gitlab.freedesktop.org/drm/intel/issues/8442
  [i915#8668]: https://gitlab.freedesktop.org/drm/intel/issues/8668
  [i915#8747]: https://gitlab.freedesktop.org/drm/intel/issues/8747
  [i915#8860]: https://gitlab.freedesktop.org/drm/intel/issues/8860
  [i915#9121]: https://gitlab.freedesktop.org/drm/intel/issues/9121


Build changes
-------------

  * Linux: CI_DRM_13538 -> Patchwork_122693v1

  CI-20190529: 20190529
  CI_DRM_13538: 19f1cd24387fc8bbb63a2c1a74080e73a8f53f5f @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_7445: 7445
  Patchwork_122693v1: 19f1cd24387fc8bbb63a2c1a74080e73a8f53f5f @ git://anongit.freedesktop.org/gfx-ci/linux


### Linux commits

cee03d7522e6 drm/i915: Remove the contiguous computation and trim
fa58883f4e6e drm/amdgpu: Remove the contiguous computation and trim
bdf5b55f2587 drm/buddy: Fix contiguous memory allocation issues

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/index.html

[-- Attachment #2: Type: text/html, Size: 8211 bytes --]

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
  2023-08-21 10:14 ` [Intel-gfx] " Arunpravin Paneer Selvam
@ 2023-08-21 11:16   ` Christian König
  -1 siblings, 0 replies; 20+ messages in thread
From: Christian König @ 2023-08-21 11:16 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Pan, Xinhui, matthew.auld

Am 21.08.23 um 12:14 schrieb Arunpravin Paneer Selvam:
> The way now contiguous requests are implemented such that
> the size rounded up to power of 2 and the corresponding order
> block picked from the freelist.
>
> In addition to the older method, the new method will rounddown
> the size to power of 2 and the corresponding order block picked
> from the freelist. And for the remaining size we traverse the
> tree and try to allocate either from the freelist block's buddy
> or from the peer block. If the remaining size from peer/buddy
> block is not free, we pick the next freelist block and repeat
> the same method.

I think it's worth mentioning that Xinhui tried something similar a few 
months ago, but that didn't look like it would work. For this one I'm 
more confident.

Offhand, the implementation looks clean to me, but Matthew or others 
who have more background in how the implementation works need to take 
a look as well.

Thanks,
Christian.

>
> Moved contiguous/alignment size computation part and trim
> function to the drm buddy manager.
>
> Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
> ---
>   drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
>   include/drm/drm_buddy.h     |   6 +-
>   2 files changed, 248 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
> index 7098f125b54a..220f60c08a03 100644
> --- a/drivers/gpu/drm/drm_buddy.c
> +++ b/drivers/gpu/drm/drm_buddy.c
> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct drm_buddy *mm,
>   	return __alloc_range(mm, &dfs, start, size, blocks);
>   }
>   
> +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
> +					       u64 size,
> +					       u64 min_block_size,
> +					       struct drm_buddy_block *block,
> +					       struct list_head *blocks)
> +{
> +	struct drm_buddy_block *buddy, *parent = NULL;
> +	u64 start, offset = 0;
> +	LIST_HEAD(dfs);
> +	int err;
> +
> +	if (!block)
> +		return -EINVAL;
> +
> +	buddy = __get_buddy(block);
> +	if (!buddy)
> +		return -ENOSPC;
> +
> +	if (drm_buddy_block_is_allocated(buddy))
> +		return -ENOSPC;
> +
> +	parent = block->parent;
> +	if (!parent)
> +		return -ENOSPC;
> +
> +	if (block->parent->right == block) {
> +		u64 remaining;
> +
> +		/* Compute the leftover size for allocation */
> +		remaining = max((size - drm_buddy_block_size(mm, buddy)),
> +				min_block_size);
> +		if (!IS_ALIGNED(remaining, min_block_size))
> +			remaining = round_up(remaining, min_block_size);
> +
> +		/* Check if remaining size is greater than buddy block size */
> +		if (drm_buddy_block_size(mm, buddy) < remaining)
> +			return -ENOSPC;
> +
> +		offset = drm_buddy_block_size(mm, buddy) - remaining;
> +	}
> +
> +	list_add(&parent->tmp_link, &dfs);
> +	start = drm_buddy_block_offset(parent) + offset;
> +
> +	err = __alloc_range(mm, &dfs, start, size, blocks);
> +	if (err)
> +		return -ENOSPC;
> +
> +	return 0;
> +}
> +
> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
> +					      u64 size,
> +					      u64 min_block_size,
> +					      struct drm_buddy_block *block,
> +					      struct list_head *blocks)
> +{
> +	struct drm_buddy_block *first, *peer, *tmp;
> +	struct drm_buddy_block *parent = NULL;
> +	u64 start, offset = 0;
> +	unsigned int order;
> +	LIST_HEAD(dfs);
> +	int err;
> +
> +	if (!block)
> +		return -EINVAL;
> +
> +	order = drm_buddy_block_order(block);
> +	/* Add freelist block to dfs list */
> +	list_add(&block->tmp_link, &dfs);
> +
> +	tmp = block;
> +	parent = block->parent;
> +	while (parent) {
> +		if (block->parent->left == block) {
> +			if (parent->left != tmp) {
> +				peer = parent->left;
> +				break;
> +			}
> +		} else {
> +			if (parent->right != tmp) {
> +				peer = parent->right;
> +				break;
> +			}
> +		}
> +
> +		tmp = parent;
> +		parent = tmp->parent;
> +	}
> +
> +	if (!parent)
> +		return -ENOSPC;
> +
> +	do {
> +		if (drm_buddy_block_is_allocated(peer))
> +			return -ENOSPC;
> +		/* Exit loop if peer block order is equal to block order */
> +		if (drm_buddy_block_order(peer) == order)
> +			break;
> +
> +		if (drm_buddy_block_is_split(peer)) {
> +			/* Traverse down to the block order level */
> +			if (block->parent->left == block)
> +				peer = peer->right;
> +			else
> +				peer = peer->left;
> +		} else {
> +			break;
> +		}
> +	} while (1);
> +
> +	if (block->parent->left == block) {
> +		u64 remaining;
> +
> +		/* Compute the leftover size for allocation */
> +		remaining = max((size - drm_buddy_block_size(mm, block)),
> +				min_block_size);
> +		if (!IS_ALIGNED(remaining, min_block_size))
> +			remaining = round_up(remaining, min_block_size);
> +
> +		/* Check if remaining size is greater than peer block size */
> +		if (drm_buddy_block_size(mm, peer) < remaining)
> +			return -ENOSPC;
> +
> +		offset = drm_buddy_block_size(mm, peer) - remaining;
> +		/* Add left peer block to dfs list */
> +		list_add(&peer->tmp_link, &dfs);
> +	} else {
> +		/* Add right peer block to dfs list */
> +		list_add_tail(&peer->tmp_link, &dfs);
> +	}
> +
> +	first = list_first_entry_or_null(&dfs,
> +					 struct drm_buddy_block,
> +					 tmp_link);
> +	if (!first)
> +		return -EINVAL;
> +
> +	start = drm_buddy_block_offset(first) + offset;
> +	err = __alloc_range(mm, &dfs, start, size, blocks);
> +	if (err)
> +		return -ENOSPC;
> +
> +	return 0;
> +}
> +
> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
> +					       u64 size,
> +					       u64 min_block_size,
> +					       struct list_head *blocks)
> +{
> +	struct drm_buddy_block *block;
> +	struct list_head *list;
> +	unsigned long pages;
> +	unsigned int order;
> +	u64 modify_size;
> +	int err;
> +
> +	modify_size = rounddown_pow_of_two(size);
> +	pages = modify_size >> ilog2(mm->chunk_size);
> +	order = fls(pages) - 1;
> +	if (order == 0)
> +		return -ENOSPC;
> +
> +	list = &mm->free_list[order];
> +	if (list_empty(list))
> +		return -ENOSPC;
> +
> +	list_for_each_entry_reverse(block, list, link) {
> +		/* Allocate contiguous blocks from the buddy */
> +		err = __alloc_contiguous_block_from_buddy(mm,
> +							  size,
> +							  min_block_size,
> +							  block,
> +							  blocks);
> +		if (!err)
> +			return 0;
> +
> +		/* Allocate contiguous blocks from tree traversal method */
> +		err = __alloc_contiguous_block_from_peer(mm,
> +							 size,
> +							 min_block_size,
> +							 block,
> +							 blocks);
> +		if (!err)
> +			return 0;
> +	}
> +
> +	return -ENOSPC;
> +}
> +
>   /**
>    * drm_buddy_block_trim - free unused pages
>    *
> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>    * @start: start of the allowed range for this block
>    * @end: end of the allowed range for this block
>    * @size: size of the allocation
> - * @min_page_size: alignment of the allocation
> + * @min_block_size: alignment of the allocation
>    * @blocks: output list head to add allocated blocks
>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>    *
> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>    */
>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   			   u64 start, u64 end, u64 size,
> -			   u64 min_page_size,
> +			   u64 min_block_size,
>   			   struct list_head *blocks,
>   			   unsigned long flags)
>   {
>   	struct drm_buddy_block *block = NULL;
> +	u64 original_size, original_min_size;
>   	unsigned int min_order, order;
> -	unsigned long pages;
>   	LIST_HEAD(allocated);
> +	unsigned long pages;
>   	int err;
>   
>   	if (size < mm->chunk_size)
>   		return -EINVAL;
>   
> -	if (min_page_size < mm->chunk_size)
> +	if (min_block_size < mm->chunk_size)
>   		return -EINVAL;
>   
> -	if (!is_power_of_2(min_page_size))
> +	if (!is_power_of_2(min_block_size))
>   		return -EINVAL;
>   
>   	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   	if (start + size == end)
>   		return __drm_buddy_alloc_range(mm, start, size, blocks);
>   
> -	if (!IS_ALIGNED(size, min_page_size))
> -		return -EINVAL;
> +	original_size = size;
> +	original_min_size = min_block_size;
> +
> +	/* Roundup the size to power of 2 */
> +	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
> +		size = roundup_pow_of_two(size);
> +		min_block_size = size;
> +	/* Align size value to min_block_size */
> +	} else if (!IS_ALIGNED(size, min_block_size)) {
> +		size = round_up(size, min_block_size);
> +	}
>   
>   	pages = size >> ilog2(mm->chunk_size);
>   	order = fls(pages) - 1;
> -	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
> +	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>   
>   	do {
>   		order = min(order, (unsigned int)fls(pages) - 1);
> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   				break;
>   
>   			if (order-- == min_order) {
> +				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
> +				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
> +					/*
> +					 * Try contiguous block allocation through
> +					 * tree traversal method
> +					 */
> +					return __drm_buddy_alloc_contiguous_blocks(mm,
> +										   original_size,
> +										   original_min_size,
> +										   blocks);
> +
>   				err = -ENOSPC;
>   				goto err_free;
>   			}
> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   			break;
>   	} while (1);
>   
> +	/* Trim the allocated block to the required size */
> +	if (original_size != size) {
> +		struct list_head *trim_list;
> +		LIST_HEAD(temp);
> +		u64 trim_size;
> +
> +		trim_list = &allocated;
> +		trim_size = original_size;
> +
> +		if (!list_is_singular(&allocated)) {
> +			block = list_last_entry(&allocated, typeof(*block), link);
> +			list_move(&block->link, &temp);
> +			trim_list = &temp;
> +			trim_size = drm_buddy_block_size(mm, block) -
> +				(size - original_size);
> +		}
> +
> +		drm_buddy_block_trim(mm,
> +				     trim_size,
> +				     trim_list);
> +
> +		if (!list_empty(&temp))
> +			list_splice_tail(trim_list, &allocated);
> +	}
> +
>   	list_splice_tail(&allocated, blocks);
>   	return 0;
>   
> diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
> index 572077ff8ae7..a5b39fc01003 100644
> --- a/include/drm/drm_buddy.h
> +++ b/include/drm/drm_buddy.h
> @@ -22,8 +22,9 @@
>   	start__ >= max__ || size__ > max__ - start__; \
>   })
>   
> -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
> +#define DRM_BUDDY_RANGE_ALLOCATION		BIT(0)
> +#define DRM_BUDDY_TOPDOWN_ALLOCATION		BIT(1)
> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION		BIT(2)
>   
>   struct drm_buddy_block {
>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
>   void drm_buddy_block_print(struct drm_buddy *mm,
>   			   struct drm_buddy_block *block,
>   			   struct drm_printer *p);
> -
>   #endif


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
@ 2023-08-21 11:16   ` Christian König
  0 siblings, 0 replies; 20+ messages in thread
From: Christian König @ 2023-08-21 11:16 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Pan, Xinhui, matthew.auld

Am 21.08.23 um 12:14 schrieb Arunpravin Paneer Selvam:
> The way now contiguous requests are implemented such that
> the size rounded up to power of 2 and the corresponding order
> block picked from the freelist.
>
> In addition to the older method, the new method will rounddown
> the size to power of 2 and the corresponding order block picked
> from the freelist. And for the remaining size we traverse the
> tree and try to allocate either from the freelist block's buddy
> or from the peer block. If the remaining size from peer/buddy
> block is not free, we pick the next freelist block and repeat
> the same method.

I think it's worth mentioning that Xinhui tried something similar a few 
months ago, but that didn't look like it would work. For this one I'm 
more confident.

Offhand, the implementation looks clean to me, but Matthew or others 
who have more background in how the implementation works need to take 
a look as well.

Thanks,
Christian.

>
> Moved contiguous/alignment size computation part and trim
> function to the drm buddy manager.
>
> Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
> ---
>   drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
>   include/drm/drm_buddy.h     |   6 +-
>   2 files changed, 248 insertions(+), 11 deletions(-)
>
> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
> index 7098f125b54a..220f60c08a03 100644
> --- a/drivers/gpu/drm/drm_buddy.c
> +++ b/drivers/gpu/drm/drm_buddy.c
> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct drm_buddy *mm,
>   	return __alloc_range(mm, &dfs, start, size, blocks);
>   }
>   
> +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
> +					       u64 size,
> +					       u64 min_block_size,
> +					       struct drm_buddy_block *block,
> +					       struct list_head *blocks)
> +{
> +	struct drm_buddy_block *buddy, *parent = NULL;
> +	u64 start, offset = 0;
> +	LIST_HEAD(dfs);
> +	int err;
> +
> +	if (!block)
> +		return -EINVAL;
> +
> +	buddy = __get_buddy(block);
> +	if (!buddy)
> +		return -ENOSPC;
> +
> +	if (drm_buddy_block_is_allocated(buddy))
> +		return -ENOSPC;
> +
> +	parent = block->parent;
> +	if (!parent)
> +		return -ENOSPC;
> +
> +	if (block->parent->right == block) {
> +		u64 remaining;
> +
> +		/* Compute the leftover size for allocation */
> +		remaining = max((size - drm_buddy_block_size(mm, buddy)),
> +				min_block_size);
> +		if (!IS_ALIGNED(remaining, min_block_size))
> +			remaining = round_up(remaining, min_block_size);
> +
> +		/* Check if remaining size is greater than buddy block size */
> +		if (drm_buddy_block_size(mm, buddy) < remaining)
> +			return -ENOSPC;
> +
> +		offset = drm_buddy_block_size(mm, buddy) - remaining;
> +	}
> +
> +	list_add(&parent->tmp_link, &dfs);
> +	start = drm_buddy_block_offset(parent) + offset;
> +
> +	err = __alloc_range(mm, &dfs, start, size, blocks);
> +	if (err)
> +		return -ENOSPC;
> +
> +	return 0;
> +}
> +
> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
> +					      u64 size,
> +					      u64 min_block_size,
> +					      struct drm_buddy_block *block,
> +					      struct list_head *blocks)
> +{
> +	struct drm_buddy_block *first, *peer, *tmp;
> +	struct drm_buddy_block *parent = NULL;
> +	u64 start, offset = 0;
> +	unsigned int order;
> +	LIST_HEAD(dfs);
> +	int err;
> +
> +	if (!block)
> +		return -EINVAL;
> +
> +	order = drm_buddy_block_order(block);
> +	/* Add freelist block to dfs list */
> +	list_add(&block->tmp_link, &dfs);
> +
> +	tmp = block;
> +	parent = block->parent;
> +	while (parent) {
> +		if (block->parent->left == block) {
> +			if (parent->left != tmp) {
> +				peer = parent->left;
> +				break;
> +			}
> +		} else {
> +			if (parent->right != tmp) {
> +				peer = parent->right;
> +				break;
> +			}
> +		}
> +
> +		tmp = parent;
> +		parent = tmp->parent;
> +	}
> +
> +	if (!parent)
> +		return -ENOSPC;
> +
> +	do {
> +		if (drm_buddy_block_is_allocated(peer))
> +			return -ENOSPC;
> +		/* Exit loop if peer block order is equal to block order */
> +		if (drm_buddy_block_order(peer) == order)
> +			break;
> +
> +		if (drm_buddy_block_is_split(peer)) {
> +			/* Traverse down to the block order level */
> +			if (block->parent->left == block)
> +				peer = peer->right;
> +			else
> +				peer = peer->left;
> +		} else {
> +			break;
> +		}
> +	} while (1);
> +
> +	if (block->parent->left == block) {
> +		u64 remaining;
> +
> +		/* Compute the leftover size for allocation */
> +		remaining = max((size - drm_buddy_block_size(mm, block)),
> +				min_block_size);
> +		if (!IS_ALIGNED(remaining, min_block_size))
> +			remaining = round_up(remaining, min_block_size);
> +
> +		/* Check if remaining size is greater than peer block size */
> +		if (drm_buddy_block_size(mm, peer) < remaining)
> +			return -ENOSPC;
> +
> +		offset = drm_buddy_block_size(mm, peer) - remaining;
> +		/* Add left peer block to dfs list */
> +		list_add(&peer->tmp_link, &dfs);
> +	} else {
> +		/* Add right peer block to dfs list */
> +		list_add_tail(&peer->tmp_link, &dfs);
> +	}
> +
> +	first = list_first_entry_or_null(&dfs,
> +					 struct drm_buddy_block,
> +					 tmp_link);
> +	if (!first)
> +		return -EINVAL;
> +
> +	start = drm_buddy_block_offset(first) + offset;
> +	err = __alloc_range(mm, &dfs, start, size, blocks);
> +	if (err)
> +		return -ENOSPC;
> +
> +	return 0;
> +}
> +
> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
> +					       u64 size,
> +					       u64 min_block_size,
> +					       struct list_head *blocks)
> +{
> +	struct drm_buddy_block *block;
> +	struct list_head *list;
> +	unsigned long pages;
> +	unsigned int order;
> +	u64 modify_size;
> +	int err;
> +
> +	modify_size = rounddown_pow_of_two(size);
> +	pages = modify_size >> ilog2(mm->chunk_size);
> +	order = fls(pages) - 1;
> +	if (order == 0)
> +		return -ENOSPC;
> +
> +	list = &mm->free_list[order];
> +	if (list_empty(list))
> +		return -ENOSPC;
> +
> +	list_for_each_entry_reverse(block, list, link) {
> +		/* Allocate contiguous blocks from the buddy */
> +		err = __alloc_contiguous_block_from_buddy(mm,
> +							  size,
> +							  min_block_size,
> +							  block,
> +							  blocks);
> +		if (!err)
> +			return 0;
> +
> +		/* Allocate contiguous blocks from tree traversal method */
> +		err = __alloc_contiguous_block_from_peer(mm,
> +							 size,
> +							 min_block_size,
> +							 block,
> +							 blocks);
> +		if (!err)
> +			return 0;
> +	}
> +
> +	return -ENOSPC;
> +}
> +
>   /**
>    * drm_buddy_block_trim - free unused pages
>    *
> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>    * @start: start of the allowed range for this block
>    * @end: end of the allowed range for this block
>    * @size: size of the allocation
> - * @min_page_size: alignment of the allocation
> + * @min_block_size: alignment of the allocation
>    * @blocks: output list head to add allocated blocks
>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>    *
> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>    */
>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   			   u64 start, u64 end, u64 size,
> -			   u64 min_page_size,
> +			   u64 min_block_size,
>   			   struct list_head *blocks,
>   			   unsigned long flags)
>   {
>   	struct drm_buddy_block *block = NULL;
> +	u64 original_size, original_min_size;
>   	unsigned int min_order, order;
> -	unsigned long pages;
>   	LIST_HEAD(allocated);
> +	unsigned long pages;
>   	int err;
>   
>   	if (size < mm->chunk_size)
>   		return -EINVAL;
>   
> -	if (min_page_size < mm->chunk_size)
> +	if (min_block_size < mm->chunk_size)
>   		return -EINVAL;
>   
> -	if (!is_power_of_2(min_page_size))
> +	if (!is_power_of_2(min_block_size))
>   		return -EINVAL;
>   
>   	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   	if (start + size == end)
>   		return __drm_buddy_alloc_range(mm, start, size, blocks);
>   
> -	if (!IS_ALIGNED(size, min_page_size))
> -		return -EINVAL;
> +	original_size = size;
> +	original_min_size = min_block_size;
> +
> +	/* Roundup the size to power of 2 */
> +	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
> +		size = roundup_pow_of_two(size);
> +		min_block_size = size;
> +	/* Align size value to min_block_size */
> +	} else if (!IS_ALIGNED(size, min_block_size)) {
> +		size = round_up(size, min_block_size);
> +	}
>   
>   	pages = size >> ilog2(mm->chunk_size);
>   	order = fls(pages) - 1;
> -	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
> +	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>   
>   	do {
>   		order = min(order, (unsigned int)fls(pages) - 1);
> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   				break;
>   
>   			if (order-- == min_order) {
> +				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
> +				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
> +					/*
> +					 * Try contiguous block allocation through
> +					 * tree traversal method
> +					 */
> +					return __drm_buddy_alloc_contiguous_blocks(mm,
> +										   original_size,
> +										   original_min_size,
> +										   blocks);
> +
>   				err = -ENOSPC;
>   				goto err_free;
>   			}
> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   			break;
>   	} while (1);
>   
> +	/* Trim the allocated block to the required size */
> +	if (original_size != size) {
> +		struct list_head *trim_list;
> +		LIST_HEAD(temp);
> +		u64 trim_size;
> +
> +		trim_list = &allocated;
> +		trim_size = original_size;
> +
> +		if (!list_is_singular(&allocated)) {
> +			block = list_last_entry(&allocated, typeof(*block), link);
> +			list_move(&block->link, &temp);
> +			trim_list = &temp;
> +			trim_size = drm_buddy_block_size(mm, block) -
> +				(size - original_size);
> +		}
> +
> +		drm_buddy_block_trim(mm,
> +				     trim_size,
> +				     trim_list);
> +
> +		if (!list_empty(&temp))
> +			list_splice_tail(trim_list, &allocated);
> +	}
> +
>   	list_splice_tail(&allocated, blocks);
>   	return 0;
>   
> diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
> index 572077ff8ae7..a5b39fc01003 100644
> --- a/include/drm/drm_buddy.h
> +++ b/include/drm/drm_buddy.h
> @@ -22,8 +22,9 @@
>   	start__ >= max__ || size__ > max__ - start__; \
>   })
>   
> -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
> +#define DRM_BUDDY_RANGE_ALLOCATION		BIT(0)
> +#define DRM_BUDDY_TOPDOWN_ALLOCATION		BIT(1)
> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION		BIT(2)
>   
>   struct drm_buddy_block {
>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
>   void drm_buddy_block_print(struct drm_buddy *mm,
>   			   struct drm_buddy_block *block,
>   			   struct drm_printer *p);
> -
>   #endif


^ permalink raw reply	[flat|nested] 20+ messages in thread

* [Intel-gfx] ✗ Fi.CI.IGT: failure for series starting with [1/3] drm/buddy: Fix contiguous memory allocation issues
  2023-08-21 10:14 ` [Intel-gfx] " Arunpravin Paneer Selvam
                   ` (6 preceding siblings ...)
  (?)
@ 2023-08-21 13:04 ` Patchwork
  -1 siblings, 0 replies; 20+ messages in thread
From: Patchwork @ 2023-08-21 13:04 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam; +Cc: intel-gfx

[-- Attachment #1: Type: text/plain, Size: 65379 bytes --]

== Series Details ==

Series: series starting with [1/3] drm/buddy: Fix contiguous memory allocation issues
URL   : https://patchwork.freedesktop.org/series/122693/
State : failure

== Summary ==

CI Bug Log - changes from CI_DRM_13538_full -> Patchwork_122693v1_full
====================================================

Summary
-------

  **FAILURE**

  Serious unknown changes introduced by Patchwork_122693v1_full must be
  verified manually.
  
  If you think the reported changes have nothing to do with the changes
  introduced in Patchwork_122693v1_full, please notify your bug team to allow them
  to document this new failure mode, which will reduce false positives in CI.

  

Participating hosts (9 -> 9)
------------------------------

  No changes in participating hosts

Possible new issues
-------------------

  Here are the unknown changes that may have been introduced in Patchwork_122693v1_full:

### IGT changes ###

#### Possible regressions ####

  * igt@i915_selftest@mock@memory_region:
    - shard-apl:          [PASS][1] -> [DMESG-WARN][2]
   [1]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-apl2/igt@i915_selftest@mock@memory_region.html
   [2]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl6/igt@i915_selftest@mock@memory_region.html
    - shard-glk:          [PASS][3] -> [DMESG-WARN][4]
   [3]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-glk1/igt@i915_selftest@mock@memory_region.html
   [4]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk1/igt@i915_selftest@mock@memory_region.html
    - shard-dg2:          [PASS][5] -> [DMESG-WARN][6]
   [5]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-2/igt@i915_selftest@mock@memory_region.html
   [6]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-10/igt@i915_selftest@mock@memory_region.html
    - shard-rkl:          [PASS][7] -> [DMESG-WARN][8]
   [7]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-rkl-2/igt@i915_selftest@mock@memory_region.html
   [8]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-4/igt@i915_selftest@mock@memory_region.html
    - shard-dg1:          [PASS][9] -> [DMESG-WARN][10]
   [9]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg1-16/igt@i915_selftest@mock@memory_region.html
   [10]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-17/igt@i915_selftest@mock@memory_region.html
    - shard-tglu:         [PASS][11] -> [DMESG-WARN][12]
   [11]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-tglu-4/igt@i915_selftest@mock@memory_region.html
   [12]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-tglu-9/igt@i915_selftest@mock@memory_region.html
    - shard-mtlp:         [PASS][13] -> [DMESG-WARN][14]
   [13]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-8/igt@i915_selftest@mock@memory_region.html
   [14]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-4/igt@i915_selftest@mock@memory_region.html

  * igt@kms_ccs@pipe-c-crc-primary-basic-4_tiled_dg2_mc_ccs:
    - shard-dg2:          [PASS][15] -> [FAIL][16]
   [15]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-6/igt@kms_ccs@pipe-c-crc-primary-basic-4_tiled_dg2_mc_ccs.html
   [16]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-12/igt@kms_ccs@pipe-c-crc-primary-basic-4_tiled_dg2_mc_ccs.html

  
Known issues
------------

  Here are the changes found in Patchwork_122693v1_full that come from known issues:

### IGT changes ###

#### Issues hit ####

  * igt@device_reset@unbind-cold-reset-rebind:
    - shard-mtlp:         NOTRUN -> [SKIP][17] ([i915#7701])
   [17]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@device_reset@unbind-cold-reset-rebind.html

  * igt@drm_fdinfo@busy-check-all@bcs0:
    - shard-dg1:          NOTRUN -> [SKIP][18] ([i915#8414]) +4 similar issues
   [18]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@drm_fdinfo@busy-check-all@bcs0.html

  * igt@drm_fdinfo@isolation@bcs0:
    - shard-dg2:          NOTRUN -> [SKIP][19] ([i915#8414]) +9 similar issues
   [19]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@drm_fdinfo@isolation@bcs0.html

  * igt@drm_fdinfo@virtual-busy-idle:
    - shard-mtlp:         NOTRUN -> [SKIP][20] ([i915#8414])
   [20]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@drm_fdinfo@virtual-busy-idle.html

  * igt@feature_discovery@display-2x:
    - shard-dg2:          NOTRUN -> [SKIP][21] ([i915#1839])
   [21]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@feature_discovery@display-2x.html

  * igt@gem_close_race@multigpu-basic-threads:
    - shard-dg2:          NOTRUN -> [SKIP][22] ([i915#7697])
   [22]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@gem_close_race@multigpu-basic-threads.html

  * igt@gem_ctx_persistence@legacy-engines-cleanup:
    - shard-snb:          NOTRUN -> [SKIP][23] ([fdo#109271] / [i915#1099]) +1 similar issue
   [23]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-snb5/igt@gem_ctx_persistence@legacy-engines-cleanup.html

  * igt@gem_ctx_sseu@mmap-args:
    - shard-dg2:          NOTRUN -> [SKIP][24] ([i915#280])
   [24]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@gem_ctx_sseu@mmap-args.html

  * igt@gem_eio@kms:
    - shard-glk:          [PASS][25] -> [FAIL][26] ([i915#8764])
   [25]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-glk5/igt@gem_eio@kms.html
   [26]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk5/igt@gem_eio@kms.html

  * igt@gem_eio@reset-stress:
    - shard-snb:          NOTRUN -> [FAIL][27] ([i915#8898])
   [27]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-snb5/igt@gem_eio@reset-stress.html

  * igt@gem_exec_capture@pi@bcs0:
    - shard-mtlp:         [PASS][28] -> [FAIL][29] ([i915#4475] / [i915#7765])
   [28]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-5/igt@gem_exec_capture@pi@bcs0.html
   [29]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-4/igt@gem_exec_capture@pi@bcs0.html

  * igt@gem_exec_capture@pi@ccs0:
    - shard-mtlp:         [PASS][30] -> [FAIL][31] ([i915#7765])
   [30]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-5/igt@gem_exec_capture@pi@ccs0.html
   [31]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-4/igt@gem_exec_capture@pi@ccs0.html

  * igt@gem_exec_capture@pi@rcs0:
    - shard-mtlp:         [PASS][32] -> [FAIL][33] ([i915#4475])
   [32]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-5/igt@gem_exec_capture@pi@rcs0.html
   [33]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-4/igt@gem_exec_capture@pi@rcs0.html

  * igt@gem_exec_fair@basic-pace:
    - shard-dg1:          NOTRUN -> [SKIP][34] ([i915#3539])
   [34]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@gem_exec_fair@basic-pace.html

  * igt@gem_exec_fair@basic-pace-share@rcs0:
    - shard-glk:          [PASS][35] -> [FAIL][36] ([i915#2842])
   [35]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-glk8/igt@gem_exec_fair@basic-pace-share@rcs0.html
   [36]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk3/igt@gem_exec_fair@basic-pace-share@rcs0.html

  * igt@gem_exec_fair@basic-sync:
    - shard-mtlp:         NOTRUN -> [SKIP][37] ([i915#4473] / [i915#4771])
   [37]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-7/igt@gem_exec_fair@basic-sync.html

  * igt@gem_exec_flush@basic-wb-ro-before-default:
    - shard-dg2:          NOTRUN -> [SKIP][38] ([i915#3539] / [i915#4852])
   [38]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@gem_exec_flush@basic-wb-ro-before-default.html

  * igt@gem_exec_params@secure-non-root:
    - shard-mtlp:         NOTRUN -> [SKIP][39] ([fdo#112283])
   [39]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@gem_exec_params@secure-non-root.html

  * igt@gem_exec_reloc@basic-gtt-read:
    - shard-dg2:          NOTRUN -> [SKIP][40] ([i915#3281]) +1 similar issue
   [40]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@gem_exec_reloc@basic-gtt-read.html

  * igt@gem_exec_reloc@basic-wc-gtt:
    - shard-dg1:          NOTRUN -> [SKIP][41] ([i915#3281]) +1 similar issue
   [41]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@gem_exec_reloc@basic-wc-gtt.html

  * igt@gem_exec_reloc@basic-wc-read-active:
    - shard-mtlp:         NOTRUN -> [SKIP][42] ([i915#3281]) +3 similar issues
   [42]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-7/igt@gem_exec_reloc@basic-wc-read-active.html

  * igt@gem_exec_schedule@noreorder-priority@vcs1:
    - shard-mtlp:         [PASS][43] -> [DMESG-WARN][44] ([i915#9121])
   [43]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-3/igt@gem_exec_schedule@noreorder-priority@vcs1.html
   [44]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-4/igt@gem_exec_schedule@noreorder-priority@vcs1.html

  * igt@gem_exec_schedule@preempt-engines@ccs0:
    - shard-mtlp:         [PASS][45] -> [FAIL][46] ([i915#9119]) +4 similar issues
   [45]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-8/igt@gem_exec_schedule@preempt-engines@ccs0.html
   [46]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-4/igt@gem_exec_schedule@preempt-engines@ccs0.html

  * igt@gem_exec_schedule@preempt-engines@rcs0:
    - shard-mtlp:         [PASS][47] -> [DMESG-FAIL][48] ([i915#8962] / [i915#9121])
   [47]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-8/igt@gem_exec_schedule@preempt-engines@rcs0.html
   [48]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-4/igt@gem_exec_schedule@preempt-engines@rcs0.html

  * igt@gem_lmem_swapping@random-engines:
    - shard-glk:          NOTRUN -> [SKIP][49] ([fdo#109271] / [i915#4613])
   [49]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk8/igt@gem_lmem_swapping@random-engines.html
    - shard-apl:          NOTRUN -> [SKIP][50] ([fdo#109271] / [i915#4613])
   [50]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl6/igt@gem_lmem_swapping@random-engines.html

  * igt@gem_mmap_gtt@bad-object:
    - shard-dg2:          NOTRUN -> [SKIP][51] ([i915#4077]) +2 similar issues
   [51]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@gem_mmap_gtt@bad-object.html

  * igt@gem_mmap_gtt@big-bo-tiledx:
    - shard-mtlp:         NOTRUN -> [SKIP][52] ([i915#4077])
   [52]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@gem_mmap_gtt@big-bo-tiledx.html

  * igt@gem_mmap_gtt@fault-concurrent:
    - shard-dg1:          NOTRUN -> [SKIP][53] ([i915#4077]) +4 similar issues
   [53]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@gem_mmap_gtt@fault-concurrent.html

  * igt@gem_mmap_wc@copy:
    - shard-dg2:          NOTRUN -> [SKIP][54] ([i915#4083]) +1 similar issue
   [54]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@gem_mmap_wc@copy.html

  * igt@gem_mmap_wc@fault-concurrent:
    - shard-dg1:          NOTRUN -> [SKIP][55] ([i915#4083])
   [55]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@gem_mmap_wc@fault-concurrent.html

  * igt@gem_mmap_wc@read:
    - shard-mtlp:         NOTRUN -> [SKIP][56] ([i915#4083])
   [56]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@gem_mmap_wc@read.html

  * igt@gem_partial_pwrite_pread@writes-after-reads-display:
    - shard-dg2:          NOTRUN -> [SKIP][57] ([i915#3282]) +2 similar issues
   [57]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@gem_partial_pwrite_pread@writes-after-reads-display.html

  * igt@gem_pxp@create-regular-buffer:
    - shard-mtlp:         NOTRUN -> [SKIP][58] ([i915#4270])
   [58]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@gem_pxp@create-regular-buffer.html

  * igt@gem_pxp@fail-invalid-protected-context:
    - shard-dg2:          NOTRUN -> [SKIP][59] ([i915#4270])
   [59]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@gem_pxp@fail-invalid-protected-context.html

  * igt@gem_pxp@reject-modify-context-protection-off-1:
    - shard-dg1:          NOTRUN -> [SKIP][60] ([i915#4270])
   [60]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@gem_pxp@reject-modify-context-protection-off-1.html

  * igt@gem_readwrite@new-obj:
    - shard-mtlp:         NOTRUN -> [SKIP][61] ([i915#3282]) +1 similar issue
   [61]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@gem_readwrite@new-obj.html

  * igt@gem_render_copy@yf-tiled-mc-ccs-to-vebox-yf-tiled:
    - shard-mtlp:         NOTRUN -> [SKIP][62] ([i915#8428]) +3 similar issues
   [62]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-7/igt@gem_render_copy@yf-tiled-mc-ccs-to-vebox-yf-tiled.html

  * igt@gem_unfence_active_buffers:
    - shard-mtlp:         NOTRUN -> [SKIP][63] ([i915#4879])
   [63]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@gem_unfence_active_buffers.html

  * igt@gem_userptr_blits@unsync-unmap-cycles:
    - shard-dg1:          NOTRUN -> [SKIP][64] ([i915#3297])
   [64]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@gem_userptr_blits@unsync-unmap-cycles.html

  * igt@gen7_exec_parse@basic-allowed:
    - shard-dg1:          NOTRUN -> [SKIP][65] ([fdo#109289])
   [65]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@gen7_exec_parse@basic-allowed.html

  * igt@gen7_exec_parse@oacontrol-tracking:
    - shard-mtlp:         NOTRUN -> [SKIP][66] ([fdo#109289])
   [66]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-7/igt@gen7_exec_parse@oacontrol-tracking.html

  * igt@i915_pm_dc@dc3co-vpb-simulation:
    - shard-apl:          NOTRUN -> [SKIP][67] ([fdo#109271] / [i915#658])
   [67]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl6/igt@i915_pm_dc@dc3co-vpb-simulation.html
    - shard-glk:          NOTRUN -> [SKIP][68] ([fdo#109271] / [i915#658])
   [68]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk8/igt@i915_pm_dc@dc3co-vpb-simulation.html

  * igt@i915_pm_dc@dc5-psr:
    - shard-dg1:          NOTRUN -> [SKIP][69] ([i915#658])
   [69]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@i915_pm_dc@dc5-psr.html

  * igt@i915_pm_dc@dc6-dpms:
    - shard-tglu:         [PASS][70] -> [FAIL][71] ([i915#3989] / [i915#454])
   [70]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-tglu-10/igt@i915_pm_dc@dc6-dpms.html
   [71]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-tglu-6/igt@i915_pm_dc@dc6-dpms.html

  * igt@i915_pm_rc6_residency@rc6-idle@vecs0:
    - shard-dg1:          [PASS][72] -> [FAIL][73] ([i915#3591])
   [72]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg1-19/igt@i915_pm_rc6_residency@rc6-idle@vecs0.html
   [73]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-19/igt@i915_pm_rc6_residency@rc6-idle@vecs0.html

  * igt@i915_pm_rpm@dpms-mode-unset-non-lpsp:
    - shard-dg2:          [PASS][74] -> [SKIP][75] ([i915#1397]) +3 similar issues
   [74]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-2/igt@i915_pm_rpm@dpms-mode-unset-non-lpsp.html
   [75]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-12/igt@i915_pm_rpm@dpms-mode-unset-non-lpsp.html
    - shard-dg1:          [PASS][76] -> [SKIP][77] ([i915#1397])
   [76]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg1-16/igt@i915_pm_rpm@dpms-mode-unset-non-lpsp.html
   [77]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-19/igt@i915_pm_rpm@dpms-mode-unset-non-lpsp.html

  * igt@i915_pm_rpm@modeset-lpsp-stress-no-wait:
    - shard-rkl:          [PASS][78] -> [SKIP][79] ([i915#1397])
   [78]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-rkl-7/igt@i915_pm_rpm@modeset-lpsp-stress-no-wait.html
   [79]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-1/igt@i915_pm_rpm@modeset-lpsp-stress-no-wait.html

  * igt@i915_pm_rps@thresholds-idle@gt1:
    - shard-mtlp:         NOTRUN -> [SKIP][80] ([i915#8925]) +1 similar issue
   [80]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@i915_pm_rps@thresholds-idle@gt1.html

  * igt@i915_pm_rps@thresholds@gt0:
    - shard-dg1:          NOTRUN -> [SKIP][81] ([i915#8925])
   [81]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@i915_pm_rps@thresholds@gt0.html

  * igt@i915_suspend@fence-restore-untiled:
    - shard-snb:          NOTRUN -> [DMESG-WARN][82] ([i915#8841])
   [82]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-snb5/igt@i915_suspend@fence-restore-untiled.html

  * igt@kms_async_flips@async-flip-with-page-flip-events@pipe-a-edp-1-4-rc_ccs:
    - shard-mtlp:         NOTRUN -> [SKIP][83] ([i915#8502]) +11 similar issues
   [83]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_async_flips@async-flip-with-page-flip-events@pipe-a-edp-1-4-rc_ccs.html

  * igt@kms_async_flips@async-flip-with-page-flip-events@pipe-d-dp-2-4-mc_ccs:
    - shard-dg2:          NOTRUN -> [SKIP][84] ([i915#8502] / [i915#8709]) +11 similar issues
   [84]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-12/igt@kms_async_flips@async-flip-with-page-flip-events@pipe-d-dp-2-4-mc_ccs.html

  * igt@kms_async_flips@crc@pipe-c-dp-1:
    - shard-apl:          NOTRUN -> [FAIL][85] ([i915#8247]) +2 similar issues
   [85]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl2/igt@kms_async_flips@crc@pipe-c-dp-1.html

  * igt@kms_atomic_transition@plane-all-modeset-transition-fencing:
    - shard-mtlp:         NOTRUN -> [SKIP][86] ([i915#1769])
   [86]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_atomic_transition@plane-all-modeset-transition-fencing.html

  * igt@kms_atomic_transition@plane-all-modeset-transition-fencing-internal-panels:
    - shard-snb:          NOTRUN -> [SKIP][87] ([fdo#109271] / [i915#1769])
   [87]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-snb4/igt@kms_atomic_transition@plane-all-modeset-transition-fencing-internal-panels.html

  * igt@kms_big_fb@4-tiled-8bpp-rotate-180:
    - shard-dg1:          NOTRUN -> [SKIP][88] ([i915#4538] / [i915#5286]) +1 similar issue
   [88]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_big_fb@4-tiled-8bpp-rotate-180.html

  * igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip:
    - shard-mtlp:         [PASS][89] -> [FAIL][90] ([i915#5138])
   [89]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-7/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip.html
   [90]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@kms_big_fb@4-tiled-max-hw-stride-64bpp-rotate-0-hflip.html

  * igt@kms_big_fb@linear-64bpp-rotate-90:
    - shard-dg2:          NOTRUN -> [SKIP][91] ([fdo#111614])
   [91]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_big_fb@linear-64bpp-rotate-90.html

  * igt@kms_big_fb@x-tiled-64bpp-rotate-270:
    - shard-mtlp:         NOTRUN -> [SKIP][92] ([fdo#111614])
   [92]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_big_fb@x-tiled-64bpp-rotate-270.html

  * igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-hflip:
    - shard-dg2:          NOTRUN -> [SKIP][93] ([i915#5190]) +1 similar issue
   [93]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_big_fb@y-tiled-max-hw-stride-64bpp-rotate-0-hflip.html

  * igt@kms_big_fb@yf-tiled-8bpp-rotate-0:
    - shard-dg2:          NOTRUN -> [SKIP][94] ([i915#4538] / [i915#5190]) +1 similar issue
   [94]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_big_fb@yf-tiled-8bpp-rotate-0.html

  * igt@kms_big_fb@yf-tiled-addfb-size-overflow:
    - shard-dg1:          NOTRUN -> [SKIP][95] ([fdo#111615])
   [95]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_big_fb@yf-tiled-addfb-size-overflow.html

  * igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip:
    - shard-mtlp:         NOTRUN -> [SKIP][96] ([fdo#111615]) +1 similar issue
   [96]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_big_fb@yf-tiled-max-hw-stride-32bpp-rotate-0-hflip.html

  * igt@kms_big_joiner@2x-modeset:
    - shard-mtlp:         NOTRUN -> [SKIP][97] ([i915#2705])
   [97]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-7/igt@kms_big_joiner@2x-modeset.html

  * igt@kms_ccs@pipe-a-bad-aux-stride-y_tiled_ccs:
    - shard-dg1:          NOTRUN -> [SKIP][98] ([i915#3689] / [i915#5354] / [i915#6095]) +4 similar issues
   [98]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_ccs@pipe-a-bad-aux-stride-y_tiled_ccs.html

  * igt@kms_ccs@pipe-a-ccs-on-another-bo-y_tiled_gen12_mc_ccs:
    - shard-apl:          NOTRUN -> [SKIP][99] ([fdo#109271] / [i915#3886]) +1 similar issue
   [99]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl6/igt@kms_ccs@pipe-a-ccs-on-another-bo-y_tiled_gen12_mc_ccs.html
    - shard-glk:          NOTRUN -> [SKIP][100] ([fdo#109271] / [i915#3886]) +1 similar issue
   [100]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk8/igt@kms_ccs@pipe-a-ccs-on-another-bo-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-a-ccs-on-another-bo-y_tiled_gen12_rc_ccs:
    - shard-dg2:          NOTRUN -> [SKIP][101] ([i915#3689] / [i915#5354]) +3 similar issues
   [101]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_ccs@pipe-a-ccs-on-another-bo-y_tiled_gen12_rc_ccs.html

  * igt@kms_ccs@pipe-a-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs:
    - shard-mtlp:         NOTRUN -> [SKIP][102] ([i915#3886] / [i915#6095]) +2 similar issues
   [102]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_ccs@pipe-a-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-a-missing-ccs-buffer-y_tiled_gen12_mc_ccs:
    - shard-dg2:          NOTRUN -> [SKIP][103] ([i915#3689] / [i915#3886] / [i915#5354]) +4 similar issues
   [103]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_ccs@pipe-a-missing-ccs-buffer-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-b-ccs-on-another-bo-4_tiled_mtl_rc_ccs:
    - shard-dg1:          NOTRUN -> [SKIP][104] ([i915#5354] / [i915#6095]) +4 similar issues
   [104]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_ccs@pipe-b-ccs-on-another-bo-4_tiled_mtl_rc_ccs.html

  * igt@kms_ccs@pipe-c-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs:
    - shard-dg1:          NOTRUN -> [SKIP][105] ([i915#3689] / [i915#3886] / [i915#5354] / [i915#6095])
   [105]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_ccs@pipe-c-crc-sprite-planes-basic-y_tiled_gen12_mc_ccs.html

  * igt@kms_ccs@pipe-d-crc-primary-basic-yf_tiled_ccs:
    - shard-mtlp:         NOTRUN -> [SKIP][106] ([i915#6095]) +6 similar issues
   [106]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_ccs@pipe-d-crc-primary-basic-yf_tiled_ccs.html

  * igt@kms_cdclk@mode-transition@pipe-d-hdmi-a-3:
    - shard-dg2:          NOTRUN -> [SKIP][107] ([i915#4087] / [i915#7213]) +3 similar issues
   [107]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-1/igt@kms_cdclk@mode-transition@pipe-d-hdmi-a-3.html

  * igt@kms_cdclk@plane-scaling@pipe-c-dp-2:
    - shard-dg2:          NOTRUN -> [SKIP][108] ([i915#4087]) +3 similar issues
   [108]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-12/igt@kms_cdclk@plane-scaling@pipe-c-dp-2.html

  * igt@kms_chamelium_color@ctm-0-50:
    - shard-dg1:          NOTRUN -> [SKIP][109] ([fdo#111827])
   [109]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_chamelium_color@ctm-0-50.html

  * igt@kms_chamelium_color@ctm-green-to-red:
    - shard-dg2:          NOTRUN -> [SKIP][110] ([fdo#111827])
   [110]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_chamelium_color@ctm-green-to-red.html

  * igt@kms_chamelium_frames@hdmi-aspect-ratio:
    - shard-mtlp:         NOTRUN -> [SKIP][111] ([i915#7828]) +2 similar issues
   [111]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_chamelium_frames@hdmi-aspect-ratio.html

  * igt@kms_chamelium_hpd@common-hpd-after-suspend:
    - shard-apl:          NOTRUN -> [SKIP][112] ([fdo#109271]) +79 similar issues
   [112]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl6/igt@kms_chamelium_hpd@common-hpd-after-suspend.html

  * igt@kms_chamelium_hpd@hdmi-hpd-fast:
    - shard-dg2:          NOTRUN -> [SKIP][113] ([i915#7828]) +1 similar issue
   [113]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_chamelium_hpd@hdmi-hpd-fast.html

  * igt@kms_chamelium_hpd@hdmi-hpd-for-each-pipe:
    - shard-dg1:          NOTRUN -> [SKIP][114] ([i915#7828]) +1 similar issue
   [114]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_chamelium_hpd@hdmi-hpd-for-each-pipe.html

  * igt@kms_color@deep-color:
    - shard-rkl:          NOTRUN -> [SKIP][115] ([i915#3555])
   [115]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-7/igt@kms_color@deep-color.html

  * igt@kms_content_protection@atomic-dpms:
    - shard-mtlp:         NOTRUN -> [SKIP][116] ([i915#6944])
   [116]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_content_protection@atomic-dpms.html

  * igt@kms_content_protection@atomic-dpms@pipe-a-dp-2:
    - shard-dg2:          NOTRUN -> [TIMEOUT][117] ([i915#8628])
   [117]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-12/igt@kms_content_protection@atomic-dpms@pipe-a-dp-2.html

  * igt@kms_content_protection@content_type_change:
    - shard-dg1:          NOTRUN -> [SKIP][118] ([i915#7116])
   [118]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_content_protection@content_type_change.html

  * igt@kms_content_protection@dp-mst-type-1:
    - shard-dg2:          NOTRUN -> [SKIP][119] ([i915#3299])
   [119]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_content_protection@dp-mst-type-1.html

  * igt@kms_content_protection@srm@pipe-a-dp-2:
    - shard-dg2:          NOTRUN -> [TIMEOUT][120] ([i915#7173]) +1 similar issue
   [120]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-12/igt@kms_content_protection@srm@pipe-a-dp-2.html

  * igt@kms_content_protection@uevent:
    - shard-dg2:          NOTRUN -> [SKIP][121] ([i915#7118])
   [121]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-1/igt@kms_content_protection@uevent.html

  * igt@kms_cursor_crc@cursor-offscreen-512x170:
    - shard-dg1:          NOTRUN -> [SKIP][122] ([i915#3359])
   [122]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_cursor_crc@cursor-offscreen-512x170.html

  * igt@kms_cursor_crc@cursor-rapid-movement-512x512:
    - shard-mtlp:         NOTRUN -> [SKIP][123] ([i915#3359])
   [123]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@kms_cursor_crc@cursor-rapid-movement-512x512.html

  * igt@kms_cursor_crc@cursor-sliding-512x512:
    - shard-dg2:          NOTRUN -> [SKIP][124] ([i915#3359]) +1 similar issue
   [124]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_cursor_crc@cursor-sliding-512x512.html

  * igt@kms_cursor_legacy@cursorb-vs-flipb-toggle:
    - shard-mtlp:         NOTRUN -> [SKIP][125] ([fdo#111767] / [i915#3546])
   [125]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-7/igt@kms_cursor_legacy@cursorb-vs-flipb-toggle.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size:
    - shard-apl:          [PASS][126] -> [FAIL][127] ([i915#2346])
   [126]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-apl4/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html
   [127]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl1/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html

  * igt@kms_cursor_legacy@short-busy-flip-before-cursor-toggle:
    - shard-dg1:          NOTRUN -> [SKIP][128] ([i915#4103] / [i915#4213])
   [128]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_cursor_legacy@short-busy-flip-before-cursor-toggle.html

  * igt@kms_display_modes@mst-extended-mode-negative:
    - shard-mtlp:         NOTRUN -> [SKIP][129] ([i915#8588])
   [129]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_display_modes@mst-extended-mode-negative.html

  * igt@kms_flip@2x-flip-vs-blocking-wf-vblank:
    - shard-dg1:          NOTRUN -> [SKIP][130] ([fdo#111767] / [fdo#111825])
   [130]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_flip@2x-flip-vs-blocking-wf-vblank.html

  * igt@kms_flip@2x-flip-vs-dpms:
    - shard-mtlp:         NOTRUN -> [SKIP][131] ([i915#3637]) +2 similar issues
   [131]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_flip@2x-flip-vs-dpms.html

  * igt@kms_flip@2x-flip-vs-rmfb-interruptible:
    - shard-dg2:          NOTRUN -> [SKIP][132] ([fdo#109274] / [fdo#111767])
   [132]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_flip@2x-flip-vs-rmfb-interruptible.html

  * igt@kms_flip@2x-modeset-vs-vblank-race-interruptible:
    - shard-dg2:          NOTRUN -> [SKIP][133] ([fdo#109274])
   [133]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_flip@2x-modeset-vs-vblank-race-interruptible.html

  * igt@kms_flip@flip-vs-expired-vblank-interruptible@d-dp2:
    - shard-dg2:          NOTRUN -> [FAIL][134] ([i915#79])
   [134]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-12/igt@kms_flip@flip-vs-expired-vblank-interruptible@d-dp2.html

  * igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-32bpp-yftileccs-upscaling@pipe-a-valid-mode:
    - shard-dg1:          NOTRUN -> [SKIP][135] ([i915#2587] / [i915#2672]) +1 similar issue
   [135]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_flip_scaled_crc@flip-32bpp-yftile-to-32bpp-yftileccs-upscaling@pipe-a-valid-mode.html

  * igt@kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling@pipe-a-default-mode:
    - shard-mtlp:         NOTRUN -> [SKIP][136] ([i915#8810])
   [136]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_flip_scaled_crc@flip-64bpp-linear-to-16bpp-linear-downscaling@pipe-a-default-mode.html

  * igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling@pipe-a-default-mode:
    - shard-mtlp:         NOTRUN -> [SKIP][137] ([i915#2672]) +1 similar issue
   [137]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling@pipe-a-default-mode.html

  * igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling@pipe-a-valid-mode:
    - shard-dg2:          NOTRUN -> [SKIP][138] ([i915#2672])
   [138]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_flip_scaled_crc@flip-64bpp-ytile-to-32bpp-ytile-upscaling@pipe-a-valid-mode.html

  * igt@kms_force_connector_basic@prune-stale-modes:
    - shard-mtlp:         NOTRUN -> [SKIP][139] ([i915#5274])
   [139]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_force_connector_basic@prune-stale-modes.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt:
    - shard-dg2:          [PASS][140] -> [FAIL][141] ([i915#6880])
   [140]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-6/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt.html
   [141]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-11/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-indfb-plflip-blt.html

  * igt@kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-gtt:
    - shard-dg1:          NOTRUN -> [SKIP][142] ([i915#8708]) +3 similar issues
   [142]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_frontbuffer_tracking@fbc-rgb565-draw-mmap-gtt.html

  * igt@kms_frontbuffer_tracking@fbcpsr-1p-indfb-fliptrack-mmap-gtt:
    - shard-dg2:          NOTRUN -> [SKIP][143] ([i915#8708])
   [143]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_frontbuffer_tracking@fbcpsr-1p-indfb-fliptrack-mmap-gtt.html

  * igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-blt:
    - shard-dg1:          NOTRUN -> [SKIP][144] ([fdo#111825]) +8 similar issues
   [144]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_frontbuffer_tracking@fbcpsr-2p-primscrn-cur-indfb-draw-blt.html

  * igt@kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-move:
    - shard-dg2:          NOTRUN -> [SKIP][145] ([i915#5354]) +8 similar issues
   [145]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_frontbuffer_tracking@fbcpsr-2p-scndscrn-cur-indfb-move.html

  * igt@kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-cpu:
    - shard-dg2:          NOTRUN -> [SKIP][146] ([i915#3458]) +5 similar issues
   [146]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_frontbuffer_tracking@psr-1p-offscren-pri-shrfb-draw-mmap-cpu.html

  * igt@kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-mmap-gtt:
    - shard-mtlp:         NOTRUN -> [SKIP][147] ([i915#8708]) +1 similar issue
   [147]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-7/igt@kms_frontbuffer_tracking@psr-1p-primscrn-pri-indfb-draw-mmap-gtt.html

  * igt@kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-blt:
    - shard-dg1:          NOTRUN -> [SKIP][148] ([i915#3458]) +3 similar issues
   [148]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_frontbuffer_tracking@psr-1p-primscrn-pri-shrfb-draw-blt.html

  * igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-wc:
    - shard-mtlp:         NOTRUN -> [SKIP][149] ([i915#1825]) +5 similar issues
   [149]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_frontbuffer_tracking@psr-2p-scndscrn-pri-shrfb-draw-mmap-wc.html

  * igt@kms_hdr@bpc-switch-suspend:
    - shard-rkl:          NOTRUN -> [SKIP][150] ([i915#3555] / [i915#8228])
   [150]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-2/igt@kms_hdr@bpc-switch-suspend.html

  * igt@kms_hdr@static-swap:
    - shard-mtlp:         NOTRUN -> [SKIP][151] ([i915#8228])
   [151]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_hdr@static-swap.html

  * igt@kms_panel_fitting@legacy:
    - shard-dg2:          NOTRUN -> [SKIP][152] ([i915#6301])
   [152]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_panel_fitting@legacy.html

  * igt@kms_plane_scaling@intel-max-src-size@pipe-a-hdmi-a-1:
    - shard-dg1:          NOTRUN -> [FAIL][153] ([i915#8292])
   [153]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-19/igt@kms_plane_scaling@intel-max-src-size@pipe-a-hdmi-a-1.html

  * igt@kms_plane_scaling@intel-max-src-size@pipe-a-hdmi-a-2:
    - shard-rkl:          NOTRUN -> [FAIL][154] ([i915#8292])
   [154]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-1/igt@kms_plane_scaling@intel-max-src-size@pipe-a-hdmi-a-2.html

  * igt@kms_plane_scaling@plane-downscale-with-modifiers-factor-0-25@pipe-b-hdmi-a-1:
    - shard-rkl:          NOTRUN -> [SKIP][155] ([i915#5176]) +7 similar issues
   [155]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-7/igt@kms_plane_scaling@plane-downscale-with-modifiers-factor-0-25@pipe-b-hdmi-a-1.html

  * igt@kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-25@pipe-a-dp-4:
    - shard-dg2:          NOTRUN -> [SKIP][156] ([i915#5176]) +7 similar issues
   [156]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-11/igt@kms_plane_scaling@plane-downscale-with-pixel-format-factor-0-25@pipe-a-dp-4.html

  * igt@kms_plane_scaling@plane-upscale-with-modifiers-factor-0-25@pipe-a-vga-1:
    - shard-snb:          NOTRUN -> [SKIP][157] ([fdo#109271]) +164 similar issues
   [157]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-snb5/igt@kms_plane_scaling@plane-upscale-with-modifiers-factor-0-25@pipe-a-vga-1.html

  * igt@kms_plane_scaling@plane-upscale-with-rotation-20x20@pipe-a-hdmi-a-1:
    - shard-dg1:          NOTRUN -> [SKIP][158] ([i915#5176]) +3 similar issues
   [158]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-19/igt@kms_plane_scaling@plane-upscale-with-rotation-20x20@pipe-a-hdmi-a-1.html

  * igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25@pipe-b-dp-4:
    - shard-dg2:          NOTRUN -> [SKIP][159] ([i915#5235]) +23 similar issues
   [159]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-11/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25@pipe-b-dp-4.html

  * igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25@pipe-b-hdmi-a-2:
    - shard-rkl:          NOTRUN -> [SKIP][160] ([i915#5235]) +5 similar issues
   [160]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-1/igt@kms_plane_scaling@planes-unity-scaling-downscale-factor-0-25@pipe-b-hdmi-a-2.html

  * igt@kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25@pipe-b-edp-1:
    - shard-mtlp:         NOTRUN -> [SKIP][161] ([i915#5235]) +3 similar issues
   [161]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_plane_scaling@planes-upscale-20x20-downscale-factor-0-25@pipe-b-edp-1.html

  * igt@kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-25@pipe-d-hdmi-a-1:
    - shard-dg1:          NOTRUN -> [SKIP][162] ([i915#5235]) +7 similar issues
   [162]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-19/igt@kms_plane_scaling@planes-upscale-factor-0-25-downscale-factor-0-25@pipe-d-hdmi-a-1.html

  * igt@kms_prime@basic-crc-vgem:
    - shard-dg1:          NOTRUN -> [SKIP][163] ([i915#6524])
   [163]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_prime@basic-crc-vgem.html

  * igt@kms_psr@dpms:
    - shard-dg1:          NOTRUN -> [SKIP][164] ([i915#1072]) +2 similar issues
   [164]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_psr@dpms.html

  * igt@kms_psr@psr2_sprite_blt:
    - shard-dg2:          NOTRUN -> [SKIP][165] ([i915#1072]) +2 similar issues
   [165]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@kms_psr@psr2_sprite_blt.html

  * igt@kms_rotation_crc@primary-y-tiled-reflect-x-0:
    - shard-mtlp:         NOTRUN -> [SKIP][166] ([i915#5289])
   [166]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_rotation_crc@primary-y-tiled-reflect-x-0.html

  * igt@kms_selftest@drm_cmdline:
    - shard-snb:          NOTRUN -> [SKIP][167] ([fdo#109271] / [i915#8661])
   [167]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-snb5/igt@kms_selftest@drm_cmdline.html

  * igt@kms_sysfs_edid_timing:
    - shard-dg1:          NOTRUN -> [FAIL][168] ([IGT#2] / [i915#6493])
   [168]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_sysfs_edid_timing.html

  * igt@kms_vblank@pipe-c-ts-continuation-dpms-suspend:
    - shard-apl:          [PASS][169] -> [ABORT][170] ([i915#180])
   [169]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-apl1/igt@kms_vblank@pipe-c-ts-continuation-dpms-suspend.html
   [170]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl3/igt@kms_vblank@pipe-c-ts-continuation-dpms-suspend.html

  * igt@kms_vrr@flip-basic:
    - shard-dg1:          NOTRUN -> [SKIP][171] ([i915#3555])
   [171]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_vrr@flip-basic.html

  * igt@kms_vrr@negative-basic:
    - shard-dg2:          NOTRUN -> [SKIP][172] ([i915#3555]) +1 similar issue
   [172]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-1/igt@kms_vrr@negative-basic.html

  * igt@kms_writeback@writeback-check-output:
    - shard-apl:          NOTRUN -> [SKIP][173] ([fdo#109271] / [i915#2437])
   [173]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl6/igt@kms_writeback@writeback-check-output.html
    - shard-glk:          NOTRUN -> [SKIP][174] ([fdo#109271] / [i915#2437])
   [174]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk8/igt@kms_writeback@writeback-check-output.html

  * igt@perf@non-zero-reason@0-rcs0:
    - shard-dg2:          [PASS][175] -> [FAIL][176] ([i915#7484])
   [175]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-12/igt@perf@non-zero-reason@0-rcs0.html
   [176]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-11/igt@perf@non-zero-reason@0-rcs0.html

  * igt@perf_pmu@busy-double-start@vecs1:
    - shard-dg2:          [PASS][177] -> [FAIL][178] ([i915#4349]) +3 similar issues
   [177]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-6/igt@perf_pmu@busy-double-start@vecs1.html
   [178]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-1/igt@perf_pmu@busy-double-start@vecs1.html

  * igt@perf_pmu@frequency@gt0:
    - shard-dg2:          NOTRUN -> [FAIL][179] ([i915#6806])
   [179]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@perf_pmu@frequency@gt0.html

  * igt@prime_vgem@basic-fence-mmap:
    - shard-dg1:          NOTRUN -> [SKIP][180] ([i915#3708] / [i915#4077])
   [180]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@prime_vgem@basic-fence-mmap.html

  * igt@prime_vgem@basic-gtt:
    - shard-mtlp:         NOTRUN -> [SKIP][181] ([i915#3708] / [i915#4077])
   [181]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@prime_vgem@basic-gtt.html
    - shard-dg2:          NOTRUN -> [SKIP][182] ([i915#3708] / [i915#4077])
   [182]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@prime_vgem@basic-gtt.html

  * igt@v3d/v3d_perfmon@destroy-invalid-perfmon:
    - shard-glk:          NOTRUN -> [SKIP][183] ([fdo#109271]) +48 similar issues
   [183]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk8/igt@v3d/v3d_perfmon@destroy-invalid-perfmon.html

  * igt@v3d/v3d_perfmon@destroy-valid-perfmon:
    - shard-mtlp:         NOTRUN -> [SKIP][184] ([i915#2575]) +2 similar issues
   [184]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@v3d/v3d_perfmon@destroy-valid-perfmon.html

  * igt@v3d/v3d_perfmon@get-values-invalid-pad:
    - shard-dg1:          NOTRUN -> [SKIP][185] ([i915#2575]) +2 similar issues
   [185]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@v3d/v3d_perfmon@get-values-invalid-pad.html

  * igt@v3d/v3d_submit_cl@bad-multisync-extension:
    - shard-dg2:          NOTRUN -> [SKIP][186] ([i915#2575]) +1 similar issue
   [186]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@v3d/v3d_submit_cl@bad-multisync-extension.html

  * igt@vc4/vc4_create_bo@create-bo-4096:
    - shard-dg1:          NOTRUN -> [SKIP][187] ([i915#7711]) +2 similar issues
   [187]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@vc4/vc4_create_bo@create-bo-4096.html

  * igt@vc4/vc4_dmabuf_poll@poll-write-waits-until-write-done:
    - shard-dg2:          NOTRUN -> [SKIP][188] ([i915#7711]) +1 similar issue
   [188]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@vc4/vc4_dmabuf_poll@poll-write-waits-until-write-done.html

  * igt@vc4/vc4_perfmon@create-single-perfmon:
    - shard-mtlp:         NOTRUN -> [SKIP][189] ([i915#7711])
   [189]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@vc4/vc4_perfmon@create-single-perfmon.html

  
#### Possible fixes ####

  * igt@drm_fdinfo@most-busy-check-all@rcs0:
    - shard-rkl:          [FAIL][190] ([i915#7742]) -> [PASS][191]
   [190]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-rkl-2/igt@drm_fdinfo@most-busy-check-all@rcs0.html
   [191]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-1/igt@drm_fdinfo@most-busy-check-all@rcs0.html

  * igt@gem_ctx_exec@basic-nohangcheck:
    - shard-dg2:          [TIMEOUT][192] -> [PASS][193]
   [192]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-11/igt@gem_ctx_exec@basic-nohangcheck.html
   [193]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-6/igt@gem_ctx_exec@basic-nohangcheck.html

  * igt@gem_exec_fence@parallel@bcs0:
    - shard-mtlp:         [DMESG-FAIL][194] ([i915#8962] / [i915#9121]) -> [PASS][195] +1 similar issue
   [194]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-4/igt@gem_exec_fence@parallel@bcs0.html
   [195]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@gem_exec_fence@parallel@bcs0.html

  * igt@gem_exec_fence@parallel@vcs0:
    - shard-mtlp:         [DMESG-FAIL][196] ([i915#9121]) -> [PASS][197]
   [196]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-4/igt@gem_exec_fence@parallel@vcs0.html
   [197]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@gem_exec_fence@parallel@vcs0.html

  * igt@gem_exec_fence@parallel@vecs0:
    - shard-mtlp:         [FAIL][198] ([i915#8957]) -> [PASS][199] +2 similar issues
   [198]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-4/igt@gem_exec_fence@parallel@vecs0.html
   [199]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@gem_exec_fence@parallel@vecs0.html

  * igt@gen9_exec_parse@allowed-single:
    - shard-apl:          [ABORT][200] ([i915#5566]) -> [PASS][201]
   [200]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-apl6/igt@gen9_exec_parse@allowed-single.html
   [201]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl6/igt@gen9_exec_parse@allowed-single.html
    - shard-glk:          [ABORT][202] ([i915#5566]) -> [PASS][203]
   [202]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-glk8/igt@gen9_exec_parse@allowed-single.html
   [203]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk8/igt@gen9_exec_parse@allowed-single.html

  * igt@i915_module_load@reload-with-fault-injection:
    - shard-mtlp:         [ABORT][204] ([i915#8489] / [i915#8668]) -> [PASS][205]
   [204]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-6/igt@i915_module_load@reload-with-fault-injection.html
   [205]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@i915_module_load@reload-with-fault-injection.html

  * igt@i915_pipe_stress@stress-xrgb8888-untiled:
    - shard-mtlp:         [FAIL][206] ([i915#8691]) -> [PASS][207]
   [206]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-5/igt@i915_pipe_stress@stress-xrgb8888-untiled.html
   [207]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-4/igt@i915_pipe_stress@stress-xrgb8888-untiled.html

  * igt@i915_pm_rpm@i2c:
    - shard-dg2:          [FAIL][208] ([i915#8717]) -> [PASS][209]
   [208]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-2/igt@i915_pm_rpm@i2c.html
   [209]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-12/igt@i915_pm_rpm@i2c.html

  * igt@i915_pm_rpm@modeset-lpsp:
    - shard-dg2:          [SKIP][210] ([i915#1397]) -> [PASS][211] +1 similar issue
   [210]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-6/igt@i915_pm_rpm@modeset-lpsp.html
   [211]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-12/igt@i915_pm_rpm@modeset-lpsp.html

  * igt@i915_pm_rpm@modeset-non-lpsp-stress:
    - shard-dg1:          [SKIP][212] ([i915#1397]) -> [PASS][213]
   [212]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg1-19/igt@i915_pm_rpm@modeset-non-lpsp-stress.html
   [213]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-17/igt@i915_pm_rpm@modeset-non-lpsp-stress.html

  * igt@i915_pm_rps@engine-order:
    - shard-apl:          [FAIL][214] ([i915#6537]) -> [PASS][215]
   [214]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-apl1/igt@i915_pm_rps@engine-order.html
   [215]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl2/igt@i915_pm_rps@engine-order.html

  * igt@i915_selftest@live@gt_heartbeat:
    - shard-apl:          [DMESG-FAIL][216] ([i915#5334]) -> [PASS][217]
   [216]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-apl6/igt@i915_selftest@live@gt_heartbeat.html
   [217]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl6/igt@i915_selftest@live@gt_heartbeat.html

  * igt@kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180-async-flip:
    - shard-mtlp:         [FAIL][218] ([i915#3743]) -> [PASS][219]
   [218]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-5/igt@kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180-async-flip.html
   [219]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-4/igt@kms_big_fb@x-tiled-max-hw-stride-64bpp-rotate-180-async-flip.html

  * igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-async-flip:
    - shard-rkl:          [FAIL][220] ([i915#3743]) -> [PASS][221]
   [220]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-rkl-7/igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-async-flip.html
   [221]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-7/igt@kms_big_fb@y-tiled-max-hw-stride-32bpp-rotate-180-async-flip.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions:
    - shard-apl:          [FAIL][222] ([i915#2346]) -> [PASS][223]
   [222]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-apl3/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions.html
   [223]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl1/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size:
    - shard-glk:          [FAIL][224] ([i915#2346]) -> [PASS][225]
   [224]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-glk4/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html
   [225]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-glk2/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html

  * igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-fullscreen:
    - shard-dg2:          [FAIL][226] ([i915#6880]) -> [PASS][227] +1 similar issue
   [226]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-11/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-fullscreen.html
   [227]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-1/igt@kms_frontbuffer_tracking@fbc-1p-primscrn-spr-indfb-fullscreen.html

  * igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend:
    - shard-apl:          [ABORT][228] ([i915#180]) -> [PASS][229]
   [228]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-apl1/igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend.html
   [229]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-apl2/igt@kms_vblank@pipe-a-ts-continuation-dpms-suspend.html

  
#### Warnings ####

  * igt@i915_pm_rc6_residency@rc6-idle@vcs0:
    - shard-tglu:         [WARN][230] ([i915#2681]) -> [FAIL][231] ([i915#2681] / [i915#3591])
   [230]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-tglu-3/igt@i915_pm_rc6_residency@rc6-idle@vcs0.html
   [231]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-tglu-4/igt@i915_pm_rc6_residency@rc6-idle@vcs0.html

  * igt@kms_content_protection@type1:
    - shard-dg2:          [SKIP][232] ([i915#7118] / [i915#7162]) -> [SKIP][233] ([i915#7118])
   [232]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-11/igt@kms_content_protection@type1.html
   [233]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-1/igt@kms_content_protection@type1.html

  * igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size:
    - shard-mtlp:         [DMESG-FAIL][234] ([i915#1982] / [i915#2017] / [i915#5954]) -> [FAIL][235] ([i915#2346])
   [234]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-4/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html
   [235]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-8/igt@kms_cursor_legacy@flip-vs-cursor-atomic-transitions-varying-size.html

  * igt@kms_dsc@dsc-basic:
    - shard-mtlp:         [SKIP][236] ([i915#3840]) -> [SKIP][237] ([i915#3840] / [i915#9159])
   [236]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-mtlp-3/igt@kms_dsc@dsc-basic.html
   [237]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-mtlp-5/igt@kms_dsc@dsc-basic.html

  * igt@kms_fbcon_fbt@psr:
    - shard-rkl:          [SKIP][238] ([i915#3955]) -> [SKIP][239] ([fdo#110189] / [i915#3955])
   [238]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-rkl-4/igt@kms_fbcon_fbt@psr.html
   [239]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-2/igt@kms_fbcon_fbt@psr.html

  * igt@kms_fbcon_fbt@psr-suspend:
    - shard-rkl:          [SKIP][240] ([fdo#110189] / [i915#3955]) -> [SKIP][241] ([i915#3955])
   [240]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-rkl-2/igt@kms_fbcon_fbt@psr-suspend.html
   [241]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-rkl-4/igt@kms_fbcon_fbt@psr-suspend.html

  * igt@kms_psr@cursor_plane_move:
    - shard-dg1:          [SKIP][242] ([i915#1072] / [i915#4078]) -> [SKIP][243] ([i915#1072])
   [242]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg1-16/igt@kms_psr@cursor_plane_move.html
   [243]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-19/igt@kms_psr@cursor_plane_move.html

  * igt@kms_psr@primary_page_flip:
    - shard-dg1:          [SKIP][244] ([i915#1072]) -> [SKIP][245] ([i915#1072] / [i915#4078])
   [244]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg1-14/igt@kms_psr@primary_page_flip.html
   [245]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg1-18/igt@kms_psr@primary_page_flip.html

  * igt@prime_mmap@test_aperture_limit@test_aperture_limit-smem:
    - shard-dg2:          [INCOMPLETE][246] ([i915#5493]) -> [CRASH][247] ([i915#7331])
   [246]: https://intel-gfx-ci.01.org/tree/drm-tip/CI_DRM_13538/shard-dg2-10/igt@prime_mmap@test_aperture_limit@test_aperture_limit-smem.html
   [247]: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/shard-dg2-2/igt@prime_mmap@test_aperture_limit@test_aperture_limit-smem.html

  
  {name}: This element is suppressed. This means it is ignored when computing
          the status of the difference (SUCCESS, WARNING, or FAILURE).

  [IGT#2]: https://gitlab.freedesktop.org/drm/igt-gpu-tools/issues/2
  [fdo#109271]: https://bugs.freedesktop.org/show_bug.cgi?id=109271
  [fdo#109274]: https://bugs.freedesktop.org/show_bug.cgi?id=109274
  [fdo#109289]: https://bugs.freedesktop.org/show_bug.cgi?id=109289
  [fdo#110189]: https://bugs.freedesktop.org/show_bug.cgi?id=110189
  [fdo#111614]: https://bugs.freedesktop.org/show_bug.cgi?id=111614
  [fdo#111615]: https://bugs.freedesktop.org/show_bug.cgi?id=111615
  [fdo#111767]: https://bugs.freedesktop.org/show_bug.cgi?id=111767
  [fdo#111825]: https://bugs.freedesktop.org/show_bug.cgi?id=111825
  [fdo#111827]: https://bugs.freedesktop.org/show_bug.cgi?id=111827
  [fdo#112283]: https://bugs.freedesktop.org/show_bug.cgi?id=112283
  [i915#1072]: https://gitlab.freedesktop.org/drm/intel/issues/1072
  [i915#1099]: https://gitlab.freedesktop.org/drm/intel/issues/1099
  [i915#1397]: https://gitlab.freedesktop.org/drm/intel/issues/1397
  [i915#1769]: https://gitlab.freedesktop.org/drm/intel/issues/1769
  [i915#180]: https://gitlab.freedesktop.org/drm/intel/issues/180
  [i915#1825]: https://gitlab.freedesktop.org/drm/intel/issues/1825
  [i915#1839]: https://gitlab.freedesktop.org/drm/intel/issues/1839
  [i915#1982]: https://gitlab.freedesktop.org/drm/intel/issues/1982
  [i915#2017]: https://gitlab.freedesktop.org/drm/intel/issues/2017
  [i915#2346]: https://gitlab.freedesktop.org/drm/intel/issues/2346
  [i915#2437]: https://gitlab.freedesktop.org/drm/intel/issues/2437
  [i915#2575]: https://gitlab.freedesktop.org/drm/intel/issues/2575
  [i915#2587]: https://gitlab.freedesktop.org/drm/intel/issues/2587
  [i915#2672]: https://gitlab.freedesktop.org/drm/intel/issues/2672
  [i915#2681]: https://gitlab.freedesktop.org/drm/intel/issues/2681
  [i915#2705]: https://gitlab.freedesktop.org/drm/intel/issues/2705
  [i915#280]: https://gitlab.freedesktop.org/drm/intel/issues/280
  [i915#2842]: https://gitlab.freedesktop.org/drm/intel/issues/2842
  [i915#3281]: https://gitlab.freedesktop.org/drm/intel/issues/3281
  [i915#3282]: https://gitlab.freedesktop.org/drm/intel/issues/3282
  [i915#3297]: https://gitlab.freedesktop.org/drm/intel/issues/3297
  [i915#3299]: https://gitlab.freedesktop.org/drm/intel/issues/3299
  [i915#3359]: https://gitlab.freedesktop.org/drm/intel/issues/3359
  [i915#3458]: https://gitlab.freedesktop.org/drm/intel/issues/3458
  [i915#3539]: https://gitlab.freedesktop.org/drm/intel/issues/3539
  [i915#3546]: https://gitlab.freedesktop.org/drm/intel/issues/3546
  [i915#3555]: https://gitlab.freedesktop.org/drm/intel/issues/3555
  [i915#3591]: https://gitlab.freedesktop.org/drm/intel/issues/3591
  [i915#3637]: https://gitlab.freedesktop.org/drm/intel/issues/3637
  [i915#3689]: https://gitlab.freedesktop.org/drm/intel/issues/3689
  [i915#3708]: https://gitlab.freedesktop.org/drm/intel/issues/3708
  [i915#3743]: https://gitlab.freedesktop.org/drm/intel/issues/3743
  [i915#3840]: https://gitlab.freedesktop.org/drm/intel/issues/3840
  [i915#3886]: https://gitlab.freedesktop.org/drm/intel/issues/3886
  [i915#3955]: https://gitlab.freedesktop.org/drm/intel/issues/3955
  [i915#3989]: https://gitlab.freedesktop.org/drm/intel/issues/3989
  [i915#4077]: https://gitlab.freedesktop.org/drm/intel/issues/4077
  [i915#4078]: https://gitlab.freedesktop.org/drm/intel/issues/4078
  [i915#4083]: https://gitlab.freedesktop.org/drm/intel/issues/4083
  [i915#4087]: https://gitlab.freedesktop.org/drm/intel/issues/4087
  [i915#4103]: https://gitlab.freedesktop.org/drm/intel/issues/4103
  [i915#4213]: https://gitlab.freedesktop.org/drm/intel/issues/4213
  [i915#4270]: https://gitlab.freedesktop.org/drm/intel/issues/4270
  [i915#4349]: https://gitlab.freedesktop.org/drm/intel/issues/4349
  [i915#4473]: https://gitlab.freedesktop.org/drm/intel/issues/4473
  [i915#4475]: https://gitlab.freedesktop.org/drm/intel/issues/4475
  [i915#4538]: https://gitlab.freedesktop.org/drm/intel/issues/4538
  [i915#454]: https://gitlab.freedesktop.org/drm/intel/issues/454
  [i915#4613]: https://gitlab.freedesktop.org/drm/intel/issues/4613
  [i915#4771]: https://gitlab.freedesktop.org/drm/intel/issues/4771
  [i915#4852]: https://gitlab.freedesktop.org/drm/intel/issues/4852
  [i915#4879]: https://gitlab.freedesktop.org/drm/intel/issues/4879
  [i915#5138]: https://gitlab.freedesktop.org/drm/intel/issues/5138
  [i915#5176]: https://gitlab.freedesktop.org/drm/intel/issues/5176
  [i915#5190]: https://gitlab.freedesktop.org/drm/intel/issues/5190
  [i915#5235]: https://gitlab.freedesktop.org/drm/intel/issues/5235
  [i915#5274]: https://gitlab.freedesktop.org/drm/intel/issues/5274
  [i915#5286]: https://gitlab.freedesktop.org/drm/intel/issues/5286
  [i915#5289]: https://gitlab.freedesktop.org/drm/intel/issues/5289
  [i915#5334]: https://gitlab.freedesktop.org/drm/intel/issues/5334
  [i915#5354]: https://gitlab.freedesktop.org/drm/intel/issues/5354
  [i915#5493]: https://gitlab.freedesktop.org/drm/intel/issues/5493
  [i915#5566]: https://gitlab.freedesktop.org/drm/intel/issues/5566
  [i915#5954]: https://gitlab.freedesktop.org/drm/intel/issues/5954
  [i915#6095]: https://gitlab.freedesktop.org/drm/intel/issues/6095
  [i915#6301]: https://gitlab.freedesktop.org/drm/intel/issues/6301
  [i915#6493]: https://gitlab.freedesktop.org/drm/intel/issues/6493
  [i915#6524]: https://gitlab.freedesktop.org/drm/intel/issues/6524
  [i915#6537]: https://gitlab.freedesktop.org/drm/intel/issues/6537
  [i915#658]: https://gitlab.freedesktop.org/drm/intel/issues/658
  [i915#6806]: https://gitlab.freedesktop.org/drm/intel/issues/6806
  [i915#6880]: https://gitlab.freedesktop.org/drm/intel/issues/6880
  [i915#6944]: https://gitlab.freedesktop.org/drm/intel/issues/6944
  [i915#7116]: https://gitlab.freedesktop.org/drm/intel/issues/7116
  [i915#7118]: https://gitlab.freedesktop.org/drm/intel/issues/7118
  [i915#7162]: https://gitlab.freedesktop.org/drm/intel/issues/7162
  [i915#7173]: https://gitlab.freedesktop.org/drm/intel/issues/7173
  [i915#7213]: https://gitlab.freedesktop.org/drm/intel/issues/7213
  [i915#7331]: https://gitlab.freedesktop.org/drm/intel/issues/7331
  [i915#7484]: https://gitlab.freedesktop.org/drm/intel/issues/7484
  [i915#7697]: https://gitlab.freedesktop.org/drm/intel/issues/7697
  [i915#7701]: https://gitlab.freedesktop.org/drm/intel/issues/7701
  [i915#7711]: https://gitlab.freedesktop.org/drm/intel/issues/7711
  [i915#7742]: https://gitlab.freedesktop.org/drm/intel/issues/7742
  [i915#7765]: https://gitlab.freedesktop.org/drm/intel/issues/7765
  [i915#7828]: https://gitlab.freedesktop.org/drm/intel/issues/7828
  [i915#79]: https://gitlab.freedesktop.org/drm/intel/issues/79
  [i915#8228]: https://gitlab.freedesktop.org/drm/intel/issues/8228
  [i915#8247]: https://gitlab.freedesktop.org/drm/intel/issues/8247
  [i915#8292]: https://gitlab.freedesktop.org/drm/intel/issues/8292
  [i915#8414]: https://gitlab.freedesktop.org/drm/intel/issues/8414
  [i915#8428]: https://gitlab.freedesktop.org/drm/intel/issues/8428
  [i915#8489]: https://gitlab.freedesktop.org/drm/intel/issues/8489
  [i915#8502]: https://gitlab.freedesktop.org/drm/intel/issues/8502
  [i915#8588]: https://gitlab.freedesktop.org/drm/intel/issues/8588
  [i915#8628]: https://gitlab.freedesktop.org/drm/intel/issues/8628
  [i915#8661]: https://gitlab.freedesktop.org/drm/intel/issues/8661
  [i915#8668]: https://gitlab.freedesktop.org/drm/intel/issues/8668
  [i915#8691]: https://gitlab.freedesktop.org/drm/intel/issues/8691
  [i915#8708]: https://gitlab.freedesktop.org/drm/intel/issues/8708
  [i915#8709]: https://gitlab.freedesktop.org/drm/intel/issues/8709
  [i915#8717]: https://gitlab.freedesktop.org/drm/intel/issues/8717
  [i915#8764]: https://gitlab.freedesktop.org/drm/intel/issues/8764
  [i915#8810]: https://gitlab.freedesktop.org/drm/intel/issues/8810
  [i915#8841]: https://gitlab.freedesktop.org/drm/intel/issues/8841
  [i915#8898]: https://gitlab.freedesktop.org/drm/intel/issues/8898
  [i915#8925]: https://gitlab.freedesktop.org/drm/intel/issues/8925
  [i915#8957]: https://gitlab.freedesktop.org/drm/intel/issues/8957
  [i915#8962]: https://gitlab.freedesktop.org/drm/intel/issues/8962
  [i915#9053]: https://gitlab.freedesktop.org/drm/intel/issues/9053
  [i915#9119]: https://gitlab.freedesktop.org/drm/intel/issues/9119
  [i915#9121]: https://gitlab.freedesktop.org/drm/intel/issues/9121
  [i915#9159]: https://gitlab.freedesktop.org/drm/intel/issues/9159


Build changes
-------------

  * Linux: CI_DRM_13538 -> Patchwork_122693v1

  CI-20190529: 20190529
  CI_DRM_13538: 19f1cd24387fc8bbb63a2c1a74080e73a8f53f5f @ git://anongit.freedesktop.org/gfx-ci/linux
  IGT_7445: 7445
  Patchwork_122693v1: 19f1cd24387fc8bbb63a2c1a74080e73a8f53f5f @ git://anongit.freedesktop.org/gfx-ci/linux
  piglit_4509: fdc5a4ca11124ab8413c7988896eec4c97336694 @ git://anongit.freedesktop.org/piglit

== Logs ==

For more details see: https://intel-gfx-ci.01.org/tree/drm-tip/Patchwork_122693v1/index.html

[-- Attachment #2: Type: text/html, Size: 78338 bytes --]

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
  2023-08-21 10:14 ` [Intel-gfx] " Arunpravin Paneer Selvam
@ 2023-08-21 17:46   ` Matthew Auld
  -1 siblings, 0 replies; 20+ messages in thread
From: Matthew Auld @ 2023-08-21 17:46 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, christian.koenig

Hi,

On 21/08/2023 11:14, Arunpravin Paneer Selvam wrote:
> The way now contiguous requests are implemented such that
> the size rounded up to power of 2 and the corresponding order
> block picked from the freelist.
> 
> In addition to the older method, the new method will rounddown
> the size to power of 2 and the corresponding order block picked
> from the freelist. And for the remaining size we traverse the
> tree and try to allocate either from the freelist block's buddy
> or from the peer block. If the remaining size from peer/buddy
> block is not free, we pick the next freelist block and repeat
> the same method.
> 
> Moved contiguous/alignment size computation part and trim
> function to the drm buddy manager.

I think we should also mention somewhere what issue this is trying to 
solve. IIUC the roundup_power_of_two() might in some cases trigger 
-ENOSPC even though there might be enough free space, and so to help 
with that we introduce a try harder mechanism.

> 
> Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
> ---
>   drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
>   include/drm/drm_buddy.h     |   6 +-
>   2 files changed, 248 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
> index 7098f125b54a..220f60c08a03 100644
> --- a/drivers/gpu/drm/drm_buddy.c
> +++ b/drivers/gpu/drm/drm_buddy.c
> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct drm_buddy *mm,
>   	return __alloc_range(mm, &dfs, start, size, blocks);
>   }
>   
> +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
> +					       u64 size,
> +					       u64 min_block_size,
> +					       struct drm_buddy_block *block,
> +					       struct list_head *blocks)
> +{
> +	struct drm_buddy_block *buddy, *parent = NULL;
> +	u64 start, offset = 0;
> +	LIST_HEAD(dfs);
> +	int err;
> +
> +	if (!block)
> +		return -EINVAL;
> +
> +	buddy = __get_buddy(block);
> +	if (!buddy)
> +		return -ENOSPC;
> +
> +	if (drm_buddy_block_is_allocated(buddy))
> +		return -ENOSPC;
> +
> +	parent = block->parent;
> +	if (!parent)
> +		return -ENOSPC;
> +
> +	if (block->parent->right == block) {
> +		u64 remaining;
> +
> +		/* Compute the leftover size for allocation */
> +		remaining = max((size - drm_buddy_block_size(mm, buddy)),
> +				min_block_size);
> +		if (!IS_ALIGNED(remaining, min_block_size))
> +			remaining = round_up(remaining, min_block_size);
> +
> +		/* Check if remaining size is greater than buddy block size */
> +		if (drm_buddy_block_size(mm, buddy) < remaining)
> +			return -ENOSPC;
> +
> +		offset = drm_buddy_block_size(mm, buddy) - remaining;
> +	}
> +
> +	list_add(&parent->tmp_link, &dfs);
> +	start = drm_buddy_block_offset(parent) + offset;
> +
> +	err = __alloc_range(mm, &dfs, start, size, blocks);
> +	if (err)
> +		return -ENOSPC;
> +
> +	return 0;
> +}
> +
> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
> +					      u64 size,
> +					      u64 min_block_size,
> +					      struct drm_buddy_block *block,
> +					      struct list_head *blocks)
> +{
> +	struct drm_buddy_block *first, *peer, *tmp;
> +	struct drm_buddy_block *parent = NULL;
> +	u64 start, offset = 0;
> +	unsigned int order;
> +	LIST_HEAD(dfs);
> +	int err;
> +
> +	if (!block)
> +		return -EINVAL;
> +
> +	order = drm_buddy_block_order(block);
> +	/* Add freelist block to dfs list */
> +	list_add(&block->tmp_link, &dfs);
> +
> +	tmp = block;
> +	parent = block->parent;
> +	while (parent) {
> +		if (block->parent->left == block) {
> +			if (parent->left != tmp) {
> +				peer = parent->left;
> +				break;
> +			}
> +		} else {
> +			if (parent->right != tmp) {
> +				peer = parent->right;
> +				break;
> +			}
> +		}
> +
> +		tmp = parent;
> +		parent = tmp->parent;
> +	}
> +
> +	if (!parent)
> +		return -ENOSPC;
> +
> +	do {
> +		if (drm_buddy_block_is_allocated(peer))
> +			return -ENOSPC;
> +		/* Exit loop if peer block order is equal to block order */
> +		if (drm_buddy_block_order(peer) == order)
> +			break;
> +
> +		if (drm_buddy_block_is_split(peer)) {
> +			/* Traverse down to the block order level */
> +			if (block->parent->left == block)
> +				peer = peer->right;
> +			else
> +				peer = peer->left;
> +		} else {
> +			break;
> +		}
> +	} while (1);
> +
> +	if (block->parent->left == block) {
> +		u64 remaining;
> +
> +		/* Compute the leftover size for allocation */
> +		remaining = max((size - drm_buddy_block_size(mm, block)),
> +				min_block_size);
> +		if (!IS_ALIGNED(remaining, min_block_size))
> +			remaining = round_up(remaining, min_block_size);
> +
> +		/* Check if remaining size is greater than peer block size */
> +		if (drm_buddy_block_size(mm, peer) < remaining)
> +			return -ENOSPC;
> +
> +		offset = drm_buddy_block_size(mm, peer) - remaining;
> +		/* Add left peer block to dfs list */
> +		list_add(&peer->tmp_link, &dfs);
> +	} else {
> +		/* Add right peer block to dfs list */
> +		list_add_tail(&peer->tmp_link, &dfs);
> +	}
> +
> +	first = list_first_entry_or_null(&dfs,
> +					 struct drm_buddy_block,
> +					 tmp_link);
> +	if (!first)
> +		return -EINVAL;
> +
> +	start = drm_buddy_block_offset(first) + offset;
> +	err = __alloc_range(mm, &dfs, start, size, blocks);
> +	if (err)
> +		return -ENOSPC;
> +
> +	return 0;
> +}
> +
> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
> +					       u64 size,
> +					       u64 min_block_size,
> +					       struct list_head *blocks)
> +{
> +	struct drm_buddy_block *block;
> +	struct list_head *list;
> +	unsigned long pages;
> +	unsigned int order;
> +	u64 modify_size;
> +	int err;
> +
> +	modify_size = rounddown_pow_of_two(size);
> +	pages = modify_size >> ilog2(mm->chunk_size);
> +	order = fls(pages) - 1;
> +	if (order == 0)
> +		return -ENOSPC;
> +
> +	list = &mm->free_list[order];
> +	if (list_empty(list))
> +		return -ENOSPC;
> +
> +	list_for_each_entry_reverse(block, list, link) {
> +		/* Allocate contiguous blocks from the buddy */
> +		err = __alloc_contiguous_block_from_buddy(mm,
> +							  size,
> +							  min_block_size,
> +							  block,
> +							  blocks);
> +		if (!err)
> +			return 0;
> +
> +		/* Allocate contiguous blocks from tree traversal method */
> +		err = __alloc_contiguous_block_from_peer(mm,
> +							 size,
> +							 min_block_size,
> +							 block,
> +							 blocks);
> +		if (!err)
> +			return 0;
> +	}
> +
> +	return -ENOSPC;
> +}

Wondering if this would be a lot simpler if we can tweak alloc_range() 
to support allocating as much as it can up to some size? If it runs out 
of space it still returns an error but doesn't actually free what it has 
successfully allocated. It then also tells us how much it allocated. We 
can then allocate the rhs first and then from whatever is left we can 
figure out the precise offset we need for the lhs? I think that looks 
sort of similar to what the above does, but here we can for the most 
part just re-use alloc_range()? So maybe something like:

__alloc_range(..., u64 *total_allocated_on_err)
{
     ....
     err_free:
         if (err == -ENOSPC && total_allocated_on_err)
             *total_allocated_on_err = total_allocated;
         else
             drm_buddy_free_list(mm, &allocated);
         return err;
}

alloc_contig_try_harder()
{
      ....
      list_for_each_entry_reverse(b, list, link) {
          .....

          rhs_offset = block_offset(b);
          err =  __drm_buddy_alloc_range(mm, rhs_offset,
                                         size, &filled,
                                         blocks);
          if (!err || err != -ENOSPC)
              break;

          lhs_size = size - filled;
          lhs_offset = block_offset(b) - lhs_size;
          err =  __drm_buddy_alloc_range(mm, lhs_offset,
                                         lhs_size, NULL,
                                         blocks_lhs);
          list_splice(blocks_lhs, blocks);

          ....
      }
}

?

> +
>   /**
>    * drm_buddy_block_trim - free unused pages
>    *
> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>    * @start: start of the allowed range for this block
>    * @end: end of the allowed range for this block
>    * @size: size of the allocation
> - * @min_page_size: alignment of the allocation
> + * @min_block_size: alignment of the allocation
>    * @blocks: output list head to add allocated blocks
>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>    *
> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>    */
>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   			   u64 start, u64 end, u64 size,
> -			   u64 min_page_size,
> +			   u64 min_block_size,
>   			   struct list_head *blocks,
>   			   unsigned long flags)
>   {
>   	struct drm_buddy_block *block = NULL;
> +	u64 original_size, original_min_size;
>   	unsigned int min_order, order;
> -	unsigned long pages;
>   	LIST_HEAD(allocated);
> +	unsigned long pages;
>   	int err;
>   
>   	if (size < mm->chunk_size)
>   		return -EINVAL;
>   
> -	if (min_page_size < mm->chunk_size)
> +	if (min_block_size < mm->chunk_size)
>   		return -EINVAL;
>   
> -	if (!is_power_of_2(min_page_size))
> +	if (!is_power_of_2(min_block_size))
>   		return -EINVAL;
>   
>   	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   	if (start + size == end)
>   		return __drm_buddy_alloc_range(mm, start, size, blocks);
>   
> -	if (!IS_ALIGNED(size, min_page_size))
> -		return -EINVAL;
> +	original_size = size;
> +	original_min_size = min_block_size;
> +
> +	/* Roundup the size to power of 2 */
> +	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
> +		size = roundup_pow_of_two(size);
> +		min_block_size = size;
> +	/* Align size value to min_block_size */
> +	} else if (!IS_ALIGNED(size, min_block_size)) {
> +		size = round_up(size, min_block_size);
> +	}
>   
>   	pages = size >> ilog2(mm->chunk_size);
>   	order = fls(pages) - 1;
> -	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
> +	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>   
>   	do {
>   		order = min(order, (unsigned int)fls(pages) - 1);
> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   				break;
>   
>   			if (order-- == min_order) {
> +				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
> +				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
> +					/*
> +					 * Try contiguous block allocation through
> +					 * tree traversal method
> +					 */
> +					return __drm_buddy_alloc_contiguous_blocks(mm,
> +										   original_size,
> +										   original_min_size,
> +										   blocks);
> +
>   				err = -ENOSPC;
>   				goto err_free;
>   			}
> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   			break;
>   	} while (1);
>   
> +	/* Trim the allocated block to the required size */
> +	if (original_size != size) {
> +		struct list_head *trim_list;
> +		LIST_HEAD(temp);
> +		u64 trim_size;
> +
> +		trim_list = &allocated;
> +		trim_size = original_size;
> +
> +		if (!list_is_singular(&allocated)) {
> +			block = list_last_entry(&allocated, typeof(*block), link);
> +			list_move(&block->link, &temp);
> +			trim_list = &temp;
> +			trim_size = drm_buddy_block_size(mm, block) -
> +				(size - original_size);
> +		}
> +
> +		drm_buddy_block_trim(mm,
> +				     trim_size,
> +				     trim_list);
> +
> +		if (!list_empty(&temp))
> +			list_splice_tail(trim_list, &allocated);
> +	}
> +
>   	list_splice_tail(&allocated, blocks);
>   	return 0;
>   
> diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
> index 572077ff8ae7..a5b39fc01003 100644
> --- a/include/drm/drm_buddy.h
> +++ b/include/drm/drm_buddy.h
> @@ -22,8 +22,9 @@
>   	start__ >= max__ || size__ > max__ - start__; \
>   })
>   
> -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
> +#define DRM_BUDDY_RANGE_ALLOCATION		BIT(0)
> +#define DRM_BUDDY_TOPDOWN_ALLOCATION		BIT(1)
> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION		BIT(2)
>   
>   struct drm_buddy_block {
>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
>   void drm_buddy_block_print(struct drm_buddy *mm,
>   			   struct drm_buddy_block *block,
>   			   struct drm_printer *p);
> -
>   #endif

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
@ 2023-08-21 17:46   ` Matthew Auld
  0 siblings, 0 replies; 20+ messages in thread
From: Matthew Auld @ 2023-08-21 17:46 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, christian.koenig

Hi,

On 21/08/2023 11:14, Arunpravin Paneer Selvam wrote:
> The way now contiguous requests are implemented such that
> the size rounded up to power of 2 and the corresponding order
> block picked from the freelist.
> 
> In addition to the older method, the new method will rounddown
> the size to power of 2 and the corresponding order block picked
> from the freelist. And for the remaining size we traverse the
> tree and try to allocate either from the freelist block's buddy
> or from the peer block. If the remaining size from peer/buddy
> block is not free, we pick the next freelist block and repeat
> the same method.
> 
> Moved contiguous/alignment size computation part and trim
> function to the drm buddy manager.

I think we should also mention somewhere what issue this is trying to 
solve. IIUC the roundup_power_of_two() might in some cases trigger 
-ENOSPC even though there might be enough free space, and so to help 
with that we introduce a try harder mechanism.

> 
> Signed-off-by: Arunpravin Paneer Selvam <Arunpravin.PaneerSelvam@amd.com>
> ---
>   drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
>   include/drm/drm_buddy.h     |   6 +-
>   2 files changed, 248 insertions(+), 11 deletions(-)
> 
> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
> index 7098f125b54a..220f60c08a03 100644
> --- a/drivers/gpu/drm/drm_buddy.c
> +++ b/drivers/gpu/drm/drm_buddy.c
> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct drm_buddy *mm,
>   	return __alloc_range(mm, &dfs, start, size, blocks);
>   }
>   
> +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
> +					       u64 size,
> +					       u64 min_block_size,
> +					       struct drm_buddy_block *block,
> +					       struct list_head *blocks)
> +{
> +	struct drm_buddy_block *buddy, *parent = NULL;
> +	u64 start, offset = 0;
> +	LIST_HEAD(dfs);
> +	int err;
> +
> +	if (!block)
> +		return -EINVAL;
> +
> +	buddy = __get_buddy(block);
> +	if (!buddy)
> +		return -ENOSPC;
> +
> +	if (drm_buddy_block_is_allocated(buddy))
> +		return -ENOSPC;
> +
> +	parent = block->parent;
> +	if (!parent)
> +		return -ENOSPC;
> +
> +	if (block->parent->right == block) {
> +		u64 remaining;
> +
> +		/* Compute the leftover size for allocation */
> +		remaining = max((size - drm_buddy_block_size(mm, buddy)),
> +				min_block_size);
> +		if (!IS_ALIGNED(remaining, min_block_size))
> +			remaining = round_up(remaining, min_block_size);
> +
> +		/* Check if remaining size is greater than buddy block size */
> +		if (drm_buddy_block_size(mm, buddy) < remaining)
> +			return -ENOSPC;
> +
> +		offset = drm_buddy_block_size(mm, buddy) - remaining;
> +	}
> +
> +	list_add(&parent->tmp_link, &dfs);
> +	start = drm_buddy_block_offset(parent) + offset;
> +
> +	err = __alloc_range(mm, &dfs, start, size, blocks);
> +	if (err)
> +		return -ENOSPC;
> +
> +	return 0;
> +}
> +
> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
> +					      u64 size,
> +					      u64 min_block_size,
> +					      struct drm_buddy_block *block,
> +					      struct list_head *blocks)
> +{
> +	struct drm_buddy_block *first, *peer, *tmp;
> +	struct drm_buddy_block *parent = NULL;
> +	u64 start, offset = 0;
> +	unsigned int order;
> +	LIST_HEAD(dfs);
> +	int err;
> +
> +	if (!block)
> +		return -EINVAL;
> +
> +	order = drm_buddy_block_order(block);
> +	/* Add freelist block to dfs list */
> +	list_add(&block->tmp_link, &dfs);
> +
> +	tmp = block;
> +	parent = block->parent;
> +	while (parent) {
> +		if (block->parent->left == block) {
> +			if (parent->left != tmp) {
> +				peer = parent->left;
> +				break;
> +			}
> +		} else {
> +			if (parent->right != tmp) {
> +				peer = parent->right;
> +				break;
> +			}
> +		}
> +
> +		tmp = parent;
> +		parent = tmp->parent;
> +	}
> +
> +	if (!parent)
> +		return -ENOSPC;
> +
> +	do {
> +		if (drm_buddy_block_is_allocated(peer))
> +			return -ENOSPC;
> +		/* Exit loop if peer block order is equal to block order */
> +		if (drm_buddy_block_order(peer) == order)
> +			break;
> +
> +		if (drm_buddy_block_is_split(peer)) {
> +			/* Traverse down to the block order level */
> +			if (block->parent->left == block)
> +				peer = peer->right;
> +			else
> +				peer = peer->left;
> +		} else {
> +			break;
> +		}
> +	} while (1);
> +
> +	if (block->parent->left == block) {
> +		u64 remaining;
> +
> +		/* Compute the leftover size for allocation */
> +		remaining = max((size - drm_buddy_block_size(mm, block)),
> +				min_block_size);
> +		if (!IS_ALIGNED(remaining, min_block_size))
> +			remaining = round_up(remaining, min_block_size);
> +
> +		/* Check if remaining size is greater than peer block size */
> +		if (drm_buddy_block_size(mm, peer) < remaining)
> +			return -ENOSPC;
> +
> +		offset = drm_buddy_block_size(mm, peer) - remaining;
> +		/* Add left peer block to dfs list */
> +		list_add(&peer->tmp_link, &dfs);
> +	} else {
> +		/* Add right peer block to dfs list */
> +		list_add_tail(&peer->tmp_link, &dfs);
> +	}
> +
> +	first = list_first_entry_or_null(&dfs,
> +					 struct drm_buddy_block,
> +					 tmp_link);
> +	if (!first)
> +		return -EINVAL;
> +
> +	start = drm_buddy_block_offset(first) + offset;
> +	err = __alloc_range(mm, &dfs, start, size, blocks);
> +	if (err)
> +		return -ENOSPC;
> +
> +	return 0;
> +}
> +
> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
> +					       u64 size,
> +					       u64 min_block_size,
> +					       struct list_head *blocks)
> +{
> +	struct drm_buddy_block *block;
> +	struct list_head *list;
> +	unsigned long pages;
> +	unsigned int order;
> +	u64 modify_size;
> +	int err;
> +
> +	modify_size = rounddown_pow_of_two(size);
> +	pages = modify_size >> ilog2(mm->chunk_size);
> +	order = fls(pages) - 1;
> +	if (order == 0)
> +		return -ENOSPC;
> +
> +	list = &mm->free_list[order];
> +	if (list_empty(list))
> +		return -ENOSPC;
> +
> +	list_for_each_entry_reverse(block, list, link) {
> +		/* Allocate contiguous blocks from the buddy */
> +		err = __alloc_contiguous_block_from_buddy(mm,
> +							  size,
> +							  min_block_size,
> +							  block,
> +							  blocks);
> +		if (!err)
> +			return 0;
> +
> +		/* Allocate contiguous blocks from tree traversal method */
> +		err = __alloc_contiguous_block_from_peer(mm,
> +							 size,
> +							 min_block_size,
> +							 block,
> +							 blocks);
> +		if (!err)
> +			return 0;
> +	}
> +
> +	return -ENOSPC;
> +}

Wondering if this would be a lot simpler if we can tweak alloc_range() 
to support allocating as much as it can up to some size? If it runs out 
of space it still returns an error but doesn't actually free what it has 
successfully allocated. It then also tells us how much it allocated. We 
can then allocate the rhs first and then from whatever is left we can 
figure out the precise offset we need for the lhs? I think that looks 
sort of similar to what the above does, but here we can for the most 
part just re-use alloc_range()? So maybe something like:

__alloc_range(..., u64 *total_allocated_on_err)
{
     ....
     err_free:
         if (err == -ENOSPC && total_allocated_on_err)
             *total_allocated_on_err = total_allocated;
         else
             drm_buddy_free_list(mm, &allocated);
         return err;
}

alloc_contig_try_harder()
{
      ....
      list_for_each_entry_reverse(b, list, link) {
          .....

          rhs_offset = block_offset(b);
          err =  __drm_buddy_alloc_range(mm, rhs_offset,
                                         size, &filled,
                                         blocks);
          if (!err || err != -ENOSPC)
              break;

          lhs_size = size - filled;
          lhs_offset = block_offset(b) - lhs_size;
          err =  __drm_buddy_alloc_range(mm, lhs_offset,
                                         lhs_size, NULL,
                                         blocks_lhs);
          list_splice(blocks_lhs, blocks);

          ....
      }
}

?

> +
>   /**
>    * drm_buddy_block_trim - free unused pages
>    *
> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>    * @start: start of the allowed range for this block
>    * @end: end of the allowed range for this block
>    * @size: size of the allocation
> - * @min_page_size: alignment of the allocation
> + * @min_block_size: alignment of the allocation
>    * @blocks: output list head to add allocated blocks
>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>    *
> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>    */
>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   			   u64 start, u64 end, u64 size,
> -			   u64 min_page_size,
> +			   u64 min_block_size,
>   			   struct list_head *blocks,
>   			   unsigned long flags)
>   {
>   	struct drm_buddy_block *block = NULL;
> +	u64 original_size, original_min_size;
>   	unsigned int min_order, order;
> -	unsigned long pages;
>   	LIST_HEAD(allocated);
> +	unsigned long pages;
>   	int err;
>   
>   	if (size < mm->chunk_size)
>   		return -EINVAL;
>   
> -	if (min_page_size < mm->chunk_size)
> +	if (min_block_size < mm->chunk_size)
>   		return -EINVAL;
>   
> -	if (!is_power_of_2(min_page_size))
> +	if (!is_power_of_2(min_block_size))
>   		return -EINVAL;
>   
>   	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   	if (start + size == end)
>   		return __drm_buddy_alloc_range(mm, start, size, blocks);
>   
> -	if (!IS_ALIGNED(size, min_page_size))
> -		return -EINVAL;
> +	original_size = size;
> +	original_min_size = min_block_size;
> +
> +	/* Roundup the size to power of 2 */
> +	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
> +		size = roundup_pow_of_two(size);
> +		min_block_size = size;
> +	/* Align size value to min_block_size */
> +	} else if (!IS_ALIGNED(size, min_block_size)) {
> +		size = round_up(size, min_block_size);
> +	}
>   
>   	pages = size >> ilog2(mm->chunk_size);
>   	order = fls(pages) - 1;
> -	min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
> +	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>   
>   	do {
>   		order = min(order, (unsigned int)fls(pages) - 1);
> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   				break;
>   
>   			if (order-- == min_order) {
> +				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
> +				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
> +					/*
> +					 * Try contiguous block allocation through
> +					 * tree traversal method
> +					 */
> +					return __drm_buddy_alloc_contiguous_blocks(mm,
> +										   original_size,
> +										   original_min_size,
> +										   blocks);
> +
>   				err = -ENOSPC;
>   				goto err_free;
>   			}
> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>   			break;
>   	} while (1);
>   
> +	/* Trim the allocated block to the required size */
> +	if (original_size != size) {
> +		struct list_head *trim_list;
> +		LIST_HEAD(temp);
> +		u64 trim_size;
> +
> +		trim_list = &allocated;
> +		trim_size = original_size;
> +
> +		if (!list_is_singular(&allocated)) {
> +			block = list_last_entry(&allocated, typeof(*block), link);
> +			list_move(&block->link, &temp);
> +			trim_list = &temp;
> +			trim_size = drm_buddy_block_size(mm, block) -
> +				(size - original_size);
> +		}
> +
> +		drm_buddy_block_trim(mm,
> +				     trim_size,
> +				     trim_list);
> +
> +		if (!list_empty(&temp))
> +			list_splice_tail(trim_list, &allocated);
> +	}
> +
>   	list_splice_tail(&allocated, blocks);
>   	return 0;
>   
> diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
> index 572077ff8ae7..a5b39fc01003 100644
> --- a/include/drm/drm_buddy.h
> +++ b/include/drm/drm_buddy.h
> @@ -22,8 +22,9 @@
>   	start__ >= max__ || size__ > max__ - start__; \
>   })
>   
> -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
> +#define DRM_BUDDY_RANGE_ALLOCATION		BIT(0)
> +#define DRM_BUDDY_TOPDOWN_ALLOCATION		BIT(1)
> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION		BIT(2)
>   
>   struct drm_buddy_block {
>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p);
>   void drm_buddy_block_print(struct drm_buddy *mm,
>   			   struct drm_buddy_block *block,
>   			   struct drm_printer *p);
> -
>   #endif

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
  2023-08-21 17:46   ` [Intel-gfx] " Matthew Auld
@ 2023-08-22 14:01     ` Arunpravin Paneer Selvam
  -1 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-22 14:01 UTC (permalink / raw)
  To: Matthew Auld, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, christian.koenig


On 21/08/23 10:46, Matthew Auld wrote:
> Hi,
>
> On 21/08/2023 11:14, Arunpravin Paneer Selvam wrote:
>> The way now contiguous requests are implemented such that
>> the size rounded up to power of 2 and the corresponding order
>> block picked from the freelist.
>>
>> In addition to the older method, the new method will rounddown
>> the size to power of 2 and the corresponding order block picked
>> from the freelist. And for the remaining size we traverse the
>> tree and try to allocate either from the freelist block's buddy
>> or from the peer block. If the remaining size from peer/buddy
>> block is not free, we pick the next freelist block and repeat
>> the same method.
>>
>> Moved contiguous/alignment size computation part and trim
>> function to the drm buddy manager.
>
> I think we should also mention somewhere what issue this is trying to 
> solve. IIUC the roundup_power_of_two() might in some cases trigger 
> -ENOSPC even though there might be enough free space, and so to help 
> with that we introduce a try harder mechanism.
Yes, we are trying to solve the above issue. I will add the problem 
statement to the commit description.
>
>>
>> Signed-off-by: Arunpravin Paneer Selvam 
>> <Arunpravin.PaneerSelvam@amd.com>
>> ---
>>   drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
>>   include/drm/drm_buddy.h     |   6 +-
>>   2 files changed, 248 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
>> index 7098f125b54a..220f60c08a03 100644
>> --- a/drivers/gpu/drm/drm_buddy.c
>> +++ b/drivers/gpu/drm/drm_buddy.c
>> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct 
>> drm_buddy *mm,
>>       return __alloc_range(mm, &dfs, start, size, blocks);
>>   }
>>   +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
>> +                           u64 size,
>> +                           u64 min_block_size,
>> +                           struct drm_buddy_block *block,
>> +                           struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *buddy, *parent = NULL;
>> +    u64 start, offset = 0;
>> +    LIST_HEAD(dfs);
>> +    int err;
>> +
>> +    if (!block)
>> +        return -EINVAL;
>> +
>> +    buddy = __get_buddy(block);
>> +    if (!buddy)
>> +        return -ENOSPC;
>> +
>> +    if (drm_buddy_block_is_allocated(buddy))
>> +        return -ENOSPC;
>> +
>> +    parent = block->parent;
>> +    if (!parent)
>> +        return -ENOSPC;
>> +
>> +    if (block->parent->right == block) {
>> +        u64 remaining;
>> +
>> +        /* Compute the leftover size for allocation */
>> +        remaining = max((size - drm_buddy_block_size(mm, buddy)),
>> +                min_block_size);
>> +        if (!IS_ALIGNED(remaining, min_block_size))
>> +            remaining = round_up(remaining, min_block_size);
>> +
>> +        /* Check if remaining size is greater than buddy block size */
>> +        if (drm_buddy_block_size(mm, buddy) < remaining)
>> +            return -ENOSPC;
>> +
>> +        offset = drm_buddy_block_size(mm, buddy) - remaining;
>> +    }
>> +
>> +    list_add(&parent->tmp_link, &dfs);
>> +    start = drm_buddy_block_offset(parent) + offset;
>> +
>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>> +    if (err)
>> +        return -ENOSPC;
>> +
>> +    return 0;
>> +}
>> +
>> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
>> +                          u64 size,
>> +                          u64 min_block_size,
>> +                          struct drm_buddy_block *block,
>> +                          struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *first, *peer, *tmp;
>> +    struct drm_buddy_block *parent = NULL;
>> +    u64 start, offset = 0;
>> +    unsigned int order;
>> +    LIST_HEAD(dfs);
>> +    int err;
>> +
>> +    if (!block)
>> +        return -EINVAL;
>> +
>> +    order = drm_buddy_block_order(block);
>> +    /* Add freelist block to dfs list */
>> +    list_add(&block->tmp_link, &dfs);
>> +
>> +    tmp = block;
>> +    parent = block->parent;
>> +    while (parent) {
>> +        if (block->parent->left == block) {
>> +            if (parent->left != tmp) {
>> +                peer = parent->left;
>> +                break;
>> +            }
>> +        } else {
>> +            if (parent->right != tmp) {
>> +                peer = parent->right;
>> +                break;
>> +            }
>> +        }
>> +
>> +        tmp = parent;
>> +        parent = tmp->parent;
>> +    }
>> +
>> +    if (!parent)
>> +        return -ENOSPC;
>> +
>> +    do {
>> +        if (drm_buddy_block_is_allocated(peer))
>> +            return -ENOSPC;
>> +        /* Exit loop if peer block order is equal to block order */
>> +        if (drm_buddy_block_order(peer) == order)
>> +            break;
>> +
>> +        if (drm_buddy_block_is_split(peer)) {
>> +            /* Traverse down to the block order level */
>> +            if (block->parent->left == block)
>> +                peer = peer->right;
>> +            else
>> +                peer = peer->left;
>> +        } else {
>> +            break;
>> +        }
>> +    } while (1);
>> +
>> +    if (block->parent->left == block) {
>> +        u64 remaining;
>> +
>> +        /* Compute the leftover size for allocation */
>> +        remaining = max((size - drm_buddy_block_size(mm, block)),
>> +                min_block_size);
>> +        if (!IS_ALIGNED(remaining, min_block_size))
>> +            remaining = round_up(remaining, min_block_size);
>> +
>> +        /* Check if remaining size is greater than peer block size */
>> +        if (drm_buddy_block_size(mm, peer) < remaining)
>> +            return -ENOSPC;
>> +
>> +        offset = drm_buddy_block_size(mm, peer) - remaining;
>> +        /* Add left peer block to dfs list */
>> +        list_add(&peer->tmp_link, &dfs);
>> +    } else {
>> +        /* Add right peer block to dfs list */
>> +        list_add_tail(&peer->tmp_link, &dfs);
>> +    }
>> +
>> +    first = list_first_entry_or_null(&dfs,
>> +                     struct drm_buddy_block,
>> +                     tmp_link);
>> +    if (!first)
>> +        return -EINVAL;
>> +
>> +    start = drm_buddy_block_offset(first) + offset;
>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>> +    if (err)
>> +        return -ENOSPC;
>> +
>> +    return 0;
>> +}
>> +
>> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
>> +                           u64 size,
>> +                           u64 min_block_size,
>> +                           struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *block;
>> +    struct list_head *list;
>> +    unsigned long pages;
>> +    unsigned int order;
>> +    u64 modify_size;
>> +    int err;
>> +
>> +    modify_size = rounddown_pow_of_two(size);
>> +    pages = modify_size >> ilog2(mm->chunk_size);
>> +    order = fls(pages) - 1;
>> +    if (order == 0)
>> +        return -ENOSPC;
>> +
>> +    list = &mm->free_list[order];
>> +    if (list_empty(list))
>> +        return -ENOSPC;
>> +
>> +    list_for_each_entry_reverse(block, list, link) {
>> +        /* Allocate contiguous blocks from the buddy */
>> +        err = __alloc_contiguous_block_from_buddy(mm,
>> +                              size,
>> +                              min_block_size,
>> +                              block,
>> +                              blocks);
>> +        if (!err)
>> +            return 0;
>> +
>> +        /* Allocate contiguous blocks from tree traversal method */
>> +        err = __alloc_contiguous_block_from_peer(mm,
>> +                             size,
>> +                             min_block_size,
>> +                             block,
>> +                             blocks);
>> +        if (!err)
>> +            return 0;
>> +    }
>> +
>> +    return -ENOSPC;
>> +}
>
> Wondering if this would be a lot simpler if we can tweak alloc_range() 
> to support allocating as much as it can up to some size? If it runs 
> out of space it still returns an error but doesn't actually free what 
> it has successfully allocated. It then also tells us how much it 
> allocated. We can then allocate the rhs first and then from whatever 
> is left we can figure out the precise offset we need for the lhs? I 
> think that looks sort of similar to what the above does, but here we 
> can for the most part just re-use alloc_range()? So maybe something like:
>
> __alloc_range(..., u64 *total_allocated_on_err)
> {
>     ....
>     err_free:
>         if (err == -ENOSPC && total_allocated_on_err)
>             *total_allocated_on_err = total_allocated;
>         else
>             drm_buddy_free_list(mm, &allocated);
>         return err;
> }
>
> alloc_contig_try_harder()
> {
>      ....
>      list_for_each_entry_reverse(b, list, link) {
>          .....
>
>          rhs_offset = block_offset(b);
>          err =  __drm_buddy_alloc_range(mm, rhs_offset,
>                                         size, &filled,
>                                         blocks);
>          if (!err || err != -ENOSPC)
>              break;
>
>          lhs_size = size - filled;
>          lhs_offset = block_offset(b) - lhs_size;
>          err =  __drm_buddy_alloc_range(mm, lhs_offset,
>                                         lhs_size, NULL,
>                                         blocks_lhs);
>          list_splice(blocks_lhs, blocks);
>
>          ....
>      }
> }
>
> ?
>
The difference between the above approach and this patch is that the 
above approach tries to allocate first from the RHS and the remaining size 
from the LHS, whereas this patch handles the RHS and LHS separately through 
the alloc_from_buddy() and alloc_from_peer() functions. I thought this would 
unblock the merge-back operation on either side (LHS or RHS). If the above 
approach doesn't cause any harm, we will try to allocate 
from both sides.

If we can add the immediate right block (i.e. its buddy) to the dfs list 
for RHS traversal, and only the immediate peer block for LHS traversal, 
this would eliminate the need to add the complete address space to the 
list and might reduce the number of iterations.

Also, we should ALIGN the LHS remaining size to the min_block_size. 
Otherwise, I see glitches in some of the workloads.

>> +
>>   /**
>>    * drm_buddy_block_trim - free unused pages
>>    *
>> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>    * @start: start of the allowed range for this block
>>    * @end: end of the allowed range for this block
>>    * @size: size of the allocation
>> - * @min_page_size: alignment of the allocation
>> + * @min_block_size: alignment of the allocation
>>    * @blocks: output list head to add allocated blocks
>>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>>    *
>> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>    */
>>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>                  u64 start, u64 end, u64 size,
>> -               u64 min_page_size,
>> +               u64 min_block_size,
>>                  struct list_head *blocks,
>>                  unsigned long flags)
>>   {
>>       struct drm_buddy_block *block = NULL;
>> +    u64 original_size, original_min_size;
>>       unsigned int min_order, order;
>> -    unsigned long pages;
>>       LIST_HEAD(allocated);
>> +    unsigned long pages;
>>       int err;
>>         if (size < mm->chunk_size)
>>           return -EINVAL;
>>   -    if (min_page_size < mm->chunk_size)
>> +    if (min_block_size < mm->chunk_size)
>>           return -EINVAL;
>>   -    if (!is_power_of_2(min_page_size))
>> +    if (!is_power_of_2(min_block_size))
>>           return -EINVAL;
>>         if (!IS_ALIGNED(start | end | size, mm->chunk_size))
>> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>       if (start + size == end)
>>           return __drm_buddy_alloc_range(mm, start, size, blocks);
>>   -    if (!IS_ALIGNED(size, min_page_size))
>> -        return -EINVAL;
>> +    original_size = size;
>> +    original_min_size = min_block_size;
>> +
>> +    /* Roundup the size to power of 2 */
>> +    if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
>> +        size = roundup_pow_of_two(size);
>> +        min_block_size = size;
>> +    /* Align size value to min_block_size */
>> +    } else if (!IS_ALIGNED(size, min_block_size)) {
>> +        size = round_up(size, min_block_size);
>> +    }
>>         pages = size >> ilog2(mm->chunk_size);
>>       order = fls(pages) - 1;
>> -    min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
>> +    min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>>         do {
>>           order = min(order, (unsigned int)fls(pages) - 1);
>> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>                   break;
>>                 if (order-- == min_order) {
>> +                if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
>> +                    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
>> +                    /*
>> +                     * Try contiguous block allocation through
>> +                     * tree traversal method
>> +                     */
>> +                    return __drm_buddy_alloc_contiguous_blocks(mm,
>> +                                           original_size,
>> +                                           original_min_size,
>> +                                           blocks);
>> +
>>                   err = -ENOSPC;
>>                   goto err_free;
>>               }
>> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>               break;
>>       } while (1);
>>   +    /* Trim the allocated block to the required size */
>> +    if (original_size != size) {
>> +        struct list_head *trim_list;
>> +        LIST_HEAD(temp);
>> +        u64 trim_size;
>> +
>> +        trim_list = &allocated;
>> +        trim_size = original_size;
>> +
>> +        if (!list_is_singular(&allocated)) {
>> +            block = list_last_entry(&allocated, typeof(*block), link);
>> +            list_move(&block->link, &temp);
>> +            trim_list = &temp;
>> +            trim_size = drm_buddy_block_size(mm, block) -
>> +                (size - original_size);
>> +        }
>> +
>> +        drm_buddy_block_trim(mm,
>> +                     trim_size,
>> +                     trim_list);
>> +
>> +        if (!list_empty(&temp))
>> +            list_splice_tail(trim_list, &allocated);
>> +    }
>> +
>>       list_splice_tail(&allocated, blocks);
>>       return 0;
>>   diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
>> index 572077ff8ae7..a5b39fc01003 100644
>> --- a/include/drm/drm_buddy.h
>> +++ b/include/drm/drm_buddy.h
>> @@ -22,8 +22,9 @@
>>       start__ >= max__ || size__ > max__ - start__; \
>>   })
>>   -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
>> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
>> +#define DRM_BUDDY_RANGE_ALLOCATION        BIT(0)
>> +#define DRM_BUDDY_TOPDOWN_ALLOCATION        BIT(1)
>> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION        BIT(2)
>>     struct drm_buddy_block {
>>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
>> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct 
>> drm_printer *p);
>>   void drm_buddy_block_print(struct drm_buddy *mm,
>>                  struct drm_buddy_block *block,
>>                  struct drm_printer *p);
>> -
>>   #endif

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
@ 2023-08-22 14:01     ` Arunpravin Paneer Selvam
  0 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-22 14:01 UTC (permalink / raw)
  To: Matthew Auld, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, christian.koenig


On 21/08/23 10:46, Matthew Auld wrote:
> Hi,
>
> On 21/08/2023 11:14, Arunpravin Paneer Selvam wrote:
>> The way now contiguous requests are implemented such that
>> the size rounded up to power of 2 and the corresponding order
>> block picked from the freelist.
>>
>> In addition to the older method, the new method will rounddown
>> the size to power of 2 and the corresponding order block picked
>> from the freelist. And for the remaining size we traverse the
>> tree and try to allocate either from the freelist block's buddy
>> or from the peer block. If the remaining size from peer/buddy
>> block is not free, we pick the next freelist block and repeat
>> the same method.
>>
>> Moved contiguous/alignment size computation part and trim
>> function to the drm buddy manager.
>
> I think we should also mention somewhere what issue this is trying to 
> solve. IIUC the roundup_power_of_two() might in some cases trigger 
> -ENOSPC even though there might be enough free space, and so to help 
> with that we introduce a try harder mechanism.
Yes, we are trying to solve the above issue. I will add the problem 
statement to the commit description.
>
>>
>> Signed-off-by: Arunpravin Paneer Selvam 
>> <Arunpravin.PaneerSelvam@amd.com>
>> ---
>>   drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
>>   include/drm/drm_buddy.h     |   6 +-
>>   2 files changed, 248 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
>> index 7098f125b54a..220f60c08a03 100644
>> --- a/drivers/gpu/drm/drm_buddy.c
>> +++ b/drivers/gpu/drm/drm_buddy.c
>> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct 
>> drm_buddy *mm,
>>       return __alloc_range(mm, &dfs, start, size, blocks);
>>   }
>>   +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
>> +                           u64 size,
>> +                           u64 min_block_size,
>> +                           struct drm_buddy_block *block,
>> +                           struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *buddy, *parent = NULL;
>> +    u64 start, offset = 0;
>> +    LIST_HEAD(dfs);
>> +    int err;
>> +
>> +    if (!block)
>> +        return -EINVAL;
>> +
>> +    buddy = __get_buddy(block);
>> +    if (!buddy)
>> +        return -ENOSPC;
>> +
>> +    if (drm_buddy_block_is_allocated(buddy))
>> +        return -ENOSPC;
>> +
>> +    parent = block->parent;
>> +    if (!parent)
>> +        return -ENOSPC;
>> +
>> +    if (block->parent->right == block) {
>> +        u64 remaining;
>> +
>> +        /* Compute the leftover size for allocation */
>> +        remaining = max((size - drm_buddy_block_size(mm, buddy)),
>> +                min_block_size);
>> +        if (!IS_ALIGNED(remaining, min_block_size))
>> +            remaining = round_up(remaining, min_block_size);
>> +
>> +        /* Check if remaining size is greater than buddy block size */
>> +        if (drm_buddy_block_size(mm, buddy) < remaining)
>> +            return -ENOSPC;
>> +
>> +        offset = drm_buddy_block_size(mm, buddy) - remaining;
>> +    }
>> +
>> +    list_add(&parent->tmp_link, &dfs);
>> +    start = drm_buddy_block_offset(parent) + offset;
>> +
>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>> +    if (err)
>> +        return -ENOSPC;
>> +
>> +    return 0;
>> +}
>> +
>> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
>> +                          u64 size,
>> +                          u64 min_block_size,
>> +                          struct drm_buddy_block *block,
>> +                          struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *first, *peer, *tmp;
>> +    struct drm_buddy_block *parent = NULL;
>> +    u64 start, offset = 0;
>> +    unsigned int order;
>> +    LIST_HEAD(dfs);
>> +    int err;
>> +
>> +    if (!block)
>> +        return -EINVAL;
>> +
>> +    order = drm_buddy_block_order(block);
>> +    /* Add freelist block to dfs list */
>> +    list_add(&block->tmp_link, &dfs);
>> +
>> +    tmp = block;
>> +    parent = block->parent;
>> +    while (parent) {
>> +        if (block->parent->left == block) {
>> +            if (parent->left != tmp) {
>> +                peer = parent->left;
>> +                break;
>> +            }
>> +        } else {
>> +            if (parent->right != tmp) {
>> +                peer = parent->right;
>> +                break;
>> +            }
>> +        }
>> +
>> +        tmp = parent;
>> +        parent = tmp->parent;
>> +    }
>> +
>> +    if (!parent)
>> +        return -ENOSPC;
>> +
>> +    do {
>> +        if (drm_buddy_block_is_allocated(peer))
>> +            return -ENOSPC;
>> +        /* Exit loop if peer block order is equal to block order */
>> +        if (drm_buddy_block_order(peer) == order)
>> +            break;
>> +
>> +        if (drm_buddy_block_is_split(peer)) {
>> +            /* Traverse down to the block order level */
>> +            if (block->parent->left == block)
>> +                peer = peer->right;
>> +            else
>> +                peer = peer->left;
>> +        } else {
>> +            break;
>> +        }
>> +    } while (1);
>> +
>> +    if (block->parent->left == block) {
>> +        u64 remaining;
>> +
>> +        /* Compute the leftover size for allocation */
>> +        remaining = max((size - drm_buddy_block_size(mm, block)),
>> +                min_block_size);
>> +        if (!IS_ALIGNED(remaining, min_block_size))
>> +            remaining = round_up(remaining, min_block_size);
>> +
>> +        /* Check if remaining size is greater than peer block size */
>> +        if (drm_buddy_block_size(mm, peer) < remaining)
>> +            return -ENOSPC;
>> +
>> +        offset = drm_buddy_block_size(mm, peer) - remaining;
>> +        /* Add left peer block to dfs list */
>> +        list_add(&peer->tmp_link, &dfs);
>> +    } else {
>> +        /* Add right peer block to dfs list */
>> +        list_add_tail(&peer->tmp_link, &dfs);
>> +    }
>> +
>> +    first = list_first_entry_or_null(&dfs,
>> +                     struct drm_buddy_block,
>> +                     tmp_link);
>> +    if (!first)
>> +        return -EINVAL;
>> +
>> +    start = drm_buddy_block_offset(first) + offset;
>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>> +    if (err)
>> +        return -ENOSPC;
>> +
>> +    return 0;
>> +}
>> +
>> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
>> +                           u64 size,
>> +                           u64 min_block_size,
>> +                           struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *block;
>> +    struct list_head *list;
>> +    unsigned long pages;
>> +    unsigned int order;
>> +    u64 modify_size;
>> +    int err;
>> +
>> +    modify_size = rounddown_pow_of_two(size);
>> +    pages = modify_size >> ilog2(mm->chunk_size);
>> +    order = fls(pages) - 1;
>> +    if (order == 0)
>> +        return -ENOSPC;
>> +
>> +    list = &mm->free_list[order];
>> +    if (list_empty(list))
>> +        return -ENOSPC;
>> +
>> +    list_for_each_entry_reverse(block, list, link) {
>> +        /* Allocate contiguous blocks from the buddy */
>> +        err = __alloc_contiguous_block_from_buddy(mm,
>> +                              size,
>> +                              min_block_size,
>> +                              block,
>> +                              blocks);
>> +        if (!err)
>> +            return 0;
>> +
>> +        /* Allocate contiguous blocks from tree traversal method */
>> +        err = __alloc_contiguous_block_from_peer(mm,
>> +                             size,
>> +                             min_block_size,
>> +                             block,
>> +                             blocks);
>> +        if (!err)
>> +            return 0;
>> +    }
>> +
>> +    return -ENOSPC;
>> +}
>
> Wondering if this would be a lot simpler if we can tweak alloc_range() 
> to support allocating as much as it can up to some size? If it runs 
> out of space it still returns an error but doesn't actually free what 
> it has successfully allocated. It then also tells us how much it 
> allocated. We can then allocate the rhs first and then from whatever 
> is left we can figure out the precise offset we need for the lhs? I 
> think that looks sort of similar to what the above does, but here we 
> can for the most part just re-use alloc_range()? So maybe something like:
>
> __alloc_range(..., u64 *total_allocated_on_err)
> {
>     ....
>     err_free:
>         if (err == -ENOSPC && total_allocated_on_err)
>             *total_allocated_on_err = total_allocated;
>         else
>             drm_buddy_free_list(mm, &allocated);
>         return err;
> }
>
> alloc_contig_try_harder()
> {
>      ....
>      list_for_each_entry_reverse(b, list, link) {
>          .....
>
>          rhs_offset = block_offset(b);
>          err =  __drm_buddy_alloc_range(mm, rhs_offset,
>                                         size, &filled,
>                                         blocks);
>          if (!err || err != -ENOSPC)
>              break;
>
>          lhs_size = size - filled;
>          lhs_offset = block_offset(b) - lhs_size;
>          err =  __drm_buddy_alloc_range(mm, lhs_offset,
>                                         lhs_size, NULL,
>                                         blocks_lhs);
>          list_splice(blocks_lhs, blocks);
>
>          ....
>      }
> }
>
> ?
>
The difference between the above approach and this patch is that the 
above approach tries to allocate first from the RHS and the remaining 
size from the LHS, while the patch tries to handle the RHS and LHS 
separately through the alloc_from_buddy() and alloc_from_peer() 
functions. I thought this would unblock the merge-back operation on 
either side (LHS or RHS). If the above approach doesn't cause much harm, 
we will try to allocate from both sides.

If we can add the immediate right block (i.e. its buddy) to the dfs list 
for RHS traversal, and only the immediate peer block for LHS traversal, 
this would eliminate the need to add the complete address space to the 
list and might reduce the number of iterations.

Also, we should ALIGN the LHS remaining size to the min_block_size. 
Otherwise, I see glitches in some of the workloads.

>> +
>>   /**
>>    * drm_buddy_block_trim - free unused pages
>>    *
>> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>    * @start: start of the allowed range for this block
>>    * @end: end of the allowed range for this block
>>    * @size: size of the allocation
>> - * @min_page_size: alignment of the allocation
>> + * @min_block_size: alignment of the allocation
>>    * @blocks: output list head to add allocated blocks
>>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>>    *
>> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>    */
>>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>                  u64 start, u64 end, u64 size,
>> -               u64 min_page_size,
>> +               u64 min_block_size,
>>                  struct list_head *blocks,
>>                  unsigned long flags)
>>   {
>>       struct drm_buddy_block *block = NULL;
>> +    u64 original_size, original_min_size;
>>       unsigned int min_order, order;
>> -    unsigned long pages;
>>       LIST_HEAD(allocated);
>> +    unsigned long pages;
>>       int err;
>>         if (size < mm->chunk_size)
>>           return -EINVAL;
>>   -    if (min_page_size < mm->chunk_size)
>> +    if (min_block_size < mm->chunk_size)
>>           return -EINVAL;
>>   -    if (!is_power_of_2(min_page_size))
>> +    if (!is_power_of_2(min_block_size))
>>           return -EINVAL;
>>         if (!IS_ALIGNED(start | end | size, mm->chunk_size))
>> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>       if (start + size == end)
>>           return __drm_buddy_alloc_range(mm, start, size, blocks);
>>   -    if (!IS_ALIGNED(size, min_page_size))
>> -        return -EINVAL;
>> +    original_size = size;
>> +    original_min_size = min_block_size;
>> +
>> +    /* Roundup the size to power of 2 */
>> +    if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
>> +        size = roundup_pow_of_two(size);
>> +        min_block_size = size;
>> +    /* Align size value to min_block_size */
>> +    } else if (!IS_ALIGNED(size, min_block_size)) {
>> +        size = round_up(size, min_block_size);
>> +    }
>>         pages = size >> ilog2(mm->chunk_size);
>>       order = fls(pages) - 1;
>> -    min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
>> +    min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>>         do {
>>           order = min(order, (unsigned int)fls(pages) - 1);
>> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>                   break;
>>                 if (order-- == min_order) {
>> +                if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
>> +                    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
>> +                    /*
>> +                     * Try contiguous block allocation through
>> +                     * tree traversal method
>> +                     */
>> +                    return __drm_buddy_alloc_contiguous_blocks(mm,
>> +                                           original_size,
>> +                                           original_min_size,
>> +                                           blocks);
>> +
>>                   err = -ENOSPC;
>>                   goto err_free;
>>               }
>> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>               break;
>>       } while (1);
>>   +    /* Trim the allocated block to the required size */
>> +    if (original_size != size) {
>> +        struct list_head *trim_list;
>> +        LIST_HEAD(temp);
>> +        u64 trim_size;
>> +
>> +        trim_list = &allocated;
>> +        trim_size = original_size;
>> +
>> +        if (!list_is_singular(&allocated)) {
>> +            block = list_last_entry(&allocated, typeof(*block), link);
>> +            list_move(&block->link, &temp);
>> +            trim_list = &temp;
>> +            trim_size = drm_buddy_block_size(mm, block) -
>> +                (size - original_size);
>> +        }
>> +
>> +        drm_buddy_block_trim(mm,
>> +                     trim_size,
>> +                     trim_list);
>> +
>> +        if (!list_empty(&temp))
>> +            list_splice_tail(trim_list, &allocated);
>> +    }
>> +
>>       list_splice_tail(&allocated, blocks);
>>       return 0;
>>   diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
>> index 572077ff8ae7..a5b39fc01003 100644
>> --- a/include/drm/drm_buddy.h
>> +++ b/include/drm/drm_buddy.h
>> @@ -22,8 +22,9 @@
>>       start__ >= max__ || size__ > max__ - start__; \
>>   })
>>   -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
>> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
>> +#define DRM_BUDDY_RANGE_ALLOCATION        BIT(0)
>> +#define DRM_BUDDY_TOPDOWN_ALLOCATION        BIT(1)
>> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION        BIT(2)
>>     struct drm_buddy_block {
>>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
>> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct 
>> drm_printer *p);
>>   void drm_buddy_block_print(struct drm_buddy *mm,
>>                  struct drm_buddy_block *block,
>>                  struct drm_printer *p);
>> -
>>   #endif

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
  2023-08-21 11:16   ` [Intel-gfx] " Christian König
@ 2023-08-23  5:52     ` Christian König
  -1 siblings, 0 replies; 20+ messages in thread
From: Christian König @ 2023-08-23  5:52 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Pan, Xinhui, matthew.auld

Am 21.08.23 um 13:16 schrieb Christian König:
> Am 21.08.23 um 12:14 schrieb Arunpravin Paneer Selvam:
>> The way now contiguous requests are implemented such that
>> the size rounded up to power of 2 and the corresponding order
>> block picked from the freelist.
>>
>> In addition to the older method, the new method will rounddown
>> the size to power of 2 and the corresponding order block picked
>> from the freelist. And for the remaining size we traverse the
>> tree and try to allocate either from the freelist block's buddy
>> or from the peer block. If the remaining size from peer/buddy
>> block is not free, we pick the next freelist block and repeat
>> the same method.
>
> I think it's worth mentioning that Xinhui tried something similar a 
> few month ago, but that didn't looked like it would work. For this 
> here I'm more confident.
>
> Of hand the implementation looks clean to me, but Matthew or others 
> which have more background in how the implementation works need to 
> take a look as well.

One more thing I've just noticed — not sure if Matthew already noted it: 
when you mention "fix" in the subject line, people might try to backport 
it; better to write "improve" and drop the "issues" at the end.

Regards,
Christian.

>
> Thanks,
> Christian.
>
>>
>> Moved contiguous/alignment size computation part and trim
>> function to the drm buddy manager.
>>
>> Signed-off-by: Arunpravin Paneer Selvam 
>> <Arunpravin.PaneerSelvam@amd.com>
>> ---
>>   drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
>>   include/drm/drm_buddy.h     |   6 +-
>>   2 files changed, 248 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
>> index 7098f125b54a..220f60c08a03 100644
>> --- a/drivers/gpu/drm/drm_buddy.c
>> +++ b/drivers/gpu/drm/drm_buddy.c
>> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct 
>> drm_buddy *mm,
>>       return __alloc_range(mm, &dfs, start, size, blocks);
>>   }
>>   +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
>> +                           u64 size,
>> +                           u64 min_block_size,
>> +                           struct drm_buddy_block *block,
>> +                           struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *buddy, *parent = NULL;
>> +    u64 start, offset = 0;
>> +    LIST_HEAD(dfs);
>> +    int err;
>> +
>> +    if (!block)
>> +        return -EINVAL;
>> +
>> +    buddy = __get_buddy(block);
>> +    if (!buddy)
>> +        return -ENOSPC;
>> +
>> +    if (drm_buddy_block_is_allocated(buddy))
>> +        return -ENOSPC;
>> +
>> +    parent = block->parent;
>> +    if (!parent)
>> +        return -ENOSPC;
>> +
>> +    if (block->parent->right == block) {
>> +        u64 remaining;
>> +
>> +        /* Compute the leftover size for allocation */
>> +        remaining = max((size - drm_buddy_block_size(mm, buddy)),
>> +                min_block_size);
>> +        if (!IS_ALIGNED(remaining, min_block_size))
>> +            remaining = round_up(remaining, min_block_size);
>> +
>> +        /* Check if remaining size is greater than buddy block size */
>> +        if (drm_buddy_block_size(mm, buddy) < remaining)
>> +            return -ENOSPC;
>> +
>> +        offset = drm_buddy_block_size(mm, buddy) - remaining;
>> +    }
>> +
>> +    list_add(&parent->tmp_link, &dfs);
>> +    start = drm_buddy_block_offset(parent) + offset;
>> +
>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>> +    if (err)
>> +        return -ENOSPC;
>> +
>> +    return 0;
>> +}
>> +
>> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
>> +                          u64 size,
>> +                          u64 min_block_size,
>> +                          struct drm_buddy_block *block,
>> +                          struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *first, *peer, *tmp;
>> +    struct drm_buddy_block *parent = NULL;
>> +    u64 start, offset = 0;
>> +    unsigned int order;
>> +    LIST_HEAD(dfs);
>> +    int err;
>> +
>> +    if (!block)
>> +        return -EINVAL;
>> +
>> +    order = drm_buddy_block_order(block);
>> +    /* Add freelist block to dfs list */
>> +    list_add(&block->tmp_link, &dfs);
>> +
>> +    tmp = block;
>> +    parent = block->parent;
>> +    while (parent) {
>> +        if (block->parent->left == block) {
>> +            if (parent->left != tmp) {
>> +                peer = parent->left;
>> +                break;
>> +            }
>> +        } else {
>> +            if (parent->right != tmp) {
>> +                peer = parent->right;
>> +                break;
>> +            }
>> +        }
>> +
>> +        tmp = parent;
>> +        parent = tmp->parent;
>> +    }
>> +
>> +    if (!parent)
>> +        return -ENOSPC;
>> +
>> +    do {
>> +        if (drm_buddy_block_is_allocated(peer))
>> +            return -ENOSPC;
>> +        /* Exit loop if peer block order is equal to block order */
>> +        if (drm_buddy_block_order(peer) == order)
>> +            break;
>> +
>> +        if (drm_buddy_block_is_split(peer)) {
>> +            /* Traverse down to the block order level */
>> +            if (block->parent->left == block)
>> +                peer = peer->right;
>> +            else
>> +                peer = peer->left;
>> +        } else {
>> +            break;
>> +        }
>> +    } while (1);
>> +
>> +    if (block->parent->left == block) {
>> +        u64 remaining;
>> +
>> +        /* Compute the leftover size for allocation */
>> +        remaining = max((size - drm_buddy_block_size(mm, block)),
>> +                min_block_size);
>> +        if (!IS_ALIGNED(remaining, min_block_size))
>> +            remaining = round_up(remaining, min_block_size);
>> +
>> +        /* Check if remaining size is greater than peer block size */
>> +        if (drm_buddy_block_size(mm, peer) < remaining)
>> +            return -ENOSPC;
>> +
>> +        offset = drm_buddy_block_size(mm, peer) - remaining;
>> +        /* Add left peer block to dfs list */
>> +        list_add(&peer->tmp_link, &dfs);
>> +    } else {
>> +        /* Add right peer block to dfs list */
>> +        list_add_tail(&peer->tmp_link, &dfs);
>> +    }
>> +
>> +    first = list_first_entry_or_null(&dfs,
>> +                     struct drm_buddy_block,
>> +                     tmp_link);
>> +    if (!first)
>> +        return -EINVAL;
>> +
>> +    start = drm_buddy_block_offset(first) + offset;
>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>> +    if (err)
>> +        return -ENOSPC;
>> +
>> +    return 0;
>> +}
>> +
>> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
>> +                           u64 size,
>> +                           u64 min_block_size,
>> +                           struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *block;
>> +    struct list_head *list;
>> +    unsigned long pages;
>> +    unsigned int order;
>> +    u64 modify_size;
>> +    int err;
>> +
>> +    modify_size = rounddown_pow_of_two(size);
>> +    pages = modify_size >> ilog2(mm->chunk_size);
>> +    order = fls(pages) - 1;
>> +    if (order == 0)
>> +        return -ENOSPC;
>> +
>> +    list = &mm->free_list[order];
>> +    if (list_empty(list))
>> +        return -ENOSPC;
>> +
>> +    list_for_each_entry_reverse(block, list, link) {
>> +        /* Allocate contiguous blocks from the buddy */
>> +        err = __alloc_contiguous_block_from_buddy(mm,
>> +                              size,
>> +                              min_block_size,
>> +                              block,
>> +                              blocks);
>> +        if (!err)
>> +            return 0;
>> +
>> +        /* Allocate contiguous blocks from tree traversal method */
>> +        err = __alloc_contiguous_block_from_peer(mm,
>> +                             size,
>> +                             min_block_size,
>> +                             block,
>> +                             blocks);
>> +        if (!err)
>> +            return 0;
>> +    }
>> +
>> +    return -ENOSPC;
>> +}
>> +
>>   /**
>>    * drm_buddy_block_trim - free unused pages
>>    *
>> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>    * @start: start of the allowed range for this block
>>    * @end: end of the allowed range for this block
>>    * @size: size of the allocation
>> - * @min_page_size: alignment of the allocation
>> + * @min_block_size: alignment of the allocation
>>    * @blocks: output list head to add allocated blocks
>>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>>    *
>> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>    */
>>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>                  u64 start, u64 end, u64 size,
>> -               u64 min_page_size,
>> +               u64 min_block_size,
>>                  struct list_head *blocks,
>>                  unsigned long flags)
>>   {
>>       struct drm_buddy_block *block = NULL;
>> +    u64 original_size, original_min_size;
>>       unsigned int min_order, order;
>> -    unsigned long pages;
>>       LIST_HEAD(allocated);
>> +    unsigned long pages;
>>       int err;
>>         if (size < mm->chunk_size)
>>           return -EINVAL;
>>   -    if (min_page_size < mm->chunk_size)
>> +    if (min_block_size < mm->chunk_size)
>>           return -EINVAL;
>>   -    if (!is_power_of_2(min_page_size))
>> +    if (!is_power_of_2(min_block_size))
>>           return -EINVAL;
>>         if (!IS_ALIGNED(start | end | size, mm->chunk_size))
>> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>       if (start + size == end)
>>           return __drm_buddy_alloc_range(mm, start, size, blocks);
>>   -    if (!IS_ALIGNED(size, min_page_size))
>> -        return -EINVAL;
>> +    original_size = size;
>> +    original_min_size = min_block_size;
>> +
>> +    /* Roundup the size to power of 2 */
>> +    if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
>> +        size = roundup_pow_of_two(size);
>> +        min_block_size = size;
>> +    /* Align size value to min_block_size */
>> +    } else if (!IS_ALIGNED(size, min_block_size)) {
>> +        size = round_up(size, min_block_size);
>> +    }
>>         pages = size >> ilog2(mm->chunk_size);
>>       order = fls(pages) - 1;
>> -    min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
>> +    min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>>         do {
>>           order = min(order, (unsigned int)fls(pages) - 1);
>> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>                   break;
>>                 if (order-- == min_order) {
>> +                if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
>> +                    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
>> +                    /*
>> +                     * Try contiguous block allocation through
>> +                     * tree traversal method
>> +                     */
>> +                    return __drm_buddy_alloc_contiguous_blocks(mm,
>> +                                           original_size,
>> +                                           original_min_size,
>> +                                           blocks);
>> +
>>                   err = -ENOSPC;
>>                   goto err_free;
>>               }
>> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>               break;
>>       } while (1);
>>   +    /* Trim the allocated block to the required size */
>> +    if (original_size != size) {
>> +        struct list_head *trim_list;
>> +        LIST_HEAD(temp);
>> +        u64 trim_size;
>> +
>> +        trim_list = &allocated;
>> +        trim_size = original_size;
>> +
>> +        if (!list_is_singular(&allocated)) {
>> +            block = list_last_entry(&allocated, typeof(*block), link);
>> +            list_move(&block->link, &temp);
>> +            trim_list = &temp;
>> +            trim_size = drm_buddy_block_size(mm, block) -
>> +                (size - original_size);
>> +        }
>> +
>> +        drm_buddy_block_trim(mm,
>> +                     trim_size,
>> +                     trim_list);
>> +
>> +        if (!list_empty(&temp))
>> +            list_splice_tail(trim_list, &allocated);
>> +    }
>> +
>>       list_splice_tail(&allocated, blocks);
>>       return 0;
>>   diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
>> index 572077ff8ae7..a5b39fc01003 100644
>> --- a/include/drm/drm_buddy.h
>> +++ b/include/drm/drm_buddy.h
>> @@ -22,8 +22,9 @@
>>       start__ >= max__ || size__ > max__ - start__; \
>>   })
>>   -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
>> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
>> +#define DRM_BUDDY_RANGE_ALLOCATION        BIT(0)
>> +#define DRM_BUDDY_TOPDOWN_ALLOCATION        BIT(1)
>> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION        BIT(2)
>>     struct drm_buddy_block {
>>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
>> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct 
>> drm_printer *p);
>>   void drm_buddy_block_print(struct drm_buddy *mm,
>>                  struct drm_buddy_block *block,
>>                  struct drm_printer *p);
>> -
>>   #endif
>


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
@ 2023-08-23  5:52     ` Christian König
  0 siblings, 0 replies; 20+ messages in thread
From: Christian König @ 2023-08-23  5:52 UTC (permalink / raw)
  To: Arunpravin Paneer Selvam, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Pan, Xinhui, matthew.auld

Am 21.08.23 um 13:16 schrieb Christian König:
> Am 21.08.23 um 12:14 schrieb Arunpravin Paneer Selvam:
>> Currently, contiguous requests are implemented such that
>> the size is rounded up to a power of 2 and the corresponding
>> order block is picked from the freelist.
>>
>> In addition to the older method, the new method will round down
>> the size to a power of 2 and pick the corresponding order block
>> from the freelist. For the remaining size, we traverse the
>> tree and try to allocate either from the freelist block's buddy
>> or from the peer block. If the remaining size from the peer/buddy
>> block is not free, we pick the next freelist block and repeat
>> the same method.
>
> I think it's worth mentioning that Xinhui tried something similar a
> few months ago, but that didn't look like it would work. For this
> one I'm more confident.
>
> Offhand the implementation looks clean to me, but Matthew or others
> who have more background in how the implementation works need to
> take a look as well.

One more thing I've just noticed, not sure if Matthew already noted it: 
When you mention "fix" in the subject line people might try to backport 
it, better write "improve" and drop the "issues" at the end.

Regards,
Christian.

>
> Thanks,
> Christian.
>
>>
>> Moved contiguous/alignment size computation part and trim
>> function to the drm buddy manager.
>>
>> Signed-off-by: Arunpravin Paneer Selvam 
>> <Arunpravin.PaneerSelvam@amd.com>
>> ---
>>   drivers/gpu/drm/drm_buddy.c | 253 ++++++++++++++++++++++++++++++++++--
>>   include/drm/drm_buddy.h     |   6 +-
>>   2 files changed, 248 insertions(+), 11 deletions(-)
>>
>> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
>> index 7098f125b54a..220f60c08a03 100644
>> --- a/drivers/gpu/drm/drm_buddy.c
>> +++ b/drivers/gpu/drm/drm_buddy.c
>> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct 
>> drm_buddy *mm,
>>       return __alloc_range(mm, &dfs, start, size, blocks);
>>   }
>>   +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
>> +                           u64 size,
>> +                           u64 min_block_size,
>> +                           struct drm_buddy_block *block,
>> +                           struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *buddy, *parent = NULL;
>> +    u64 start, offset = 0;
>> +    LIST_HEAD(dfs);
>> +    int err;
>> +
>> +    if (!block)
>> +        return -EINVAL;
>> +
>> +    buddy = __get_buddy(block);
>> +    if (!buddy)
>> +        return -ENOSPC;
>> +
>> +    if (drm_buddy_block_is_allocated(buddy))
>> +        return -ENOSPC;
>> +
>> +    parent = block->parent;
>> +    if (!parent)
>> +        return -ENOSPC;
>> +
>> +    if (block->parent->right == block) {
>> +        u64 remaining;
>> +
>> +        /* Compute the leftover size for allocation */
>> +        remaining = max((size - drm_buddy_block_size(mm, buddy)),
>> +                min_block_size);
>> +        if (!IS_ALIGNED(remaining, min_block_size))
>> +            remaining = round_up(remaining, min_block_size);
>> +
>> +        /* Check if remaining size is greater than buddy block size */
>> +        if (drm_buddy_block_size(mm, buddy) < remaining)
>> +            return -ENOSPC;
>> +
>> +        offset = drm_buddy_block_size(mm, buddy) - remaining;
>> +    }
>> +
>> +    list_add(&parent->tmp_link, &dfs);
>> +    start = drm_buddy_block_offset(parent) + offset;
>> +
>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>> +    if (err)
>> +        return -ENOSPC;
>> +
>> +    return 0;
>> +}
>> +
>> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
>> +                          u64 size,
>> +                          u64 min_block_size,
>> +                          struct drm_buddy_block *block,
>> +                          struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *first, *peer, *tmp;
>> +    struct drm_buddy_block *parent = NULL;
>> +    u64 start, offset = 0;
>> +    unsigned int order;
>> +    LIST_HEAD(dfs);
>> +    int err;
>> +
>> +    if (!block)
>> +        return -EINVAL;
>> +
>> +    order = drm_buddy_block_order(block);
>> +    /* Add freelist block to dfs list */
>> +    list_add(&block->tmp_link, &dfs);
>> +
>> +    tmp = block;
>> +    parent = block->parent;
>> +    while (parent) {
>> +        if (block->parent->left == block) {
>> +            if (parent->left != tmp) {
>> +                peer = parent->left;
>> +                break;
>> +            }
>> +        } else {
>> +            if (parent->right != tmp) {
>> +                peer = parent->right;
>> +                break;
>> +            }
>> +        }
>> +
>> +        tmp = parent;
>> +        parent = tmp->parent;
>> +    }
>> +
>> +    if (!parent)
>> +        return -ENOSPC;
>> +
>> +    do {
>> +        if (drm_buddy_block_is_allocated(peer))
>> +            return -ENOSPC;
>> +        /* Exit loop if peer block order is equal to block order */
>> +        if (drm_buddy_block_order(peer) == order)
>> +            break;
>> +
>> +        if (drm_buddy_block_is_split(peer)) {
>> +            /* Traverse down to the block order level */
>> +            if (block->parent->left == block)
>> +                peer = peer->right;
>> +            else
>> +                peer = peer->left;
>> +        } else {
>> +            break;
>> +        }
>> +    } while (1);
>> +
>> +    if (block->parent->left == block) {
>> +        u64 remaining;
>> +
>> +        /* Compute the leftover size for allocation */
>> +        remaining = max((size - drm_buddy_block_size(mm, block)),
>> +                min_block_size);
>> +        if (!IS_ALIGNED(remaining, min_block_size))
>> +            remaining = round_up(remaining, min_block_size);
>> +
>> +        /* Check if remaining size is greater than peer block size */
>> +        if (drm_buddy_block_size(mm, peer) < remaining)
>> +            return -ENOSPC;
>> +
>> +        offset = drm_buddy_block_size(mm, peer) - remaining;
>> +        /* Add left peer block to dfs list */
>> +        list_add(&peer->tmp_link, &dfs);
>> +    } else {
>> +        /* Add right peer block to dfs list */
>> +        list_add_tail(&peer->tmp_link, &dfs);
>> +    }
>> +
>> +    first = list_first_entry_or_null(&dfs,
>> +                     struct drm_buddy_block,
>> +                     tmp_link);
>> +    if (!first)
>> +        return -EINVAL;
>> +
>> +    start = drm_buddy_block_offset(first) + offset;
>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>> +    if (err)
>> +        return -ENOSPC;
>> +
>> +    return 0;
>> +}
>> +
>> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
>> +                           u64 size,
>> +                           u64 min_block_size,
>> +                           struct list_head *blocks)
>> +{
>> +    struct drm_buddy_block *block;
>> +    struct list_head *list;
>> +    unsigned long pages;
>> +    unsigned int order;
>> +    u64 modify_size;
>> +    int err;
>> +
>> +    modify_size = rounddown_pow_of_two(size);
>> +    pages = modify_size >> ilog2(mm->chunk_size);
>> +    order = fls(pages) - 1;
>> +    if (order == 0)
>> +        return -ENOSPC;
>> +
>> +    list = &mm->free_list[order];
>> +    if (list_empty(list))
>> +        return -ENOSPC;
>> +
>> +    list_for_each_entry_reverse(block, list, link) {
>> +        /* Allocate contiguous blocks from the buddy */
>> +        err = __alloc_contiguous_block_from_buddy(mm,
>> +                              size,
>> +                              min_block_size,
>> +                              block,
>> +                              blocks);
>> +        if (!err)
>> +            return 0;
>> +
>> +        /* Allocate contiguous blocks from tree traversal method */
>> +        err = __alloc_contiguous_block_from_peer(mm,
>> +                             size,
>> +                             min_block_size,
>> +                             block,
>> +                             blocks);
>> +        if (!err)
>> +            return 0;
>> +    }
>> +
>> +    return -ENOSPC;
>> +}
>> +
>>   /**
>>    * drm_buddy_block_trim - free unused pages
>>    *
>> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>    * @start: start of the allowed range for this block
>>    * @end: end of the allowed range for this block
>>    * @size: size of the allocation
>> - * @min_page_size: alignment of the allocation
>> + * @min_block_size: alignment of the allocation
>>    * @blocks: output list head to add allocated blocks
>>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>>    *
>> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>    */
>>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>                  u64 start, u64 end, u64 size,
>> -               u64 min_page_size,
>> +               u64 min_block_size,
>>                  struct list_head *blocks,
>>                  unsigned long flags)
>>   {
>>       struct drm_buddy_block *block = NULL;
>> +    u64 original_size, original_min_size;
>>       unsigned int min_order, order;
>> -    unsigned long pages;
>>       LIST_HEAD(allocated);
>> +    unsigned long pages;
>>       int err;
>>         if (size < mm->chunk_size)
>>           return -EINVAL;
>>   -    if (min_page_size < mm->chunk_size)
>> +    if (min_block_size < mm->chunk_size)
>>           return -EINVAL;
>>   -    if (!is_power_of_2(min_page_size))
>> +    if (!is_power_of_2(min_block_size))
>>           return -EINVAL;
>>         if (!IS_ALIGNED(start | end | size, mm->chunk_size))
>> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>       if (start + size == end)
>>           return __drm_buddy_alloc_range(mm, start, size, blocks);
>>   -    if (!IS_ALIGNED(size, min_page_size))
>> -        return -EINVAL;
>> +    original_size = size;
>> +    original_min_size = min_block_size;
>> +
>> +    /* Roundup the size to power of 2 */
>> +    if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
>> +        size = roundup_pow_of_two(size);
>> +        min_block_size = size;
>> +    /* Align size value to min_block_size */
>> +    } else if (!IS_ALIGNED(size, min_block_size)) {
>> +        size = round_up(size, min_block_size);
>> +    }
>>         pages = size >> ilog2(mm->chunk_size);
>>       order = fls(pages) - 1;
>> -    min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
>> +    min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>>         do {
>>           order = min(order, (unsigned int)fls(pages) - 1);
>> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>                   break;
>>                 if (order-- == min_order) {
>> +                if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
>> +                    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
>> +                    /*
>> +                     * Try contiguous block allocation through
>> +                     * tree traversal method
>> +                     */
>> +                    return __drm_buddy_alloc_contiguous_blocks(mm,
>> +                                           original_size,
>> +                                           original_min_size,
>> +                                           blocks);
>> +
>>                   err = -ENOSPC;
>>                   goto err_free;
>>               }
>> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>               break;
>>       } while (1);
>>   +    /* Trim the allocated block to the required size */
>> +    if (original_size != size) {
>> +        struct list_head *trim_list;
>> +        LIST_HEAD(temp);
>> +        u64 trim_size;
>> +
>> +        trim_list = &allocated;
>> +        trim_size = original_size;
>> +
>> +        if (!list_is_singular(&allocated)) {
>> +            block = list_last_entry(&allocated, typeof(*block), link);
>> +            list_move(&block->link, &temp);
>> +            trim_list = &temp;
>> +            trim_size = drm_buddy_block_size(mm, block) -
>> +                (size - original_size);
>> +        }
>> +
>> +        drm_buddy_block_trim(mm,
>> +                     trim_size,
>> +                     trim_list);
>> +
>> +        if (!list_empty(&temp))
>> +            list_splice_tail(trim_list, &allocated);
>> +    }
>> +
>>       list_splice_tail(&allocated, blocks);
>>       return 0;
>>   diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
>> index 572077ff8ae7..a5b39fc01003 100644
>> --- a/include/drm/drm_buddy.h
>> +++ b/include/drm/drm_buddy.h
>> @@ -22,8 +22,9 @@
>>       start__ >= max__ || size__ > max__ - start__; \
>>   })
>>   -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
>> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
>> +#define DRM_BUDDY_RANGE_ALLOCATION        BIT(0)
>> +#define DRM_BUDDY_TOPDOWN_ALLOCATION        BIT(1)
>> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION        BIT(2)
>>     struct drm_buddy_block {
>>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
>> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, struct 
>> drm_printer *p);
>>   void drm_buddy_block_print(struct drm_buddy *mm,
>>                  struct drm_buddy_block *block,
>>                  struct drm_printer *p);
>> -
>>   #endif
>


^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
  2023-08-23  5:52     ` [Intel-gfx] " Christian König
@ 2023-08-23  6:35       ` Arunpravin Paneer Selvam
  -1 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-23  6:35 UTC (permalink / raw)
  To: Christian König, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Pan, Xinhui, matthew.auld


On 22/08/23 22:52, Christian König wrote:
> Am 21.08.23 um 13:16 schrieb Christian König:
>> Am 21.08.23 um 12:14 schrieb Arunpravin Paneer Selvam:
>>> Currently, contiguous requests are implemented such that
>>> the size is rounded up to a power of 2 and the corresponding
>>> order block is picked from the freelist.
>>>
>>> In addition to the older method, the new method will round down
>>> the size to a power of 2 and pick the corresponding order block
>>> from the freelist. For the remaining size, we traverse the
>>> tree and try to allocate either from the freelist block's buddy
>>> or from the peer block. If the remaining size from the peer/buddy
>>> block is not free, we pick the next freelist block and repeat
>>> the same method.
>>
>> I think it's worth mentioning that Xinhui tried something similar a
>> few months ago, but that didn't look like it would work. For this
>> one I'm more confident.
>>
>> Offhand the implementation looks clean to me, but Matthew or others
>> who have more background in how the implementation works need to
>> take a look as well.
>
> One more thing I've just noticed, not sure if Matthew already noted 
> it: When you mention "fix" in the subject line people might try to 
> backport it, better write "improve" and drop the "issues" at the end.

I will modify in the next version.

Thanks,
Arun.

>
> Regards,
> Christian.
>
>>
>> Thanks,
>> Christian.
>>
>>>
>>> Moved contiguous/alignment size computation part and trim
>>> function to the drm buddy manager.
>>>
>>> Signed-off-by: Arunpravin Paneer Selvam 
>>> <Arunpravin.PaneerSelvam@amd.com>
>>> ---
>>>   drivers/gpu/drm/drm_buddy.c | 253 
>>> ++++++++++++++++++++++++++++++++++--
>>>   include/drm/drm_buddy.h     |   6 +-
>>>   2 files changed, 248 insertions(+), 11 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
>>> index 7098f125b54a..220f60c08a03 100644
>>> --- a/drivers/gpu/drm/drm_buddy.c
>>> +++ b/drivers/gpu/drm/drm_buddy.c
>>> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct 
>>> drm_buddy *mm,
>>>       return __alloc_range(mm, &dfs, start, size, blocks);
>>>   }
>>>   +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
>>> +                           u64 size,
>>> +                           u64 min_block_size,
>>> +                           struct drm_buddy_block *block,
>>> +                           struct list_head *blocks)
>>> +{
>>> +    struct drm_buddy_block *buddy, *parent = NULL;
>>> +    u64 start, offset = 0;
>>> +    LIST_HEAD(dfs);
>>> +    int err;
>>> +
>>> +    if (!block)
>>> +        return -EINVAL;
>>> +
>>> +    buddy = __get_buddy(block);
>>> +    if (!buddy)
>>> +        return -ENOSPC;
>>> +
>>> +    if (drm_buddy_block_is_allocated(buddy))
>>> +        return -ENOSPC;
>>> +
>>> +    parent = block->parent;
>>> +    if (!parent)
>>> +        return -ENOSPC;
>>> +
>>> +    if (block->parent->right == block) {
>>> +        u64 remaining;
>>> +
>>> +        /* Compute the leftover size for allocation */
>>> +        remaining = max((size - drm_buddy_block_size(mm, buddy)),
>>> +                min_block_size);
>>> +        if (!IS_ALIGNED(remaining, min_block_size))
>>> +            remaining = round_up(remaining, min_block_size);
>>> +
>>> +        /* Check if remaining size is greater than buddy block size */
>>> +        if (drm_buddy_block_size(mm, buddy) < remaining)
>>> +            return -ENOSPC;
>>> +
>>> +        offset = drm_buddy_block_size(mm, buddy) - remaining;
>>> +    }
>>> +
>>> +    list_add(&parent->tmp_link, &dfs);
>>> +    start = drm_buddy_block_offset(parent) + offset;
>>> +
>>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>>> +    if (err)
>>> +        return -ENOSPC;
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
>>> +                          u64 size,
>>> +                          u64 min_block_size,
>>> +                          struct drm_buddy_block *block,
>>> +                          struct list_head *blocks)
>>> +{
>>> +    struct drm_buddy_block *first, *peer, *tmp;
>>> +    struct drm_buddy_block *parent = NULL;
>>> +    u64 start, offset = 0;
>>> +    unsigned int order;
>>> +    LIST_HEAD(dfs);
>>> +    int err;
>>> +
>>> +    if (!block)
>>> +        return -EINVAL;
>>> +
>>> +    order = drm_buddy_block_order(block);
>>> +    /* Add freelist block to dfs list */
>>> +    list_add(&block->tmp_link, &dfs);
>>> +
>>> +    tmp = block;
>>> +    parent = block->parent;
>>> +    while (parent) {
>>> +        if (block->parent->left == block) {
>>> +            if (parent->left != tmp) {
>>> +                peer = parent->left;
>>> +                break;
>>> +            }
>>> +        } else {
>>> +            if (parent->right != tmp) {
>>> +                peer = parent->right;
>>> +                break;
>>> +            }
>>> +        }
>>> +
>>> +        tmp = parent;
>>> +        parent = tmp->parent;
>>> +    }
>>> +
>>> +    if (!parent)
>>> +        return -ENOSPC;
>>> +
>>> +    do {
>>> +        if (drm_buddy_block_is_allocated(peer))
>>> +            return -ENOSPC;
>>> +        /* Exit loop if peer block order is equal to block order */
>>> +        if (drm_buddy_block_order(peer) == order)
>>> +            break;
>>> +
>>> +        if (drm_buddy_block_is_split(peer)) {
>>> +            /* Traverse down to the block order level */
>>> +            if (block->parent->left == block)
>>> +                peer = peer->right;
>>> +            else
>>> +                peer = peer->left;
>>> +        } else {
>>> +            break;
>>> +        }
>>> +    } while (1);
>>> +
>>> +    if (block->parent->left == block) {
>>> +        u64 remaining;
>>> +
>>> +        /* Compute the leftover size for allocation */
>>> +        remaining = max((size - drm_buddy_block_size(mm, block)),
>>> +                min_block_size);
>>> +        if (!IS_ALIGNED(remaining, min_block_size))
>>> +            remaining = round_up(remaining, min_block_size);
>>> +
>>> +        /* Check if remaining size is greater than peer block size */
>>> +        if (drm_buddy_block_size(mm, peer) < remaining)
>>> +            return -ENOSPC;
>>> +
>>> +        offset = drm_buddy_block_size(mm, peer) - remaining;
>>> +        /* Add left peer block to dfs list */
>>> +        list_add(&peer->tmp_link, &dfs);
>>> +    } else {
>>> +        /* Add right peer block to dfs list */
>>> +        list_add_tail(&peer->tmp_link, &dfs);
>>> +    }
>>> +
>>> +    first = list_first_entry_or_null(&dfs,
>>> +                     struct drm_buddy_block,
>>> +                     tmp_link);
>>> +    if (!first)
>>> +        return -EINVAL;
>>> +
>>> +    start = drm_buddy_block_offset(first) + offset;
>>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>>> +    if (err)
>>> +        return -ENOSPC;
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
>>> +                           u64 size,
>>> +                           u64 min_block_size,
>>> +                           struct list_head *blocks)
>>> +{
>>> +    struct drm_buddy_block *block;
>>> +    struct list_head *list;
>>> +    unsigned long pages;
>>> +    unsigned int order;
>>> +    u64 modify_size;
>>> +    int err;
>>> +
>>> +    modify_size = rounddown_pow_of_two(size);
>>> +    pages = modify_size >> ilog2(mm->chunk_size);
>>> +    order = fls(pages) - 1;
>>> +    if (order == 0)
>>> +        return -ENOSPC;
>>> +
>>> +    list = &mm->free_list[order];
>>> +    if (list_empty(list))
>>> +        return -ENOSPC;
>>> +
>>> +    list_for_each_entry_reverse(block, list, link) {
>>> +        /* Allocate contiguous blocks from the buddy */
>>> +        err = __alloc_contiguous_block_from_buddy(mm,
>>> +                              size,
>>> +                              min_block_size,
>>> +                              block,
>>> +                              blocks);
>>> +        if (!err)
>>> +            return 0;
>>> +
>>> +        /* Allocate contiguous blocks from tree traversal method */
>>> +        err = __alloc_contiguous_block_from_peer(mm,
>>> +                             size,
>>> +                             min_block_size,
>>> +                             block,
>>> +                             blocks);
>>> +        if (!err)
>>> +            return 0;
>>> +    }
>>> +
>>> +    return -ENOSPC;
>>> +}
>>> +
>>>   /**
>>>    * drm_buddy_block_trim - free unused pages
>>>    *
>>> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>>    * @start: start of the allowed range for this block
>>>    * @end: end of the allowed range for this block
>>>    * @size: size of the allocation
>>> - * @min_page_size: alignment of the allocation
>>> + * @min_block_size: alignment of the allocation
>>>    * @blocks: output list head to add allocated blocks
>>>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>>>    *
>>> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>>    */
>>>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>>                  u64 start, u64 end, u64 size,
>>> -               u64 min_page_size,
>>> +               u64 min_block_size,
>>>                  struct list_head *blocks,
>>>                  unsigned long flags)
>>>   {
>>>       struct drm_buddy_block *block = NULL;
>>> +    u64 original_size, original_min_size;
>>>       unsigned int min_order, order;
>>> -    unsigned long pages;
>>>       LIST_HEAD(allocated);
>>> +    unsigned long pages;
>>>       int err;
>>>         if (size < mm->chunk_size)
>>>           return -EINVAL;
>>>   -    if (min_page_size < mm->chunk_size)
>>> +    if (min_block_size < mm->chunk_size)
>>>           return -EINVAL;
>>>   -    if (!is_power_of_2(min_page_size))
>>> +    if (!is_power_of_2(min_block_size))
>>>           return -EINVAL;
>>>         if (!IS_ALIGNED(start | end | size, mm->chunk_size))
>>> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>>       if (start + size == end)
>>>           return __drm_buddy_alloc_range(mm, start, size, blocks);
>>>   -    if (!IS_ALIGNED(size, min_page_size))
>>> -        return -EINVAL;
>>> +    original_size = size;
>>> +    original_min_size = min_block_size;
>>> +
>>> +    /* Roundup the size to power of 2 */
>>> +    if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
>>> +        size = roundup_pow_of_two(size);
>>> +        min_block_size = size;
>>> +    /* Align size value to min_block_size */
>>> +    } else if (!IS_ALIGNED(size, min_block_size)) {
>>> +        size = round_up(size, min_block_size);
>>> +    }
>>>         pages = size >> ilog2(mm->chunk_size);
>>>       order = fls(pages) - 1;
>>> -    min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
>>> +    min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>>>         do {
>>>           order = min(order, (unsigned int)fls(pages) - 1);
>>> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>>                   break;
>>>                 if (order-- == min_order) {
>>> +                if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
>>> +                    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
>>> +                    /*
>>> +                     * Try contiguous block allocation through
>>> +                     * tree traversal method
>>> +                     */
>>> +                    return __drm_buddy_alloc_contiguous_blocks(mm,
>>> +                                           original_size,
>>> +                                           original_min_size,
>>> +                                           blocks);
>>> +
>>>                   err = -ENOSPC;
>>>                   goto err_free;
>>>               }
>>> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>>               break;
>>>       } while (1);
>>>   +    /* Trim the allocated block to the required size */
>>> +    if (original_size != size) {
>>> +        struct list_head *trim_list;
>>> +        LIST_HEAD(temp);
>>> +        u64 trim_size;
>>> +
>>> +        trim_list = &allocated;
>>> +        trim_size = original_size;
>>> +
>>> +        if (!list_is_singular(&allocated)) {
>>> +            block = list_last_entry(&allocated, typeof(*block), link);
>>> +            list_move(&block->link, &temp);
>>> +            trim_list = &temp;
>>> +            trim_size = drm_buddy_block_size(mm, block) -
>>> +                (size - original_size);
>>> +        }
>>> +
>>> +        drm_buddy_block_trim(mm,
>>> +                     trim_size,
>>> +                     trim_list);
>>> +
>>> +        if (!list_empty(&temp))
>>> +            list_splice_tail(trim_list, &allocated);
>>> +    }
>>> +
>>>       list_splice_tail(&allocated, blocks);
>>>       return 0;
>>>   diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
>>> index 572077ff8ae7..a5b39fc01003 100644
>>> --- a/include/drm/drm_buddy.h
>>> +++ b/include/drm/drm_buddy.h
>>> @@ -22,8 +22,9 @@
>>>       start__ >= max__ || size__ > max__ - start__; \
>>>   })
>>>   -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
>>> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
>>> +#define DRM_BUDDY_RANGE_ALLOCATION        BIT(0)
>>> +#define DRM_BUDDY_TOPDOWN_ALLOCATION        BIT(1)
>>> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION        BIT(2)
>>>     struct drm_buddy_block {
>>>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
>>> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, 
>>> struct drm_printer *p);
>>>   void drm_buddy_block_print(struct drm_buddy *mm,
>>>                  struct drm_buddy_block *block,
>>>                  struct drm_printer *p);
>>> -
>>>   #endif
>>
>

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [Intel-gfx] [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues
@ 2023-08-23  6:35       ` Arunpravin Paneer Selvam
  0 siblings, 0 replies; 20+ messages in thread
From: Arunpravin Paneer Selvam @ 2023-08-23  6:35 UTC (permalink / raw)
  To: Christian König, dri-devel, amd-gfx, intel-gfx
  Cc: alexander.deucher, Pan, Xinhui, matthew.auld


On 22/08/23 22:52, Christian König wrote:
> Am 21.08.23 um 13:16 schrieb Christian König:
>> Am 21.08.23 um 12:14 schrieb Arunpravin Paneer Selvam:
>>> Contiguous requests are currently implemented such that
>>> the size is rounded up to a power of 2 and the corresponding order
>>> block is picked from the freelist.
>>>
>>> In addition to the older method, the new method will round down
>>> the size to a power of 2 and pick the corresponding order block
>>> from the freelist. For the remaining size we traverse the
>>> tree and try to allocate either from the freelist block's buddy
>>> or from the peer block. If the remaining size from the peer/buddy
>>> block is not free, we pick the next freelist block and repeat
>>> the same method.
>>
>> I think it's worth mentioning that Xinhui tried something similar a 
>> few months ago, but that didn't look like it would work. For this 
>> one I'm more confident.
>>
>> Offhand the implementation looks clean to me, but Matthew or others 
>> who have more background in how the implementation works need to 
>> take a look as well.
>
> One more thing I've just noticed, not sure if Matthew already noted 
> it: When you mention "fix" in the subject line people might try to 
> backport it, better write "improve" and drop the "issues" at the end.

I will modify in the next version.

Thanks,
Arun.

>
> Regards,
> Christian.
>
>>
>> Thanks,
>> Christian.
>>
>>>
>>> Moved contiguous/alignment size computation part and trim
>>> function to the drm buddy manager.
>>>
>>> Signed-off-by: Arunpravin Paneer Selvam 
>>> <Arunpravin.PaneerSelvam@amd.com>
>>> ---
>>>   drivers/gpu/drm/drm_buddy.c | 253 
>>> ++++++++++++++++++++++++++++++++++--
>>>   include/drm/drm_buddy.h     |   6 +-
>>>   2 files changed, 248 insertions(+), 11 deletions(-)
>>>
>>> diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
>>> index 7098f125b54a..220f60c08a03 100644
>>> --- a/drivers/gpu/drm/drm_buddy.c
>>> +++ b/drivers/gpu/drm/drm_buddy.c
>>> @@ -569,6 +569,197 @@ static int __drm_buddy_alloc_range(struct 
>>> drm_buddy *mm,
>>>       return __alloc_range(mm, &dfs, start, size, blocks);
>>>   }
>>>   +static int __alloc_contiguous_block_from_buddy(struct drm_buddy *mm,
>>> +                           u64 size,
>>> +                           u64 min_block_size,
>>> +                           struct drm_buddy_block *block,
>>> +                           struct list_head *blocks)
>>> +{
>>> +    struct drm_buddy_block *buddy, *parent = NULL;
>>> +    u64 start, offset = 0;
>>> +    LIST_HEAD(dfs);
>>> +    int err;
>>> +
>>> +    if (!block)
>>> +        return -EINVAL;
>>> +
>>> +    buddy = __get_buddy(block);
>>> +    if (!buddy)
>>> +        return -ENOSPC;
>>> +
>>> +    if (drm_buddy_block_is_allocated(buddy))
>>> +        return -ENOSPC;
>>> +
>>> +    parent = block->parent;
>>> +    if (!parent)
>>> +        return -ENOSPC;
>>> +
>>> +    if (block->parent->right == block) {
>>> +        u64 remaining;
>>> +
>>> +        /* Compute the leftover size for allocation */
>>> +        remaining = max((size - drm_buddy_block_size(mm, buddy)),
>>> +                min_block_size);
>>> +        if (!IS_ALIGNED(remaining, min_block_size))
>>> +            remaining = round_up(remaining, min_block_size);
>>> +
>>> +        /* Check if remaining size is greater than buddy block size */
>>> +        if (drm_buddy_block_size(mm, buddy) < remaining)
>>> +            return -ENOSPC;
>>> +
>>> +        offset = drm_buddy_block_size(mm, buddy) - remaining;
>>> +    }
>>> +
>>> +    list_add(&parent->tmp_link, &dfs);
>>> +    start = drm_buddy_block_offset(parent) + offset;
>>> +
>>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>>> +    if (err)
>>> +        return -ENOSPC;
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +static int __alloc_contiguous_block_from_peer(struct drm_buddy *mm,
>>> +                          u64 size,
>>> +                          u64 min_block_size,
>>> +                          struct drm_buddy_block *block,
>>> +                          struct list_head *blocks)
>>> +{
>>> +    struct drm_buddy_block *first, *peer, *tmp;
>>> +    struct drm_buddy_block *parent = NULL;
>>> +    u64 start, offset = 0;
>>> +    unsigned int order;
>>> +    LIST_HEAD(dfs);
>>> +    int err;
>>> +
>>> +    if (!block)
>>> +        return -EINVAL;
>>> +
>>> +    order = drm_buddy_block_order(block);
>>> +    /* Add freelist block to dfs list */
>>> +    list_add(&block->tmp_link, &dfs);
>>> +
>>> +    tmp = block;
>>> +    parent = block->parent;
>>> +    while (parent) {
>>> +        if (block->parent->left == block) {
>>> +            if (parent->left != tmp) {
>>> +                peer = parent->left;
>>> +                break;
>>> +            }
>>> +        } else {
>>> +            if (parent->right != tmp) {
>>> +                peer = parent->right;
>>> +                break;
>>> +            }
>>> +        }
>>> +
>>> +        tmp = parent;
>>> +        parent = tmp->parent;
>>> +    }
>>> +
>>> +    if (!parent)
>>> +        return -ENOSPC;
>>> +
>>> +    do {
>>> +        if (drm_buddy_block_is_allocated(peer))
>>> +            return -ENOSPC;
>>> +        /* Exit loop if peer block order is equal to block order */
>>> +        if (drm_buddy_block_order(peer) == order)
>>> +            break;
>>> +
>>> +        if (drm_buddy_block_is_split(peer)) {
>>> +            /* Traverse down to the block order level */
>>> +            if (block->parent->left == block)
>>> +                peer = peer->right;
>>> +            else
>>> +                peer = peer->left;
>>> +        } else {
>>> +            break;
>>> +        }
>>> +    } while (1);
>>> +
>>> +    if (block->parent->left == block) {
>>> +        u64 remaining;
>>> +
>>> +        /* Compute the leftover size for allocation */
>>> +        remaining = max((size - drm_buddy_block_size(mm, block)),
>>> +                min_block_size);
>>> +        if (!IS_ALIGNED(remaining, min_block_size))
>>> +            remaining = round_up(remaining, min_block_size);
>>> +
>>> +        /* Check if remaining size is greater than peer block size */
>>> +        if (drm_buddy_block_size(mm, peer) < remaining)
>>> +            return -ENOSPC;
>>> +
>>> +        offset = drm_buddy_block_size(mm, peer) - remaining;
>>> +        /* Add left peer block to dfs list */
>>> +        list_add(&peer->tmp_link, &dfs);
>>> +    } else {
>>> +        /* Add right peer block to dfs list */
>>> +        list_add_tail(&peer->tmp_link, &dfs);
>>> +    }
>>> +
>>> +    first = list_first_entry_or_null(&dfs,
>>> +                     struct drm_buddy_block,
>>> +                     tmp_link);
>>> +    if (!first)
>>> +        return -EINVAL;
>>> +
>>> +    start = drm_buddy_block_offset(first) + offset;
>>> +    err = __alloc_range(mm, &dfs, start, size, blocks);
>>> +    if (err)
>>> +        return -ENOSPC;
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +static int __drm_buddy_alloc_contiguous_blocks(struct drm_buddy *mm,
>>> +                           u64 size,
>>> +                           u64 min_block_size,
>>> +                           struct list_head *blocks)
>>> +{
>>> +    struct drm_buddy_block *block;
>>> +    struct list_head *list;
>>> +    unsigned long pages;
>>> +    unsigned int order;
>>> +    u64 modify_size;
>>> +    int err;
>>> +
>>> +    modify_size = rounddown_pow_of_two(size);
>>> +    pages = modify_size >> ilog2(mm->chunk_size);
>>> +    order = fls(pages) - 1;
>>> +    if (order == 0)
>>> +        return -ENOSPC;
>>> +
>>> +    list = &mm->free_list[order];
>>> +    if (list_empty(list))
>>> +        return -ENOSPC;
>>> +
>>> +    list_for_each_entry_reverse(block, list, link) {
>>> +        /* Allocate contiguous blocks from the buddy */
>>> +        err = __alloc_contiguous_block_from_buddy(mm,
>>> +                              size,
>>> +                              min_block_size,
>>> +                              block,
>>> +                              blocks);
>>> +        if (!err)
>>> +            return 0;
>>> +
>>> +        /* Allocate contiguous blocks from tree traversal method */
>>> +        err = __alloc_contiguous_block_from_peer(mm,
>>> +                             size,
>>> +                             min_block_size,
>>> +                             block,
>>> +                             blocks);
>>> +        if (!err)
>>> +            return 0;
>>> +    }
>>> +
>>> +    return -ENOSPC;
>>> +}
>>> +
>>>   /**
>>>    * drm_buddy_block_trim - free unused pages
>>>    *
>>> @@ -645,7 +836,7 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>>    * @start: start of the allowed range for this block
>>>    * @end: end of the allowed range for this block
>>>    * @size: size of the allocation
>>> - * @min_page_size: alignment of the allocation
>>> + * @min_block_size: alignment of the allocation
>>>    * @blocks: output list head to add allocated blocks
>>>    * @flags: DRM_BUDDY_*_ALLOCATION flags
>>>    *
>>> @@ -660,23 +851,24 @@ EXPORT_SYMBOL(drm_buddy_block_trim);
>>>    */
>>>   int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>>                  u64 start, u64 end, u64 size,
>>> -               u64 min_page_size,
>>> +               u64 min_block_size,
>>>                  struct list_head *blocks,
>>>                  unsigned long flags)
>>>   {
>>>       struct drm_buddy_block *block = NULL;
>>> +    u64 original_size, original_min_size;
>>>       unsigned int min_order, order;
>>> -    unsigned long pages;
>>>       LIST_HEAD(allocated);
>>> +    unsigned long pages;
>>>       int err;
>>>         if (size < mm->chunk_size)
>>>           return -EINVAL;
>>>   -    if (min_page_size < mm->chunk_size)
>>> +    if (min_block_size < mm->chunk_size)
>>>           return -EINVAL;
>>>   -    if (!is_power_of_2(min_page_size))
>>> +    if (!is_power_of_2(min_block_size))
>>>           return -EINVAL;
>>>         if (!IS_ALIGNED(start | end | size, mm->chunk_size))
>>> @@ -692,12 +884,21 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>>       if (start + size == end)
>>>           return __drm_buddy_alloc_range(mm, start, size, blocks);
>>>   -    if (!IS_ALIGNED(size, min_page_size))
>>> -        return -EINVAL;
>>> +    original_size = size;
>>> +    original_min_size = min_block_size;
>>> +
>>> +    /* Roundup the size to power of 2 */
>>> +    if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
>>> +        size = roundup_pow_of_two(size);
>>> +        min_block_size = size;
>>> +    /* Align size value to min_block_size */
>>> +    } else if (!IS_ALIGNED(size, min_block_size)) {
>>> +        size = round_up(size, min_block_size);
>>> +    }
>>>         pages = size >> ilog2(mm->chunk_size);
>>>       order = fls(pages) - 1;
>>> -    min_order = ilog2(min_page_size) - ilog2(mm->chunk_size);
>>> +    min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);
>>>         do {
>>>           order = min(order, (unsigned int)fls(pages) - 1);
>>> @@ -716,6 +917,17 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>>                   break;
>>>                 if (order-- == min_order) {
>>> +                if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
>>> +                    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
>>> +                    /*
>>> +                     * Try contiguous block allocation through
>>> +                     * tree traversal method
>>> +                     */
>>> +                    return __drm_buddy_alloc_contiguous_blocks(mm,
>>> +                                           original_size,
>>> +                                           original_min_size,
>>> +                                           blocks);
>>> +
>>>                   err = -ENOSPC;
>>>                   goto err_free;
>>>               }
>>> @@ -732,6 +944,31 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
>>>               break;
>>>       } while (1);
>>>   +    /* Trim the allocated block to the required size */
>>> +    if (original_size != size) {
>>> +        struct list_head *trim_list;
>>> +        LIST_HEAD(temp);
>>> +        u64 trim_size;
>>> +
>>> +        trim_list = &allocated;
>>> +        trim_size = original_size;
>>> +
>>> +        if (!list_is_singular(&allocated)) {
>>> +            block = list_last_entry(&allocated, typeof(*block), link);
>>> +            list_move(&block->link, &temp);
>>> +            trim_list = &temp;
>>> +            trim_size = drm_buddy_block_size(mm, block) -
>>> +                (size - original_size);
>>> +        }
>>> +
>>> +        drm_buddy_block_trim(mm,
>>> +                     trim_size,
>>> +                     trim_list);
>>> +
>>> +        if (!list_empty(&temp))
>>> +            list_splice_tail(trim_list, &allocated);
>>> +    }
>>> +
>>>       list_splice_tail(&allocated, blocks);
>>>       return 0;
>>>   diff --git a/include/drm/drm_buddy.h b/include/drm/drm_buddy.h
>>> index 572077ff8ae7..a5b39fc01003 100644
>>> --- a/include/drm/drm_buddy.h
>>> +++ b/include/drm/drm_buddy.h
>>> @@ -22,8 +22,9 @@
>>>       start__ >= max__ || size__ > max__ - start__; \
>>>   })
>>>   -#define DRM_BUDDY_RANGE_ALLOCATION (1 << 0)
>>> -#define DRM_BUDDY_TOPDOWN_ALLOCATION (1 << 1)
>>> +#define DRM_BUDDY_RANGE_ALLOCATION        BIT(0)
>>> +#define DRM_BUDDY_TOPDOWN_ALLOCATION        BIT(1)
>>> +#define DRM_BUDDY_CONTIGUOUS_ALLOCATION        BIT(2)
>>>     struct drm_buddy_block {
>>>   #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
>>> @@ -155,5 +156,4 @@ void drm_buddy_print(struct drm_buddy *mm, 
>>> struct drm_printer *p);
>>>   void drm_buddy_block_print(struct drm_buddy *mm,
>>>                  struct drm_buddy_block *block,
>>>                  struct drm_printer *p);
>>> -
>>>   #endif
>>
>

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2023-08-23  6:35 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2023-08-21 10:14 [PATCH 1/3] drm/buddy: Fix contiguous memory allocation issues Arunpravin Paneer Selvam
2023-08-21 10:14 ` [Intel-gfx] " Arunpravin Paneer Selvam
2023-08-21 10:14 ` [PATCH 2/3] drm/amdgpu: Remove the contiguous computation and trim Arunpravin Paneer Selvam
2023-08-21 10:14   ` [Intel-gfx] " Arunpravin Paneer Selvam
2023-08-21 10:14 ` [PATCH 3/3] drm/i915: " Arunpravin Paneer Selvam
2023-08-21 10:14   ` [Intel-gfx] " Arunpravin Paneer Selvam
2023-08-21 10:44 ` [Intel-gfx] ✗ Fi.CI.CHECKPATCH: warning for series starting with [1/3] drm/buddy: Fix contiguous memory allocation issues Patchwork
2023-08-21 10:44 ` [Intel-gfx] ✗ Fi.CI.SPARSE: " Patchwork
2023-08-21 10:58 ` [Intel-gfx] ✓ Fi.CI.BAT: success " Patchwork
2023-08-21 11:16 ` [PATCH 1/3] " Christian König
2023-08-21 11:16   ` [Intel-gfx] " Christian König
2023-08-23  5:52   ` Christian König
2023-08-23  5:52     ` [Intel-gfx] " Christian König
2023-08-23  6:35     ` Arunpravin Paneer Selvam
2023-08-23  6:35       ` [Intel-gfx] " Arunpravin Paneer Selvam
2023-08-21 13:04 ` [Intel-gfx] ✗ Fi.CI.IGT: failure for series starting with [1/3] " Patchwork
2023-08-21 17:46 ` [PATCH 1/3] " Matthew Auld
2023-08-21 17:46   ` [Intel-gfx] " Matthew Auld
2023-08-22 14:01   ` Arunpravin Paneer Selvam
2023-08-22 14:01     ` Arunpravin Paneer Selvam

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.