From: Matthew Auld <matthew.auld@intel.com>
To: Arunpravin <Arunpravin.PaneerSelvam@amd.com>,
	dri-devel@lists.freedesktop.org, intel-gfx@lists.freedesktop.org,
	amd-gfx@lists.freedesktop.org
Cc: alexander.deucher@amd.com, tzimmermann@suse.de, christian.koenig@amd.com
Subject: Re: [PATCH 5/7] drm/selftests: add drm buddy pessimistic testcase
Date: Tue, 8 Feb 2022 10:17:17 +0000	[thread overview]
Message-ID: <1f9eff31-8c79-599b-d4dc-f36f47639dd5@intel.com> (raw)
In-Reply-To: <20220203133234.3350-5-Arunpravin.PaneerSelvam@amd.com>

On 03/02/2022 13:32, Arunpravin wrote:
> create a pot-sized mm, then allocate one of each possible
> order within. This should leave the mm with exactly one
> page left.
> 
> Signed-off-by: Arunpravin <Arunpravin.PaneerSelvam@amd.com>
> ---
>   .../gpu/drm/selftests/drm_buddy_selftests.h   |   1 +
>   drivers/gpu/drm/selftests/test-drm_buddy.c    | 153 ++++++++++++++++++
>   2 files changed, 154 insertions(+)
> 
> diff --git a/drivers/gpu/drm/selftests/drm_buddy_selftests.h b/drivers/gpu/drm/selftests/drm_buddy_selftests.h
> index 21a6bd38864f..b14f04a1de19 100644
> --- a/drivers/gpu/drm/selftests/drm_buddy_selftests.h
> +++ b/drivers/gpu/drm/selftests/drm_buddy_selftests.h
> @@ -10,3 +10,4 @@ selftest(sanitycheck, igt_sanitycheck) /* keep first (selfcheck for igt) */
>   selftest(buddy_alloc_limit, igt_buddy_alloc_limit)
>   selftest(buddy_alloc_range, igt_buddy_alloc_range)
>   selftest(buddy_alloc_optimistic, igt_buddy_alloc_optimistic)
> +selftest(buddy_alloc_pessimistic, igt_buddy_alloc_pessimistic)
> diff --git a/drivers/gpu/drm/selftests/test-drm_buddy.c b/drivers/gpu/drm/selftests/test-drm_buddy.c
> index b193d9556fb4..e97f583ed0cd 100644
> --- a/drivers/gpu/drm/selftests/test-drm_buddy.c
> +++ b/drivers/gpu/drm/selftests/test-drm_buddy.c
> @@ -314,6 +314,159 @@ static void igt_mm_config(u64 *size, u64 *chunk_size)
>   	*size = (u64)s << 12;
>   }
>   
> +static int igt_buddy_alloc_pessimistic(void *arg)
> +{
> +	u64 mm_size, size, min_page_size, start = 0;
> +	struct drm_buddy_block *block, *bn;
> +	const unsigned int max_order = 16;
> +	unsigned long flags = 0;
> +	struct drm_buddy mm;
> +	unsigned int order;
> +	LIST_HEAD(blocks);
> +	LIST_HEAD(tmp);
> +	int err;
> +
> +	/*
> +	 * Create a pot-sized mm, then allocate one of each possible
> +	 * order within. This should leave the mm with exactly one
> +	 * page left.
> +	 */
> +
> +	mm_size = PAGE_SIZE << max_order;
> +	err = drm_buddy_init(&mm, mm_size, PAGE_SIZE);
> +	if (err) {
> +		pr_err("buddy_init failed(%d)\n", err);
> +		return err;
> +	}
> +	BUG_ON(mm.max_order != max_order);
> +
> +	for (order = 0; order < max_order; order++) {
> +		size = min_page_size = get_size(order, PAGE_SIZE);
> +		err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
> +		if (err) {
> +			pr_info("buddy_alloc hit -ENOMEM with order=%d\n",
> +				order);
> +			goto err;
> +		}
> +
> +		block = list_first_entry_or_null(&tmp,
> +						 struct drm_buddy_block,
> +						 link);
> +		if (!block) {
> +			pr_err("alloc_blocks has no blocks\n");
> +			err = -EINVAL;
> +			goto err;
> +		}
> +
> +		list_del(&block->link);
> +		list_add_tail(&block->link, &blocks);
> +	}
> +
> +	/* And now the last remaining block available */
> +	size = min_page_size = get_size(0, PAGE_SIZE);
> +	err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
> +	if (err) {
> +		pr_info("buddy_alloc hit -ENOMEM on final alloc\n");
> +		goto err;
> +	}
> +
> +	block = list_first_entry_or_null(&tmp,
> +					 struct drm_buddy_block,
> +					 link);
> +	if (!block) {
> +		pr_err("alloc_blocks has no blocks\n");
> +		err = -EINVAL;
> +		goto err;
> +	}
> +
> +	list_del(&block->link);
> +	list_add_tail(&block->link, &blocks);
> +
> +	/* Should be completely full! */
> +	for (order = max_order; order--; ) {
> +		size = min_page_size = get_size(order, PAGE_SIZE);
> +		err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
> +		if (!err) {
> +			pr_info("buddy_alloc unexpectedly succeeded at order %d, it should be full!",
> +				order);
> +			block = list_first_entry_or_null(&tmp,
> +							 struct drm_buddy_block,
> +							 link);
> +			if (!block) {
> +				pr_err("alloc_blocks has no blocks\n");
> +				err = -EINVAL;
> +				goto err;
> +			}
> +
> +			list_del(&block->link);
> +			list_add_tail(&block->link, &blocks);
> +			err = -EINVAL;
> +			goto err;
> +		}
> +	}
> +
> +	block = list_last_entry(&blocks, typeof(*block), link);
> +	list_del(&block->link);
> +	drm_buddy_free_block(&mm, block);
> +
> +	/* As we free in increasing size, we make available larger blocks */
> +	order = 1;
> +	list_for_each_entry_safe(block, bn, &blocks, link) {
> +		list_del(&block->link);
> +		drm_buddy_free_block(&mm, block);
> +
> +		size = min_page_size = get_size(order, PAGE_SIZE);
> +		err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
> +		if (err) {
> +			pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
> +				order);
> +			goto err;
> +		}
> +
> +		block = list_first_entry_or_null(&tmp,
> +						 struct drm_buddy_block,
> +						 link);
> +		if (!block) {
> +			pr_err("alloc_blocks has no blocks\n");
> +			err = -EINVAL;
> +			goto err;
> +		}
> +
> +		list_del(&block->link);
> +		drm_buddy_free_block(&mm, block);
> +		order++;
> +	}
> +
> +	/* To confirm, now the whole mm should be available */
> +	size = min_page_size = get_size(max_order, PAGE_SIZE);
> +	err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, min_page_size, &tmp, flags);
> +	if (err) {
> +		pr_info("buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
> +			max_order);
> +		goto err;
> +	}
> +
> +	block = list_first_entry_or_null(&tmp,
> +					 struct drm_buddy_block,
> +					 link);
> +	if (!block) {
> +		pr_err("alloc_blocks has no blocks\n");
> +		err = -EINVAL;
> +		goto err;
> +	}
> +
> +	list_del(&block->link);
> +	drm_buddy_free_block(&mm, block);
> +
> +	if (!err)

Always true?

Reviewed-by: Matthew Auld <matthew.auld@intel.com>

> +		pr_info("%s - succeeded\n", __func__);
> +
> +err:
> +	drm_buddy_free_list(&mm, &blocks);
> +	drm_buddy_fini(&mm);
> +	return err;
> +}
> +
>   static int igt_buddy_alloc_optimistic(void *arg)
>   {
>   	u64 mm_size, size, min_page_size, start = 0;

Thread overview: 52+ messages
2022-02-03 13:32 [PATCH 1/7] drm/selftests: Move i915 buddy selftests into drm Arunpravin
2022-02-03 13:32 ` Arunpravin
2022-02-03 13:32 ` [Intel-gfx] " Arunpravin
2022-02-03 13:32 ` [PATCH 2/7] drm/selftests: add drm buddy alloc limit testcase Arunpravin
2022-02-03 13:32   ` Arunpravin
2022-02-03 13:32   ` [Intel-gfx] " Arunpravin
2022-02-08  9:40   ` Matthew Auld
2022-02-08  9:40     ` Matthew Auld
2022-02-08  9:40     ` [Intel-gfx] " Matthew Auld
2022-02-22 19:07     ` Arunpravin
2022-02-22 19:07       ` Arunpravin
2022-02-22 19:07       ` [Intel-gfx] " Arunpravin
2022-02-03 13:32 ` [PATCH 3/7] drm/selftests: add drm buddy alloc range testcase Arunpravin
2022-02-03 13:32   ` Arunpravin
2022-02-03 13:32   ` [Intel-gfx] " Arunpravin
2022-02-08 10:03   ` Matthew Auld
2022-02-08 10:03     ` Matthew Auld
2022-02-08 10:03     ` [Intel-gfx] " Matthew Auld
2022-02-03 13:32 ` [PATCH 4/7] drm/selftests: add drm buddy optimistic testcase Arunpravin
2022-02-03 13:32   ` Arunpravin
2022-02-03 13:32   ` [Intel-gfx] " Arunpravin
2022-02-08 10:12   ` Matthew Auld
2022-02-08 10:12     ` Matthew Auld
2022-02-08 10:12     ` [Intel-gfx] " Matthew Auld
2022-02-03 13:32 ` [PATCH 5/7] drm/selftests: add drm buddy pessimistic testcase Arunpravin
2022-02-03 13:32   ` Arunpravin
2022-02-03 13:32   ` [Intel-gfx] " Arunpravin
2022-02-08 10:17   ` Matthew Auld [this message]
2022-02-08 10:17     ` Matthew Auld
2022-02-08 10:17     ` [Intel-gfx] " Matthew Auld
2022-02-03 13:32 ` [PATCH 6/7] drm/selftests: add drm buddy smoke testcase Arunpravin
2022-02-03 13:32   ` Arunpravin
2022-02-03 13:32   ` [Intel-gfx] " Arunpravin
2022-02-08 10:22   ` Matthew Auld
2022-02-08 10:22     ` Matthew Auld
2022-02-08 10:22     ` [Intel-gfx] " Matthew Auld
2022-02-03 13:32 ` [PATCH 7/7] drm/selftests: add drm buddy pathological testcase Arunpravin
2022-02-03 13:32   ` Arunpravin
2022-02-03 13:32   ` [Intel-gfx] " Arunpravin
2022-02-08 10:26   ` Matthew Auld
2022-02-08 10:26     ` Matthew Auld
2022-02-08 10:26     ` [Intel-gfx] " Matthew Auld
2022-02-03 14:12 ` [Intel-gfx] ✗ Fi.CI.BUILD: failure for series starting with [1/7] drm/selftests: Move i915 buddy selftests into drm Patchwork
2022-02-03 14:33 ` [PATCH 1/7] " Christian König
2022-02-03 14:33   ` Christian König
2022-02-03 14:33   ` [Intel-gfx] " Christian König
2022-02-08 10:35 ` Matthew Auld
2022-02-08 10:35   ` Matthew Auld
2022-02-08 10:35   ` [Intel-gfx] " Matthew Auld
2022-02-22 18:35   ` Arunpravin
2022-02-22 18:35     ` Arunpravin
2022-02-22 18:35     ` [Intel-gfx] " Arunpravin
