linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Liam Howlett <liam.howlett@oracle.com>
To: "maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	"damon @ lists . linux . dev" <damon@lists.linux.dev>,
	SeongJae Park <sj@kernel.org>,
	David Hildenbrand <david@redhat.com>
Subject: [PATCH v10 05/69] radix tree test suite: add support for slab bulk APIs
Date: Tue, 21 Jun 2022 20:46:49 +0000	[thread overview]
Message-ID: <20220621204632.3370049-6-Liam.Howlett@oracle.com> (raw)
In-Reply-To: <20220621204632.3370049-1-Liam.Howlett@oracle.com>

From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Add support for kmem_cache_free_bulk() and kmem_cache_alloc_bulk() to the
radix tree test suite.

Link: https://lkml.kernel.org/r/20220504010716.661115-7-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Howells <dhowells@redhat.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: SeongJae Park <sj@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 tools/include/linux/slab.h       |   4 ++
 tools/testing/radix-tree/linux.c | 118 ++++++++++++++++++++++++++++++-
 2 files changed, 120 insertions(+), 2 deletions(-)

diff --git a/tools/include/linux/slab.h b/tools/include/linux/slab.h
index 0616409513eb..311759ea25e9 100644
--- a/tools/include/linux/slab.h
+++ b/tools/include/linux/slab.h
@@ -41,4 +41,8 @@ struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
 			unsigned int align, unsigned int flags,
 			void (*ctor)(void *));
 
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list);
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+			  void **list);
+
 #endif		/* _TOOLS_SLAB_H */
diff --git a/tools/testing/radix-tree/linux.c b/tools/testing/radix-tree/linux.c
index f20529ae4dbe..2048d12c31df 100644
--- a/tools/testing/radix-tree/linux.c
+++ b/tools/testing/radix-tree/linux.c
@@ -93,14 +93,13 @@ void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 	return p;
 }
 
-void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
 {
 	assert(objp);
 	uatomic_dec(&nr_allocated);
 	uatomic_dec(&cachep->nr_allocated);
 	if (kmalloc_verbose)
 		printf("Freeing %p to slab\n", objp);
-	pthread_mutex_lock(&cachep->lock);
 	if (cachep->nr_objs > 10 || cachep->align) {
 		memset(objp, POISON_FREE, cachep->size);
 		free(objp);
@@ -110,9 +109,80 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 		node->parent = cachep->objs;
 		cachep->objs = node;
 	}
+}
+
+void kmem_cache_free(struct kmem_cache *cachep, void *objp)
+{
+	pthread_mutex_lock(&cachep->lock);
+	kmem_cache_free_locked(cachep, objp);
 	pthread_mutex_unlock(&cachep->lock);
 }
 
+void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
+{
+	if (kmalloc_verbose)
+		pr_debug("Bulk free %p[0-%zu]\n", list, size - 1);
+
+	pthread_mutex_lock(&cachep->lock);
+	for (size_t i = 0; i < size; i++)
+		kmem_cache_free_locked(cachep, list[i]);
+	pthread_mutex_unlock(&cachep->lock);
+}
+
+int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
+			  void **p)
+{
+	size_t i;
+
+	if (kmalloc_verbose)
+		pr_debug("Bulk alloc %zu\n", size);
+
+	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
+		if (cachep->non_kernel < size)
+			return 0;
+
+		cachep->non_kernel -= size;
+	}
+
+	pthread_mutex_lock(&cachep->lock);
+	if (cachep->nr_objs >= size) {
+		struct radix_tree_node *node;
+
+		for (i = 0; i < size; i++) {
+			node = cachep->objs;
+			cachep->nr_objs--;
+			cachep->objs = node->parent;
+			p[i] = node;
+			node->parent = NULL;
+		}
+		pthread_mutex_unlock(&cachep->lock);
+	} else {
+		pthread_mutex_unlock(&cachep->lock);
+		for (i = 0; i < size; i++) {
+			if (cachep->align) {
+				posix_memalign(&p[i], cachep->align,
+					       cachep->size);
+			} else {
+				p[i] = malloc(cachep->size);
+			}
+			if (cachep->ctor)
+				cachep->ctor(p[i]);
+			else if (gfp & __GFP_ZERO)
+				memset(p[i], 0, cachep->size);
+		}
+	}
+
+	for (i = 0; i < size; i++) {
+		uatomic_inc(&nr_allocated);
+		uatomic_inc(&cachep->nr_allocated);
+		uatomic_inc(&cachep->nr_tallocated);
+		if (kmalloc_verbose)
+			printf("Allocating %p from slab\n", p[i]);
+	}
+
+	return size;
+}
+
 struct kmem_cache *
 kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 		unsigned int flags, void (*ctor)(void *))
@@ -130,3 +200,47 @@ kmem_cache_create(const char *name, unsigned int size, unsigned int align,
 	ret->non_kernel = 0;
 	return ret;
 }
+
+/*
+ * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
+ */
+void test_kmem_cache_bulk(void)
+{
+	int i;
+	void *list[12];
+	static struct kmem_cache *test_cache, *test_cache2;
+
+	/*
+	 * Testing the bulk allocators without aligned kmem_cache to force the
+	 * bulk alloc/free to reuse
+	 */
+	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);
+
+	for (i = 0; i < 5; i++)
+		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+	for (i = 0; i < 5; i++)
+		kmem_cache_free(test_cache, list[i]);
+	assert(test_cache->nr_objs == 5);
+
+	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
+	kmem_cache_free_bulk(test_cache, 5, list);
+
+	for (i = 0; i < 12 ; i++)
+		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);
+
+	for (i = 0; i < 12; i++)
+		kmem_cache_free(test_cache, list[i]);
+
+	/* The last free will not be kept around */
+	assert(test_cache->nr_objs == 11);
+
+	/* Aligned caches will immediately free */
+	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);
+
+	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
+	kmem_cache_free_bulk(test_cache2, 10, list);
+	assert(!test_cache2->nr_objs);
+
+
+}
-- 
2.35.1

  parent reply	other threads:[~2022-06-21 20:47 UTC|newest]

Thread overview: 87+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-06-21 20:46 [PATCH v10 00/69] Introducing the Maple Tree Liam Howlett
2022-06-21 20:46 ` [PATCH v10 01/69] Maple Tree: add new data structure Liam Howlett
2022-06-21 20:46 ` [PATCH v10 04/69] radix tree test suite: add allocation counts and size to kmem_cache Liam Howlett
2022-06-21 20:46 ` [PATCH v10 02/69] radix tree test suite: add pr_err define Liam Howlett
2022-06-21 20:46 ` [PATCH v10 03/69] radix tree test suite: add kmem_cache_set_non_kernel() Liam Howlett
2022-06-21 20:46 ` Liam Howlett [this message]
2022-06-21 20:46 ` [PATCH v10 06/69] radix tree test suite: add lockdep_is_held to header Liam Howlett
2022-06-21 20:46 ` [PATCH v10 07/69] lib/test_maple_tree: add testing for maple tree Liam Howlett
2022-06-21 20:46 ` [PATCH v10 08/69] mm: start tracking VMAs with " Liam Howlett
2022-06-21 20:46 ` [PATCH v10 10/69] mmap: use the VMA iterator in count_vma_pages_range() Liam Howlett
2022-06-21 21:13   ` David Hildenbrand
2022-06-24 13:10     ` Liam Howlett
2022-06-21 20:46 ` [PATCH v10 09/69] mm: add VMA iterator Liam Howlett
2022-06-21 21:10   ` David Hildenbrand
2022-06-23 17:03     ` Matthew Wilcox
2022-06-24 14:31       ` Liam Howlett
2022-06-21 20:46 ` [PATCH v10 11/69] mm/mmap: use the maple tree in find_vma() instead of the rbtree Liam Howlett
2022-06-21 21:04   ` David Hildenbrand
2022-06-24 13:05     ` Liam Howlett
2022-06-21 20:46 ` [PATCH v10 13/69] mm/mmap: use maple tree for unmapped_area{_topdown} Liam Howlett
2022-06-23 17:25   ` David Hildenbrand
2022-07-14  0:47     ` Liam Howlett
2022-07-09  7:29   ` Alexander Gordeev
2022-07-12  1:49     ` Liam Howlett
2022-07-19 14:20   ` Sven Schnelle
2022-07-19 14:54     ` Liam Howlett
2022-06-21 20:46 ` [PATCH v10 14/69] kernel/fork: use maple tree for dup_mmap() during forking Liam Howlett
2022-06-23 18:42   ` David Hildenbrand
2022-07-13 19:05     ` Liam Howlett
2022-06-21 20:46 ` [PATCH v10 12/69] mm/mmap: use the maple tree for find_vma_prev() instead of the rbtree Liam Howlett
2022-06-21 21:17   ` David Hildenbrand
2022-06-21 20:46 ` [PATCH v10 17/69] mm: remove rb tree Liam Howlett
2022-06-21 20:46 ` [PATCH v10 16/69] proc: remove VMA rbtree use from nommu Liam Howlett
2022-06-21 20:46 ` [PATCH v10 15/69] damon: convert __damon_va_three_regions to use the VMA iterator Liam Howlett
2022-06-23 18:47   ` David Hildenbrand
2022-06-21 20:46 ` [PATCH v10 20/69] mm: optimize find_exact_vma() to use vma_lookup() Liam Howlett
2022-06-21 20:46 ` [PATCH v10 19/69] xen: use vma_lookup() in privcmd_ioctl_mmap() Liam Howlett
2022-06-21 20:46 ` [PATCH v10 18/69] mmap: change zeroing of maple tree in __vma_adjust() Liam Howlett
2022-06-21 20:46 ` [PATCH v10 23/69] mm: use maple tree operations for find_vma_intersection() Liam Howlett
2022-06-21 20:46 ` [PATCH v10 22/69] mm/mmap: change do_brk_flags() to expand existing VMA and add do_brk_munmap() Liam Howlett
2022-06-21 20:46 ` [PATCH v10 21/69] mm/khugepaged: optimize collapse_pte_mapped_thp() by using vma_lookup() Liam Howlett
2022-06-21 20:46 ` [PATCH v10 25/69] mm: remove vmacache Liam Howlett
2022-06-21 20:46 ` [PATCH v10 24/69] mm/mmap: use advanced maple tree API for mmap_region() Liam Howlett
2022-06-21 20:46 ` [PATCH v10 26/69] mm: convert vma_lookup() to use mtree_load() Liam Howlett
2022-06-21 20:47 ` [PATCH v10 27/69] mm/mmap: move mmap_region() below do_munmap() Liam Howlett
2022-06-21 20:47 ` [PATCH v10 28/69] mm/mmap: reorganize munmap to use maple states Liam Howlett
2022-06-21 20:47 ` [PATCH v10 31/69] arm64: Change elfcore for_each_mte_vma() to use VMA iterator Liam Howlett
2022-06-21 20:47 ` [PATCH v10 29/69] mm/mmap: change do_brk_munmap() to use do_mas_align_munmap() Liam Howlett
2022-06-21 20:47 ` [PATCH v10 30/69] arm64: remove mmap linked list from vdso Liam Howlett
2022-06-21 20:47 ` [PATCH v10 34/69] s390: remove vma linked list walks Liam Howlett
2022-06-21 20:47 ` [PATCH v10 35/69] x86: " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 33/69] powerpc: remove mmap " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 32/69] parisc: remove mmap linked list from cache handling Liam Howlett
2022-06-21 20:47 ` [PATCH v10 38/69] optee: remove vma linked list walk Liam Howlett
2022-06-21 20:47 ` [PATCH v10 39/69] um: " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 36/69] xtensa: remove vma linked list walks Liam Howlett
2022-06-21 20:47 ` [PATCH v10 37/69] cxl: remove vma linked list walk Liam Howlett
2022-06-21 20:47 ` [PATCH v10 41/69] exec: use VMA iterator instead of linked list Liam Howlett
2022-06-21 20:47 ` [PATCH v10 40/69] coredump: remove vma linked list walk Liam Howlett
2022-06-21 20:47 ` [PATCH v10 43/69] fs/proc/task_mmu: stop using linked list and highest_vm_end Liam Howlett
2022-06-21 20:47 ` [PATCH v10 42/69] fs/proc/base: use maple tree iterators in place of linked list Liam Howlett
2022-06-21 20:47 ` [PATCH v10 45/69] ipc/shm: use VMA iterator instead " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 44/69] userfaultfd: use maple tree iterator to iterate VMAs Liam Howlett
2022-06-21 20:47 ` [PATCH v10 47/69] perf: use VMA iterator Liam Howlett
2022-06-21 20:47 ` [PATCH v10 46/69] acct: use VMA iterator instead of linked list Liam Howlett
2022-06-21 20:47 ` [PATCH v10 48/69] sched: use maple tree iterator to walk VMAs Liam Howlett
2022-06-21 20:47 ` [PATCH v10 49/69] fork: use VMA iterator Liam Howlett
2022-06-21 20:47 ` [PATCH v10 50/69] bpf: remove VMA linked list Liam Howlett
2022-06-21 20:47 ` [PATCH v10 51/69] mm/gup: use maple tree navigation instead of " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 52/69] mm/khugepaged: stop using vma " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 53/69] mm/ksm: use vma iterators instead of " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 55/69] mm/memcontrol: stop using mm->highest_vm_end Liam Howlett
2022-06-21 20:47 ` [PATCH v10 54/69] mm/madvise: use vma_find() instead of vma linked list Liam Howlett
2022-06-21 20:47 ` [PATCH v10 57/69] mm/mlock: use vma iterator and maple state " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 58/69] mm/mprotect: use maple tree navigation " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 59/69] mm/mremap: use vma_find_intersection() " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 56/69] mm/mempolicy: use vma iterator & maple state " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 62/69] mm/pagewalk: use vma_find() " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 60/69] mm/msync: " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 61/69] mm/oom_kill: use maple tree iterators " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 63/69] mm/swapfile: use vma iterator " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 65/69] nommu: remove uses of VMA " Liam Howlett
2022-06-21 20:47 ` [PATCH v10 66/69] riscv: use vma iterator for vdso Liam Howlett
2022-06-21 20:47 ` [PATCH v10 64/69] i915: use the VMA iterator Liam Howlett
2022-06-21 20:47 ` [PATCH v10 67/69] mm: remove the vma linked list Liam Howlett
2022-06-21 20:47 ` [PATCH v10 69/69] mm/mmap.c: pass in mapping to __vma_link_file() Liam Howlett
2022-06-21 20:47 ` [PATCH v10 68/69] mm/mmap: drop range_has_overlap() function Liam Howlett

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220621204632.3370049-6-Liam.Howlett@oracle.com \
    --to=liam.howlett@oracle.com \
    --cc=akpm@linux-foundation.org \
    --cc=damon@lists.linux.dev \
    --cc=david@redhat.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=maple-tree@lists.infradead.org \
    --cc=sj@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).