From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: kent.overstreet@linux.dev, mhocko@suse.com, vbabka@suse.cz,
	hannes@cmpxchg.org, roman.gushchin@linux.dev, mgorman@suse.de,
	dave@stgolabs.net, willy@infradead.org, liam.howlett@oracle.com,
	corbet@lwn.net, void@manifault.com, peterz@infradead.org,
	juri.lelli@redhat.com, ldufour@linux.ibm.com,
	catalin.marinas@arm.com, will@kernel.org, arnd@arndb.de,
	tglx@linutronix.de, mingo@redhat.com,
	dave.hansen@linux.intel.com, x86@kernel.org, peterx@redhat.com,
	david@redhat.com, axboe@kernel.dk, mcgrof@kernel.org,
	masahiroy@kernel.org, nathan@kernel.org, dennis@kernel.org,
	tj@kernel.org, muchun.song@linux.dev, rppt@kernel.org,
	paulmck@kernel.org, pasha.tatashin@soleen.com,
	yosryahmed@google.com, yuzhao@google.com, dhowells@redhat.com,
	hughd@google.com, andreyknvl@gmail.com, keescook@chromium.org,
	ndesaulniers@google.com, gregkh@linuxfoundation.org,
	ebiggers@google.com, ytcoode@gmail.com,
	vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	rostedt@goodmis.org, bsegall@google.com, bristot@redhat.com,
	vschneid@redhat.com, cl@linux.com, penberg@kernel.org,
	iamjoonsoo.kim@lge.com, 42.hyeyoo@gmail.com, glider@google.com,
	elver@google.com, dvyukov@google.com, shakeelb@google.com,
	songmuchun@bytedance.com, jbaron@akamai.com, rientjes@google.com,
	minchan@google.com, kaleshsingh@google.com, surenb@google.com,
	kernel-team@android.com, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org, iommu@lists.linux.dev,
	linux-arch@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-mm@kvack.org, linux-modules@vger.kernel.org,
	kasan-dev@googlegroups.com, cgroups@vger.kernel.org
Subject: [PATCH 25/40] mm/slab: enable slab allocation tagging for kmalloc and friends
Date: Mon,  1 May 2023 09:54:35 -0700
Message-ID: <20230501165450.15352-26-surenb@google.com>
In-Reply-To: <20230501165450.15352-1-surenb@google.com>

Redefine kmalloc, krealloc, kzalloc, kcalloc, etc. as macros wrapping the
underlying allocation functions, so that allocations and deallocations made
through these interfaces are recorded by the allocation tagging
infrastructure.
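
The pattern is the same for each interface: the implementation keeps its
body under a leading-underscore name, and the old name becomes a macro
that routes the call through alloc_hooks(), so the result can be accounted
to the calling code location. Simplified from the diff below (alloc_hooks()
itself is introduced by an earlier patch in this series):

	void *_kmalloc(size_t size, gfp_t flags);
	#define kmalloc(_size, _flags) \
		alloc_hooks(_kmalloc(_size, _flags), void*, NULL)

	int _kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
				   size_t size, void **p);
	#define kmem_cache_alloc_bulk(_s, _flags, _size, _p) \
		alloc_hooks(_kmem_cache_alloc_bulk(_s, _flags, _size, _p), int, 0)

The remaining alloc_hooks() arguments carry the wrapped call's return type
and its failure value (NULL for the pointer-returning allocators, 0 for the
bulk allocator). Callers need no changes: kmalloc(sizeof(*p), GFP_KERNEL)
compiles exactly as before, but the allocation it makes is now attributed
to that call site.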

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Co-developed-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
---
 include/linux/slab.h | 175 ++++++++++++++++++++++---------------------
 mm/slab.c            |  16 ++--
 mm/slab_common.c     |  22 +++---
 mm/slub.c            |  17 +++--
 mm/util.c            |  10 +--
 5 files changed, 124 insertions(+), 116 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 99a146f3cedf..43c922524081 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -213,7 +213,10 @@ int kmem_cache_shrink(struct kmem_cache *s);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
+void * __must_check _krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
+#define krealloc(_p, _size, _flags)					\
+	alloc_hooks(_krealloc(_p, _size, _flags), void*, NULL)
+
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
 size_t __ksize(const void *objp);
@@ -451,6 +454,8 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
 
+#include <linux/alloc_tag.h>
+
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
 
 /**
@@ -463,9 +468,15 @@ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_siz
  *
  * Return: pointer to the new object or %NULL in case of error
  */
-void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
-void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
-			   gfp_t gfpflags) __assume_slab_alignment __malloc;
+void *_kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc(_s, _flags)				\
+	alloc_hooks(_kmem_cache_alloc(_s, _flags), void*, NULL)
+
+void *_kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+			    gfp_t gfpflags) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_lru(_s, _lru, _flags)			\
+	alloc_hooks(_kmem_cache_alloc_lru(_s, _lru, _flags), void*, NULL)
+
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
 /*
@@ -476,7 +487,9 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
+int _kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
+#define kmem_cache_alloc_bulk(_s, _flags, _size, _p)		\
+	alloc_hooks(_kmem_cache_alloc_bulk(_s, _flags, _size, _p), int, 0)
 
 static __always_inline void kfree_bulk(size_t size, void **p)
 {
@@ -485,20 +498,32 @@ static __always_inline void kfree_bulk(size_t size, void **p)
 
 void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
 							 __alloc_size(1);
-void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
-									 __malloc;
+void *_kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
+									  __malloc;
+#define kmem_cache_alloc_node(_s, _flags, _node)		\
+	alloc_hooks(_kmem_cache_alloc_node(_s, _flags, _node), void*, NULL)
 
-void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+void *_kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
 		    __assume_kmalloc_alignment __alloc_size(3);
 
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+void *_kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 			 int node, size_t size) __assume_kmalloc_alignment
 						__alloc_size(4);
-void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+#define kmalloc_trace(_s, _flags, _size)		\
+	alloc_hooks(_kmalloc_trace(_s, _flags, _size), void*, NULL)
+
+#define kmalloc_node_trace(_s, _gfpflags, _node, _size)	\
+	alloc_hooks(_kmalloc_node_trace(_s, _gfpflags, _node, _size), void*, NULL)
+
+void *_kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
 					      __alloc_size(1);
+#define kmalloc_large(_size, _flags)			\
+	alloc_hooks(_kmalloc_large(_size, _flags), void*, NULL)
 
-void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
+void *_kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
 							     __alloc_size(1);
+#define kmalloc_large_node(_size, _flags, _node)	\
+	alloc_hooks(_kmalloc_large_node(_size, _flags, _node), void*, NULL)
 
 /**
  * kmalloc - allocate kernel memory
@@ -554,37 +579,40 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  *	Try really hard to succeed the allocation but fail
  *	eventually.
  */
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *_kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && size) {
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
+			return _kmalloc_large(size, flags);
 
 		index = kmalloc_index(size);
-		return kmalloc_trace(
+		return _kmalloc_trace(
 				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, size);
 	}
 	return __kmalloc(size, flags);
 }
+#define kmalloc(_size, _flags)  alloc_hooks(_kmalloc(_size, _flags), void*, NULL)
 
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *_kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && size) {
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large_node(size, flags, node);
+			return _kmalloc_large_node(size, flags, node);
 
 		index = kmalloc_index(size);
-		return kmalloc_node_trace(
+		return _kmalloc_node_trace(
 				kmalloc_caches[kmalloc_type(flags)][index],
 				flags, node, size);
 	}
 	return __kmalloc_node(size, flags, node);
 }
+#define kmalloc_node(_size, _flags, _node)		\
+	alloc_hooks(_kmalloc_node(_size, _flags, _node), void*, NULL)
 
 /**
  * kmalloc_array - allocate memory for an array.
@@ -592,16 +620,18 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *_kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc(bytes, flags);
-	return __kmalloc(bytes, flags);
+		return _kmalloc(bytes, flags);
+	return _kmalloc(bytes, flags);
 }
+#define kmalloc_array(_n, _size, _flags)		\
+	alloc_hooks(_kmalloc_array(_n, _size, _flags), void*, NULL)
 
 /**
  * krealloc_array - reallocate memory for an array.
@@ -610,18 +640,20 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_
  * @new_size: new size of a single member of the array
  * @flags: the type of memory to allocate (see kmalloc)
  */
-static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
-								      size_t new_n,
-								      size_t new_size,
-								      gfp_t flags)
+static inline __realloc_size(2, 3) void * __must_check _krealloc_array(void *p,
+								       size_t new_n,
+								       size_t new_size,
+								       gfp_t flags)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
 		return NULL;
 
-	return krealloc(p, bytes, flags);
+	return _krealloc(p, bytes, flags);
 }
+#define krealloc_array(_p, _n, _size, _flags)		\
+	alloc_hooks(_krealloc_array(_p, _n, _size, _flags), void*, NULL)
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -629,16 +661,14 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
-	return kmalloc_array(n, size, flags | __GFP_ZERO);
-}
+#define kcalloc(_n, _size, _flags)			\
+	kmalloc_array(_n, _size, (_flags) | __GFP_ZERO)
 
 void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
 				  unsigned long caller) __alloc_size(1);
 #define kmalloc_node_track_caller(size, flags, node) \
-	__kmalloc_node_track_caller(size, flags, node, \
-				    _RET_IP_)
+	alloc_hooks(__kmalloc_node_track_caller(size, flags, node, \
+				    _RET_IP_), void*, NULL)
 
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
@@ -648,11 +678,10 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#define kmalloc_track_caller(size, flags) \
-	__kmalloc_node_track_caller(size, flags, \
-				    NUMA_NO_NODE, _RET_IP_)
+#define kmalloc_track_caller(size, flags)		\
+	kmalloc_node_track_caller(size, flags, NUMA_NO_NODE)
 
-static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+static inline __alloc_size(1, 2) void *_kmalloc_array_node(size_t n, size_t size, gfp_t flags,
 							  int node)
 {
 	size_t bytes;
@@ -660,75 +689,53 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size,
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc_node(bytes, flags, node);
+		return _kmalloc_node(bytes, flags, node);
 	return __kmalloc_node(bytes, flags, node);
 }
+#define kmalloc_array_node(_n, _size, _flags, _node)	\
+	alloc_hooks(_kmalloc_array_node(_n, _size, _flags, _node), void*, NULL)
 
-static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
-{
-	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
-}
+#define kcalloc_node(_n, _size, _flags, _node)		\
+	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
 
 /*
  * Shortcuts
  */
-static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
-{
-	return kmem_cache_alloc(k, flags | __GFP_ZERO);
-}
+#define kmem_cache_zalloc(_k, _flags)			\
+	kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
 
 /**
  * kzalloc - allocate memory. The memory is set to zero.
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
-{
-	return kmalloc(size, flags | __GFP_ZERO);
-}
-
-/**
- * kzalloc_node - allocate zeroed memory from a particular memory node.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: memory node from which to allocate
- */
-static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc_node(size, flags | __GFP_ZERO, node);
-}
+#define kzalloc(_size, _flags)			kmalloc(_size, (_flags)|__GFP_ZERO)
+#define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
 
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
-static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
-{
-	return kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
-{
-	return kvmalloc(size, flags | __GFP_ZERO);
-}
+extern void *_kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
+#define kvmalloc_node(_size, _flags, _node)              \
+	alloc_hooks(_kvmalloc_node(_size, _flags, _node), void*, NULL)
 
-static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
-{
-	size_t bytes;
+#define kvmalloc(_size, _flags)			kvmalloc_node(_size, _flags, NUMA_NO_NODE)
+#define kvzalloc(_size, _flags)			kvmalloc(_size, _flags|__GFP_ZERO)
 
-	if (unlikely(check_mul_overflow(n, size, &bytes)))
-		return NULL;
+#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, _flags|__GFP_ZERO, _node)
 
-	return kvmalloc(bytes, flags);
-}
+#define kvmalloc_array(_n, _size, _flags)						\
+({											\
+	size_t _bytes;									\
+											\
+	!check_mul_overflow(_n, _size, &_bytes) ? kvmalloc(_bytes, _flags) : NULL;	\
+})
 
-static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
-{
-	return kvmalloc_array(n, size, flags | __GFP_ZERO);
-}
+#define kvcalloc(_n, _size, _flags)		kvmalloc_array(_n, _size, _flags|__GFP_ZERO)
 
-extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+extern void *_kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 		      __realloc_size(3);
+
+#define kvrealloc(_p, _oldsize, _newsize, _flags)					\
+	alloc_hooks(_kvrealloc(_p, _oldsize, _newsize, _flags), void*, NULL)
+
 extern void kvfree(const void *addr);
 extern void kvfree_sensitive(const void *addr, size_t len);
 
diff --git a/mm/slab.c b/mm/slab.c
index 026f0c08708a..e08bd3496f56 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3448,18 +3448,18 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 	return ret;
 }
 
-void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
+void *_kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 {
 	return __kmem_cache_alloc_lru(cachep, NULL, flags);
 }
-EXPORT_SYMBOL(kmem_cache_alloc);
+EXPORT_SYMBOL(_kmem_cache_alloc);
 
-void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
+void *_kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
 			   gfp_t flags)
 {
 	return __kmem_cache_alloc_lru(cachep, lru, flags);
 }
-EXPORT_SYMBOL(kmem_cache_alloc_lru);
+EXPORT_SYMBOL(_kmem_cache_alloc_lru);
 
 static __always_inline void
 cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
@@ -3471,7 +3471,7 @@ cache_alloc_debugcheck_after_bulk(struct kmem_cache *s, gfp_t flags,
 		p[i] = cache_alloc_debugcheck_after(s, flags, p[i], caller);
 }
 
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int _kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
 	struct obj_cgroup *objcg = NULL;
@@ -3510,7 +3510,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 	kmem_cache_free_bulk(s, i, p);
 	return 0;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_bulk);
+EXPORT_SYMBOL(_kmem_cache_alloc_bulk);
 
 /**
  * kmem_cache_alloc_node - Allocate an object on the specified node
@@ -3525,7 +3525,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_bulk);
  *
  * Return: pointer to the new object or %NULL in case of error
  */
-void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
+void *_kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 {
 	void *ret = slab_alloc_node(cachep, NULL, flags, nodeid, cachep->object_size, _RET_IP_);
 
@@ -3533,7 +3533,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
 
 	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node);
+EXPORT_SYMBOL(_kmem_cache_alloc_node);
 
 void *__kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
 			     int nodeid, size_t orig_size,
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 42777d66d0e3..a05333bbb7f1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1101,7 +1101,7 @@ size_t __ksize(const void *object)
 	return slab_ksize(folio_slab(folio)->slab_cache);
 }
 
-void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+void *_kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
 					    size, _RET_IP_);
@@ -1111,9 +1111,9 @@ void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_trace);
+EXPORT_SYMBOL(_kmalloc_trace);
 
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+void *_kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 			 int node, size_t size)
 {
 	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);
@@ -1123,7 +1123,7 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_node_trace);
+EXPORT_SYMBOL(_kmalloc_node_trace);
 
 gfp_t kmalloc_fix_flags(gfp_t flags)
 {
@@ -1168,7 +1168,7 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
-void *kmalloc_large(size_t size, gfp_t flags)
+void *_kmalloc_large(size_t size, gfp_t flags)
 {
 	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
 
@@ -1176,9 +1176,9 @@ void *kmalloc_large(size_t size, gfp_t flags)
 		      flags, NUMA_NO_NODE);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_large);
+EXPORT_SYMBOL(_kmalloc_large);
 
-void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+void *_kmalloc_large_node(size_t size, gfp_t flags, int node)
 {
 	void *ret = __kmalloc_large_node(size, flags, node);
 
@@ -1186,7 +1186,7 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 		      flags, node);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_large_node);
+EXPORT_SYMBOL(_kmalloc_large_node);
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
 /* Randomize a generic freelist */
@@ -1405,7 +1405,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
 		return (void *)p;
 	}
 
-	ret = kmalloc_track_caller(new_size, flags);
+	ret = __kmalloc_node_track_caller(new_size, flags, NUMA_NO_NODE, _RET_IP_);
 	if (ret && p) {
 		/* Disable KASAN checks as the object's redzone is accessed. */
 		kasan_disable_current();
@@ -1429,7 +1429,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
  *
  * Return: pointer to the allocated memory or %NULL in case of error
  */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
+void *_krealloc(const void *p, size_t new_size, gfp_t flags)
 {
 	void *ret;
 
@@ -1444,7 +1444,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 
 	return ret;
 }
-EXPORT_SYMBOL(krealloc);
+EXPORT_SYMBOL(_krealloc);
 
 /**
  * kfree_sensitive - Clear sensitive information in memory before freeing
diff --git a/mm/slub.c b/mm/slub.c
index 507b71372ee4..8f57fd086f69 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3470,18 +3470,18 @@ void *__kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 	return ret;
 }
 
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
+void *_kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 {
 	return __kmem_cache_alloc_lru(s, NULL, gfpflags);
 }
-EXPORT_SYMBOL(kmem_cache_alloc);
+EXPORT_SYMBOL(_kmem_cache_alloc);
 
-void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+void *_kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 			   gfp_t gfpflags)
 {
 	return __kmem_cache_alloc_lru(s, lru, gfpflags);
 }
-EXPORT_SYMBOL(kmem_cache_alloc_lru);
+EXPORT_SYMBOL(_kmem_cache_alloc_lru);
 
 void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
 			      int node, size_t orig_size,
@@ -3491,7 +3491,7 @@ void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
 			       caller, orig_size);
 }
 
-void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
+void *_kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
 
@@ -3499,7 +3499,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 
 	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node);
+EXPORT_SYMBOL(_kmem_cache_alloc_node);
 
 static noinline void free_to_partial_list(
 	struct kmem_cache *s, struct slab *slab,
@@ -3779,6 +3779,7 @@ static __fastpath_inline void slab_free(struct kmem_cache *s, struct slab *slab,
 				      unsigned long addr)
 {
 	memcg_slab_free_hook(s, slab, p, cnt);
+	alloc_tagging_slab_free_hook(s, slab, p, cnt);
 	/*
 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
 	 * to remove objects, whose reuse must be delayed.
@@ -4009,7 +4010,7 @@ static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 #endif /* CONFIG_SLUB_TINY */
 
 /* Note that interrupts must be enabled when calling this function. */
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
+int _kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			  void **p)
 {
 	int i;
@@ -4034,7 +4035,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 			slab_want_init_on_alloc(flags, s), s->object_size);
 	return i;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_bulk);
+EXPORT_SYMBOL(_kmem_cache_alloc_bulk);
 
 
 /*
diff --git a/mm/util.c b/mm/util.c
index dd12b9531ac4..e9077d1af676 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -579,7 +579,7 @@ EXPORT_SYMBOL(vm_mmap);
  *
  * Return: pointer to the allocated memory of %NULL in case of failure
  */
-void *kvmalloc_node(size_t size, gfp_t flags, int node)
+void *_kvmalloc_node(size_t size, gfp_t flags, int node)
 {
 	gfp_t kmalloc_flags = flags;
 	void *ret;
@@ -601,7 +601,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 		kmalloc_flags &= ~__GFP_NOFAIL;
 	}
 
-	ret = kmalloc_node(size, kmalloc_flags, node);
+	ret = _kmalloc_node(size, kmalloc_flags, node);
 
 	/*
 	 * It doesn't really make sense to fallback to vmalloc for sub page
@@ -630,7 +630,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 			node, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(kvmalloc_node);
+EXPORT_SYMBOL(_kvmalloc_node);
 
 /**
  * kvfree() - Free memory.
@@ -669,7 +669,7 @@ void kvfree_sensitive(const void *addr, size_t len)
 }
 EXPORT_SYMBOL(kvfree_sensitive);
 
-void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+void *_kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 {
 	void *newp;
 
@@ -682,7 +682,7 @@ void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 	kvfree(p);
 	return newp;
 }
-EXPORT_SYMBOL(kvrealloc);
+EXPORT_SYMBOL(_kvrealloc);
 
 /**
  * __vmalloc_array - allocate memory for a virtually contiguous array.
-- 
2.40.1.495.gc816e09b53d-goog


  parent reply	other threads:[~2023-05-01 16:59 UTC|newest]

Thread overview: 320+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-05-01 16:54 [PATCH 00/40] Memory allocation profiling Suren Baghdasaryan
2023-05-01 16:54 ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 01/40] lib/string_helpers: Drop space in string_get_size's output Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 18:13   ` Davidlohr Bueso
2023-05-01 18:13     ` Davidlohr Bueso
2023-05-01 19:35     ` Kent Overstreet
2023-05-01 19:35     ` Kent Overstreet
2023-05-01 19:57       ` Andy Shevchenko
2023-05-01 19:57         ` Andy Shevchenko
2023-05-01 21:16         ` Kent Overstreet
2023-05-01 21:16           ` Kent Overstreet
2023-05-01 21:33         ` Liam R. Howlett
2023-05-01 21:33           ` Liam R. Howlett
2023-05-02  0:11           ` Kent Overstreet
2023-05-02  0:11             ` Kent Overstreet
2023-05-02  0:53         ` Kent Overstreet
2023-05-02  0:53           ` Kent Overstreet
     [not found]       ` <ZFAUj+Q+hP7cWs4w-jC9Py7bek1znysI04z7BkA@public.gmane.org>
2023-05-02  2:22         ` James Bottomley
2023-05-02  2:22       ` James Bottomley
2023-05-02  3:17         ` Kent Overstreet
2023-05-02  3:17           ` Kent Overstreet
2023-05-02  5:33           ` Andy Shevchenko
2023-05-02  5:33             ` Andy Shevchenko
2023-05-02  6:21             ` Kent Overstreet
2023-05-02  6:21               ` Kent Overstreet
2023-05-02 15:19               ` Andy Shevchenko
2023-05-02 15:19                 ` Andy Shevchenko
2023-05-03  2:07                 ` Kent Overstreet
2023-05-03  2:07                   ` Kent Overstreet
2023-05-03  6:30                   ` Andy Shevchenko
2023-05-03  6:30                     ` Andy Shevchenko
2023-05-03  7:12                     ` Kent Overstreet
2023-05-03  7:12                       ` Kent Overstreet
2023-05-03  9:12                       ` Andy Shevchenko
2023-05-03  9:12                         ` Andy Shevchenko
2023-05-03  9:16                         ` Kent Overstreet
2023-05-03  9:16                           ` Kent Overstreet
2023-05-02 11:42           ` James Bottomley
2023-05-02 11:42             ` James Bottomley
2023-05-02 22:50             ` Dave Chinner
2023-05-02 22:50               ` Dave Chinner
2023-05-03  9:28               ` Vlastimil Babka
2023-05-03  9:28                 ` Vlastimil Babka
2023-05-03  9:44                 ` Andy Shevchenko
2023-05-03  9:44                   ` Andy Shevchenko
2023-05-03 12:15               ` James Bottomley
2023-05-03 12:15                 ` James Bottomley
2023-05-02  7:55   ` Jani Nikula
2023-05-02  7:55     ` Jani Nikula
2023-05-01 16:54 ` [PATCH 02/40] scripts/kallysms: Always include __start and __stop symbols Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 03/40] fs: Convert alloc_inode_sb() to a macro Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-02 12:35   ` Petr Tesařík
2023-05-02 12:35     ` Petr Tesařík
2023-05-02 19:57     ` Kent Overstreet
2023-05-02 19:57       ` Kent Overstreet
2023-05-02 20:20       ` Petr Tesařík
2023-05-02 20:20         ` Petr Tesařík
2023-05-01 16:54 ` [PATCH 04/40] nodemask: Split out include/linux/nodemask_types.h Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 05/40] prandom: Remove unused include Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 06/40] lib/string.c: strsep_no_empty() Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-02 12:37   ` Petr Tesařík
2023-05-02 12:37     ` Petr Tesařík
2023-05-01 16:54 ` [PATCH 07/40] Lazy percpu counters Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 19:17   ` Randy Dunlap
2023-05-01 19:17     ` Randy Dunlap
2023-05-01 16:54 ` [PATCH 08/40] mm: introduce slabobj_ext to support slab object extensions Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 09/40] mm: introduce __GFP_NO_OBJ_EXT flag to selectively prevent slabobj_ext creation Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-02 12:50   ` Petr Tesařík
2023-05-02 12:50     ` Petr Tesařík
2023-05-02 18:33     ` Suren Baghdasaryan
2023-05-02 18:33       ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 10/40] mm/slab: introduce SLAB_NO_OBJ_EXT to avoid obj_ext creation Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 11/40] mm: prevent slabobj_ext allocations for slabobj_ext and kmem_cache objects Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 12/40] slab: objext: introduce objext_flags as extension to page_memcg_data_flags Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 13/40] lib: code tagging framework Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 14/40] lib: code tagging module support Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 15/40] lib: prevent module unloading if memory is not freed Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 16/40] lib: code tagging query helper functions Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 17/40] lib: add allocation tagging support for memory allocation profiling Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 18/40] lib: introduce support for page allocation tagging Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 19/40] change alloc_pages name in dma_map_ops to avoid name conflicts Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-02 15:50   ` Petr Tesařík
2023-05-02 15:50     ` Petr Tesařík
2023-05-02 18:38     ` Suren Baghdasaryan
2023-05-02 18:38       ` Suren Baghdasaryan
2023-05-02 20:09       ` Petr Tesařík
2023-05-02 20:09         ` Petr Tesařík
2023-05-02 20:18         ` Kent Overstreet
2023-05-02 20:18           ` Kent Overstreet
2023-05-02 20:24         ` Suren Baghdasaryan
2023-05-02 20:24           ` Suren Baghdasaryan
2023-05-02 20:39           ` Petr Tesařík
2023-05-02 20:39             ` Petr Tesařík
2023-05-02 20:41             ` Suren Baghdasaryan
2023-05-02 20:41               ` Suren Baghdasaryan
2023-05-03 16:25   ` Steven Rostedt
2023-05-03 16:25     ` Steven Rostedt
2023-05-03 18:03     ` Suren Baghdasaryan
2023-05-03 18:03       ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 20/40] mm: enable page allocation tagging Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 21/40] mm/page_ext: enable early_page_ext when CONFIG_MEM_ALLOC_PROFILING_DEBUG=y Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 22/40] mm: create new codetag references during page splitting Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 23/40] lib: add codetag reference into slabobj_ext Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 24/40] mm/slab: add allocation accounting into slab allocation and free paths Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` Suren Baghdasaryan [this message]
2023-05-01 16:54   ` [PATCH 25/40] mm/slab: enable slab allocation tagging for kmalloc and friends Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 26/40] mm/slub: Mark slab_free_freelist_hook() __always_inline Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 27/40] mempool: Hook up to memory allocation profiling Suren Baghdasaryan
2023-05-01 16:54   ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 28/40] timekeeping: Fix a circular include dependency Suren Baghdasaryan
2023-05-02 15:50   ` Thomas Gleixner
2023-05-01 16:54 ` [PATCH 29/40] mm: percpu: Introduce pcpuobj_ext Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 30/40] mm: percpu: Add codetag reference into pcpuobj_ext Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 31/40] mm: percpu: enable per-cpu allocation tagging Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 32/40] arm64: Fix circular header dependency Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 33/40] move stack capture functionality into a separate function for reuse Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 34/40] lib: code tagging context capture support Suren Baghdasaryan
2023-05-03  7:35   ` Michal Hocko
2023-05-03 15:18     ` Suren Baghdasaryan
2023-05-03 15:26       ` Dave Hansen
2023-05-03 19:45         ` Suren Baghdasaryan
2023-05-04  8:04       ` Michal Hocko
2023-05-04 14:31         ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 35/40] lib: implement context capture support for tagged allocations Suren Baghdasaryan
2023-05-03  7:39   ` Michal Hocko
2023-05-03 15:24     ` Suren Baghdasaryan
2023-05-04  8:09       ` Michal Hocko
2023-05-04 16:22         ` Suren Baghdasaryan
2023-05-05  8:40           ` Michal Hocko
2023-05-05 18:10             ` Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 36/40] lib: add memory allocations report in show_mem() Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 37/40] codetag: debug: skip objext checking when it's for objext itself Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 38/40] codetag: debug: mark codetags for reserved pages as empty Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 39/40] codetag: debug: introduce OBJEXTS_ALLOC_FAIL to mark failed slab_ext allocations Suren Baghdasaryan
2023-05-01 16:54 ` [PATCH 40/40] MAINTAINERS: Add entries for code tagging and memory allocation profiling Suren Baghdasaryan
2023-05-01 17:47 ` [PATCH 00/40] Memory " Roman Gushchin
2023-05-01 18:08   ` Suren Baghdasaryan
2023-05-01 18:14     ` Roman Gushchin
2023-05-01 19:37       ` Kent Overstreet
2023-05-01 21:18         ` Roman Gushchin
2023-05-03  7:25 ` Michal Hocko
2023-05-03  7:34   ` Kent Overstreet
2023-05-03  7:51     ` Michal Hocko
2023-05-03  8:05       ` Kent Overstreet
2023-05-03 13:21         ` Steven Rostedt
2023-05-03 16:35         ` Tejun Heo
2023-05-03 17:42           ` Suren Baghdasaryan
2023-05-03 18:06             ` Tejun Heo
2023-05-03 17:44           ` Kent Overstreet
2023-05-03 17:51           ` Kent Overstreet
2023-05-03 18:24             ` Tejun Heo
2023-05-03 18:07           ` Johannes Weiner
2023-05-03 18:19             ` Tejun Heo
2023-05-03 18:40               ` Tejun Heo
2023-05-03 18:56                 ` Kent Overstreet
2023-05-03 18:58                   ` Tejun Heo
2023-05-03 19:09                     ` Tejun Heo
2023-05-03 19:41                       ` Suren Baghdasaryan
2023-05-03 19:48                         ` Tejun Heo
2023-05-03 20:00                           ` Tejun Heo
2023-05-03 20:14                             ` Suren Baghdasaryan
2023-05-04  2:25                               ` Tejun Heo
2023-05-04  3:33                                 ` Kent Overstreet
2023-05-04  3:33                                 ` Suren Baghdasaryan
2023-05-04  8:00                               ` Petr Tesařík
2023-05-03 20:08                           ` Suren Baghdasaryan
2023-05-03 20:11                             ` Johannes Weiner
2023-05-04  2:16                             ` Tejun Heo
2023-05-03 20:04           ` Andrey Ryabinin
2023-05-03  9:50       ` Petr Tesařík
2023-05-03  9:54         ` Kent Overstreet
2023-05-03 10:24           ` Petr Tesařík
2023-05-03  9:57         ` Kent Overstreet
2023-05-03 10:26           ` Petr Tesařík
2023-05-03 15:30             ` Kent Overstreet
2023-05-03 12:33           ` James Bottomley
2023-05-03 14:31             ` Suren Baghdasaryan
2023-05-03 15:28             ` Kent Overstreet
2023-05-03 15:37               ` Lorenzo Stoakes
2023-05-03 16:03                 ` Kent Overstreet
2023-05-03 15:49               ` James Bottomley
2023-05-03 15:09   ` Suren Baghdasaryan
2023-05-03 16:28     ` Steven Rostedt
2023-05-03 17:40       ` Suren Baghdasaryan
2023-05-03 18:03         ` Steven Rostedt
2023-05-03 18:07           ` Suren Baghdasaryan
2023-05-03 18:12           ` Kent Overstreet
2023-05-04  9:07     ` Michal Hocko
2023-05-04 15:08       ` Suren Baghdasaryan
2023-05-07 10:27         ` Michal Hocko
2023-05-07 17:01           ` Kent Overstreet
2023-05-07 17:20       ` Kent Overstreet
2023-05-07 20:55         ` Steven Rostedt
2023-05-07 21:53           ` Kent Overstreet
2023-05-07 22:09             ` Steven Rostedt
2023-05-07 22:17               ` Kent Overstreet
2023-05-08 15:52         ` Petr Tesařík
2023-05-08 15:57           ` Kent Overstreet
2023-05-08 16:09             ` Petr Tesařík
2023-05-08 16:28               ` Kent Overstreet
2023-05-08 18:59                 ` Petr Tesařík
2023-05-08 20:48                   ` Kent Overstreet
