From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: kent.overstreet@linux.dev, mhocko@suse.com, vbabka@suse.cz,
	 hannes@cmpxchg.org, roman.gushchin@linux.dev, mgorman@suse.de,
	 dave@stgolabs.net, willy@infradead.org, liam.howlett@oracle.com,
	 corbet@lwn.net, void@manifault.com, peterz@infradead.org,
	 juri.lelli@redhat.com, catalin.marinas@arm.com, will@kernel.org,
	 arnd@arndb.de, tglx@linutronix.de, mingo@redhat.com,
	 dave.hansen@linux.intel.com, x86@kernel.org, peterx@redhat.com,
	 david@redhat.com, axboe@kernel.dk, mcgrof@kernel.org,
	masahiroy@kernel.org,  nathan@kernel.org, dennis@kernel.org,
	tj@kernel.org, muchun.song@linux.dev,  rppt@kernel.org,
	paulmck@kernel.org, pasha.tatashin@soleen.com,
	 yosryahmed@google.com, yuzhao@google.com, dhowells@redhat.com,
	 hughd@google.com, andreyknvl@gmail.com, keescook@chromium.org,
	 ndesaulniers@google.com, vvvvvv@google.com,
	gregkh@linuxfoundation.org,  ebiggers@google.com,
	ytcoode@gmail.com, vincent.guittot@linaro.org,
	 dietmar.eggemann@arm.com, rostedt@goodmis.org,
	bsegall@google.com,  bristot@redhat.com, vschneid@redhat.com,
	cl@linux.com, penberg@kernel.org,  iamjoonsoo.kim@lge.com,
	42.hyeyoo@gmail.com, glider@google.com,  elver@google.com,
	dvyukov@google.com, shakeelb@google.com,
	 songmuchun@bytedance.com, jbaron@akamai.com,
	rientjes@google.com,  minchan@google.com, kaleshsingh@google.com,
	surenb@google.com,  kernel-team@android.com,
	linux-doc@vger.kernel.org,  linux-kernel@vger.kernel.org,
	iommu@lists.linux.dev,  linux-arch@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-mm@kvack.org,
	 linux-modules@vger.kernel.org, kasan-dev@googlegroups.com,
	 cgroups@vger.kernel.org
Subject: [PATCH v3 22/35] mm/slab: enable slab allocation tagging for kmalloc and friends
Date: Mon, 12 Feb 2024 13:39:08 -0800
Message-ID: <20240212213922.783301-23-surenb@google.com>
In-Reply-To: <20240212213922.783301-1-surenb@google.com>

Redefine kmalloc, krealloc, kzalloc, kcalloc, etc. so that allocations
and deallocations done through these functions are recorded for memory
allocation profiling.
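
The conversion follows a single pattern throughout: each function is
renamed with a _noprof suffix, and the old name becomes a variadic
macro that routes the call through alloc_hooks(), which attaches a
per-callsite allocation tag. The expansion is purely mechanical, so
existing callers need no changes. For example (the #define is taken
from this patch; the caller shown is hypothetical):

	#define kmalloc(...)	alloc_hooks(kmalloc_noprof(__VA_ARGS__))

	/* an unmodified call site ... */
	p = kmalloc(sizeof(*p), GFP_KERNEL);
	/* ... now preprocesses to: */
	p = alloc_hooks(kmalloc_noprof(sizeof(*p), GFP_KERNEL));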

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Co-developed-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
---
 include/linux/fortify-string.h |   5 +-
 include/linux/slab.h           | 177 ++++++++++++++++-----------------
 include/linux/string.h         |   4 +-
 mm/slab_common.c               |   6 +-
 mm/slub.c                      |  54 +++++-----
 mm/util.c                      |  20 ++--
 6 files changed, 134 insertions(+), 132 deletions(-)

diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index 89a6888f2f9e..55f66bd8a366 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -697,9 +697,9 @@ __FORTIFY_INLINE void *memchr_inv(const void * const POS0 p, int c, size_t size)
 	return __real_memchr_inv(p, c, size);
 }
 
-extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup)
+extern void *__real_kmemdup(const void *src, size_t len, gfp_t gfp) __RENAME(kmemdup_noprof)
 								    __realloc_size(2);
-__FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp)
+__FORTIFY_INLINE void *kmemdup_noprof(const void * const POS0 p, size_t size, gfp_t gfp)
 {
 	const size_t p_size = __struct_size(p);
 
@@ -709,6 +709,7 @@ __FORTIFY_INLINE void *kmemdup(const void * const POS0 p, size_t size, gfp_t gfp
 		fortify_panic(__func__);
 	return __real_kmemdup(p, size, gfp);
 }
+#define kmemdup(...)	alloc_hooks(kmemdup_noprof(__VA_ARGS__))
 
 /**
  * strcpy - Copy a string into another string buffer
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3ac2fc830f0f..910473e07e86 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -230,7 +230,10 @@ int kmem_cache_shrink(struct kmem_cache *s);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __realloc_size(2);
+void * __must_check krealloc_noprof(const void *objp, size_t new_size,
+				    gfp_t flags) __realloc_size(2);
+#define krealloc(...)				alloc_hooks(krealloc_noprof(__VA_ARGS__))
+
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
 size_t __ksize(const void *objp);
@@ -482,7 +485,10 @@ static __always_inline unsigned int __kmalloc_index(size_t size,
 static_assert(PAGE_SHIFT <= 20);
 #define kmalloc_index(s) __kmalloc_index(s, true)
 
-void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
+#include <linux/alloc_tag.h>
+
+void *__kmalloc_noprof(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
+#define __kmalloc(...)				alloc_hooks(__kmalloc_noprof(__VA_ARGS__))
 
 /**
  * kmem_cache_alloc - Allocate an object
@@ -494,9 +500,14 @@ void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_siz
  *
  * Return: pointer to the new object or %NULL in case of error
  */
-void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) __assume_slab_alignment __malloc;
-void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
-			   gfp_t gfpflags) __assume_slab_alignment __malloc;
+void *kmem_cache_alloc_noprof(struct kmem_cache *cachep,
+			      gfp_t flags) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc(...)			alloc_hooks(kmem_cache_alloc_noprof(__VA_ARGS__))
+
+void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
+			    gfp_t gfpflags) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_lru(...)	alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__))
+
 void kmem_cache_free(struct kmem_cache *s, void *objp);
 
 /*
@@ -507,29 +518,40 @@ void kmem_cache_free(struct kmem_cache *s, void *objp);
  * Note that interrupts must be enabled when calling these functions.
  */
 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
+
+int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size, void **p);
+#define kmem_cache_alloc_bulk(...)	alloc_hooks(kmem_cache_alloc_bulk_noprof(__VA_ARGS__))
 
 static __always_inline void kfree_bulk(size_t size, void **p)
 {
 	kmem_cache_free_bulk(NULL, size, p);
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
+void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
 							 __alloc_size(1);
-void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
-									 __malloc;
+#define __kmalloc_node(...)			alloc_hooks(__kmalloc_node_noprof(__VA_ARGS__))
 
-void *kmalloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
+void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t flags,
+				   int node) __assume_slab_alignment __malloc;
+#define kmem_cache_alloc_node(...)	alloc_hooks(kmem_cache_alloc_node_noprof(__VA_ARGS__))
+
+void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t flags, size_t size)
 		    __assume_kmalloc_alignment __alloc_size(3);
 
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
-			 int node, size_t size) __assume_kmalloc_alignment
+void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
+		int node, size_t size) __assume_kmalloc_alignment
 						__alloc_size(4);
-void *kmalloc_large(size_t size, gfp_t flags) __assume_page_alignment
+#define kmalloc_trace(...)			alloc_hooks(kmalloc_trace_noprof(__VA_ARGS__))
+
+#define kmalloc_node_trace(...)			alloc_hooks(kmalloc_node_trace_noprof(__VA_ARGS__))
+
+void *kmalloc_large_noprof(size_t size, gfp_t flags) __assume_page_alignment
 					      __alloc_size(1);
+#define kmalloc_large(...)			alloc_hooks(kmalloc_large_noprof(__VA_ARGS__))
 
-void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_alignment
+void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node) __assume_page_alignment
 							     __alloc_size(1);
+#define kmalloc_large_node(...)			alloc_hooks(kmalloc_large_node_noprof(__VA_ARGS__))
 
 /**
  * kmalloc - allocate kernel memory
@@ -585,37 +607,39 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node) __assume_page_align
  *	Try really hard to succeed the allocation but fail
  *	eventually.
  */
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *kmalloc_noprof(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size) && size) {
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
+			return kmalloc_large_noprof(size, flags);
 
 		index = kmalloc_index(size);
-		return kmalloc_trace(
+		return kmalloc_trace_noprof(
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, size);
 	}
-	return __kmalloc(size, flags);
+	return __kmalloc_noprof(size, flags);
 }
+#define kmalloc(...)				alloc_hooks(kmalloc_noprof(__VA_ARGS__))
 
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *kmalloc_node_noprof(size_t size, gfp_t flags, int node)
 {
 	if (__builtin_constant_p(size) && size) {
 		unsigned int index;
 
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large_node(size, flags, node);
+			return kmalloc_large_node_noprof(size, flags, node);
 
 		index = kmalloc_index(size);
-		return kmalloc_node_trace(
+		return kmalloc_node_trace_noprof(
 				kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
 				flags, node, size);
 	}
-	return __kmalloc_node(size, flags, node);
+	return __kmalloc_node_noprof(size, flags, node);
 }
+#define kmalloc_node(...)			alloc_hooks(kmalloc_node_noprof(__VA_ARGS__))
 
 /**
  * kmalloc_array - allocate memory for an array.
@@ -623,16 +647,17 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t size, gfp_t flags)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc(bytes, flags);
-	return __kmalloc(bytes, flags);
+		return kmalloc_noprof(bytes, flags);
+	return kmalloc_noprof(bytes, flags);
 }
+#define kmalloc_array(...)			alloc_hooks(kmalloc_array_noprof(__VA_ARGS__))
 
 /**
  * krealloc_array - reallocate memory for an array.
@@ -641,18 +666,19 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_
  * @new_size: new size of a single member of the array
  * @flags: the type of memory to allocate (see kmalloc)
  */
-static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
-								      size_t new_n,
-								      size_t new_size,
-								      gfp_t flags)
+static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p,
+								       size_t new_n,
+								       size_t new_size,
+								       gfp_t flags)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
 		return NULL;
 
-	return krealloc(p, bytes, flags);
+	return krealloc_noprof(p, bytes, flags);
 }
+#define krealloc_array(...)			alloc_hooks(krealloc_array_noprof(__VA_ARGS__))
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -660,16 +686,12 @@ static inline __realloc_size(2, 3) void * __must_check krealloc_array(void *p,
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
-	return kmalloc_array(n, size, flags | __GFP_ZERO);
-}
+#define kcalloc(_n, _size, _flags)		kmalloc_array(_n, _size, (_flags) | __GFP_ZERO)
 
-void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
+void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags, int node,
 				  unsigned long caller) __alloc_size(1);
-#define kmalloc_node_track_caller(size, flags, node) \
-	__kmalloc_node_track_caller(size, flags, node, \
-				    _RET_IP_)
+#define kmalloc_node_track_caller(...)		\
+	alloc_hooks(kmalloc_node_track_caller_noprof(__VA_ARGS__, _RET_IP_))
 
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
@@ -679,11 +701,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
  * allocator where we care about the real place the memory allocation
  * request comes from.
  */
-#define kmalloc_track_caller(size, flags) \
-	__kmalloc_node_track_caller(size, flags, \
-				    NUMA_NO_NODE, _RET_IP_)
+#define kmalloc_track_caller(...)		kmalloc_node_track_caller(__VA_ARGS__, NUMA_NO_NODE)
 
-static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+static inline __alloc_size(1, 2) void *kmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags,
 							  int node)
 {
 	size_t bytes;
@@ -691,75 +711,52 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size,
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc_node(bytes, flags, node);
-	return __kmalloc_node(bytes, flags, node);
+		return kmalloc_node_noprof(bytes, flags, node);
+	return __kmalloc_node_noprof(bytes, flags, node);
 }
+#define kmalloc_array_node(...)			alloc_hooks(kmalloc_array_node_noprof(__VA_ARGS__))
 
-static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
-{
-	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
-}
+#define kcalloc_node(_n, _size, _flags, _node)	\
+	kmalloc_array_node(_n, _size, (_flags) | __GFP_ZERO, _node)
 
 /*
  * Shortcuts
  */
-static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
-{
-	return kmem_cache_alloc(k, flags | __GFP_ZERO);
-}
+#define kmem_cache_zalloc(_k, _flags)		kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
 
 /**
  * kzalloc - allocate memory. The memory is set to zero.
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
+static inline __alloc_size(1) void *kzalloc_noprof(size_t size, gfp_t flags)
 {
-	return kmalloc(size, flags | __GFP_ZERO);
+	return kmalloc_noprof(size, flags | __GFP_ZERO);
 }
+#define kzalloc(...)				alloc_hooks(kzalloc_noprof(__VA_ARGS__))
+#define kzalloc_node(_size, _flags, _node)	kmalloc_node(_size, (_flags)|__GFP_ZERO, _node)
 
-/**
- * kzalloc_node - allocate zeroed memory from a particular memory node.
- * @size: how many bytes of memory are required.
- * @flags: the type of memory to allocate (see kmalloc).
- * @node: memory node from which to allocate
- */
-static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kmalloc_node(size, flags | __GFP_ZERO, node);
-}
+extern void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node) __alloc_size(1);
+#define kvmalloc_node(...)			alloc_hooks(kvmalloc_node_noprof(__VA_ARGS__))
 
-extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
-static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
-{
-	return kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
-{
-	return kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
-{
-	return kvmalloc(size, flags | __GFP_ZERO);
-}
+#define kvmalloc(_size, _flags)			kvmalloc_node(_size, _flags, NUMA_NO_NODE)
+#define kvzalloc(_size, _flags)			kvmalloc(_size, _flags|__GFP_ZERO)
 
-static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
-{
-	size_t bytes;
-
-	if (unlikely(check_mul_overflow(n, size, &bytes)))
-		return NULL;
+#define kvzalloc_node(_size, _flags, _node)	kvmalloc_node(_size, _flags|__GFP_ZERO, _node)
 
-	return kvmalloc(bytes, flags);
-}
+#define kvmalloc_array(_n, _size, _flags)						\
+({											\
+	size_t _bytes;									\
+											\
+	!check_mul_overflow(_n, _size, &_bytes) ? kvmalloc(_bytes, _flags) : NULL;	\
+})
 
-static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
-{
-	return kvmalloc_array(n, size, flags | __GFP_ZERO);
-}
+#define kvcalloc(_n, _size, _flags)		kvmalloc_array(_n, _size, _flags|__GFP_ZERO)
 
-extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 		      __realloc_size(3);
+#define kvrealloc(...)				alloc_hooks(kvrealloc_noprof(__VA_ARGS__))
+
 extern void kvfree(const void *addr);
 DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
 
diff --git a/include/linux/string.h b/include/linux/string.h
index ab148d8dbfc1..14e4fb4340f4 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -214,7 +214,9 @@ extern void kfree_const(const void *x);
 extern char *kstrdup(const char *s, gfp_t gfp) __malloc;
 extern const char *kstrdup_const(const char *s, gfp_t gfp);
 extern char *kstrndup(const char *s, size_t len, gfp_t gfp);
-extern void *kmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
+extern void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
+#define kmemdup(...)	alloc_hooks(kmemdup_noprof(__VA_ARGS__))
+
 extern void *kvmemdup(const void *src, size_t len, gfp_t gfp) __realloc_size(2);
 extern char *kmemdup_nul(const char *s, size_t len, gfp_t gfp);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 83fec2dd2e2d..21b0b9e9cd9e 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1234,7 +1234,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
 		return (void *)p;
 	}
 
-	ret = kmalloc_track_caller(new_size, flags);
+	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
 	if (ret && p) {
 		/* Disable KASAN checks as the object's redzone is accessed. */
 		kasan_disable_current();
@@ -1258,7 +1258,7 @@ __do_krealloc(const void *p, size_t new_size, gfp_t flags)
  *
  * Return: pointer to the allocated memory or %NULL in case of error
  */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
+void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
 {
 	void *ret;
 
@@ -1273,7 +1273,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 
 	return ret;
 }
-EXPORT_SYMBOL(krealloc);
+EXPORT_SYMBOL(krealloc_noprof);
 
 /**
  * kfree_sensitive - Clear sensitive information in memory before freeing
diff --git a/mm/slub.c b/mm/slub.c
index f4d5794c1e86..9ea03d6e9c9d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3871,7 +3871,7 @@ static __fastpath_inline void *slab_alloc_node(struct kmem_cache *s, struct list
 	return object;
 }
 
-void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
+void *kmem_cache_alloc_noprof(struct kmem_cache *s, gfp_t gfpflags)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE, _RET_IP_,
 				    s->object_size);
@@ -3880,9 +3880,9 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
 
 	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc);
+EXPORT_SYMBOL(kmem_cache_alloc_noprof);
 
-void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
+void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru,
 			   gfp_t gfpflags)
 {
 	void *ret = slab_alloc_node(s, lru, gfpflags, NUMA_NO_NODE, _RET_IP_,
@@ -3892,10 +3892,10 @@ void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
 
 	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_lru);
+EXPORT_SYMBOL(kmem_cache_alloc_lru_noprof);
 
 /**
- * kmem_cache_alloc_node - Allocate an object on the specified node
+ * kmem_cache_alloc_node_noprof - Allocate an object on the specified node
  * @s: The cache to allocate from.
  * @gfpflags: See kmalloc().
  * @node: node number of the target node.
@@ -3907,7 +3907,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_lru);
  *
  * Return: pointer to the new object or %NULL in case of error
  */
-void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
+void *kmem_cache_alloc_node_noprof(struct kmem_cache *s, gfp_t gfpflags, int node)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, s->object_size);
 
@@ -3915,7 +3915,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
 
 	return ret;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_node);
+EXPORT_SYMBOL(kmem_cache_alloc_node_noprof);
 
 /*
  * To avoid unnecessary overhead, we pass through large allocation requests
@@ -3932,7 +3932,7 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
 		flags = kmalloc_fix_flags(flags);
 
 	flags |= __GFP_COMP;
-	folio = (struct folio *)alloc_pages_node(node, flags, order);
+	folio = (struct folio *)alloc_pages_node_noprof(node, flags, order);
 	if (folio) {
 		ptr = folio_address(folio);
 		lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
@@ -3947,7 +3947,7 @@ static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
 	return ptr;
 }
 
-void *kmalloc_large(size_t size, gfp_t flags)
+void *kmalloc_large_noprof(size_t size, gfp_t flags)
 {
 	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);
 
@@ -3955,9 +3955,9 @@ void *kmalloc_large(size_t size, gfp_t flags)
 		      flags, NUMA_NO_NODE);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_large);
+EXPORT_SYMBOL(kmalloc_large_noprof);
 
-void *kmalloc_large_node(size_t size, gfp_t flags, int node)
+void *kmalloc_large_node_noprof(size_t size, gfp_t flags, int node)
 {
 	void *ret = __kmalloc_large_node(size, flags, node);
 
@@ -3965,7 +3965,7 @@ void *kmalloc_large_node(size_t size, gfp_t flags, int node)
 		      flags, node);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_large_node);
+EXPORT_SYMBOL(kmalloc_large_node_noprof);
 
 static __always_inline
 void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
@@ -3992,26 +3992,26 @@ void *__do_kmalloc_node(size_t size, gfp_t flags, int node,
 	return ret;
 }
 
-void *__kmalloc_node(size_t size, gfp_t flags, int node)
+void *__kmalloc_node_noprof(size_t size, gfp_t flags, int node)
 {
 	return __do_kmalloc_node(size, flags, node, _RET_IP_);
 }
-EXPORT_SYMBOL(__kmalloc_node);
+EXPORT_SYMBOL(__kmalloc_node_noprof);
 
-void *__kmalloc(size_t size, gfp_t flags)
+void *__kmalloc_noprof(size_t size, gfp_t flags)
 {
 	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
 }
-EXPORT_SYMBOL(__kmalloc);
+EXPORT_SYMBOL(__kmalloc_noprof);
 
-void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
-				  int node, unsigned long caller)
+void *kmalloc_node_track_caller_noprof(size_t size, gfp_t flags,
+				       int node, unsigned long caller)
 {
 	return __do_kmalloc_node(size, flags, node, caller);
 }
-EXPORT_SYMBOL(__kmalloc_node_track_caller);
+EXPORT_SYMBOL(kmalloc_node_track_caller_noprof);
 
-void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
+void *kmalloc_trace_noprof(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, NUMA_NO_NODE,
 					    _RET_IP_, size);
@@ -4021,9 +4021,9 @@ void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_trace);
+EXPORT_SYMBOL(kmalloc_trace_noprof);
 
-void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
+void *kmalloc_node_trace_noprof(struct kmem_cache *s, gfp_t gfpflags,
 			 int node, size_t size)
 {
 	void *ret = slab_alloc_node(s, NULL, gfpflags, node, _RET_IP_, size);
@@ -4033,7 +4033,7 @@ void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
 	ret = kasan_kmalloc(s, ret, size, gfpflags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_node_trace);
+EXPORT_SYMBOL(kmalloc_node_trace_noprof);
 
 static noinline void free_to_partial_list(
 	struct kmem_cache *s, struct slab *slab,
@@ -4304,6 +4304,7 @@ void slab_free(struct kmem_cache *s, struct slab *slab, void *object,
 	       unsigned long addr)
 {
 	memcg_slab_free_hook(s, slab, &object, 1);
+	alloc_tagging_slab_free_hook(s, slab, &object, 1);
 
 	if (likely(slab_free_hook(s, object, slab_want_init_on_free(s))))
 		do_slab_free(s, slab, object, object, 1, addr);
@@ -4314,6 +4315,7 @@ void slab_free_bulk(struct kmem_cache *s, struct slab *slab, void *head,
 		    void *tail, void **p, int cnt, unsigned long addr)
 {
 	memcg_slab_free_hook(s, slab, p, cnt);
+	alloc_tagging_slab_free_hook(s, slab, p, cnt);
 	/*
 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
 	 * to remove objects, whose reuse must be delayed.
@@ -4640,8 +4642,8 @@ static int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags,
 #endif /* CONFIG_SLUB_TINY */
 
 /* Note that interrupts must be enabled when calling this function. */
-int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
-			  void **p)
+int kmem_cache_alloc_bulk_noprof(struct kmem_cache *s, gfp_t flags, size_t size,
+				 void **p)
 {
 	int i;
 	struct obj_cgroup *objcg = NULL;
@@ -4669,7 +4671,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
 
 	return i;
 }
-EXPORT_SYMBOL(kmem_cache_alloc_bulk);
+EXPORT_SYMBOL(kmem_cache_alloc_bulk_noprof);
 
 
 /*
diff --git a/mm/util.c b/mm/util.c
index 5a6a9802583b..291f7945190f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -115,7 +115,7 @@ char *kstrndup(const char *s, size_t max, gfp_t gfp)
 EXPORT_SYMBOL(kstrndup);
 
 /**
- * kmemdup - duplicate region of memory
+ * kmemdup_noprof - duplicate region of memory
  *
  * @src: memory region to duplicate
  * @len: memory region length
@@ -124,16 +124,16 @@ EXPORT_SYMBOL(kstrndup);
  * Return: newly allocated copy of @src or %NULL in case of error,
  * result is physically contiguous. Use kfree() to free.
  */
-void *kmemdup(const void *src, size_t len, gfp_t gfp)
+void *kmemdup_noprof(const void *src, size_t len, gfp_t gfp)
 {
 	void *p;
 
-	p = kmalloc_track_caller(len, gfp);
+	p = kmalloc_node_track_caller_noprof(len, gfp, NUMA_NO_NODE, _RET_IP_);
 	if (p)
 		memcpy(p, src, len);
 	return p;
 }
-EXPORT_SYMBOL(kmemdup);
+EXPORT_SYMBOL(kmemdup_noprof);
 
 /**
  * kvmemdup - duplicate region of memory
@@ -577,7 +577,7 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 EXPORT_SYMBOL(vm_mmap);
 
 /**
- * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
+ * kvmalloc_node_noprof - attempt to allocate physically contiguous memory, but upon
  * failure, fall back to non-contiguous (vmalloc) allocation.
  * @size: size of the request.
  * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
@@ -592,7 +592,7 @@ EXPORT_SYMBOL(vm_mmap);
  *
  * Return: pointer to the allocated memory of %NULL in case of failure
  */
-void *kvmalloc_node(size_t size, gfp_t flags, int node)
+void *kvmalloc_node_noprof(size_t size, gfp_t flags, int node)
 {
 	gfp_t kmalloc_flags = flags;
 	void *ret;
@@ -614,7 +614,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 		kmalloc_flags &= ~__GFP_NOFAIL;
 	}
 
-	ret = kmalloc_node(size, kmalloc_flags, node);
+	ret = kmalloc_node_noprof(size, kmalloc_flags, node);
 
 	/*
 	 * It doesn't really make sense to fallback to vmalloc for sub page
@@ -643,7 +643,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
 			flags, PAGE_KERNEL, VM_ALLOW_HUGE_VMAP,
 			node, __builtin_return_address(0));
 }
-EXPORT_SYMBOL(kvmalloc_node);
+EXPORT_SYMBOL(kvmalloc_node_noprof);
 
 /**
  * kvfree() - Free memory.
@@ -682,7 +682,7 @@ void kvfree_sensitive(const void *addr, size_t len)
 }
 EXPORT_SYMBOL(kvfree_sensitive);
 
-void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
+void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 {
 	void *newp;
 
@@ -695,7 +695,7 @@ void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
 	kvfree(p);
 	return newp;
 }
-EXPORT_SYMBOL(kvrealloc);
+EXPORT_SYMBOL(kvrealloc_noprof);
 
 /**
  * __vmalloc_array - allocate memory for a virtually contiguous array.
-- 
2.43.0.687.g38aa6559b0-goog
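
For context, alloc_hooks() is defined in an earlier patch in this
series ("lib: add allocation tagging support for memory allocation
profiling"). Roughly, it declares a static tag identifying the call
site and makes it current for the duration of the allocation, so the
accounting hooks inside the allocator (the free side is visible above
as the alloc_tagging_slab_free_hook() calls added to slab_free() and
slab_free_bulk()) know which call site to charge or credit. A
simplified sketch of the mechanism, not the exact code:

	#define alloc_hooks(_do_alloc)					\
	({								\
		DEFINE_ALLOC_TAG(_tag);	/* static; records file:line */	\
		struct alloc_tag *_old = alloc_tag_save(&_tag);		\
		typeof(_do_alloc) _res = _do_alloc;			\
		alloc_tag_restore(&_tag, _old);				\
		_res;							\
	})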

