From: Suren Baghdasaryan <surenb@google.com>
To: akpm@linux-foundation.org
Cc: kent.overstreet@linux.dev, mhocko@suse.com, vbabka@suse.cz,
	hannes@cmpxchg.org, roman.gushchin@linux.dev, mgorman@suse.de,
	dave@stgolabs.net, willy@infradead.org, liam.howlett@oracle.com,
	void@manifault.com, peterz@infradead.org, juri.lelli@redhat.com,
	ldufour@linux.ibm.com, peterx@redhat.com, david@redhat.com,
	axboe@kernel.dk, mcgrof@kernel.org, masahiroy@kernel.org,
	nathan@kernel.org, changbin.du@intel.com, ytcoode@gmail.com,
	vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	rostedt@goodmis.org, bsegall@google.com, bristot@redhat.com,
	vschneid@redhat.com, cl@linux.com, penberg@kernel.org,
	iamjoonsoo.kim@lge.com, 42.hyeyoo@gmail.com, glider@google.com,
	elver@google.com, dvyukov@google.com, shakeelb@google.com,
	songmuchun@bytedance.com, arnd@arndb.de, jbaron@akamai.com,
	rientjes@google.com, minchan@google.com, kaleshsingh@google.com,
	surenb@google.com, kernel-team@android.com, linux-mm@kvack.org,
	iommu@lists.linux.dev, kasan-dev@googlegroups.com,
	io-uring@vger.kernel.org, linux-arch@vger.kernel.org,
	xen-devel@lists.xenproject.org, linux-bcache@vger.kernel.org,
	linux-modules@vger.kernel.org, linux-kernel@vger.kernel.org
Subject: [RFC PATCH 16/30] mm: enable slab allocation tagging for kmalloc and friends
Date: Tue, 30 Aug 2022 14:49:05 -0700
Message-ID: <20220830214919.53220-17-surenb@google.com>
In-Reply-To: <20220830214919.53220-1-surenb@google.com>

Redefine kmalloc, krealloc, kzalloc, kcalloc, etc. as macros that record
the allocations and deallocations done by these functions. The underlying
implementations are renamed with a leading underscore (e.g. _kmalloc) and
the public names are wrapped with the kmalloc_hooks()/krealloc_hooks()
macros, which tag the returned object; matching slab_tag_dec() calls are
added to the allocators' free paths.
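
To illustrate the pattern, here is a minimal standalone sketch (hypothetical
my_* names, not the kernel API; it uses GCC statement expressions just as the
kernel macros below do): the allocator keeps its body under a
leading-underscore name, the public name becomes a macro that routes the
returned pointer through a tagging hook, and the free path drops the tag.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for slab_tag_add()/slab_tag_dec(): record where
 * an object was allocated and drop the record when it is freed. */
static inline void my_tag_add(const void *old, const void *newp,
			      const char *file, int line)
{
	if (newp)
		printf("tag add: %p at %s:%d (moved from %p)\n",
		       newp, file, line, old);
}

static inline void my_tag_dec(const void *ptr)
{
	if (ptr)
		printf("tag dec: %p\n", ptr);
}

/* Renamed implementation, analogous to _kmalloc()/_krealloc(). */
static inline void *_my_realloc(void *p, size_t size)
{
	return realloc(p, size);
}

/* Hook wrapper, analogous to krealloc_hooks(): evaluate the allocation
 * once, tag the result, and hand it back to the caller. */
#define my_realloc_hooks(_p, _do_alloc)			\
({							\
	void *_res = (_do_alloc);			\
	my_tag_add((_p), _res, __FILE__, __LINE__);	\
	_res;						\
})

/* The public names become macros, so every call site is tagged. */
#define my_realloc(_p, _size)	my_realloc_hooks(_p, _my_realloc(_p, _size))
#define my_malloc(_size)	my_realloc_hooks(NULL, _my_realloc(NULL, _size))

static inline void my_free(void *p)
{
	my_tag_dec(p);	/* mirrors the slab_tag_dec() calls in the free paths */
	free(p);
}

int main(void)
{
	void *buf = my_malloc(32);	/* tagged with this file:line */

	buf = my_realloc(buf, 64);	/* re-tagged on reallocation */
	my_free(buf);			/* tag dropped on free */
	return 0;
}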

Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Co-developed-by: Kent Overstreet <kent.overstreet@linux.dev>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
---
 include/linux/slab.h | 103 +++++++++++++++++++++++++------------------
 mm/slab.c            |   2 +
 mm/slab_common.c     |  16 +++----
 mm/slob.c            |   2 +
 mm/slub.c            |   2 +
 5 files changed, 75 insertions(+), 50 deletions(-)

diff --git a/include/linux/slab.h b/include/linux/slab.h
index 5a198aa02a08..89273be35743 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -191,7 +191,10 @@ int kmem_cache_shrink(struct kmem_cache *s);
 /*
  * Common kmalloc functions provided by all allocators
  */
-void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
+void * __must_check _krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
+#define krealloc(_p, _size, _flags)					\
+	krealloc_hooks(_p, _krealloc(_p, _size, _flags))
+
 void kfree(const void *objp);
 void kfree_sensitive(const void *objp);
 size_t __ksize(const void *objp);
@@ -463,6 +466,15 @@ static inline void slab_tag_dec(const void *ptr) {}
 
 #endif
 
+#define krealloc_hooks(_p, _do_alloc)					\
+({									\
+	void *_res = _do_alloc;						\
+	slab_tag_add(_p, _res);						\
+	_res;								\
+})
+
+#define kmalloc_hooks(_do_alloc)	krealloc_hooks(NULL, _do_alloc)
+
 void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
 void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
@@ -541,25 +553,31 @@ static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, g
 }
 #endif /* CONFIG_TRACING */
 
-extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
+extern void *_kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
 									 __alloc_size(1);
+#define kmalloc_order(_size, _flags, _order)              \
+	kmalloc_hooks(_kmalloc_order(_size, _flags, _order))
 
 #ifdef CONFIG_TRACING
-extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+extern void *_kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 				__assume_page_alignment __alloc_size(1);
 #else
-static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
+static __always_inline __alloc_size(1) void *_kmalloc_order_trace(size_t size, gfp_t flags,
 								 unsigned int order)
 {
-	return kmalloc_order(size, flags, order);
+	return _kmalloc_order(size, flags, order);
 }
 #endif
+#define kmalloc_order_trace(_size, _flags, _order)      \
+	kmalloc_hooks(_kmalloc_order_trace(_size, _flags, _order))
 
-static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *_kmalloc_large(size_t size, gfp_t flags)
 {
 	unsigned int order = get_order(size);
-	return kmalloc_order_trace(size, flags, order);
+	return _kmalloc_order_trace(size, flags, order);
 }
+#define kmalloc_large(_size, _flags)                    \
+	kmalloc_hooks(_kmalloc_large(_size, _flags))
 
 /**
  * kmalloc - allocate memory
@@ -615,14 +633,14 @@ static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t fl
  *	Try really hard to succeed the allocation but fail
  *	eventually.
  */
-static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
+static __always_inline __alloc_size(1) void *_kmalloc(size_t size, gfp_t flags)
 {
 	if (__builtin_constant_p(size)) {
 #ifndef CONFIG_SLOB
 		unsigned int index;
 #endif
 		if (size > KMALLOC_MAX_CACHE_SIZE)
-			return kmalloc_large(size, flags);
+			return _kmalloc_large(size, flags);
 #ifndef CONFIG_SLOB
 		index = kmalloc_index(size);
 
@@ -636,8 +654,9 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
 	}
 	return __kmalloc(size, flags);
 }
+#define kmalloc(_size, _flags)			kmalloc_hooks(_kmalloc(_size, _flags))
 
-static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
+static __always_inline __alloc_size(1) void *_kmalloc_node(size_t size, gfp_t flags, int node)
 {
 #ifndef CONFIG_SLOB
 	if (__builtin_constant_p(size) &&
@@ -654,6 +673,8 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
 #endif
 	return __kmalloc_node(size, flags, node);
 }
+#define kmalloc_node(_size, _flags, _node)		\
+	kmalloc_hooks(_kmalloc_node(_size, _flags, _node))
 
 /**
  * kmalloc_array - allocate memory for an array.
@@ -661,16 +682,18 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+static inline __alloc_size(1, 2) void *_kmalloc_array(size_t n, size_t size, gfp_t flags)
 {
 	size_t bytes;
 
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc(bytes, flags);
-	return __kmalloc(bytes, flags);
+		return _kmalloc(bytes, flags);
+	return _kmalloc(bytes, flags);
 }
+#define kmalloc_array(_n, _size, _flags)		\
+	kmalloc_hooks(_kmalloc_array(_n, _size, _flags))
 
 /**
  * krealloc_array - reallocate memory for an array.
@@ -679,7 +702,7 @@ static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_
  * @new_size: new size of a single member of the array
  * @flags: the type of memory to allocate (see kmalloc)
  */
-static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
+static inline __alloc_size(2, 3) void * __must_check _krealloc_array(void *p,
 								    size_t new_n,
 								    size_t new_size,
 								    gfp_t flags)
@@ -689,8 +712,10 @@ static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
 	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
 		return NULL;
 
-	return krealloc(p, bytes, flags);
+	return _krealloc(p, bytes, flags);
 }
+#define krealloc_array(_p, _n, _size, _flags)		\
+	krealloc_hooks(_p, _krealloc_array(_p, _n, _size, _flags))
 
 /**
  * kcalloc - allocate memory for an array. The memory is set to zero.
@@ -698,10 +723,8 @@ static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
  * @size: element size.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
-{
-	return kmalloc_array(n, size, flags | __GFP_ZERO);
-}
+#define kcalloc(_n, _size, _flags)			\
+	kmalloc_array(_n, _size, (_flags)|__GFP_ZERO)
 
 /*
  * kmalloc_track_caller is a special version of kmalloc that records the
@@ -712,10 +735,10 @@ static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flag
  * request comes from.
  */
 extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
-#define kmalloc_track_caller(size, flags) \
-	__kmalloc_track_caller(size, flags, _RET_IP_)
+#define kmalloc_track_caller(size, flags)		\
+	kmalloc_hooks(__kmalloc_track_caller(size, flags, _RET_IP_))
 
-static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
+static inline __alloc_size(1, 2) void *_kmalloc_array_node(size_t n, size_t size, gfp_t flags,
 							  int node)
 {
 	size_t bytes;
@@ -723,26 +746,24 @@ static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size,
 	if (unlikely(check_mul_overflow(n, size, &bytes)))
 		return NULL;
 	if (__builtin_constant_p(n) && __builtin_constant_p(size))
-		return kmalloc_node(bytes, flags, node);
+		return _kmalloc_node(bytes, flags, node);
 	return __kmalloc_node(bytes, flags, node);
 }
+#define kmalloc_array_node(_n, _size, _flags, _node)	\
+	kmalloc_hooks(_kmalloc_array_node(_n, _size, _flags, _node))
 
-static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
-{
-	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
-}
-
+#define kcalloc_node(_n, _size, _flags, _node)	\
+	kmalloc_array_node(_n, _size, (_flags)|__GFP_ZERO, _node)
 
 #ifdef CONFIG_NUMA
 extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
 					 unsigned long caller) __alloc_size(1);
-#define kmalloc_node_track_caller(size, flags, node) \
-	__kmalloc_node_track_caller(size, flags, node, \
-			_RET_IP_)
+#define kmalloc_node_track_caller(size, flags, node)	\
+	kmalloc_hooks(__kmalloc_node_track_caller(size, flags, node, _RET_IP_))
 
 #else /* CONFIG_NUMA */
 
-#define kmalloc_node_track_caller(size, flags, node) \
+#define kmalloc_node_track_caller(size, flags, node)	\
 	kmalloc_track_caller(size, flags)
 
 #endif /* CONFIG_NUMA */
@@ -750,20 +771,16 @@ extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
 /*
  * Shortcuts
  */
-static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
-{
-	return kmem_cache_alloc(k, flags | __GFP_ZERO);
-}
+#define kmem_cache_zalloc(_k, _flags)			\
+	kmem_cache_alloc(_k, (_flags)|__GFP_ZERO)
 
 /**
  * kzalloc - allocate memory. The memory is set to zero.
  * @size: how many bytes of memory are required.
  * @flags: the type of memory to allocate (see kmalloc).
  */
-static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
-{
-	return kmalloc(size, flags | __GFP_ZERO);
-}
+#define kzalloc(_size, _flags)				\
+	kmalloc(_size, (_flags)|__GFP_ZERO)
 
 /**
  * kzalloc_node - allocate zeroed memory from a particular memory node.
@@ -771,10 +788,12 @@ static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
  * @flags: the type of memory to allocate (see kmalloc).
  * @node: memory node from which to allocate
  */
-static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
+static inline __alloc_size(1) void *_kzalloc_node(size_t size, gfp_t flags, int node)
 {
-	return kmalloc_node(size, flags | __GFP_ZERO, node);
+	return _kmalloc_node(size, flags | __GFP_ZERO, node);
 }
+#define kzalloc_node(_size, _flags, _node)              \
+	kmalloc_hooks(_kzalloc_node(_size, _flags, _node))
 
 extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
 static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
diff --git a/mm/slab.c b/mm/slab.c
index ba97aeef7ec1..db344de3b260 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3402,6 +3402,7 @@ static __always_inline void __cache_free(struct kmem_cache *cachep, void *objp,
 
 	if (is_kfence_address(objp)) {
 		kmemleak_free_recursive(objp, cachep->flags);
+		slab_tag_dec(objp);
 		__kfence_free(objp);
 		return;
 	}
@@ -3433,6 +3434,7 @@ void ___cache_free(struct kmem_cache *cachep, void *objp,
 
 	check_irq_off();
 	kmemleak_free_recursive(objp, cachep->flags);
+	slab_tag_dec(objp);
 	objp = cache_free_debugcheck(cachep, objp, caller);
 
 	/*
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 272eda62ecaa..7b6473db5ab4 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -938,7 +938,7 @@ gfp_t kmalloc_fix_flags(gfp_t flags)
  * directly to the page allocator. We use __GFP_COMP, because we will need to
  * know the allocation order to free the pages properly in kfree.
  */
-void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
+void *_kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 {
 	void *ret = NULL;
 	struct page *page;
@@ -958,16 +958,16 @@ void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
 	kmemleak_alloc(ret, size, 1, flags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_order);
+EXPORT_SYMBOL(_kmalloc_order);
 
 #ifdef CONFIG_TRACING
-void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
+void *_kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
 {
-	void *ret = kmalloc_order(size, flags, order);
+	void *ret = _kmalloc_order(size, flags, order);
 	trace_kmalloc(_RET_IP_, ret, NULL, size, PAGE_SIZE << order, flags);
 	return ret;
 }
-EXPORT_SYMBOL(kmalloc_order_trace);
+EXPORT_SYMBOL(_kmalloc_order_trace);
 #endif
 
 #ifdef CONFIG_SLAB_FREELIST_RANDOM
@@ -1187,7 +1187,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 		return (void *)p;
 	}
 
-	ret = kmalloc_track_caller(new_size, flags);
+	ret = __kmalloc_track_caller(new_size, flags, _RET_IP_);
 	if (ret && p) {
 		/* Disable KASAN checks as the object's redzone is accessed. */
 		kasan_disable_current();
@@ -1211,7 +1211,7 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
  *
  * Return: pointer to the allocated memory or %NULL in case of error
  */
-void *krealloc(const void *p, size_t new_size, gfp_t flags)
+void *_krealloc(const void *p, size_t new_size, gfp_t flags)
 {
 	void *ret;
 
@@ -1226,7 +1226,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 
 	return ret;
 }
-EXPORT_SYMBOL(krealloc);
+EXPORT_SYMBOL(_krealloc);
 
 /**
  * kfree_sensitive - Clear sensitive information in memory before freeing
diff --git a/mm/slob.c b/mm/slob.c
index 2bd4f476c340..23b49f6c9c8f 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -554,6 +554,7 @@ void kfree(const void *block)
 	if (unlikely(ZERO_OR_NULL_PTR(block)))
 		return;
 	kmemleak_free(block);
+	slab_tag_dec(block);
 
 	sp = virt_to_folio(block);
 	if (folio_test_slab(sp)) {
@@ -680,6 +681,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 void kmem_cache_free(struct kmem_cache *c, void *b)
 {
 	kmemleak_free_recursive(b, c->flags);
+	slab_tag_dec(b);
 	trace_kmem_cache_free(_RET_IP_, b, c->name);
 	if (unlikely(c->flags & SLAB_TYPESAFE_BY_RCU)) {
 		struct slob_rcu *slob_rcu;
diff --git a/mm/slub.c b/mm/slub.c
index 80199d5ac7c9..caf752087ad6 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1715,6 +1715,7 @@ static inline void *kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
 static __always_inline void kfree_hook(void *x)
 {
 	kmemleak_free(x);
+	slab_tag_dec(x);
 	kasan_kfree_large(x);
 }
 
@@ -1722,6 +1723,7 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
 						void *x, bool init)
 {
 	kmemleak_free_recursive(x, s->flags);
+	slab_tag_dec(x);
 
 	debug_check_no_locks_freed(x, s->object_size);
 
-- 
2.37.2.672.g94769d06f0-goog

