All of lore.kernel.org
 help / color / mirror / Atom feed
* [RFC PATCH] lib/bug: use stackdepot for WARN*ONCE to make it more useful
@ 2017-01-03 13:44 Michal Hocko
  2017-01-11 19:41 ` Michal Hocko
  0 siblings, 1 reply; 2+ messages in thread
From: Michal Hocko @ 2017-01-03 13:44 UTC (permalink / raw)
  To: LKML
  Cc: Andrew Morton, Arnd Bergmann, Alexander Potapenko,
	Linus Torvalds, Michal Hocko

From: Michal Hocko <mhocko@suse.com>

One of the main problems with WARN_*ONCE-style warnings is that they
might easily hide different callpaths which lead to the warning. This is
especially true for library functions. Rebooting after each offender
that hits the particular WARN_*ONCE is just not feasible.

This patch (ab)uses stackdepot to make WARN_*ONCE more usable. We can
store whether a certain callpath has been seen in the stackdepot's
stack_record. This way only unique stack traces will be reported.

On the other hand this is not for free. WARN_*ONCE will consume more
stack (at least WARN_STACK_DEPTH*sizeof(unsigned long) + sizeof(struct
stack_trace)) and also requires to allocate a memory sometimes which
might fail. In those cases the warning may potentially be issued
multiple times.

Signed-off-by: Michal Hocko <mhocko@suse.com>
---
Hi,
this is an (untested) RFC to show the idea. It has some known problems:
 - depot_test_set_reported_stack is a GPL-only symbol, which is not suitable
   for a generic facility like WARN_*ONCE, but the rest of the stackdepot is
   exported as GPL only and I haven't explored why.
 - there needs to be recursion detection because nothing really prevents
   depot_test_set_reported_stack from calling into WARN_*ONCE indirectly
   (e.g. through memory allocation)
 - WARN_STACK_DEPTH doesn't really cooperate with other users of the stack
   depot well (namely KASAN_STACK_DEPTH)
 - configuration space would need to be considered because the STACKDEPOT
   cannot be selected manually currently

and maybe others. But I think the idea should be at least considered. So
here it goes.

Are there any thoughts, comments?

 include/asm-generic/bug.h  | 22 +++++++++++++
 include/linux/stackdepot.h |  2 ++
 lib/stackdepot.c           | 79 ++++++++++++++++++++++++++++++++++++++--------
 3 files changed, 90 insertions(+), 13 deletions(-)

diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 6f96247226a4..0b19fddea7ff 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -11,6 +11,7 @@
 
 #ifndef __ASSEMBLY__
 #include <linux/kernel.h>
+#include <linux/stackdepot.h>
 
 #ifdef CONFIG_BUG
 
@@ -112,6 +113,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 	unlikely(__ret_warn_on);					\
 })
 
+#ifndef CONFIG_STACKDEPOT
 #define WARN_ON_ONCE(condition)	({				\
 	static bool __section(.data.unlikely) __warned;		\
 	int __ret_warn_once = !!(condition);			\
@@ -133,6 +135,26 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
 	}							\
 	unlikely(__ret_warn_once);				\
 })
+#else
+#define WARN_STACK_DEPTH 16
+#define WARN_ON_ONCE(condition)	({				\
+	int __ret_warn_once = !!(condition);			\
+								\
+	if (unlikely(__ret_warn_once && !depot_test_set_reported_stack(WARN_STACK_DEPTH))) {		\
+		WARN_ON(1);					\
+	}							\
+	unlikely(__ret_warn_once);				\
+})
+
+#define WARN_ONCE(condition, format...)	({			\
+	int __ret_warn_once = !!(condition);			\
+								\
+	if (unlikely(__ret_warn_once && !depot_test_set_reported_stack(WARN_STACK_DEPTH))) {		\
+		WARN(1, format);				\
+	}							\
+	unlikely(__ret_warn_once);				\
+})
+#endif
 
 #define WARN_TAINT_ONCE(condition, taint, format...)	({	\
 	static bool __section(.data.unlikely) __warned;		\
diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
index 7978b3e2c1e1..0e6e047c5e02 100644
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -29,4 +29,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
 
 void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
 
+bool depot_test_set_reported_stack(int stack_depth);
+
 #endif
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index f87d138e9672..39a471912aa1 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -68,7 +68,10 @@ union handle_parts {
 struct stack_record {
 	struct stack_record *next;	/* Link in the hashtable */
 	u32 hash;			/* Hash in the hastable */
-	u32 size;			/* Number of frames in the stack */
+	u32 size;			/* Number of frames in the stack - the top bit
+					   encodes whether the stack has been seen already
+					   by depot_test_set_reported_stack. Use entries_count
+					   to get the value */
 	union handle_parts handle;
 	unsigned long entries[1];	/* Variable-sized array of entries. */
 };
@@ -80,6 +83,15 @@ static int next_slab_inited;
 static size_t depot_offset;
 static DEFINE_SPINLOCK(depot_lock);
 
+#define STACK_SEEN_BIT	31
+#define STACK_SEEN_MASK (1u<<STACK_SEEN_BIT)
+#define STACK_COUNT_MASK ~STACK_SEEN_MASK
+
+static inline u32 entries_count(struct stack_record *record)
+{
+	return record->size & STACK_COUNT_MASK;
+}
+
 static bool init_stack_slab(void **prealloc)
 {
 	if (!*prealloc)
@@ -172,7 +184,7 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
 
 	for (found = bucket; found; found = found->next) {
 		if (found->hash == hash &&
-		    found->size == size &&
+		    entries_count(found) == size &&
 		    !memcmp(entries, found->entries,
 			    size * sizeof(unsigned long))) {
 			return found;
@@ -188,24 +200,16 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
 	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
 	struct stack_record *stack = slab + offset;
 
-	trace->nr_entries = trace->max_entries = stack->size;
+	trace->nr_entries = trace->max_entries = entries_count(stack);
 	trace->entries = stack->entries;
 	trace->skip = 0;
 }
 EXPORT_SYMBOL_GPL(depot_fetch_stack);
 
-/**
- * depot_save_stack - save stack in a stack depot.
- * @trace - the stacktrace to save.
- * @alloc_flags - flags for allocating additional memory if required.
- *
- * Returns the handle of the stack struct stored in depot.
- */
-depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
+struct stack_record *__depot_save_stack(struct stack_trace *trace,
 				    gfp_t alloc_flags)
 {
 	u32 hash;
-	depot_stack_handle_t retval = 0;
 	struct stack_record *found = NULL, **bucket;
 	unsigned long flags;
 	struct page *page = NULL;
@@ -279,9 +283,58 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
 		/* Nobody used this memory, ok to free it. */
 		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
 	}
+fast_exit:
+	return found;
+}
+
+/**
+ * depot_save_stack - save stack in a stack depot.
+ * @trace - the stacktrace to save.
+ * @alloc_flags - flags for allocating additional memory if required.
+ *
+ * Returns the handle of the stack struct stored in depot.
+ */
+depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
+				    gfp_t alloc_flags)
+{
+	depot_stack_handle_t retval = 0;
+	struct stack_record *found = NULL;
+
+	found = __depot_save_stack(trace, alloc_flags);
 	if (found)
 		retval = found->handle.handle;
-fast_exit:
+
 	return retval;
 }
 EXPORT_SYMBOL_GPL(depot_save_stack);
+
+/* FIXME be careful about recursion */
+bool depot_test_set_reported_stack(int stack_depth)
+{
+	unsigned long entries[stack_depth];
+	struct stack_trace trace = {
+		.nr_entries = 0,
+		.entries = entries,
+		.max_entries = stack_depth,
+		.skip = 0
+	};
+	struct stack_record *record;
+
+	/* FIXME deduplicate save_stack from kasan.c */
+	save_stack_trace(&trace);
+	if (trace.nr_entries != 0 &&
+	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
+		trace.nr_entries--;
+
+	record = __depot_save_stack(&trace, GFP_ATOMIC);
+	if (!record)
+		return false;
+
+	/* This is racy but races should be too rare to matter */
+	if (record->size & STACK_SEEN_MASK)
+		return true;
+
+	record->size = STACK_SEEN_MASK | entries_count(record);
+	return false;
+}
+EXPORT_SYMBOL_GPL(depot_test_set_reported_stack);
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [RFC PATCH] lib/bug: use stackdepot for WARN*ONCE to make it more useful
  2017-01-03 13:44 [RFC PATCH] lib/bug: use stackdepot for WARN*ONCE to make it more useful Michal Hocko
@ 2017-01-11 19:41 ` Michal Hocko
  0 siblings, 0 replies; 2+ messages in thread
From: Michal Hocko @ 2017-01-11 19:41 UTC (permalink / raw)
  To: LKML; +Cc: Andrew Morton, Arnd Bergmann, Alexander Potapenko, Linus Torvalds

Has this just fallen through cracks or the silence means that people do
not really care about the current WARN_*ONCE behavior?

On Tue 03-01-17 14:44:24, Michal Hocko wrote:
> From: Michal Hocko <mhocko@suse.com>
> 
> One of the main problems with WARN_*ONCE-style warnings is that they
> might easily hide different callpaths which lead to the warning. This is
> especially true for library functions. Rebooting after each offender
> that hits the particular WARN_*ONCE is just not feasible.
> 
> This patch (ab)uses stackdepot to make WARN_*ONCE more usable. We can
> store whether a certain callpath has been seen in the stackdepot's
> stack_record. This way only unique stack traces will be reported.
> 
> On the other hand this is not for free. WARN_*ONCE will consume more
> stack (at least WARN_STACK_DEPTH*sizeof(unsigned long) + sizeof(struct
> stack_trace)) and also requires to allocate a memory sometimes which
> might fail. In those cases the warning may potentially be issued
> multiple times.
> 
> Signed-off-by: Michal Hocko <mhocko@suse.com>
> ---
> Hi,
> this is an (untested) RFC to show the idea. It has some known problems:
>  - depot_test_set_reported_stack is a GPL-only symbol, which is not suitable
>    for a generic facility like WARN_*ONCE, but the rest of the stackdepot is
>    exported as GPL only and I haven't explored why.
>  - there needs to be recursion detection because nothing really prevents
>    depot_test_set_reported_stack from calling into WARN_*ONCE indirectly
>    (e.g. through memory allocation)
>  - WARN_STACK_DEPTH doesn't really cooperate with other users of the stack
>    depot well (namely KASAN_STACK_DEPTH)
>  - configuration space would need to be considered because the STACKDEPOT
>    cannot be selected manually currently
> 
> and maybe others. But I think the idea should be at least considered. So
> here it goes.
> 
> Are there any thoughts, comments?
> 
>  include/asm-generic/bug.h  | 22 +++++++++++++
>  include/linux/stackdepot.h |  2 ++
>  lib/stackdepot.c           | 79 ++++++++++++++++++++++++++++++++++++++--------
>  3 files changed, 90 insertions(+), 13 deletions(-)
> 
> diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
> index 6f96247226a4..0b19fddea7ff 100644
> --- a/include/asm-generic/bug.h
> +++ b/include/asm-generic/bug.h
> @@ -11,6 +11,7 @@
>  
>  #ifndef __ASSEMBLY__
>  #include <linux/kernel.h>
> +#include <linux/stackdepot.h>
>  
>  #ifdef CONFIG_BUG
>  
> @@ -112,6 +113,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
>  	unlikely(__ret_warn_on);					\
>  })
>  
> +#ifndef CONFIG_STACKDEPOT
>  #define WARN_ON_ONCE(condition)	({				\
>  	static bool __section(.data.unlikely) __warned;		\
>  	int __ret_warn_once = !!(condition);			\
> @@ -133,6 +135,26 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
>  	}							\
>  	unlikely(__ret_warn_once);				\
>  })
> +#else
> +#define WARN_STACK_DEPTH 16
> +#define WARN_ON_ONCE(condition)	({				\
> +	int __ret_warn_once = !!(condition);			\
> +								\
> +	if (unlikely(__ret_warn_once && !depot_test_set_reported_stack(WARN_STACK_DEPTH))) {		\
> +		WARN_ON(1);					\
> +	}							\
> +	unlikely(__ret_warn_once);				\
> +})
> +
> +#define WARN_ONCE(condition, format...)	({			\
> +	int __ret_warn_once = !!(condition);			\
> +								\
> +	if (unlikely(__ret_warn_once && !depot_test_set_reported_stack(WARN_STACK_DEPTH))) {		\
> +		WARN(1, format);				\
> +	}							\
> +	unlikely(__ret_warn_once);				\
> +})
> +#endif
>  
>  #define WARN_TAINT_ONCE(condition, taint, format...)	({	\
>  	static bool __section(.data.unlikely) __warned;		\
> diff --git a/include/linux/stackdepot.h b/include/linux/stackdepot.h
> index 7978b3e2c1e1..0e6e047c5e02 100644
> --- a/include/linux/stackdepot.h
> +++ b/include/linux/stackdepot.h
> @@ -29,4 +29,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace, gfp_t flags);
>  
>  void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace);
>  
> +bool depot_test_set_reported_stack(int stack_depth);
> +
>  #endif
> diff --git a/lib/stackdepot.c b/lib/stackdepot.c
> index f87d138e9672..39a471912aa1 100644
> --- a/lib/stackdepot.c
> +++ b/lib/stackdepot.c
> @@ -68,7 +68,10 @@ union handle_parts {
>  struct stack_record {
>  	struct stack_record *next;	/* Link in the hashtable */
>  	u32 hash;			/* Hash in the hastable */
> -	u32 size;			/* Number of frames in the stack */
> +	u32 size;			/* Number of frames in the stack - the top bit
> +					   encodes whether the stack has been seen already
> +					   by depot_test_set_reported_stack. Use entries_count
> +					   to get the value */
>  	union handle_parts handle;
>  	unsigned long entries[1];	/* Variable-sized array of entries. */
>  };
> @@ -80,6 +83,15 @@ static int next_slab_inited;
>  static size_t depot_offset;
>  static DEFINE_SPINLOCK(depot_lock);
>  
> +#define STACK_SEEN_BIT	31
> +#define STACK_SEEN_MASK (1u<<STACK_SEEN_BIT)
> +#define STACK_COUNT_MASK ~STACK_SEEN_MASK
> +
> +static inline u32 entries_count(struct stack_record *record)
> +{
> +	return record->size & STACK_COUNT_MASK;
> +}
> +
>  static bool init_stack_slab(void **prealloc)
>  {
>  	if (!*prealloc)
> @@ -172,7 +184,7 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
>  
>  	for (found = bucket; found; found = found->next) {
>  		if (found->hash == hash &&
> -		    found->size == size &&
> +		    entries_count(found) == size &&
>  		    !memcmp(entries, found->entries,
>  			    size * sizeof(unsigned long))) {
>  			return found;
> @@ -188,24 +200,16 @@ void depot_fetch_stack(depot_stack_handle_t handle, struct stack_trace *trace)
>  	size_t offset = parts.offset << STACK_ALLOC_ALIGN;
>  	struct stack_record *stack = slab + offset;
>  
> -	trace->nr_entries = trace->max_entries = stack->size;
> +	trace->nr_entries = trace->max_entries = entries_count(stack);
>  	trace->entries = stack->entries;
>  	trace->skip = 0;
>  }
>  EXPORT_SYMBOL_GPL(depot_fetch_stack);
>  
> -/**
> - * depot_save_stack - save stack in a stack depot.
> - * @trace - the stacktrace to save.
> - * @alloc_flags - flags for allocating additional memory if required.
> - *
> - * Returns the handle of the stack struct stored in depot.
> - */
> -depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
> +struct stack_record *__depot_save_stack(struct stack_trace *trace,
>  				    gfp_t alloc_flags)
>  {
>  	u32 hash;
> -	depot_stack_handle_t retval = 0;
>  	struct stack_record *found = NULL, **bucket;
>  	unsigned long flags;
>  	struct page *page = NULL;
> @@ -279,9 +283,58 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
>  		/* Nobody used this memory, ok to free it. */
>  		free_pages((unsigned long)prealloc, STACK_ALLOC_ORDER);
>  	}
> +fast_exit:
> +	return found;
> +}
> +
> +/**
> + * depot_save_stack - save stack in a stack depot.
> + * @trace - the stacktrace to save.
> + * @alloc_flags - flags for allocating additional memory if required.
> + *
> + * Returns the handle of the stack struct stored in depot.
> + */
> +depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
> +				    gfp_t alloc_flags)
> +{
> +	depot_stack_handle_t retval = 0;
> +	struct stack_record *found = NULL;
> +
> +	found = __depot_save_stack(trace, alloc_flags);
>  	if (found)
>  		retval = found->handle.handle;
> -fast_exit:
> +
>  	return retval;
>  }
>  EXPORT_SYMBOL_GPL(depot_save_stack);
> +
> +/* FIXME be careful about recursion */
> +bool depot_test_set_reported_stack(int stack_depth)
> +{
> +	unsigned long entries[stack_depth];
> +	struct stack_trace trace = {
> +		.nr_entries = 0,
> +		.entries = entries,
> +		.max_entries = stack_depth,
> +		.skip = 0
> +	};
> +	struct stack_record *record;
> +
> +	/* FIXME deduplicate save_stack from kasan.c */
> +	save_stack_trace(&trace);
> +	if (trace.nr_entries != 0 &&
> +	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
> +		trace.nr_entries--;
> +
> +	record = __depot_save_stack(&trace, GFP_ATOMIC);
> +	if (!record)
> +		return false;
> +
> +	/* This is racy but races should be too rare to matter */
> +	if (record->size & STACK_SEEN_MASK)
> +		return true;
> +
> +	record->size = STACK_SEEN_MASK | entries_count(record);
> +	return false;
> +}
> +EXPORT_SYMBOL_GPL(depot_test_set_reported_stack);
> -- 
> 2.11.0
> 

-- 
Michal Hocko
SUSE Labs

^ permalink raw reply	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2017-01-11 19:42 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-01-03 13:44 [RFC PATCH] lib/bug: use stackdepot for WARN*ONCE to make it more useful Michal Hocko
2017-01-11 19:41 ` Michal Hocko

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.