Hi all,

Today's linux-next merge of the ftrace tree got conflicts in:

  include/linux/trace_recursion.h
  kernel/trace/ftrace.c

between commit:

  ed65df63a39a ("tracing: Have all levels of checks prevent recursion")

from Linus' tree and commit:

  ce5e48036c9e ("ftrace: disable preemption when recursion locked")

from the ftrace tree.

I fixed it up (see below) and can carry the fix as necessary. This
is now fixed as far as linux-next is concerned, but any non-trivial
conflicts should be mentioned to your upstream maintainer when your
tree is submitted for merging.  You may also want to consider
cooperating with the maintainer of the conflicting tree to minimise
any particularly complex conflicts.

-- 
Cheers,
Stephen Rothwell

diff --cc include/linux/trace_recursion.h
index fe95f0922526,a13f23b04d73..000000000000
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@@ -139,8 -155,11 +135,11 @@@ extern void ftrace_record_recursion(uns
  # define do_ftrace_record_recursion(ip, pip)	do { } while (0)
  #endif
  
+ /*
+  * Preemption is promised to be disabled when return bit >= 0.
+  */
  static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
 -							int start, int max)
 +							int start)
  {
  	unsigned int val = READ_ONCE(current->trace_recursion);
  	int bit;
@@@ -148,10 -167,18 +147,14 @@@
  	bit = trace_get_context_bit() + start;
  	if (unlikely(val & (1 << bit))) {
  		/*
 -		 * It could be that preempt_count has not been updated during
 -		 * a switch between contexts. Allow for a single recursion.
 +		 * If an interrupt occurs during a trace, and another trace
 +		 * happens in that interrupt but before the preempt_count is
 +		 * updated to reflect the new interrupt context, then this
 +		 * will think a recursion occurred, and the event will be dropped.
 +		 * Let a single instance happen via the TRANSITION_BIT to
 +		 * not drop those events.
  		 */
 -		bit = TRACE_TRANSITION_BIT;
 +		bit = TRACE_CTX_TRANSITION + start;
  		if (val & (1 << bit)) {
  			do_ftrace_record_recursion(ip, pip);
  			return -1;
@@@ -162,12 -192,22 +165,18 @@@
  	current->trace_recursion = val;
  	barrier();
  
+ 	preempt_disable_notrace();
+ 
 -	return bit + 1;
 +	return bit;
  }
  
+ /*
+  * Preemption will be enabled (if it was previously enabled).
+  */
  static __always_inline void trace_clear_recursion(int bit)
  {
 -	if (!bit)
 -		return;
 -
+ 	preempt_enable_notrace();
  	barrier();
 -	bit--;
  	trace_recursion_clear(bit);
  }

diff --cc kernel/trace/ftrace.c
index 635fbdc9d589,b4ed1a301232..000000000000
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@@ -6977,7 -7198,12 +7198,12 @@@ __ftrace_ops_list_func(unsigned long ip
  	struct ftrace_ops *op;
  	int bit;
  
+ 	/*
+ 	 * The ftrace_test_and_set_recursion() will disable preemption,
+ 	 * which is required since some of the ops may be dynamically
+ 	 * allocated, they must be freed after a synchronize_rcu().
+ 	 */
 -	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START, TRACE_LIST_MAX);
 +	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
  	if (bit < 0)
  		return;
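
For reference, the resolution keeps the single "start" argument from
Linus' tree and the preemption tracking from the ftrace tree. A minimal
sketch of how a callback uses the merged API (the callback name and
body below are illustrative, not from either tree; the pattern mirrors
the __ftrace_ops_list_func() hunk above):

/*
 * Illustrative callback: trace_test_and_set_recursion() now takes
 * only a start bit, returns the claimed bit (or -1 on recursion),
 * and disables preemption on success; trace_clear_recursion()
 * re-enables it. Assumes <linux/ftrace.h> and the in-kernel
 * <linux/trace_recursion.h>.
 */
static void example_ops_func(unsigned long ip, unsigned long parent_ip,
			     struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	int bit;

	bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);
	if (bit < 0)
		return;		/* recursion detected: drop this event */

	/* Preemption is disabled here, so RCU-protected ops stay live. */

	trace_clear_recursion(bit);	/* re-enables preemption */
}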
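The comment added to __ftrace_ops_list_func() is the heart of the
conflict: the callback must run with preemption disabled so that
dynamically allocated ops are not freed underneath it. A hypothetical
teardown path (a sketch, not actual ftrace code) showing why
synchronize_rcu() makes that safe:

/*
 * Hypothetical teardown: synchronize_rcu() waits for every
 * preempt-disabled region to finish, so the ops can only be freed
 * once no in-flight callback is still using it. Assumes
 * <linux/ftrace.h>, <linux/rcupdate.h> and <linux/slab.h>.
 */
static void example_release_ops(struct ftrace_ops *ops)
{
	unregister_ftrace_function(ops);
	synchronize_rcu();	/* wait out in-flight callbacks */
	kfree(ops);
}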