All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v5 0/2] tracing: Add trace events for preemption and irq disable/enable
@ 2017-09-12 18:11 Joel Fernandes
  2017-09-12 18:11 ` [PATCH v5 1/2] tracing: Prepare to add preempt and irq trace events Joel Fernandes
  2017-09-12 18:11 ` [PATCH v5 2/2] tracing: Add support for preempt and irq enable/disable events Joel Fernandes
  0 siblings, 2 replies; 5+ messages in thread
From: Joel Fernandes @ 2017-09-12 18:11 UTC (permalink / raw)
  To: linux-kernel; +Cc: Joel Fernandes, Steven Rostedt, Peter Zijlstra, kernel-team

These patches add trace events support for preempt and irq disable/enable
events. This version has a few changes suggested by Steven as well.

I am looking forward to getting these accepted for v4.15 if possible. I based
it on Linus's master, please let me know if there is another branch I need to
rebase on. thanks!

Changes since v4:
- Use static inline instead of defines for tracer functions.
- Moved new per-cpu variables to protect from recursion to the correct defines.
- Dropped bug fix patch (3/3) not related to series and will submit that separately.

Here's an example of how Android's systrace will be using it to show critical
sections as a gantt chart: http://imgur.com/download/TZplEVp

Links to earlier series:
v4: https://lkml.org/lkml/2017/9/6/724
v3: https://lkml.org/lkml/2017/9/3/15

Joel Fernandes (2):
  tracing: Prepare to add preempt and irq trace events
  tracing: Add support for preempt and irq enable/disable events

 include/linux/ftrace.h            |   3 +-
 include/linux/irqflags.h          |   4 +-
 include/trace/events/preemptirq.h |  62 ++++++++++++++
 kernel/trace/Kconfig              |   9 ++
 kernel/trace/Makefile             |   1 +
 kernel/trace/trace_irqsoff.c      | 176 +++++++++++++++++++++++++++++++-------
 6 files changed, 222 insertions(+), 33 deletions(-)
 create mode 100644 include/trace/events/preemptirq.h

Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: kernel-team@android.com
-- 
2.14.1.581.gf28d330327-goog

^ permalink raw reply	[flat|nested] 5+ messages in thread

* [PATCH v5 1/2] tracing: Prepare to add preempt and irq trace events
  2017-09-12 18:11 [PATCH v5 0/2] tracing: Add trace events for preemption and irq disable/enable Joel Fernandes
@ 2017-09-12 18:11 ` Joel Fernandes
  2017-09-12 18:11 ` [PATCH v5 2/2] tracing: Add support for preempt and irq enable/disable events Joel Fernandes
  1 sibling, 0 replies; 5+ messages in thread
From: Joel Fernandes @ 2017-09-12 18:11 UTC (permalink / raw)
  To: linux-kernel
  Cc: Joel Fernandes, Steven Rostedt, Peter Zijlstra, kernel-team, Ingo Molnar

In preparation of adding irq and preempts enable and disable trace events, move
required functions and code to make it easier to add these events in a later
patch. This patch is just code movement and no functional change.

Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: kernel-team@android.com
Signed-off-by: Joel Fernandes <joelaf@google.com>
---
 kernel/trace/trace_irqsoff.c | 121 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 91 insertions(+), 30 deletions(-)

diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 7758bc0617cb..26c688740742 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -16,6 +16,7 @@
 
 #include "trace.h"
 
+#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 static struct trace_array		*irqsoff_trace __read_mostly;
 static int				tracer_enabled __read_mostly;
 
@@ -432,19 +433,17 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
 }
 
 /* start and stop critical timings used to for stoppage (in idle) */
-void start_critical_timings(void)
+static inline void start_critical_timings_tracer(void)
 {
 	if (preempt_trace() || irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL_GPL(start_critical_timings);
 
-void stop_critical_timings(void)
+static inline void stop_critical_timings_tracer(void)
 {
 	if (preempt_trace() || irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL_GPL(stop_critical_timings);
 
 #ifdef CONFIG_IRQSOFF_TRACER
 #ifdef CONFIG_PROVE_LOCKING
@@ -462,64 +461,44 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
 
 #else /* !CONFIG_PROVE_LOCKING */
 
-/*
- * Stubs:
- */
-
-void trace_softirqs_on(unsigned long ip)
-{
-}
-
-void trace_softirqs_off(unsigned long ip)
-{
-}
-
-inline void print_irqtrace_events(struct task_struct *curr)
-{
-}
-
 /*
  * We are only interested in hardirq on/off events:
  */
-void trace_hardirqs_on(void)
+static inline void tracer_hardirqs_on(void)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL(trace_hardirqs_on);
 
-void trace_hardirqs_off(void)
+static inline void tracer_hardirqs_off(void)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
 }
-EXPORT_SYMBOL(trace_hardirqs_off);
 
-__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		stop_critical_timing(CALLER_ADDR0, caller_addr);
 }
-EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
-__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr)
 {
 	if (!preempt_trace() && irq_trace())
 		start_critical_timing(CALLER_ADDR0, caller_addr);
 }
-EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 #endif /* CONFIG_PROVE_LOCKING */
 #endif /*  CONFIG_IRQSOFF_TRACER */
 
 #ifdef CONFIG_PREEMPT_TRACER
-void trace_preempt_on(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1)
 {
 	if (preempt_trace() && !irq_trace())
 		stop_critical_timing(a0, a1);
 }
 
-void trace_preempt_off(unsigned long a0, unsigned long a1)
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1)
 {
 	if (preempt_trace() && !irq_trace())
 		start_critical_timing(a0, a1);
@@ -781,3 +760,85 @@ __init static int init_irqsoff_tracer(void)
 	return 0;
 }
 core_initcall(init_irqsoff_tracer);
+#else /* IRQSOFF_TRACER || PREEMPTOFF_TRACER */
+static inline void start_critical_timings_tracer(void) { }
+static inline void stop_critical_timings_tracer(void) { }
+#endif
+
+#ifndef CONFIG_IRQSOFF_TRACER
+static inline void tracer_hardirqs_on(void) { }
+static inline void tracer_hardirqs_off(void) { }
+static inline void tracer_hardirqs_on_caller(unsigned long caller_addr) { }
+static inline void tracer_hardirqs_off_caller(unsigned long caller_addr) { }
+#endif
+
+#ifndef CONFIG_PREEMPT_TRACER
+static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
+static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
+#endif
+
+void start_critical_timings(void)
+{
+	start_critical_timings_tracer();
+}
+EXPORT_SYMBOL_GPL(start_critical_timings);
+
+void stop_critical_timings(void)
+{
+	stop_critical_timings_tracer();
+}
+EXPORT_SYMBOL_GPL(stop_critical_timings);
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
+void trace_hardirqs_on(void)
+{
+	tracer_hardirqs_on();
+}
+EXPORT_SYMBOL(trace_hardirqs_on);
+
+void trace_hardirqs_off(void)
+{
+	tracer_hardirqs_off();
+}
+EXPORT_SYMBOL(trace_hardirqs_off);
+
+__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
+{
+	tracer_hardirqs_on_caller(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+{
+	tracer_hardirqs_off_caller(caller_addr);
+}
+EXPORT_SYMBOL(trace_hardirqs_off_caller);
+
+/*
+ * Stubs:
+ */
+
+void trace_softirqs_on(unsigned long ip)
+{
+}
+
+void trace_softirqs_off(unsigned long ip)
+{
+}
+
+inline void print_irqtrace_events(struct task_struct *curr)
+{
+}
+#endif
+
+#ifdef CONFIG_PREEMPT_TRACER
+void trace_preempt_on(unsigned long a0, unsigned long a1)
+{
+	tracer_preempt_on(a0, a1);
+}
+
+void trace_preempt_off(unsigned long a0, unsigned long a1)
+{
+	tracer_preempt_off(a0, a1);
+}
+#endif
-- 
2.14.1.581.gf28d330327-goog

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* [PATCH v5 2/2] tracing: Add support for preempt and irq enable/disable events
  2017-09-12 18:11 [PATCH v5 0/2] tracing: Add trace events for preemption and irq disable/enable Joel Fernandes
  2017-09-12 18:11 ` [PATCH v5 1/2] tracing: Prepare to add preempt and irq trace events Joel Fernandes
@ 2017-09-12 18:11 ` Joel Fernandes
  2017-09-20 18:23   ` Steven Rostedt
  1 sibling, 1 reply; 5+ messages in thread
From: Joel Fernandes @ 2017-09-12 18:11 UTC (permalink / raw)
  To: linux-kernel
  Cc: Joel Fernandes, Steven Rostedt, Peter Zijlstra, kernel-team, Ingo Molnar

Preempt and irq trace events can be used for tracing the start and
end of an atomic section which can be used by a trace viewer like
systrace to graphically view the start and end of an atomic section and
correlate them with latencies and scheduling issues.

This also serves as a prelude to using synthetic events or probes to
rewrite the preempt and irqsoff tracers, along with numerous benefits of
using trace events features for these events.

Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: kernel-team@android.com
Signed-off-by: Joel Fernandes <joelaf@google.com>
---
 include/linux/ftrace.h            |  3 +-
 include/linux/irqflags.h          |  4 +--
 include/trace/events/preemptirq.h | 62 +++++++++++++++++++++++++++++++++++++++
 kernel/trace/Kconfig              |  9 ++++++
 kernel/trace/Makefile             |  1 +
 kernel/trace/trace_irqsoff.c      | 57 ++++++++++++++++++++++++++++++++++-
 6 files changed, 132 insertions(+), 4 deletions(-)
 create mode 100644 include/trace/events/preemptirq.h

diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6383115e9d2c..5790ba7a85bd 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -742,7 +742,8 @@ static inline unsigned long get_lock_parent_ip(void)
   static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
   extern void trace_preempt_on(unsigned long a0, unsigned long a1);
   extern void trace_preempt_off(unsigned long a0, unsigned long a1);
 #else
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 5dd1272d1ab2..6ad6732a4e98 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -44,8 +44,8 @@
 # define INIT_TRACE_IRQFLAGS
 #endif
 
-#if defined(CONFIG_IRQSOFF_TRACER) || \
-	defined(CONFIG_PREEMPT_TRACER)
+#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER) || \
+	defined(CONFIG_PREEMPTIRQ_EVENTS)
  extern void stop_critical_timings(void);
  extern void start_critical_timings(void);
 #else
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
new file mode 100644
index 000000000000..601935e32cd1
--- /dev/null
+++ b/include/trace/events/preemptirq.h
@@ -0,0 +1,62 @@
+#ifdef CONFIG_PREEMPTIRQ_EVENTS
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM preemptirq
+
+#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PREEMPTIRQ_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+#include <asm/sections.h>
+
+DECLARE_EVENT_CLASS(preemptirq_template,
+
+	TP_PROTO(unsigned long ip, unsigned long parent_ip),
+
+	TP_ARGS(ip, parent_ip),
+
+	TP_STRUCT__entry(
+		__field(u32, caller_offs)
+		__field(u32, parent_offs)
+	),
+
+	TP_fast_assign(
+		__entry->caller_offs = (u32)(ip - (unsigned long)_stext);
+		__entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+	),
+
+	TP_printk("caller=%pF parent=%pF\n",
+		  (void *)((unsigned long)(_stext) + __entry->caller_offs),
+		  (void *)((unsigned long)(_stext) + __entry->parent_offs))
+);
+
+DEFINE_EVENT(preemptirq_template, irq_disable,
+	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	     TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, irq_enable,
+	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	     TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, preempt_disable,
+	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	     TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(preemptirq_template, preempt_enable,
+	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
+	     TP_ARGS(ip, parent_ip));
+
+#endif /* _TRACE_PREEMPTIRQ_H */
+
+#include <trace/define_trace.h>
+
+#else /* !CONFIG_PREEMPTIRQ_EVENTS */
+
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+
+#endif
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 434c840e2d82..420666152de8 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -160,6 +160,15 @@ config FUNCTION_GRAPH_TRACER
 	  address on the current task structure into a stack of calls.
 
 
+config PREEMPTIRQ_EVENTS
+	bool "Enable trace events for preempt and irq disable/enable"
+	select TRACE_IRQFLAGS
+	default n
+	help
+	  Enable tracing of disable and enable events for preemption and irqs.
+	  For tracing preempt disable/enable events, CONFIG_DEBUG_PREEMPT must
+	  be enabled.
+
 config IRQSOFF_TRACER
 	bool "Interrupts-off Latency Tracer"
 	default n
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 90f2701d92a7..9f62eee61f14 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
 obj-$(CONFIG_TRACING_MAP) += tracing_map.o
 obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
 obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
+obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
 obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
 obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 26c688740742..361a17f83c4e 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -16,6 +16,9 @@
 
 #include "trace.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/preemptirq.h>
+
 #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
 static struct trace_array		*irqsoff_trace __read_mostly;
 static int				tracer_enabled __read_mostly;
@@ -777,14 +780,29 @@ static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
 static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
 #endif
 
+static DEFINE_PER_CPU(int, tracing_irq_cpu);
+static DEFINE_PER_CPU(int, tracing_preempt_cpu);
+
 void start_critical_timings(void)
 {
+	if (this_cpu_read(tracing_preempt_cpu))
+		trace_preempt_enable(CALLER_ADDR0, CALLER_ADDR1);
+
+	if (this_cpu_read(tracing_irq_cpu))
+		trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+
 	start_critical_timings_tracer();
 }
 EXPORT_SYMBOL_GPL(start_critical_timings);
 
 void stop_critical_timings(void)
 {
+	if (this_cpu_read(tracing_preempt_cpu))
+		trace_preempt_disable(CALLER_ADDR0, CALLER_ADDR1);
+
+	if (this_cpu_read(tracing_irq_cpu))
+		trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+
 	stop_critical_timings_tracer();
 }
 EXPORT_SYMBOL_GPL(stop_critical_timings);
@@ -792,24 +810,48 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
 #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
 void trace_hardirqs_on(void)
 {
+	if (!this_cpu_read(tracing_irq_cpu))
+		return;
+
+	trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
 	tracer_hardirqs_on();
+
+	this_cpu_write(tracing_irq_cpu, 0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 void trace_hardirqs_off(void)
 {
+	if (this_cpu_read(tracing_irq_cpu))
+		return;
+
+	this_cpu_write(tracing_irq_cpu, 1);
+
+	trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
 	tracer_hardirqs_off();
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
+	if (!this_cpu_read(tracing_irq_cpu))
+		return;
+
+	trace_irq_enable(CALLER_ADDR0, caller_addr);
 	tracer_hardirqs_on_caller(caller_addr);
+
+	this_cpu_write(tracing_irq_cpu, 0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
+	if (this_cpu_read(tracing_irq_cpu))
+		return;
+
+	this_cpu_write(tracing_irq_cpu, 1);
+
+	trace_irq_disable(CALLER_ADDR0, caller_addr);
 	tracer_hardirqs_off_caller(caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
@@ -831,14 +873,27 @@ inline void print_irqtrace_events(struct task_struct *curr)
 }
 #endif
 
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || \
+	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
 void trace_preempt_on(unsigned long a0, unsigned long a1)
 {
+	if (!this_cpu_read(tracing_preempt_cpu))
+		return;
+
+	trace_preempt_enable(a0, a1);
 	tracer_preempt_on(a0, a1);
+
+	this_cpu_write(tracing_preempt_cpu, 0);
 }
 
 void trace_preempt_off(unsigned long a0, unsigned long a1)
 {
+	if (this_cpu_read(tracing_preempt_cpu))
+		return;
+
+	this_cpu_write(tracing_preempt_cpu, 1);
+
+	trace_preempt_disable(a0, a1);
 	tracer_preempt_off(a0, a1);
 }
 #endif
-- 
2.14.1.581.gf28d330327-goog

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v5 2/2] tracing: Add support for preempt and irq enable/disable events
  2017-09-12 18:11 ` [PATCH v5 2/2] tracing: Add support for preempt and irq enable/disable events Joel Fernandes
@ 2017-09-20 18:23   ` Steven Rostedt
  2017-09-20 18:51     ` Joel Fernandes
  0 siblings, 1 reply; 5+ messages in thread
From: Steven Rostedt @ 2017-09-20 18:23 UTC (permalink / raw)
  To: Joel Fernandes; +Cc: linux-kernel, Peter Zijlstra, kernel-team, Ingo Molnar

On Tue, 12 Sep 2017 11:11:09 -0700
Joel Fernandes <joelaf@google.com> wrote:

> Preempt and irq trace events can be used for tracing the start and
> end of an atomic section which can be used by a trace viewer like
> systrace to graphically view the start and end of an atomic section and
> correlate them with latencies and scheduling issues.
> 
> This also serves as a prelude to using synthetic events or probes to
> rewrite the preempt and irqsoff tracers, along with numerous benefits of
> using trace events features for these events.
> 
> Cc: Steven Rostedt <rostedt@goodmis.org>
> Cc: Peter Zijlstra <peterz@infradead.org>
> Cc: kernel-team@android.com
> Signed-off-by: Joel Fernandes <joelaf@google.com>
> ---
>  include/linux/ftrace.h            |  3 +-
>  include/linux/irqflags.h          |  4 +--
>  include/trace/events/preemptirq.h | 62 +++++++++++++++++++++++++++++++++++++++
>  kernel/trace/Kconfig              |  9 ++++++
>  kernel/trace/Makefile             |  1 +
>  kernel/trace/trace_irqsoff.c      | 57 ++++++++++++++++++++++++++++++++++-
>  6 files changed, 132 insertions(+), 4 deletions(-)
>  create mode 100644 include/trace/events/preemptirq.h
> 
> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
> index 6383115e9d2c..5790ba7a85bd 100644
> --- a/include/linux/ftrace.h
> +++ b/include/linux/ftrace.h
> @@ -742,7 +742,8 @@ static inline unsigned long get_lock_parent_ip(void)
>    static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
>  #endif
>  
> -#ifdef CONFIG_PREEMPT_TRACER
> +#if defined(CONFIG_PREEMPT_TRACER) || \
> +	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
>    extern void trace_preempt_on(unsigned long a0, unsigned long a1);
>    extern void trace_preempt_off(unsigned long a0, unsigned long a1);
>  #else
> diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
> index 5dd1272d1ab2..6ad6732a4e98 100644
> --- a/include/linux/irqflags.h
> +++ b/include/linux/irqflags.h
> @@ -44,8 +44,8 @@
>  # define INIT_TRACE_IRQFLAGS
>  #endif
>  
> -#if defined(CONFIG_IRQSOFF_TRACER) || \
> -	defined(CONFIG_PREEMPT_TRACER)
> +#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER) || \
> +	defined(CONFIG_PREEMPTIRQ_EVENTS)
>   extern void stop_critical_timings(void);
>   extern void start_critical_timings(void);
>  #else
> diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
> new file mode 100644
> index 000000000000..601935e32cd1
> --- /dev/null
> +++ b/include/trace/events/preemptirq.h
> @@ -0,0 +1,62 @@
> +#ifdef CONFIG_PREEMPTIRQ_EVENTS
> +
> +#undef TRACE_SYSTEM
> +#define TRACE_SYSTEM preemptirq
> +
> +#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
> +#define _TRACE_PREEMPTIRQ_H
> +
> +#include <linux/ktime.h>
> +#include <linux/tracepoint.h>
> +#include <linux/string.h>
> +#include <asm/sections.h>
> +
> +DECLARE_EVENT_CLASS(preemptirq_template,
> +
> +	TP_PROTO(unsigned long ip, unsigned long parent_ip),
> +
> +	TP_ARGS(ip, parent_ip),
> +
> +	TP_STRUCT__entry(
> +		__field(u32, caller_offs)
> +		__field(u32, parent_offs)
> +	),
> +
> +	TP_fast_assign(
> +		__entry->caller_offs = (u32)(ip - (unsigned long)_stext);
> +		__entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
> +	),
> +
> +	TP_printk("caller=%pF parent=%pF\n",

No '\n', otherwise you add a lot of whitespace to the trace:

====
          <idle>-0     [002] d..1   252.533022: preempt_enable: caller=cpuidle_enter_state+0xfe/0x3a0 parent=cpuidle_enter+0x17/0x20

     rcu_preempt-8     [002] ...1   252.533063: preempt_enable: caller=schedule+0x4a/0x90 parent=schedule+0x4a/0x90

     rcu_preempt-8     [002] d..1   252.533065: preempt_disable: caller=_raw_spin_lock_irqsave+0x2f/0x60 parent=lock_timer_base+0x81/0xa0

     rcu_preempt-8     [002] ...1   252.533066: preempt_enable: caller=_raw_spin_unlock_irqrestore+0x4a/0x80 parent=try_to_del_timer_sync+0x44/0x60

     rcu_preempt-8     [002] d..1   252.533067: preempt_disable: caller=_raw_spin_lock_irqsave+0x2f/0x60 parent=prepare_to_swait+0x22/0x70

     rcu_preempt-8     [002] ...1   252.533067: preempt_enable: caller=_raw_spin_unlock_irqrestore+0x4a/0x80 parent=prepare_to_swait+0x63/0x70
====


> +		  (void *)((unsigned long)(_stext) + __entry->caller_offs),
> +		  (void *)((unsigned long)(_stext) + __entry->parent_offs))
> +);
> +
> +DEFINE_EVENT(preemptirq_template, irq_disable,
> +	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
> +	     TP_ARGS(ip, parent_ip));
> +
> +DEFINE_EVENT(preemptirq_template, irq_enable,
> +	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
> +	     TP_ARGS(ip, parent_ip));
> +
> +DEFINE_EVENT(preemptirq_template, preempt_disable,
> +	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
> +	     TP_ARGS(ip, parent_ip));
> +
> +DEFINE_EVENT(preemptirq_template, preempt_enable,
> +	     TP_PROTO(unsigned long ip, unsigned long parent_ip),
> +	     TP_ARGS(ip, parent_ip));
> +
> +#endif /* _TRACE_PREEMPTIRQ_H */
> +
> +#include <trace/define_trace.h>
> +
> +#else /* !CONFIG_PREEMPTIRQ_EVENTS */
> +
> +#define trace_irq_enable(...)
> +#define trace_irq_disable(...)
> +#define trace_preempt_enable(...)
> +#define trace_preempt_disable(...)
> +
> +#endif
> diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
> index 434c840e2d82..420666152de8 100644
> --- a/kernel/trace/Kconfig
> +++ b/kernel/trace/Kconfig
> @@ -160,6 +160,15 @@ config FUNCTION_GRAPH_TRACER
>  	  address on the current task structure into a stack of calls.
>  
>  
> +config PREEMPTIRQ_EVENTS
> +	bool "Enable trace events for preempt and irq disable/enable"
> +	select TRACE_IRQFLAGS
> +	default n
> +	help
> +	  Enable tracing of disable and enable events for preemption and irqs.
> +	  For tracing preempt disable/enable events, CONFIG_DEBUG_PREEMPT must
> +	  be enabled.
> +
>  config IRQSOFF_TRACER
>  	bool "Interrupts-off Latency Tracer"
>  	default n
> diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
> index 90f2701d92a7..9f62eee61f14 100644
> --- a/kernel/trace/Makefile
> +++ b/kernel/trace/Makefile
> @@ -34,6 +34,7 @@ obj-$(CONFIG_TRACING) += trace_printk.o
>  obj-$(CONFIG_TRACING_MAP) += tracing_map.o
>  obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
>  obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
> +obj-$(CONFIG_PREEMPTIRQ_EVENTS) += trace_irqsoff.o
>  obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
>  obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
>  obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
> diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
> index 26c688740742..361a17f83c4e 100644
> --- a/kernel/trace/trace_irqsoff.c
> +++ b/kernel/trace/trace_irqsoff.c
> @@ -16,6 +16,9 @@
>  
>  #include "trace.h"
>  
> +#define CREATE_TRACE_POINTS
> +#include <trace/events/preemptirq.h>
> +
>  #if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
>  static struct trace_array		*irqsoff_trace __read_mostly;
>  static int				tracer_enabled __read_mostly;
> @@ -777,14 +780,29 @@ static inline void tracer_preempt_on(unsigned long a0, unsigned long a1) { }
>  static inline void tracer_preempt_off(unsigned long a0, unsigned long a1) { }
>  #endif
>  
> +static DEFINE_PER_CPU(int, tracing_irq_cpu);
> +static DEFINE_PER_CPU(int, tracing_preempt_cpu);
> +
>  void start_critical_timings(void)
>  {
> +	if (this_cpu_read(tracing_preempt_cpu))
> +		trace_preempt_enable(CALLER_ADDR0, CALLER_ADDR1);
> +
> +	if (this_cpu_read(tracing_irq_cpu))
> +		trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
> +
>  	start_critical_timings_tracer();
>  }
>  EXPORT_SYMBOL_GPL(start_critical_timings);
>  
>  void stop_critical_timings(void)
>  {
> +	if (this_cpu_read(tracing_preempt_cpu))
> +		trace_preempt_disable(CALLER_ADDR0, CALLER_ADDR1);
> +
> +	if (this_cpu_read(tracing_irq_cpu))
> +		trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
> +
>  	stop_critical_timings_tracer();
>  }
>  EXPORT_SYMBOL_GPL(stop_critical_timings);
> @@ -792,24 +810,48 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
>  #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
>  void trace_hardirqs_on(void)
>  {
> +	if (!this_cpu_read(tracing_irq_cpu))
> +		return;
> +
> +	trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
>  	tracer_hardirqs_on();
> +
> +	this_cpu_write(tracing_irq_cpu, 0);
>  }
>  EXPORT_SYMBOL(trace_hardirqs_on);
>  
>  void trace_hardirqs_off(void)
>  {
> +	if (this_cpu_read(tracing_irq_cpu))
> +		return;
> +
> +	this_cpu_write(tracing_irq_cpu, 1);
> +
> +	trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
>  	tracer_hardirqs_off();
>  }
>  EXPORT_SYMBOL(trace_hardirqs_off);
>  
>  __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
>  {
> +	if (!this_cpu_read(tracing_irq_cpu))
> +		return;
> +
> +	trace_irq_enable(CALLER_ADDR0, caller_addr);
>  	tracer_hardirqs_on_caller(caller_addr);
> +
> +	this_cpu_write(tracing_irq_cpu, 0);
>  }
>  EXPORT_SYMBOL(trace_hardirqs_on_caller);
>  
>  __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
>  {
> +	if (this_cpu_read(tracing_irq_cpu))
> +		return;
> +
> +	this_cpu_write(tracing_irq_cpu, 1);
> +
> +	trace_irq_disable(CALLER_ADDR0, caller_addr);
>  	tracer_hardirqs_off_caller(caller_addr);
>  }
>  EXPORT_SYMBOL(trace_hardirqs_off_caller);
> @@ -831,14 +873,27 @@ inline void print_irqtrace_events(struct task_struct *curr)
>  }
>  #endif
>  
> -#ifdef CONFIG_PREEMPT_TRACER
> +#if defined(CONFIG_PREEMPT_TRACER) || \
> +	(defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
>  void trace_preempt_on(unsigned long a0, unsigned long a1)
>  {
> +	if (!this_cpu_read(tracing_preempt_cpu))
> +		return;
> +
> +	trace_preempt_enable(a0, a1);
>  	tracer_preempt_on(a0, a1);
> +
> +	this_cpu_write(tracing_preempt_cpu, 0);
>  }
>  
>  void trace_preempt_off(unsigned long a0, unsigned long a1)
>  {
> +	if (this_cpu_read(tracing_preempt_cpu))
> +		return;
> +
> +	this_cpu_write(tracing_preempt_cpu, 1);
> +
> +	trace_preempt_disable(a0, a1);
>  	tracer_preempt_off(a0, a1);
>  }
>  #endif

Need to fold in the following patch as well. Otherwise RCU may have
issues (run with lockdep enabled and you'll see all the splats).

-- Steve

diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 361a17f..7dcb431 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -786,10 +786,10 @@ static DEFINE_PER_CPU(int, tracing_preempt_cpu);
 void start_critical_timings(void)
 {
 	if (this_cpu_read(tracing_preempt_cpu))
-		trace_preempt_enable(CALLER_ADDR0, CALLER_ADDR1);
+		trace_preempt_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 
 	if (this_cpu_read(tracing_irq_cpu))
-		trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+		trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 
 	start_critical_timings_tracer();
 }
@@ -798,10 +798,10 @@ EXPORT_SYMBOL_GPL(start_critical_timings);
 void stop_critical_timings(void)
 {
 	if (this_cpu_read(tracing_preempt_cpu))
-		trace_preempt_disable(CALLER_ADDR0, CALLER_ADDR1);
+		trace_preempt_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 
 	if (this_cpu_read(tracing_irq_cpu))
-		trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+		trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 
 	stop_critical_timings_tracer();
 }
@@ -813,7 +813,7 @@ void trace_hardirqs_on(void)
 	if (!this_cpu_read(tracing_irq_cpu))
 		return;
 
-	trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
+	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 	tracer_hardirqs_on();
 
 	this_cpu_write(tracing_irq_cpu, 0);
@@ -827,7 +827,7 @@ void trace_hardirqs_off(void)
 
 	this_cpu_write(tracing_irq_cpu, 1);
 
-	trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
+	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
 	tracer_hardirqs_off();
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
@@ -837,7 +837,7 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 	if (!this_cpu_read(tracing_irq_cpu))
 		return;
 
-	trace_irq_enable(CALLER_ADDR0, caller_addr);
+	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
 	tracer_hardirqs_on_caller(caller_addr);
 
 	this_cpu_write(tracing_irq_cpu, 0);
@@ -851,7 +851,7 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 
 	this_cpu_write(tracing_irq_cpu, 1);
 
-	trace_irq_disable(CALLER_ADDR0, caller_addr);
+	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
 	tracer_hardirqs_off_caller(caller_addr);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
 /*
@@ -880,7 +880,7 @@ void trace_preempt_on(unsigned long a0, unsigned long a1)
 	if (!this_cpu_read(tracing_preempt_cpu))
 		return;
 
-	trace_preempt_enable(a0, a1);
+	trace_preempt_enable_rcuidle(a0, a1);
 	tracer_preempt_on(a0, a1);
 
 	this_cpu_write(tracing_preempt_cpu, 0);
@@ -893,7 +893,7 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 
 	this_cpu_write(tracing_preempt_cpu, 1);
 
-	trace_preempt_disable(a0, a1);
+	trace_preempt_disable_rcuidle(a0, a1);
 	tracer_preempt_off(a0, a1);
 }
 #endif

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: [PATCH v5 2/2] tracing: Add support for preempt and irq enable/disable events
  2017-09-20 18:23   ` Steven Rostedt
@ 2017-09-20 18:51     ` Joel Fernandes
  0 siblings, 0 replies; 5+ messages in thread
From: Joel Fernandes @ 2017-09-20 18:51 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: LKML, Peter Zijlstra, kernel-team, Ingo Molnar

On Wed, Sep 20, 2017 at 11:23 AM, Steven Rostedt <rostedt@goodmis.org> wrote:
> On Tue, 12 Sep 2017 11:11:09 -0700
> Joel Fernandes <joelaf@google.com> wrote:
>
>> Preempt and irq trace events can be used for tracing the start and
>> end of an atomic section which can be used by a trace viewer like
>> systrace to graphically view the start and end of an atomic section and
>> correlate them with latencies and scheduling issues.
>>
>> This also serves as a prelude to using synthetic events or probes to
>> rewrite the preempt and irqsoff tracers, along with numerous benefits of
>> using trace events features for these events.
>>
>> Cc: Steven Rostedt <rostedt@goodmis.org>
>> Cc: Peter Zijlstra <peterz@infradead.org>
>> Cc: kernel-team@android.com
>> Signed-off-by: Joel Fernandes <joelaf@google.com>
>> ---
>>  include/linux/ftrace.h            |  3 +-
>>  include/linux/irqflags.h          |  4 +--
>>  include/trace/events/preemptirq.h | 62 +++++++++++++++++++++++++++++++++++++++
>>  kernel/trace/Kconfig              |  9 ++++++
>>  kernel/trace/Makefile             |  1 +
>>  kernel/trace/trace_irqsoff.c      | 57 ++++++++++++++++++++++++++++++++++-
>>  6 files changed, 132 insertions(+), 4 deletions(-)
>>  create mode 100644 include/trace/events/preemptirq.h
>>
>> diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
>> index 6383115e9d2c..5790ba7a85bd 100644
>> --- a/include/linux/ftrace.h
>> +++ b/include/linux/ftrace.h
>> @@ -742,7 +742,8 @@ static inline unsigned long get_lock_parent_ip(void)
>>    static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
>>  #endif
>>
>> -#ifdef CONFIG_PREEMPT_TRACER
>> +#if defined(CONFIG_PREEMPT_TRACER) || \
>> +     (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
>>    extern void trace_preempt_on(unsigned long a0, unsigned long a1);
>>    extern void trace_preempt_off(unsigned long a0, unsigned long a1);
>>  #else
>> diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
>> index 5dd1272d1ab2..6ad6732a4e98 100644
>> --- a/include/linux/irqflags.h
>> +++ b/include/linux/irqflags.h
>> @@ -44,8 +44,8 @@
>>  # define INIT_TRACE_IRQFLAGS
>>  #endif
>>
>> -#if defined(CONFIG_IRQSOFF_TRACER) || \
>> -     defined(CONFIG_PREEMPT_TRACER)
>> +#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER) || \
>> +     defined(CONFIG_PREEMPTIRQ_EVENTS)
>>   extern void stop_critical_timings(void);
>>   extern void start_critical_timings(void);
>>  #else
>> diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
>> new file mode 100644
>> index 000000000000..601935e32cd1
>> --- /dev/null
>> +++ b/include/trace/events/preemptirq.h
>> @@ -0,0 +1,62 @@
>> +#ifdef CONFIG_PREEMPTIRQ_EVENTS
>> +
>> +#undef TRACE_SYSTEM
>> +#define TRACE_SYSTEM preemptirq
>> +
>> +#if !defined(_TRACE_PREEMPTIRQ_H) || defined(TRACE_HEADER_MULTI_READ)
>> +#define _TRACE_PREEMPTIRQ_H
>> +
>> +#include <linux/ktime.h>
>> +#include <linux/tracepoint.h>
>> +#include <linux/string.h>
>> +#include <asm/sections.h>
>> +
>> +DECLARE_EVENT_CLASS(preemptirq_template,
>> +
>> +     TP_PROTO(unsigned long ip, unsigned long parent_ip),
>> +
>> +     TP_ARGS(ip, parent_ip),
>> +
>> +     TP_STRUCT__entry(
>> +             __field(u32, caller_offs)
>> +             __field(u32, parent_offs)
>> +     ),
>> +
>> +     TP_fast_assign(
>> +             __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
>> +             __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
>> +     ),
>> +
>> +     TP_printk("caller=%pF parent=%pF\n",
>
> No '\n', otherwise you add a lot of whitespace to the trace:
>

Sure will fix, thanks.

[..]
>>  EXPORT_SYMBOL_GPL(stop_critical_timings);
>> @@ -792,24 +810,48 @@ EXPORT_SYMBOL_GPL(stop_critical_timings);
>>  #if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PROVE_LOCKING)
>>  void trace_hardirqs_on(void)
>>  {
>> +     if (!this_cpu_read(tracing_irq_cpu))
>> +             return;
>> +
>> +     trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
>>       tracer_hardirqs_on();
>> +
>> +     this_cpu_write(tracing_irq_cpu, 0);
>>  }
>>  EXPORT_SYMBOL(trace_hardirqs_on);
>>
>>  void trace_hardirqs_off(void)
>>  {
>> +     if (this_cpu_read(tracing_irq_cpu))
>> +             return;
>> +
>> +     this_cpu_write(tracing_irq_cpu, 1);
>> +
>> +     trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
>>       tracer_hardirqs_off();
>>  }
>>  EXPORT_SYMBOL(trace_hardirqs_off);
>>
>>  __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
>>  {
>> +     if (!this_cpu_read(tracing_irq_cpu))
>> +             return;
>> +
>> +     trace_irq_enable(CALLER_ADDR0, caller_addr);
>>       tracer_hardirqs_on_caller(caller_addr);
>> +
>> +     this_cpu_write(tracing_irq_cpu, 0);
>>  }
>>  EXPORT_SYMBOL(trace_hardirqs_on_caller);
>>
>>  __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
>>  {
>> +     if (this_cpu_read(tracing_irq_cpu))
>> +             return;
>> +
>> +     this_cpu_write(tracing_irq_cpu, 1);
>> +
>> +     trace_irq_disable(CALLER_ADDR0, caller_addr);
>>       tracer_hardirqs_off_caller(caller_addr);
>>  }
>>  EXPORT_SYMBOL(trace_hardirqs_off_caller);
>> @@ -831,14 +873,27 @@ inline void print_irqtrace_events(struct task_struct *curr)
>>  }
>>  #endif
>>
>> -#ifdef CONFIG_PREEMPT_TRACER
>> +#if defined(CONFIG_PREEMPT_TRACER) || \
>> +     (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
>>  void trace_preempt_on(unsigned long a0, unsigned long a1)
>>  {
>> +     if (!this_cpu_read(tracing_preempt_cpu))
>> +             return;
>> +
>> +     trace_preempt_enable(a0, a1);
>>       tracer_preempt_on(a0, a1);
>> +
>> +     this_cpu_write(tracing_preempt_cpu, 0);
>>  }
>>
>>  void trace_preempt_off(unsigned long a0, unsigned long a1)
>>  {
>> +     if (this_cpu_read(tracing_preempt_cpu))
>> +             return;
>> +
>> +     this_cpu_write(tracing_preempt_cpu, 1);
>> +
>> +     trace_preempt_disable(a0, a1);
>>       tracer_preempt_off(a0, a1);
>>  }
>>  #endif
>
> Need to fold in the following patch as well. Otherwise RCU may have
> issues (run with lockdep enabled and you'll see all the splats).

Ok, will fix, thanks!

>
> -- Steve
>
> diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
> index 361a17f..7dcb431 100644
> --- a/kernel/trace/trace_irqsoff.c
> +++ b/kernel/trace/trace_irqsoff.c
> @@ -786,10 +786,10 @@ static DEFINE_PER_CPU(int, tracing_preempt_cpu);
>  void start_critical_timings(void)
>  {
>         if (this_cpu_read(tracing_preempt_cpu))
> -               trace_preempt_enable(CALLER_ADDR0, CALLER_ADDR1);
> +               trace_preempt_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
>
>         if (this_cpu_read(tracing_irq_cpu))
> -               trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
> +               trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
>
>         start_critical_timings_tracer();
>  }
> @@ -798,10 +798,10 @@ EXPORT_SYMBOL_GPL(start_critical_timings);
>  void stop_critical_timings(void)
>  {
>         if (this_cpu_read(tracing_preempt_cpu))
> -               trace_preempt_disable(CALLER_ADDR0, CALLER_ADDR1);
> +               trace_preempt_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
>
>         if (this_cpu_read(tracing_irq_cpu))
> -               trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
> +               trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
>
>         stop_critical_timings_tracer();
>  }
> @@ -813,7 +813,7 @@ void trace_hardirqs_on(void)
>         if (!this_cpu_read(tracing_irq_cpu))
>                 return;
>
> -       trace_irq_enable(CALLER_ADDR0, CALLER_ADDR1);
> +       trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
>         tracer_hardirqs_on();
>
>         this_cpu_write(tracing_irq_cpu, 0);
> @@ -827,7 +827,7 @@ void trace_hardirqs_off(void)
>
>         this_cpu_write(tracing_irq_cpu, 1);
>
> -       trace_irq_disable(CALLER_ADDR0, CALLER_ADDR1);
> +       trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
>         tracer_hardirqs_off();
>  }
>  EXPORT_SYMBOL(trace_hardirqs_off);
> @@ -837,7 +837,7 @@ __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
>         if (!this_cpu_read(tracing_irq_cpu))
>                 return;
>
> -       trace_irq_enable(CALLER_ADDR0, caller_addr);
> +       trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
>         tracer_hardirqs_on_caller(caller_addr);
>
>         this_cpu_write(tracing_irq_cpu, 0);
> @@ -851,9 +851,9 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
>
>         this_cpu_write(tracing_irq_cpu, 1);
>
> -       trace_irq_disable(CALLER_ADDR0, caller_addr);
> +       trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
>         tracer_hardirqs_off_caller(caller_addr);
>  }
>  EXPORT_SYMBOL(trace_hardirqs_off_caller);
>
>  /*
> @@ -880,7 +880,7 @@ void trace_preempt_on(unsigned long a0, unsigned long a1)
>         if (!this_cpu_read(tracing_preempt_cpu))
>                 return;
>
> -       trace_preempt_enable(a0, a1);
> +       trace_preempt_enable_rcuidle(a0, a1);
>         tracer_preempt_on(a0, a1);
>
>         this_cpu_write(tracing_preempt_cpu, 0);
> @@ -893,7 +893,7 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
>
>         this_cpu_write(tracing_preempt_cpu, 1);
>
> -       trace_preempt_disable(a0, a1);
> +       trace_preempt_disable_rcuidle(a0, a1);
>         tracer_preempt_off(a0, a1);
>  }
>  #endif
>
> --
> You received this message because you are subscribed to the Google Groups "kernel-team" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to kernel-team+unsubscribe@android.com.
>

^ permalink raw reply	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2017-09-20 18:51 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-09-12 18:11 [PATCH v5 0/2] tracing: Add trace events for preemption and irq disable/enable Joel Fernandes
2017-09-12 18:11 ` [PATCH v5 1/2] tracing: Prepare to add preempt and irq trace events Joel Fernandes
2017-09-12 18:11 ` [PATCH v5 2/2] tracing: Add support for preempt and irq enable/disable events Joel Fernandes
2017-09-20 18:23   ` Steven Rostedt
2017-09-20 18:51     ` Joel Fernandes

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.