* [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers
@ 2010-03-10  7:51 Jiri Olsa
  2010-03-10  7:51 ` [PATCHv2 1/4] tracing: adding ftrace events for graph tracer Jiri Olsa
                   ` (4 more replies)
  0 siblings, 5 replies; 9+ messages in thread
From: Jiri Olsa @ 2010-03-10  7:51 UTC (permalink / raw)
  To: rostedt; +Cc: linux-kernel

hi,

I'm sending a reworked version of the graph output support for
the preemptirqsoff/preemptoff/irqsoff tracers.

I made the graph output an output event, so it can be shared
among tracers - patch 1/4.

I also added raw trace output for the graph tracer. I have had this one
around for a long time, and it has been quite handy for investigating
graph tracer issues. (patch 4/4)

attached patches:
- 1/4 adding ftrace events for graph tracer
- 2/4 graph output support for irqsoff tracer
- 3/4 graph output support for preemptirqsoff/preemptoff tracers
- 4/4 raw output for graph tracer

v2 changes:
- keeping the current function graph output format


wbr,
jirka
---
 kernel/trace/trace.c                 |    2 +-
 kernel/trace/trace.h                 |   15 ++--
 kernel/trace/trace_functions_graph.c |  124 +++++++++++++++++++++----
 kernel/trace/trace_irqsoff.c         |  174 ++++++++++++++++++++++++++++++++--
 4 files changed, 283 insertions(+), 32 deletions(-)

* [PATCHv2 1/4] tracing: adding ftrace events for graph tracer
  2010-03-10  7:51 [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
@ 2010-03-10  7:51 ` Jiri Olsa
  2010-03-10  7:51 ` [PATCHv2 2/4] tracing: graph output support for irqsoff tracer Jiri Olsa
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 9+ messages in thread
From: Jiri Olsa @ 2010-03-10  7:51 UTC (permalink / raw)
  To: rostedt; +Cc: linux-kernel, Jiri Olsa

hi,

this patch adds ftrace events for the graph tracer, so the graph output
can be shared with other tracers.
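
For illustration only (not part of the patch): once the entry/return
events are registered, any output path that looks up events by type can
reuse the graph formatting. Using the names from this series, the
dispatch is roughly:

	struct trace_event *event;

	/* resolve the registered event, e.g. TRACE_GRAPH_ENT */
	event = ftrace_find_event(iter->ent->type);
	if (event)
		/* ends up in print_graph_function_event() */
		return event->trace(iter, sym_flags);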

wbr,
jirka


Signed-off-by: Jiri Olsa <jolsa@redhat.com>
---
 kernel/trace/trace.h                 |    7 -------
 kernel/trace/trace_functions_graph.c |   30 ++++++++++++++++++++++++++++--
 2 files changed, 28 insertions(+), 9 deletions(-)

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index fd05bca..9992c29 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -490,7 +490,6 @@ extern int trace_clock_id;
 
 /* Standard output formatting function used for function return traces */
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-extern enum print_line_t print_graph_function(struct trace_iterator *iter);
 extern enum print_line_t
 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
 
@@ -521,12 +520,6 @@ static inline int ftrace_graph_addr(unsigned long addr)
 	return 1;
 }
 #endif /* CONFIG_DYNAMIC_FTRACE */
-#else /* CONFIG_FUNCTION_GRAPH_TRACER */
-static inline enum print_line_t
-print_graph_function(struct trace_iterator *iter)
-{
-	return TRACE_TYPE_UNHANDLED;
-}
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
 
 extern struct list_head ftrace_pids;
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 3fc2a57..b0e6384 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -39,7 +39,7 @@ struct fgraph_data {
 #define TRACE_GRAPH_PRINT_OVERHEAD	0x4
 #define TRACE_GRAPH_PRINT_PROC		0x8
 #define TRACE_GRAPH_PRINT_DURATION	0x10
-#define TRACE_GRAPH_PRINT_ABS_TIME	0X20
+#define TRACE_GRAPH_PRINT_ABS_TIME	0x20
 
 static struct tracer_opt trace_opts[] = {
 	/* Display overruns? (for self-debug purpose) */
@@ -1017,7 +1017,7 @@ print_graph_comment(struct trace_seq *s,  struct trace_entry *ent,
 }
 
 
-enum print_line_t
+static enum print_line_t
 print_graph_function(struct trace_iterator *iter)
 {
 	struct ftrace_graph_ent_entry *field;
@@ -1073,6 +1073,12 @@ print_graph_function(struct trace_iterator *iter)
 	return TRACE_TYPE_HANDLED;
 }
 
+static enum print_line_t
+print_graph_function_event(struct trace_iterator *iter, int flags)
+{
+	return print_graph_function(iter);
+}
+
 static void print_lat_header(struct seq_file *s)
 {
 	static const char spaces[] = "                "	/* 16 spaces */
@@ -1176,6 +1182,16 @@ static void graph_trace_close(struct trace_iterator *iter)
 	}
 }
 
+static struct trace_event graph_trace_entry_event = {
+	.type		= TRACE_GRAPH_ENT,
+	.trace		= print_graph_function_event,
+};
+
+static struct trace_event graph_trace_ret_event = {
+	.type		= TRACE_GRAPH_RET,
+	.trace		= print_graph_function_event,
+};
+
 static struct tracer graph_trace __read_mostly = {
 	.name		= "function_graph",
 	.open		= graph_trace_open,
@@ -1197,6 +1213,16 @@ static __init int init_graph_trace(void)
 {
 	max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);
 
+	if (!register_ftrace_event(&graph_trace_entry_event)) {
+		pr_warning("Warning: could not register graph trace events\n");
+		return 1;
+	}
+
+	if (!register_ftrace_event(&graph_trace_ret_event)) {
+		pr_warning("Warning: could not register graph trace events\n");
+		return 1;
+	}
+
 	return register_tracer(&graph_trace);
 }
 
-- 
1.6.6


* [PATCHv2 2/4] tracing: graph output support for irqsoff tracer
  2010-03-10  7:51 [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
  2010-03-10  7:51 ` [PATCHv2 1/4] tracing: adding ftrace events for graph tracer Jiri Olsa
@ 2010-03-10  7:51 ` Jiri Olsa
  2010-03-24  1:19   ` Steven Rostedt
  2010-03-10  7:51 ` [PATCHv2 3/4] tracing: graph output support for preemptirqsoff/preemptoff tracers Jiri Olsa
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 9+ messages in thread
From: Jiri Olsa @ 2010-03-10  7:51 UTC (permalink / raw)
  To: rostedt; +Cc: linux-kernel, Jiri Olsa

hi,

when I was using the irqsoff tracer, I thought it would be useful
to see the irqs-disabled flow in the same way as the function graph output.

The graph output is enabled by setting the new 'display-graph' trace option
of the irqsoff tracer.

It looks like this:

[root@dell-pe1950-05 tracing]# echo irqsoff-graph > ./trace_options 
[root@dell-pe1950-05 tracing]# cat ./trace
# tracer: irqsoff
#
#      _-----=> irqs-off        
#     / _----=> need-resched    
#    | / _---=> hardirq/softirq 
#    || / _--=> preempt-depth   
#    ||| / _-=> lock-depth      
#    |||| /                     
# CPU|||||  DURATION                  FUNCTION CALLS
# |  |||||   |   |                     |   |   |   |
hald-add-1048    3d...0    0us : _raw_spin_lock_irqsave <-scsi_dispatch_cmd
 3)  d...0              |      ata_scsi_queuecmd() {
 3)  d...0  0.318 us    |        _raw_spin_lock();
 3)  d...0              |        ata_scsi_find_dev() {
 3)  d...0              |          __ata_scsi_find_dev() {
 3)  d...0  0.273 us    |            ata_find_dev();
 3)  d...0  0.798 us    |          }
 3)  d...0  1.419 us    |        }
 3)  d...0              |        __ata_scsi_queuecmd() {
 3)  d...0              |          ata_qc_new_init() {
 3)  d...0              |            ata_qc_reinit() {
 3)  d...0  0.258 us    |              ata_tf_init();
 3)  d...0  0.784 us    |            }
 3)  d...0  1.441 us    |          }
 3)  d...0  0.348 us    |          atapi_xlat();
 3)  d...0              |          ata_qc_issue() {
...

SNIP

...
 3)  d...0+ 31.244 us   |                }
 3)  d...0+ 32.393 us   |              }
 3)  d...0+ 99.146 us   |            }
 3)  d...0! 100.455 us  |          }
 3)  d...0! 103.669 us  |        }
 3)  d...0  0.267 us    |        _raw_spin_lock();
 3)  d...0! 107.264 us  |      }
 3)  d...0              |      spin_unlock_irqrestore() {
 3)  d...0              |        _raw_spin_unlock_irqrestore() {
hald-add-1048    3d...0  109us : _raw_spin_unlock_irqrestore <-return_to_handler
hald-add-1048    3d...0  109us : trace_hardirqs_on <-return_to_handler
hald-add-1048    3d...0  109us : <stack trace>
 => trace_hardirqs_on
 => _raw_spin_unlock_irqrestore
 => return_to_handler
 => scsi_dispatch_cmd
 => return_to_handler
 => __generic_unplug_device
 => scsi_request_fn
 => return_to_handler
[root@dell-pe1950-05 tracing]#

wbr,
jirka


Signed-off-by: Jiri Olsa <jolsa@redhat.com>
---
 kernel/trace/trace.h                 |    8 ++
 kernel/trace/trace_functions_graph.c |   25 ++----
 kernel/trace/trace_irqsoff.c         |  162 ++++++++++++++++++++++++++++++++--
 3 files changed, 172 insertions(+), 23 deletions(-)

diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 9992c29..735ec7e 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -492,6 +492,14 @@ extern int trace_clock_id;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 extern enum print_line_t
 trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
+void graph_trace_open(struct trace_iterator *iter);
+void graph_trace_close(struct trace_iterator *iter);
+int __trace_graph_entry(struct trace_array *tr, struct ftrace_graph_ent *trace,
+			unsigned long flags, int pc);
+void __trace_graph_return(struct trace_array *tr,
+			struct ftrace_graph_ret *trace,
+			unsigned long flags, int pc);
+
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index b0e6384..78e09f1 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -178,7 +178,7 @@ unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
 	return ret;
 }
 
-static int __trace_graph_entry(struct trace_array *tr,
+int __trace_graph_entry(struct trace_array *tr,
 				struct ftrace_graph_ent *trace,
 				unsigned long flags,
 				int pc)
@@ -237,7 +237,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
 	return ret;
 }
 
-static void __trace_graph_return(struct trace_array *tr,
+void __trace_graph_return(struct trace_array *tr,
 				struct ftrace_graph_ret *trace,
 				unsigned long flags,
 				int pc)
@@ -944,9 +944,7 @@ static enum print_line_t
 print_graph_comment(struct trace_seq *s,  struct trace_entry *ent,
 		    struct trace_iterator *iter)
 {
-	unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
 	struct fgraph_data *data = iter->private;
-	struct trace_event *event;
 	int depth = 0;
 	int ret;
 	int i;
@@ -993,14 +991,6 @@ print_graph_comment(struct trace_seq *s,  struct trace_entry *ent,
 		if (ret != TRACE_TYPE_HANDLED)
 			return ret;
 		break;
-	default:
-		event = ftrace_find_event(ent->type);
-		if (!event)
-			return TRACE_TYPE_UNHANDLED;
-
-		ret = event->trace(iter, sym_flags);
-		if (ret != TRACE_TYPE_HANDLED)
-			return ret;
 	}
 
 	/* Strip ending newline */
@@ -1066,8 +1056,11 @@ print_graph_function(struct trace_iterator *iter)
 		trace_assign_type(field, entry);
 		return print_graph_return(&field->ret, s, entry, iter);
 	}
-	default:
+	case TRACE_BPRINT:
+	case TRACE_PRINT:
 		return print_graph_comment(s, entry, iter);
+	default:
+		return TRACE_TYPE_UNHANDLED;
 	}
 
 	return TRACE_TYPE_HANDLED;
@@ -1101,7 +1094,7 @@ static void print_lat_header(struct seq_file *s)
 	seq_printf(s, "#%.*s|||| /                     \n", size, spaces);
 }
 
-static void print_graph_headers(struct seq_file *s)
+void print_graph_headers(struct seq_file *s)
 {
 	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
 
@@ -1137,7 +1130,7 @@ static void print_graph_headers(struct seq_file *s)
 	seq_printf(s, "               |   |   |   |\n");
 }
 
-static void graph_trace_open(struct trace_iterator *iter)
+void graph_trace_open(struct trace_iterator *iter)
 {
 	/* pid and depth on the last trace processed */
 	struct fgraph_data *data;
@@ -1172,7 +1165,7 @@ static void graph_trace_open(struct trace_iterator *iter)
 	pr_warning("function graph tracer: not enough memory\n");
 }
 
-static void graph_trace_close(struct trace_iterator *iter)
+void graph_trace_close(struct trace_iterator *iter)
 {
 	struct fgraph_data *data = iter->private;
 
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 2974bc7..d0c6d6c 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -34,6 +34,9 @@ static int trace_type __read_mostly;
 
 static int save_lat_flag;
 
+static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
+static int start_irqsoff_tracer(struct trace_array *tr, int graph);
+
 #ifdef CONFIG_PREEMPT_TRACER
 static inline int
 preempt_trace(void)
@@ -55,6 +58,23 @@ irq_trace(void)
 # define irq_trace() (0)
 #endif
 
+#define TRACE_DISPLAY_GRAPH	1
+
+static struct tracer_opt trace_opts[] = {
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* display latency trace as call graph */
+	{ TRACER_OPT(display-graph, TRACE_DISPLAY_GRAPH) },
+#endif
+	{ } /* Empty entry */
+};
+
+static struct tracer_flags tracer_flags = {
+	.val = 0,
+	.opts = trace_opts,
+};
+
+#define is_graph() (tracer_flags.val & TRACE_DISPLAY_GRAPH)
+
 /*
  * Sequence count - we record it when starting a measurement and
  * skip the latency if the sequence has changed - some other section
@@ -108,6 +128,109 @@ static struct ftrace_ops trace_ops __read_mostly =
 };
 #endif /* CONFIG_FUNCTION_TRACER */
 
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+{
+	int cpu;
+
+	if (!(bit & TRACE_DISPLAY_GRAPH))
+		return -EINVAL;
+
+	if (!(is_graph() ^ set))
+		return 0;
+
+	stop_irqsoff_tracer(irqsoff_trace, !set);
+
+	for_each_possible_cpu(cpu)
+		per_cpu(tracing_cpu, cpu) = 0;
+
+	tracing_max_latency = 0;
+	tracing_reset_online_cpus(irqsoff_trace);
+
+	return start_irqsoff_tracer(irqsoff_trace, set);
+}
+
+static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
+{
+	struct trace_array *tr = irqsoff_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int ret;
+	int cpu;
+	int pc;
+
+	cpu = raw_smp_processor_id();
+	if (likely(!per_cpu(tracing_cpu, cpu)))
+		return 0;
+
+	local_save_flags(flags);
+	/* slight chance to get a false positive on tracing_cpu */
+	if (!irqs_disabled_flags(flags))
+		return 0;
+
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		ret = __trace_graph_entry(tr, trace, flags, pc);
+	} else
+		ret = 0;
+
+	atomic_dec(&data->disabled);
+	return ret;
+}
+
+static void irqsoff_graph_return(struct ftrace_graph_ret *trace)
+{
+	struct trace_array *tr = irqsoff_trace;
+	struct trace_array_cpu *data;
+	unsigned long flags;
+	long disabled;
+	int cpu;
+	int pc;
+
+	cpu = raw_smp_processor_id();
+	if (likely(!per_cpu(tracing_cpu, cpu)))
+		return;
+
+	local_save_flags(flags);
+	/* slight chance to get a false positive on tracing_cpu */
+	if (!irqs_disabled_flags(flags))
+		return;
+
+	data = tr->data[cpu];
+	disabled = atomic_inc_return(&data->disabled);
+
+	if (likely(disabled == 1)) {
+		pc = preempt_count();
+		__trace_graph_return(tr, trace, flags, pc);
+	}
+
+	atomic_dec(&data->disabled);
+}
+
+static void irqsoff_trace_open(struct trace_iterator *iter)
+{
+	if (is_graph())
+		graph_trace_open(iter);
+
+}
+
+static void irqsoff_trace_close(struct trace_iterator *iter)
+{
+	if (iter->private)
+		graph_trace_close(iter);
+}
+
+#else
+static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
+{
+	return -EINVAL;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
 /*
  * Should this new latency be reported/recorded?
  */
@@ -347,19 +470,36 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
 }
 #endif /* CONFIG_PREEMPT_TRACER */
 
-static void start_irqsoff_tracer(struct trace_array *tr)
+static int start_irqsoff_tracer(struct trace_array *tr, int graph)
 {
-	register_ftrace_function(&trace_ops);
-	if (tracing_is_enabled())
+	int ret = 0;
+
+	if (!graph)
+		ret = register_ftrace_function(&trace_ops);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	else
+		ret = register_ftrace_graph(&irqsoff_graph_return,
+					    &irqsoff_graph_entry);
+#endif
+
+	if (!ret && tracing_is_enabled())
 		tracer_enabled = 1;
 	else
 		tracer_enabled = 0;
+
+	return ret;
 }
 
-static void stop_irqsoff_tracer(struct trace_array *tr)
+static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
 {
 	tracer_enabled = 0;
-	unregister_ftrace_function(&trace_ops);
+
+	if (!graph)
+		unregister_ftrace_function(&trace_ops);
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	else
+		unregister_ftrace_graph();
+#endif
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)
@@ -372,12 +512,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
 	/* make sure that the tracer is visible */
 	smp_wmb();
 	tracing_reset_online_cpus(tr);
-	start_irqsoff_tracer(tr);
+
+	if (start_irqsoff_tracer(tr, is_graph()))
+		printk(KERN_ERR "failed to start irqsoff tracer\n");
 }
 
 static void irqsoff_tracer_reset(struct trace_array *tr)
 {
-	stop_irqsoff_tracer(tr);
+	stop_irqsoff_tracer(tr, is_graph());
 
 	if (!save_lat_flag)
 		trace_flags &= ~TRACE_ITER_LATENCY_FMT;
@@ -409,9 +551,15 @@ static struct tracer irqsoff_tracer __read_mostly =
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
 	.print_max	= 1,
+	.flags		= &tracer_flags,
+	.set_flag	= irqsoff_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_irqsoff,
 #endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.open           = irqsoff_trace_open,
+	.close          = irqsoff_trace_close,
+#endif
 };
 # define register_irqsoff(trace) register_tracer(&trace)
 #else
-- 
1.6.6


* [PATCHv2 3/4] tracing: graph output support for preemptirqsoff/preemptoff tracers
  2010-03-10  7:51 [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
  2010-03-10  7:51 ` [PATCHv2 1/4] tracing: adding ftrace events for graph tracer Jiri Olsa
  2010-03-10  7:51 ` [PATCHv2 2/4] tracing: graph output support for irqsoff tracer Jiri Olsa
@ 2010-03-10  7:51 ` Jiri Olsa
  2010-03-10  7:51 ` [PATCHv2 4/4] tracing: raw output for graph tracer Jiri Olsa
  2010-03-22  8:48 ` [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
  4 siblings, 0 replies; 9+ messages in thread
From: Jiri Olsa @ 2010-03-10  7:51 UTC (permalink / raw)
  To: rostedt; +Cc: linux-kernel, Jiri Olsa

hi,

this patch adds graph output support for the preemptirqsoff/preemptoff tracers.
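
For reference, a minimal usage sketch (assuming debugfs is mounted at
/sys/kernel/debug and the option keeps the 'display-graph' name defined
in patch 2/4; the tracer is selected first because the option is
tracer-specific):

	# cd /sys/kernel/debug/tracing
	# echo preemptirqsoff > current_tracer
	# echo display-graph > trace_options
	# cat trace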

wbr,
jirka


Signed-off-by: Jiri Olsa <jolsa@redhat.com>
---
 kernel/trace/trace_irqsoff.c |   12 ++++++++++++
 1 files changed, 12 insertions(+), 0 deletions(-)

diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d0c6d6c..e8b4acf 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -583,9 +583,15 @@ static struct tracer preemptoff_tracer __read_mostly =
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
 	.print_max	= 1,
+	.flags		= &tracer_flags,
+	.set_flag	= irqsoff_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_preemptoff,
 #endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.open		= irqsoff_trace_open,
+	.close		= irqsoff_trace_close,
+#endif
 };
 # define register_preemptoff(trace) register_tracer(&trace)
 #else
@@ -611,9 +617,15 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
 	.start		= irqsoff_tracer_start,
 	.stop		= irqsoff_tracer_stop,
 	.print_max	= 1,
+	.flags		= &tracer_flags,
+	.set_flag	= irqsoff_set_flag,
 #ifdef CONFIG_FTRACE_SELFTEST
 	.selftest    = trace_selftest_startup_preemptirqsoff,
 #endif
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	.open		= irqsoff_trace_open,
+	.close		= irqsoff_trace_close,
+#endif
 };
 
 # define register_preemptirqsoff(trace) register_tracer(&trace)
-- 
1.6.6


* [PATCHv2 4/4] tracing: raw output for graph tracer
  2010-03-10  7:51 [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
                   ` (2 preceding siblings ...)
  2010-03-10  7:51 ` [PATCHv2 3/4] tracing: graph output support for preemptirqsoff/preemptoff tracers Jiri Olsa
@ 2010-03-10  7:51 ` Jiri Olsa
  2010-03-22  8:48 ` [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
  4 siblings, 0 replies; 9+ messages in thread
From: Jiri Olsa @ 2010-03-10  7:51 UTC (permalink / raw)
  To: rostedt; +Cc: linux-kernel, Jiri Olsa

hi,

this patch adds raw trace output for the graph tracer.

I have had this one around for a long time, and it has been quite handy
for investigating graph tracer issues.
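
A minimal usage sketch (assuming debugfs is mounted at /sys/kernel/debug;
'raw' is the existing trace option that sets TRACE_ITER_RAW, which this
patch checks for):

	# cd /sys/kernel/debug/tracing
	# echo function_graph > current_tracer
	# echo raw > trace_options
	# cat trace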

wbr,
jirka


Signed-off-by: Jiri Olsa <jolsa@redhat.com>
---
 kernel/trace/trace.c                 |    2 +-
 kernel/trace/trace_functions_graph.c |   71 ++++++++++++++++++++++++++++++++++
 2 files changed, 72 insertions(+), 1 deletions(-)

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index f771ac1..6ce6d52 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1901,7 +1901,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 	entry = iter->ent;
 
 	if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
-		if (!trace_seq_printf(s, "%d %d %llu ",
+		if (!trace_seq_printf(s, " %6d %3d %20llu ",
 				      entry->pid, iter->cpu, iter->ts))
 			goto partial;
 	}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 78e09f1..de76226 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -1017,6 +1017,9 @@ print_graph_function(struct trace_iterator *iter)
 	int cpu = iter->cpu;
 	int ret;
 
+	if (trace_flags & TRACE_ITER_RAW)
+		return TRACE_TYPE_UNHANDLED;
+
 	if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
 		per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
 		return TRACE_TYPE_HANDLED;
@@ -1072,6 +1075,66 @@ print_graph_function_event(struct trace_iterator *iter, int flags)
 	return print_graph_function(iter);
 }
 
+static enum print_line_t
+print_graph_raw(struct trace_iterator *iter, int flags)
+{
+	struct trace_entry *entry = iter->ent;
+	struct trace_seq *s = &iter->seq;
+	int ret = 0;
+	int depth;
+	void *func;
+	char *io;
+
+	switch (entry->type) {
+	case TRACE_GRAPH_ENT: {
+		struct ftrace_graph_ent_entry *field;
+		trace_assign_type(field, entry);
+		depth = field->graph_ent.depth;
+		func = (void *) field->graph_ent.func;
+		io = "-->";
+		break;
+	}
+	case TRACE_GRAPH_RET: {
+		struct ftrace_graph_ret_entry *field;
+		trace_assign_type(field, entry);
+		depth = field->ret.depth;
+		func = (void *) field->ret.func;
+		io = "<--";
+		break;
+	}
+	default:
+		return print_graph_comment(s, entry, iter);
+	}
+
+	ret = trace_seq_printf(s, "%s%6d %6d %6d %4d %7d %20p %4s %ps\n",
+		trace_flags & TRACE_ITER_CONTEXT_INFO ? "" : " ",
+		depth,
+		entry->lock_depth,
+		entry->flags,
+		entry->preempt_count,
+		entry->pid,
+		entry,
+		io,
+		func);
+
+	if (!ret)
+		return TRACE_TYPE_PARTIAL_LINE;
+
+	return TRACE_TYPE_HANDLED;
+}
+
+static void print_graph_raw_header(struct seq_file *s)
+{
+	if (trace_flags & TRACE_ITER_CONTEXT_INFO)
+		seq_printf(s, "#%6s %3s %20s ", "pid", "cpu", "duration");
+	else
+		seq_printf(s, "#");
+
+	seq_printf(s, "%6s %6s %6s %4s %7s %20s %4s %s\n",
+		"depth", "ldepth", "flags", "pc", "pid",
+		"entry", "i/o", "function");
+}
+
 static void print_lat_header(struct seq_file *s)
 {
 	static const char spaces[] = "                "	/* 16 spaces */
@@ -1098,6 +1161,11 @@ void print_graph_headers(struct seq_file *s)
 {
 	int lat = trace_flags & TRACE_ITER_LATENCY_FMT;
 
+	if (trace_flags & TRACE_ITER_RAW) {
+		print_graph_raw_header(s);
+		return;
+	}
+
 	if (lat)
 		print_lat_header(s);
 
@@ -1113,6 +1181,7 @@ void print_graph_headers(struct seq_file *s)
 		seq_printf(s, "|||||");
 	if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)
 		seq_printf(s, "  DURATION   ");
+
 	seq_printf(s, "               FUNCTION CALLS\n");
 
 	/* 2nd line */
@@ -1178,11 +1247,13 @@ void graph_trace_close(struct trace_iterator *iter)
 static struct trace_event graph_trace_entry_event = {
 	.type		= TRACE_GRAPH_ENT,
 	.trace		= print_graph_function_event,
+	.raw		= print_graph_raw,
 };
 
 static struct trace_event graph_trace_ret_event = {
 	.type		= TRACE_GRAPH_RET,
 	.trace		= print_graph_function_event,
+	.raw		= print_graph_raw,
 };
 
 static struct tracer graph_trace __read_mostly = {
-- 
1.6.6


* Re: [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers
  2010-03-10  7:51 [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
                   ` (3 preceding siblings ...)
  2010-03-10  7:51 ` [PATCHv2 4/4] tracing: raw output for graph tracer Jiri Olsa
@ 2010-03-22  8:48 ` Jiri Olsa
  2010-03-22 13:10   ` Steven Rostedt
  4 siblings, 1 reply; 9+ messages in thread
From: Jiri Olsa @ 2010-03-22  8:48 UTC (permalink / raw)
  To: rostedt; +Cc: linux-kernel

On Wed, Mar 10, 2010 at 08:51:06AM +0100, Jiri Olsa wrote:
> hi,
> 
> I'm sending a reworked version of the graph output support for
> the preemptirqsoff/preemptoff/irqsoff tracers.
> 
> I made the graph output an output event, so it can be shared
> among tracers - patch 1/4.
> 
> I also added raw trace output for the graph tracer. I have had this one
> around for a long time, and it has been quite handy for investigating
> graph tracer issues. (patch 4/4)
> 
> attached patches:
> - 1/4 adding ftrace events for graph tracer
> - 2/4 graph output support for irqsoff tracer
> - 3/4 graph output support for preemptirqsoff/preemptoff tracers
> - 4/4 raw output for graph tracer
> 
> v2 changes:
> - keeping the current function graph output format
> 
> 
> wbr,
> jirka
> ---
>  kernel/trace/trace.c                 |    2 +-
>  kernel/trace/trace.h                 |   15 ++--
>  kernel/trace/trace_functions_graph.c |  124 +++++++++++++++++++++----
>  kernel/trace/trace_irqsoff.c         |  174 ++++++++++++++++++++++++++++++++--
>  4 files changed, 283 insertions(+), 32 deletions(-)


hi,

any feedback on the new version?

thanks,
jirka

* Re: [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers
  2010-03-22  8:48 ` [PATCHv2 0/4] tracing: function graph output for preempt/irqs-off tracers Jiri Olsa
@ 2010-03-22 13:10   ` Steven Rostedt
  0 siblings, 0 replies; 9+ messages in thread
From: Steven Rostedt @ 2010-03-22 13:10 UTC (permalink / raw)
  To: Jiri Olsa; +Cc: linux-kernel

On Mon, 2010-03-22 at 09:48 +0100, Jiri Olsa wrote:
> On Wed, Mar 10, 2010 at 08:51:06AM +0100, Jiri Olsa wrote:

> 
> hi,
> 
> any feedback on the new version?

Sorry, I got backed up on other things. This was on my todo list; I'll
see if I can take a look at it today.

-- Steve

> 
> thanks,
> jirka



* Re: [PATCHv2 2/4] tracing: graph output support for irqsoff tracer
  2010-03-10  7:51 ` [PATCHv2 2/4] tracing: graph output support for irqsoff tracer Jiri Olsa
@ 2010-03-24  1:19   ` Steven Rostedt
  2010-03-24  7:16     ` Jiri Olsa
  0 siblings, 1 reply; 9+ messages in thread
From: Steven Rostedt @ 2010-03-24  1:19 UTC (permalink / raw)
  To: Jiri Olsa; +Cc: linux-kernel

On Wed, 2010-03-10 at 08:51 +0100, Jiri Olsa wrote:

> @@ -993,14 +991,6 @@ print_graph_comment(struct trace_seq *s,  struct trace_entry *ent,
>  		if (ret != TRACE_TYPE_HANDLED)
>  			return ret;
>  		break;
> -	default:
> -		event = ftrace_find_event(ent->type);
> -		if (!event)
> -			return TRACE_TYPE_UNHANDLED;
> -
> -		ret = event->trace(iter, sym_flags);
> -		if (ret != TRACE_TYPE_HANDLED)
> -			return ret;
>  	}
>  

This has the same effect as the problem with the first patch:

was:

 2)               |                                  _raw_spin_unlock_irqrestore() {
 2)               |                                  /* lock_release: ffffffff816969a8 random_read_wait.lock */
 2)   6.022 us    |                                  }
 2) + 27.632 us   |                                }
 2)   2.939 us    |                                kill_fasync();
 2)               |                                _raw_spin_unlock_irqrestore() {
 2)               |                                /* lock_release: ffffffff816967f0 &input_pool.lock */
 2)   5.471 us    |                                }
 2) + 56.740 us   |                              }
 2) + 88.357 us   |                            }
 2) + 94.026 us   |                          }


is now:

 2)               |                                  _raw_spin_unlock_irqrestore() {
          <idle>-0     [002]   411.798721: lock_release: ffffffff816969a8 random_read_wait.lock
 2)   6.022 us    |                                  }
 2) + 27.632 us   |                                }
 2)   2.939 us    |                                kill_fasync();
 2)               |                                _raw_spin_unlock_irqrestore() {
          <idle>-0     [002]   411.798741: lock_release: ffffffff816967f0 &input_pool.lock
 2)   5.471 us    |                                }
 2) + 56.740 us   |                              }
 2) + 88.357 us   |                            }
 2) + 94.026 us   |                          }


Please, before you apply any patch do the following:

(assuming debugfs is at /debug)

# echo 1 > /debug/tracing/events/enable
# echo function_graph > /debug/tracing/current_tracer
# cat /debug/tracing/trace

Examine what you see.

Then apply all your patches, and repeat the above. Make sure the
formatting is the same.

Thanks,

-- Steve


* Re: [PATCHv2 2/4] tracing: graph output support for irqsoff tracer
  2010-03-24  1:19   ` Steven Rostedt
@ 2010-03-24  7:16     ` Jiri Olsa
  0 siblings, 0 replies; 9+ messages in thread
From: Jiri Olsa @ 2010-03-24  7:16 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: linux-kernel

On Tue, Mar 23, 2010 at 09:19:16PM -0400, Steven Rostedt wrote:
> On Wed, 2010-03-10 at 08:51 +0100, Jiri Olsa wrote:
> 
> > @@ -993,14 +991,6 @@ print_graph_comment(struct trace_seq *s,  struct trace_entry *ent,
> >  		if (ret != TRACE_TYPE_HANDLED)
> >  			return ret;
> >  		break;
> > -	default:
> > -		event = ftrace_find_event(ent->type);
> > -		if (!event)
> > -			return TRACE_TYPE_UNHANDLED;
> > -
> > -		ret = event->trace(iter, sym_flags);
> > -		if (ret != TRACE_TYPE_HANDLED)
> > -			return ret;
> >  	}
> >  
> 
> This has the same effect as the problem with the first patch:
> 
> was:
> 
>  2)               |                                  _raw_spin_unlock_irqrestore() {
>  2)               |                                  /* lock_release: ffffffff816969a8 random_read_wait.lock */
>  2)   6.022 us    |                                  }
>  2) + 27.632 us   |                                }
>  2)   2.939 us    |                                kill_fasync();
>  2)               |                                _raw_spin_unlock_irqrestore() {
>  2)               |                                /* lock_release: ffffffff816967f0 &input_pool.lock */
>  2)   5.471 us    |                                }
>  2) + 56.740 us   |                              }
>  2) + 88.357 us   |                            }
>  2) + 94.026 us   |                          }
> 
> 
> is now:
> 
>  2)               |                                  _raw_spin_unlock_irqrestore() {
>           <idle>-0     [002]   411.798721: lock_release: ffffffff816969a8 random_read_wait.lock
>  2)   6.022 us    |                                  }
>  2) + 27.632 us   |                                }
>  2)   2.939 us    |                                kill_fasync();
>  2)               |                                _raw_spin_unlock_irqrestore() {
>           <idle>-0     [002]   411.798741: lock_release: ffffffff816967f0 &input_pool.lock
>  2)   5.471 us    |                                }
>  2) + 56.740 us   |                              }
>  2) + 88.357 us   |                            }
>  2) + 94.026 us   |                          }
> 
> 
> Please, before you apply any patch do the following:
> 
> (assuming debugfs is at /debug)
> 
> # echo 1 > /debug/tracing/events/enable
> # echo function_graph > /debug/tracing/current_tracer
> # cat /debug/tracing/trace
> 
> Examine what you see.
> 
> Then apply all your patches, and repeat the above. Make sure the
> formatting is the same.
hi,

shoot, looks like I forgot about the comments... I'll send a new version soon :)

thanks,
jirka
