* [PATCH -tip 1/4] tracing: change tracer's integer flags to bool
2012-10-02 8:13 [PATCH -tip 0/4] tracing: make a snapshot feature available from userspace Hiraku Toyooka
@ 2012-10-02 8:27 ` Hiraku Toyooka
2012-11-14 6:43 ` [tip:perf/core] tracing: Change " tip-bot for Hiraku Toyooka
2012-10-02 8:27 ` [PATCH -tip 2/4] tracing: add a resize function for making one buffer equivalent to the other buffer Hiraku Toyooka
` (2 subsequent siblings)
3 siblings, 1 reply; 9+ messages in thread
From: Hiraku Toyooka @ 2012-10-02 8:27 UTC (permalink / raw)
To: linux-kernel
Cc: yrl.pp-manager.tt, Hiraku Toyooka, Steven Rostedt,
Frederic Weisbecker, Ingo Molnar, linux-kernel
print_max and use_max_tr in struct tracer are "int" variables and
used like flags. This is wasteful, so change the type to "bool".
Signed-off-by: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-kernel@vger.kernel.org
---
kernel/trace/trace.h | 4 ++--
kernel/trace/trace_irqsoff.c | 12 ++++++------
kernel/trace/trace_sched_wakeup.c | 8 ++++----
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 593debe..0eb6a1a 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -285,8 +285,8 @@ struct tracer {
int (*set_flag)(u32 old_flags, u32 bit, int set);
struct tracer *next;
struct tracer_flags *flags;
- int print_max;
- int use_max_tr;
+ bool print_max;
+ bool use_max_tr;
};
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index d98ee82..5c823c1 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -604,7 +604,7 @@ static struct tracer irqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -614,7 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
@@ -637,7 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -647,7 +647,7 @@ static struct tracer preemptoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
@@ -672,7 +672,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -682,7 +682,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 02170c0..43f8abc 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -589,7 +589,7 @@ static struct tracer wakeup_tracer __read_mostly =
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -599,7 +599,7 @@ static struct tracer wakeup_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
@@ -610,7 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.wait_pipe = poll_wait_pipe,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -620,7 +620,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
__init static int init_wakeup_tracer(void)
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [tip:perf/core] tracing: Change tracer's integer flags to bool
2012-10-02 8:27 ` [PATCH -tip 1/4] tracing: change tracer's integer flags to bool Hiraku Toyooka
@ 2012-11-14 6:43 ` tip-bot for Hiraku Toyooka
0 siblings, 0 replies; 9+ messages in thread
From: tip-bot for Hiraku Toyooka @ 2012-11-14 6:43 UTC (permalink / raw)
To: linux-tip-commits
Cc: linux-kernel, hpa, mingo, rostedt, hiraku.toyooka.gu, tglx
Commit-ID: f43c738bfa8608424610e4fc1aef4d4644e2ce11
Gitweb: http://git.kernel.org/tip/f43c738bfa8608424610e4fc1aef4d4644e2ce11
Author: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
AuthorDate: Tue, 2 Oct 2012 17:27:10 +0900
Committer: Steven Rostedt <rostedt@goodmis.org>
CommitDate: Wed, 31 Oct 2012 16:45:25 -0400
tracing: Change tracer's integer flags to bool
print_max and use_max_tr in struct tracer are "int" variables and
used like flags. This is wasteful, so change the type to "bool".
Link: http://lkml.kernel.org/r/20121002082710.9807.86393.stgit@falsita
Signed-off-by: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
kernel/trace/trace.h | 4 ++--
kernel/trace/trace_irqsoff.c | 12 ++++++------
kernel/trace/trace_sched_wakeup.c | 8 ++++----
3 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index c15f528..c56a233 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -285,8 +285,8 @@ struct tracer {
int (*set_flag)(u32 old_flags, u32 bit, int set);
struct tracer *next;
struct tracer_flags *flags;
- int print_max;
- int use_max_tr;
+ bool print_max;
+ bool use_max_tr;
};
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 11edebd..5ffce7b 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -604,7 +604,7 @@ static struct tracer irqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -614,7 +614,7 @@ static struct tracer irqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_irqsoff(trace) register_tracer(&trace)
#else
@@ -637,7 +637,7 @@ static struct tracer preemptoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -647,7 +647,7 @@ static struct tracer preemptoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_preemptoff(trace) register_tracer(&trace)
#else
@@ -672,7 +672,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.reset = irqsoff_tracer_reset,
.start = irqsoff_tracer_start,
.stop = irqsoff_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = irqsoff_print_header,
.print_line = irqsoff_print_line,
.flags = &tracer_flags,
@@ -682,7 +682,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
#endif
.open = irqsoff_trace_open,
.close = irqsoff_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
# define register_preemptirqsoff(trace) register_tracer(&trace)
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 2f6af78..bc64fc1 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -589,7 +589,7 @@ static struct tracer wakeup_tracer __read_mostly =
.reset = wakeup_tracer_reset,
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -599,7 +599,7 @@ static struct tracer wakeup_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
static struct tracer wakeup_rt_tracer __read_mostly =
@@ -610,7 +610,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.start = wakeup_tracer_start,
.stop = wakeup_tracer_stop,
.wait_pipe = poll_wait_pipe,
- .print_max = 1,
+ .print_max = true,
.print_header = wakeup_print_header,
.print_line = wakeup_print_line,
.flags = &tracer_flags,
@@ -620,7 +620,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
#endif
.open = wakeup_trace_open,
.close = wakeup_trace_close,
- .use_max_tr = 1,
+ .use_max_tr = true,
};
__init static int init_wakeup_tracer(void)
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH -tip 2/4] tracing: add a resize function for making one buffer equivalent to the other buffer
2012-10-02 8:13 [PATCH -tip 0/4] tracing: make a snapshot feature available from userspace Hiraku Toyooka
2012-10-02 8:27 ` [PATCH -tip 1/4] tracing: change tracer's integer flags to bool Hiraku Toyooka
@ 2012-10-02 8:27 ` Hiraku Toyooka
2012-10-05 16:59 ` Steven Rostedt
2012-10-02 8:27 ` [PATCH -tip 3/4] tracing: make a snapshot feature available from userspace Hiraku Toyooka
2012-10-02 8:27 ` [PATCH -tip 4/4] tracing: add description of snapshot to Documentation/trace/ftrace.txt Hiraku Toyooka
3 siblings, 1 reply; 9+ messages in thread
From: Hiraku Toyooka @ 2012-10-02 8:27 UTC (permalink / raw)
To: linux-kernel
Cc: yrl.pp-manager.tt, Hiraku Toyooka, Steven Rostedt,
Frederic Weisbecker, Ingo Molnar, linux-kernel
Trace buffer size is now per-cpu, so that there are following two
patterns in resize of the buffers.
(1) resize per-cpu buffers to same given size
(2) resize per-cpu buffers to the other trace_array's buffer size
for each CPU (such as preparing the max_tr which is equivalent
to the global_trace's size)
__tracing_resize_ring_buffer() can be used for (1), and had
implemented (2) inside it for resetting the global_trace to the
original size.
(2) was also implemented in another place. So this patch assembles
them in a new function - resize_buffer_even().
Signed-off-by: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: linux-kernel@vger.kernel.org
---
kernel/trace/trace.c | 57 ++++++++++++++++++++++++++------------------------
1 files changed, 30 insertions(+), 27 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 08acf42..1e599e6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3017,6 +3017,31 @@ static void set_buffer_entries(struct trace_array *tr, unsigned long val)
tr->data[cpu]->entries = val;
}
+/* resize @tr's buffer to the size of @size_tr's entries */
+static int resize_buffer_even(struct trace_array *tr,
+ struct trace_array *size_tr, int cpu_id)
+{
+ int cpu, ret = 0;
+
+ if (cpu_id == RING_BUFFER_ALL_CPUS) {
+ for_each_tracing_cpu(cpu) {
+ ret = ring_buffer_resize(tr->buffer,
+ size_tr->data[cpu]->entries, cpu);
+ if (ret < 0)
+ break;
+ tr->data[cpu]->entries = size_tr->data[cpu]->entries;
+ }
+ } else {
+ ret = ring_buffer_resize(tr->buffer,
+ size_tr->data[cpu_id]->entries, cpu_id);
+ if (ret == 0)
+ tr->data[cpu_id]->entries =
+ size_tr->data[cpu_id]->entries;
+ }
+
+ return ret;
+}
+
static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
{
int ret;
@@ -3037,23 +3062,7 @@ static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
ret = ring_buffer_resize(max_tr.buffer, size, cpu);
if (ret < 0) {
- int r = 0;
-
- if (cpu == RING_BUFFER_ALL_CPUS) {
- int i;
- for_each_tracing_cpu(i) {
- r = ring_buffer_resize(global_trace.buffer,
- global_trace.data[i]->entries,
- i);
- if (r < 0)
- break;
- }
- } else {
- r = ring_buffer_resize(global_trace.buffer,
- global_trace.data[cpu]->entries,
- cpu);
- }
-
+ int r = resize_buffer_even(&global_trace, &global_trace, cpu);
if (r < 0) {
/*
* AARGH! We are left with different
@@ -3191,17 +3200,11 @@ static int tracing_set_tracer(const char *buf)
topts = create_trace_option_files(t);
if (t->use_max_tr) {
- int cpu;
/* we need to make per cpu buffer sizes equivalent */
- for_each_tracing_cpu(cpu) {
- ret = ring_buffer_resize(max_tr.buffer,
- global_trace.data[cpu]->entries,
- cpu);
- if (ret < 0)
- goto out;
- max_tr.data[cpu]->entries =
- global_trace.data[cpu]->entries;
- }
+ ret = resize_buffer_even(&max_tr, &global_trace,
+ RING_BUFFER_ALL_CPUS);
+ if (ret < 0)
+ goto out;
}
if (t->init) {
^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [PATCH -tip 2/4] tracing: add a resize function for making one buffer equivalent to the other buffer
2012-10-02 8:27 ` [PATCH -tip 2/4] tracing: add a resize function for making one buffer equivalent to the other buffer Hiraku Toyooka
@ 2012-10-05 16:59 ` Steven Rostedt
2012-10-17 2:47 ` Hiraku Toyooka
0 siblings, 1 reply; 9+ messages in thread
From: Steven Rostedt @ 2012-10-05 16:59 UTC (permalink / raw)
To: Hiraku Toyooka
Cc: linux-kernel, yrl.pp-manager.tt, Frederic Weisbecker, Ingo Molnar
On Tue, 2012-10-02 at 17:27 +0900, Hiraku Toyooka wrote:
> Trace buffer size is now per-cpu, so that there are following two
> patterns in resize of the buffers.
>
> (1) resize per-cpu buffers to same given size
> (2) resize per-cpu buffers to the other trace_array's buffer size
> for each CPU (such as preparing the max_tr which is equivalent
> to the global_trace's size)
>
> __tracing_resize_ring_buffer() can be used for (1), and had
> implemented (2) inside it for resetting the global_trace to the
> original size.
>
> (2) was also implemented in other place. So this patch assembles
> them in a new function - resize_buffer_even().
>
> Signed-off-by: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
> Cc: Steven Rostedt <rostedt@goodmis.org>
> Cc: Frederic Weisbecker <fweisbec@gmail.com>
> Cc: Ingo Molnar <mingo@redhat.com>
> Cc: linux-kernel@vger.kernel.org
> ---
>
> kernel/trace/trace.c | 57 ++++++++++++++++++++++++++------------------------
> 1 files changed, 30 insertions(+), 27 deletions(-)
>
> diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
> index 08acf42..1e599e6 100644
> --- a/kernel/trace/trace.c
> +++ b/kernel/trace/trace.c
> @@ -3017,6 +3017,31 @@ static void set_buffer_entries(struct trace_array *tr, unsigned long val)
> tr->data[cpu]->entries = val;
> }
>
> +/* resize @tr's buffer to the size of @size_tr's entries */
> +static int resize_buffer_even(struct trace_array *tr,
I don't mind this patch, but I just hate the name "resize_buffer_even".
What about "resize_buffer_duplicate_size"?
-- Steve
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH -tip 2/4] tracing: add a resize function for making one buffer equivalent to the other buffer
2012-10-05 16:59 ` Steven Rostedt
@ 2012-10-17 2:47 ` Hiraku Toyooka
2012-10-17 2:57 ` Steven Rostedt
0 siblings, 1 reply; 9+ messages in thread
From: Hiraku Toyooka @ 2012-10-17 2:47 UTC (permalink / raw)
To: Steven Rostedt
Cc: linux-kernel, yrl.pp-manager.tt, Frederic Weisbecker, Ingo Molnar
Steven,
I'm sorry for my late reply.
(I was outside the office for a business trip.)
(2012/10/06 1:59), Steven Rostedt wrote:
>> +/* resize @tr's buffer to the size of @size_tr's entries */
>> +static int resize_buffer_even(struct trace_array *tr,
>
> I don't mind this patch, but I just hate the name "resize_buffer_even".
> What about "resize_buffer_duplicate_size"?
>
O.K.
I'll send modified version of this patch later.
Regards,
Hiraku Toyooka
^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [PATCH -tip 2/4] tracing: add a resize function for making one buffer equivalent to the other buffer
2012-10-17 2:47 ` Hiraku Toyooka
@ 2012-10-17 2:57 ` Steven Rostedt
0 siblings, 0 replies; 9+ messages in thread
From: Steven Rostedt @ 2012-10-17 2:57 UTC (permalink / raw)
To: Hiraku Toyooka
Cc: linux-kernel, yrl.pp-manager.tt, Frederic Weisbecker, Ingo Molnar
On Wed, 2012-10-17 at 11:47 +0900, Hiraku Toyooka wrote:
> Steven,
>
> I'm sorry for my late reply.
> (I was outside the office for business trip.)
>
No problem. I'm currently on a business trip now :-)
-- Steve
^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH -tip 3/4] tracing: make a snapshot feature available from userspace
2012-10-02 8:13 [PATCH -tip 0/4] tracing: make a snapshot feature available from userspace Hiraku Toyooka
2012-10-02 8:27 ` [PATCH -tip 1/4] tracing: change tracer's integer flags to bool Hiraku Toyooka
2012-10-02 8:27 ` [PATCH -tip 2/4] tracing: add a resize function for making one buffer equivalent to the other buffer Hiraku Toyooka
@ 2012-10-02 8:27 ` Hiraku Toyooka
2012-10-02 8:27 ` [PATCH -tip 4/4] tracing: add description of snapshot to Documentation/trace/ftrace.txt Hiraku Toyooka
3 siblings, 0 replies; 9+ messages in thread
From: Hiraku Toyooka @ 2012-10-02 8:27 UTC (permalink / raw)
To: linux-kernel
Cc: yrl.pp-manager.tt, Hiraku Toyooka, Steven Rostedt,
Frederic Weisbecker, Ingo Molnar, Jiri Olsa, Li Zefan,
linux-kernel
Ftrace has a snapshot feature available from kernel space and
latency tracers (e.g. irqsoff) are using it. This patch enables
user applications to take a snapshot via debugfs.
Add following two debugfs files in "tracing" directory.
snapshot:
This is used to take a snapshot and to read the output of the
snapshot.
# echo 1 > snapshot
# cat snapshot
snapshot_allocate:
Echoing 1 to the "snapshot" allocates the max_tr buffer if
it is not allocated. So taking a snapshot may be delayed or
may fail because of memory allocation. To avoid that, this
file provides a means to pre-allocate (or free) the max_tr
buffer.
# echo 1 > snapshot_allocate
[...]
# echo 1 > snapshot
Signed-off-by: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: linux-kernel@vger.kernel.org
---
include/linux/ftrace_event.h | 3 +
kernel/trace/Kconfig | 11 ++
kernel/trace/trace.c | 190 +++++++++++++++++++++++++++++++++++++++---
kernel/trace/trace.h | 1
4 files changed, 193 insertions(+), 12 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 642928c..8c32c6e 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -84,6 +84,9 @@ struct trace_iterator {
long idx;
cpumask_var_t started;
+
+ /* it's true when current open file is snapshot */
+ bool snapshot;
};
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 4cea4f4..73d56d5 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -102,6 +102,17 @@ config RING_BUFFER_ALLOW_SWAP
Allow the use of ring_buffer_swap_cpu.
Adds a very slight overhead to tracing when enabled.
+config TRACER_SNAPSHOT
+ bool
+ default y
+ select TRACER_MAX_TRACE
+ help
+ Allow tracing users to take snapshot of the current buffer using the
+ ftrace interface, e.g.:
+
+ echo 1 > /sys/kernel/debug/tracing/snapshot
+ cat snapshot
+
# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1e599e6..dac5733 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -699,7 +699,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;
WARN_ON_ONCE(!irqs_disabled());
- if (!current_trace->use_max_tr) {
+ if (!current_trace->allocated_snapshot) {
WARN_ON_ONCE(1);
return;
}
@@ -729,7 +729,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return;
WARN_ON_ONCE(!irqs_disabled());
- if (!current_trace->use_max_tr) {
+ if (!current_trace->allocated_snapshot) {
WARN_ON_ONCE(1);
return;
}
@@ -1902,7 +1902,8 @@ static void *s_start(struct seq_file *m, loff_t *pos)
}
mutex_unlock(&trace_types_lock);
- atomic_inc(&trace_record_cmdline_disabled);
+ if (!iter->snapshot)
+ atomic_inc(&trace_record_cmdline_disabled);
if (*pos != iter->pos) {
iter->ent = NULL;
@@ -1941,7 +1942,8 @@ static void s_stop(struct seq_file *m, void *p)
{
struct trace_iterator *iter = m->private;
- atomic_dec(&trace_record_cmdline_disabled);
+ if (!iter->snapshot)
+ atomic_dec(&trace_record_cmdline_disabled);
trace_access_unlock(iter->cpu_file);
trace_event_read_unlock();
}
@@ -2375,7 +2377,7 @@ static const struct seq_operations tracer_seq_ops = {
};
static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file)
+__tracing_open(struct inode *inode, struct file *file, int snapshot)
{
long cpu_file = (long) inode->i_private;
struct trace_iterator *iter;
@@ -2408,10 +2410,11 @@ __tracing_open(struct inode *inode, struct file *file)
if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
goto fail;
- if (current_trace && current_trace->print_max)
+ if ((current_trace && current_trace->print_max) || snapshot)
iter->tr = &max_tr;
else
iter->tr = &global_trace;
+ iter->snapshot = !!snapshot;
iter->pos = -1;
mutex_init(&iter->mutex);
iter->cpu_file = cpu_file;
@@ -2424,8 +2427,9 @@ __tracing_open(struct inode *inode, struct file *file)
if (ring_buffer_overruns(iter->tr->buffer))
iter->iter_flags |= TRACE_FILE_ANNOTATE;
- /* stop the trace while dumping */
- tracing_stop();
+ /* stop the trace while dumping if we are not opening "snapshot" */
+ if (!iter->snapshot)
+ tracing_stop();
if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
for_each_tracing_cpu(cpu) {
@@ -2488,8 +2492,9 @@ static int tracing_release(struct inode *inode, struct file *file)
if (iter->trace && iter->trace->close)
iter->trace->close(iter);
- /* reenable tracing if it was previously enabled */
- tracing_start();
+ if (!iter->snapshot)
+ /* reenable tracing if it was previously enabled */
+ tracing_start();
mutex_unlock(&trace_types_lock);
mutex_destroy(&iter->mutex);
@@ -2517,7 +2522,7 @@ static int tracing_open(struct inode *inode, struct file *file)
}
if (file->f_mode & FMODE_READ) {
- iter = __tracing_open(inode, file);
+ iter = __tracing_open(inode, file, 0);
if (IS_ERR(iter))
ret = PTR_ERR(iter);
else if (trace_flags & TRACE_ITER_LATENCY_FMT)
@@ -3185,7 +3190,8 @@ static int tracing_set_tracer(const char *buf)
trace_branch_disable();
if (current_trace && current_trace->reset)
current_trace->reset(tr);
- if (current_trace && current_trace->use_max_tr) {
+ if (current_trace && current_trace->allocated_snapshot) {
+ tracing_reset_online_cpus(&max_tr);
/*
* We don't free the ring buffer. instead, resize it because
* The max_tr ring buffer has some state (e.g. ring->clock) and
@@ -3193,6 +3199,7 @@ static int tracing_set_tracer(const char *buf)
*/
ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
set_buffer_entries(&max_tr, 1);
+ current_trace->allocated_snapshot = false;
}
destroy_trace_option_files(topts);
@@ -3205,6 +3212,7 @@ static int tracing_set_tracer(const char *buf)
RING_BUFFER_ALL_CPUS);
if (ret < 0)
goto out;
+ t->allocated_snapshot = true;
}
if (t->init) {
@@ -4028,6 +4036,139 @@ static int tracing_clock_open(struct inode *inode, struct file *file)
return single_open(file, tracing_clock_show, NULL);
}
+#ifdef CONFIG_TRACER_SNAPSHOT
+static int tracing_snapshot_open(struct inode *inode, struct file *file)
+{
+ struct trace_iterator *iter;
+ int ret = 0;
+
+ if (file->f_mode & FMODE_READ) {
+ iter = __tracing_open(inode, file, 1);
+ if (IS_ERR(iter))
+ ret = PTR_ERR(iter);
+ }
+ return ret;
+}
+
+static ssize_t tracing_snapshot_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ ssize_t ret = 0;
+
+ mutex_lock(&trace_types_lock);
+ if (current_trace && current_trace->use_max_tr)
+ ret = -EBUSY;
+ mutex_unlock(&trace_types_lock);
+ if (ret < 0)
+ return ret;
+
+ ret = seq_read(filp, ubuf, cnt, ppos);
+
+ return ret;
+}
+
+static ssize_t
+tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
+ loff_t *ppos)
+{
+ unsigned long val = 0;
+ int ret;
+
+ ret = tracing_update_buffers();
+ if (ret < 0)
+ return ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&trace_types_lock);
+
+ if (current_trace->use_max_tr) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (!current_trace->allocated_snapshot) {
+ /* allocate spare buffer for snapshot */
+ ret = resize_buffer_even(&max_tr, &global_trace,
+ RING_BUFFER_ALL_CPUS);
+ if (ret < 0)
+ goto out;
+ current_trace->allocated_snapshot = true;
+ }
+
+ if (val) {
+ local_irq_disable();
+ update_max_tr(&global_trace, current, smp_processor_id());
+ local_irq_enable();
+ } else
+ tracing_reset_online_cpus(&max_tr);
+
+ *ppos += cnt;
+ ret = cnt;
+out:
+ mutex_unlock(&trace_types_lock);
+ return ret;
+}
+
+static ssize_t
+tracing_snapshot_allocate_read(struct file *filp, char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int r;
+
+ r = sprintf(buf, "%d\n", current_trace->allocated_snapshot);
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t
+tracing_snapshot_allocate_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ unsigned long val;
+ int ret;
+
+ ret = tracing_update_buffers();
+ if (ret < 0)
+ return ret;
+
+ ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
+ if (ret)
+ return ret;
+
+ mutex_lock(&trace_types_lock);
+
+ if (current_trace->use_max_tr) {
+ ret = -EBUSY;
+ goto out;
+ }
+
+ if (current_trace->allocated_snapshot == !val) {
+ if (val) {
+ /* allocate spare buffer for snapshot */
+ ret = resize_buffer_even(&max_tr, &global_trace,
+ RING_BUFFER_ALL_CPUS);
+ if (ret < 0)
+ goto out;
+ } else {
+ tracing_reset_online_cpus(&max_tr);
+ ring_buffer_resize(max_tr.buffer, 1,
+ RING_BUFFER_ALL_CPUS);
+ set_buffer_entries(&max_tr, 1);
+ }
+ current_trace->allocated_snapshot = !!val;
+ }
+
+ *ppos += cnt;
+ ret = cnt;
+out:
+ mutex_unlock(&trace_types_lock);
+ return ret;
+}
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
static const struct file_operations tracing_max_lat_fops = {
.open = tracing_open_generic,
.read = tracing_max_lat_read,
@@ -4091,6 +4232,23 @@ static const struct file_operations trace_clock_fops = {
.write = tracing_clock_write,
};
+#ifdef CONFIG_TRACER_SNAPSHOT
+static const struct file_operations snapshot_fops = {
+ .open = tracing_snapshot_open,
+ .read = tracing_snapshot_read,
+ .write = tracing_snapshot_write,
+ .llseek = tracing_seek,
+ .release = tracing_release,
+};
+
+static const struct file_operations snapshot_allocate_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_snapshot_allocate_read,
+ .write = tracing_snapshot_allocate_write,
+ .llseek = generic_file_llseek,
+};
+#endif /* CONFIG_TRACER_SNAPSHOT */
+
struct ftrace_buffer_info {
struct trace_array *tr;
void *spare;
@@ -4877,6 +5035,14 @@ static __init int tracer_init_debugfs(void)
&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif
+#ifdef CONFIG_TRACER_SNAPSHOT
+ trace_create_file("snapshot", 0644, d_tracer,
+ (void *) TRACE_PIPE_ALL_CPU, &snapshot_fops);
+
+ trace_create_file("snapshot_allocate", 0644, d_tracer,
+ NULL, &snapshot_allocate_fops);
+#endif
+
create_trace_options_dir();
for_each_tracing_cpu(cpu)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 0eb6a1a..66a8631 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -287,6 +287,7 @@ struct tracer {
struct tracer_flags *flags;
bool print_max;
bool use_max_tr;
+ bool allocated_snapshot;
};
^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH -tip 4/4] tracing: add description of snapshot to Documentation/trace/ftrace.txt
2012-10-02 8:13 [PATCH -tip 0/4] tracing: make a snapshot feature available from userspace Hiraku Toyooka
` (2 preceding siblings ...)
2012-10-02 8:27 ` [PATCH -tip 3/4] tracing: make a snapshot feature available from userspace Hiraku Toyooka
@ 2012-10-02 8:27 ` Hiraku Toyooka
3 siblings, 0 replies; 9+ messages in thread
From: Hiraku Toyooka @ 2012-10-02 8:27 UTC (permalink / raw)
To: linux-kernel
Cc: yrl.pp-manager.tt, Hiraku Toyooka, Steven Rostedt,
Frederic Weisbecker, Ingo Molnar, Rob Landley, linux-doc,
linux-kernel
This patch adds snapshot description in ftrace documentation.
This description includes what the snapshot is and how to use it.
Signed-off-by: Hiraku Toyooka <hiraku.toyooka.gu@hitachi.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Rob Landley <rob@landley.net>
Cc: linux-doc@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
---
Documentation/trace/ftrace.txt | 97 ++++++++++++++++++++++++++++++++++++++++
1 files changed, 97 insertions(+), 0 deletions(-)
diff --git a/Documentation/trace/ftrace.txt b/Documentation/trace/ftrace.txt
index 6f51fed..68ac294 100644
--- a/Documentation/trace/ftrace.txt
+++ b/Documentation/trace/ftrace.txt
@@ -1842,6 +1842,103 @@ an error.
# cat buffer_size_kb
85
+Snapshot
+--------
+CONFIG_TRACER_SNAPSHOT makes a generic snapshot feature
+available to all non latency tracers. (Latency tracers which
+record max latency, such as "irqsoff" or "wakeup", can't use
+this feature, since those are already using the snapshot
+mechanism internally.)
+
+Snapshot preserves a trace buffer at a particular point in time
+without stopping tracing. Ftrace swaps the current buffer with a
+spare buffer, and tracing continues in the (previous) spare
+buffer.
+
+The following debugfs files in "tracing" are related to this
+feature:
+
+ snapshot:
+
+ This is used to take a snapshot and to read the output
+ of the snapshot. Echo 1 into this file to allocate a
+ spare buffer and to take a snapshot, then read the
+ snapshot from the file in the same format as "trace"
+ (described above in the section "The File System"). Both
+ reads snapshot and tracing are executable in parallel.
+ Echoing 0 erases the snapshot contents.
+
+ snapshot_allocate:
+
+ This is used to pre-allocate or free a spare buffer.
+ Echo 1 into this file to pre-allocate a spare buffer if
+ you don't want to fail in the next snapshot due to
+ memory allocation failure, or if you don't want to lose
+ older trace data while allocating buffer. Echo 0 to free
+ the spare buffer when the snapshot becomes unnecessary.
+ If you take the next snapshot again, you can reuse the
+ buffer, then just erase the snapshot contents by echoing
+ 1 into the "snapshot" file, instead of freeing the
+ buffer.
+
+ Reads from this file display whether the spare buffer is
+ allocated. When current_tracer is changed, the allocated
+ spare buffer is freed. If the next tracer is one of the
+ latency tracers, this value turns into 1 and can't be
+ changed, or else the value starts with 0.
+
+
+Here is an example of using the snapshot feature.
+
+ # echo 1 > snapshot_allocate (if you want to pre-allocate the spare buffer)
+ # echo 1 > events/sched/enable
+ # echo 1 > snapshot
+ # cat snapshot
+# tracer: nop
+#
+# entries-in-buffer/entries-written: 71/71 #P:8
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ <idle>-0 [005] d... 2440.603828: sched_switch: prev_comm=swapper/5 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2242 next_prio=120
+ sleep-2242 [005] d... 2440.603846: sched_switch: prev_comm=snapshot-test-2 prev_pid=2242 prev_prio=120 prev_state=R ==> next_comm=kworker/5:1 next_pid=60 next_prio=120
+[...]
+ <idle>-0 [002] d... 2440.707230: sched_switch: prev_comm=swapper/2 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2229 next_prio=120
+ # cat trace
+# tracer: nop
+#
+# entries-in-buffer/entries-written: 77/77 #P:8
+#
+# _-----=> irqs-off
+# / _----=> need-resched
+# | / _---=> hardirq/softirq
+# || / _--=> preempt-depth
+# ||| / delay
+# TASK-PID CPU# |||| TIMESTAMP FUNCTION
+# | | | |||| | |
+ <idle>-0 [007] d... 2440.707395: sched_switch: prev_comm=swapper/7 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=snapshot-test-2 next_pid=2243 next_prio=120
+ snapshot-test-2-2229 [002] d... 2440.707438: sched_switch: prev_comm=snapshot-test-2 prev_pid=2229 prev_prio=120 prev_state=S ==> next_comm=swapper/2 next_pid=0 next_prio=120
+[...]
+
+
+If you try to use this snapshot feature when current tracer is
+one of the latency tracers, you will get the following results.
+
+ # echo wakeup > current_tracer
+ # cat snapshot_allocate
+1
+ # echo 1 > snapshot_allocate
+bash: echo: write error: Device or resource busy
+ # echo 1 > snapshot
+bash: echo: write error: Device or resource busy
+ # cat snapshot
+cat: snapshot: Device or resource busy
+
-----------
More details can be found in the source code, in the
^ permalink raw reply related [flat|nested] 9+ messages in thread