All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines
@ 2019-03-15 17:45 Douglas Anderson
  2019-03-15 18:41 ` Steven Rostedt
  0 siblings, 1 reply; 8+ messages in thread
From: Douglas Anderson @ 2019-03-15 17:45 UTC (permalink / raw)
  To: Steven Rostedt, Ingo Molnar, Jason Wessel, Daniel Thompson
  Cc: kgdb-bugreport, Brian Norris, Douglas Anderson, linux-kernel

The 'ftdump' command in kdb is currently a bit of a last resort, at
least if you have lots of traces turned on.  It's going to print a
whole boatload of lines out your serial port which is probably running
at 115200.  This could easily take many, many minutes.

Usually you're most interested in what's at the _end_ of the ftrace
buffer, AKA what happened most recently.  That means you've got to
wait the full time for the dump.  The 'ftdump' command does attempt to
help you a little bit by allowing you to skip a fixed number of lines.
Unfortunately it provides no way for you to know how many lines you
should skip.

Let's do similar to python and allow you to use a negative number to
indicate that you want to skip all lines except the last few.  This
allows you to quickly see what you want.

Signed-off-by: Douglas Anderson <dianders@chromium.org>
---

Changes in v3:
- Optimize counting as per Steven Rostedt.
- Down to 1 patch since patch #1 from v2 landed.

 kernel/trace/trace_kdb.c | 38 +++++++++++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 9 deletions(-)

diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index 810d78a8d14c..cc6ca6c0d6de 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -17,7 +17,7 @@
 #include "trace.h"
 #include "trace_output.h"
 
-static void ftrace_dump_buf(int skip_lines, long cpu_file)
+static int ftrace_dump_buf(int skip_lines, long cpu_file, bool quiet)
 {
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
@@ -39,8 +39,6 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 	/* don't look at user memory in panic mode */
 	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
-	kdb_printf("Dumping ftrace buffer:\n");
-
 	/* reset all but tr, trace, and overruns */
 	memset(&iter.seq, 0,
 		   sizeof(struct trace_iterator) -
@@ -55,6 +53,9 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 						 cpu, GFP_ATOMIC);
 			ring_buffer_read_start(iter.buffer_iter[cpu]);
 			tracing_iter_reset(&iter, cpu);
+
+			cnt +=
+			ring_buffer_entries_cpu(iter.trace_buffer->buffer, cpu);
 		}
 	} else {
 		iter.cpu_file = cpu_file;
@@ -63,13 +64,21 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 						 cpu_file, GFP_ATOMIC);
 		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
 		tracing_iter_reset(&iter, cpu_file);
+
+		cnt += ring_buffer_entries_cpu(iter.trace_buffer->buffer,
+					       cpu_file);
 	}
 
-	while (trace_find_next_entry_inc(&iter)) {
-		if (!cnt)
-			kdb_printf("---------------------------------\n");
-		cnt++;
+	if (quiet)
+		goto out;
+
+	kdb_printf("Dumping ftrace buffer (skipping %d lines):\n",
+		   skip_lines);
+
+	if (cnt)
+		kdb_printf("---------------------------------\n");
 
+	while (trace_find_next_entry_inc(&iter)) {
 		if (!skip_lines) {
 			print_trace_line(&iter);
 			trace_printk_seq(&iter.seq);
@@ -99,6 +108,8 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
 			iter.buffer_iter[cpu] = NULL;
 		}
 	}
+
+	return cnt;
 }
 
 /*
@@ -109,6 +120,7 @@ static int kdb_ftdump(int argc, const char **argv)
 	int skip_lines = 0;
 	long cpu_file;
 	char *cp;
+	int cnt;
 
 	if (argc > 2)
 		return KDB_ARGCOUNT;
@@ -129,7 +141,14 @@ static int kdb_ftdump(int argc, const char **argv)
 	}
 
 	kdb_trap_printk++;
-	ftrace_dump_buf(skip_lines, cpu_file);
+
+	/* A negative skip_lines means skip all but the last lines */
+	if (skip_lines < 0) {
+		cnt = ftrace_dump_buf(0, cpu_file, true);
+		skip_lines = max(cnt + skip_lines, 0);
+	}
+
+	ftrace_dump_buf(skip_lines, cpu_file, false);
 	kdb_trap_printk--;
 
 	return 0;
@@ -138,7 +157,8 @@ static int kdb_ftdump(int argc, const char **argv)
 static __init int kdb_ftrace_register(void)
 {
 	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
-			    "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
+			    "Dump ftrace log; -skip dumps last #lines", 0,
+			    KDB_ENABLE_ALWAYS_SAFE);
 	return 0;
 }
 
-- 
2.21.0.360.g471c308f928-goog


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines
  2019-03-15 17:45 [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines Douglas Anderson
@ 2019-03-15 18:41 ` Steven Rostedt
  2019-03-15 19:55   ` Doug Anderson
  0 siblings, 1 reply; 8+ messages in thread
From: Steven Rostedt @ 2019-03-15 18:41 UTC (permalink / raw)
  To: Douglas Anderson
  Cc: Ingo Molnar, Jason Wessel, Daniel Thompson, kgdb-bugreport,
	Brian Norris, linux-kernel

On Fri, 15 Mar 2019 10:45:28 -0700
Douglas Anderson <dianders@chromium.org> wrote:


> Changes in v3:
> - Optimize counting as per Steven Rostedt.
> - Down to 1 patch since patch #1 from v2 landed.
> 
>  kernel/trace/trace_kdb.c | 38 +++++++++++++++++++++++++++++---------
>  1 file changed, 29 insertions(+), 9 deletions(-)
> 
> diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
> index 810d78a8d14c..cc6ca6c0d6de 100644
> --- a/kernel/trace/trace_kdb.c
> +++ b/kernel/trace/trace_kdb.c
> @@ -17,7 +17,7 @@
>  #include "trace.h"
>  #include "trace_output.h"
>  
> -static void ftrace_dump_buf(int skip_lines, long cpu_file)
> +static int ftrace_dump_buf(int skip_lines, long cpu_file, bool quiet)

Not quite what I wanted you to do.


>  {
>  	/* use static because iter can be a bit big for the stack */
>  	static struct trace_iterator iter;
> @@ -39,8 +39,6 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
>  	/* don't look at user memory in panic mode */
>  	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
>  
> -	kdb_printf("Dumping ftrace buffer:\n");
> -
>  	/* reset all but tr, trace, and overruns */
>  	memset(&iter.seq, 0,
>  		   sizeof(struct trace_iterator) -
> @@ -55,6 +53,9 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
>  						 cpu, GFP_ATOMIC);
>  			ring_buffer_read_start(iter.buffer_iter[cpu]);
>  			tracing_iter_reset(&iter, cpu);
> +
> +			cnt +=
> +			ring_buffer_entries_cpu(iter.trace_buffer->buffer, cpu);

BTW, 80 char limit is a guideline not a rule. Don't break a line to
make it look worse. But that doesn't matter because this isn't the
change I meant.

>  		}
>  	} else {
>  		iter.cpu_file = cpu_file;
> @@ -63,13 +64,21 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
>  						 cpu_file, GFP_ATOMIC);
>  		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
>  		tracing_iter_reset(&iter, cpu_file);
> +
> +		cnt += ring_buffer_entries_cpu(iter.trace_buffer->buffer,
> +					       cpu_file);
>  	}
>  
> -	while (trace_find_next_entry_inc(&iter)) {
> -		if (!cnt)
> -			kdb_printf("---------------------------------\n");
> -		cnt++;
> +	if (quiet)
> +		goto out;
> +
> +	kdb_printf("Dumping ftrace buffer (skipping %d lines):\n",
> +		   skip_lines);
> +
> +	if (cnt)
> +		kdb_printf("---------------------------------\n");
>  
> +	while (trace_find_next_entry_inc(&iter)) {
>  		if (!skip_lines) {
>  			print_trace_line(&iter);
>  			trace_printk_seq(&iter.seq);
> @@ -99,6 +108,8 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
>  			iter.buffer_iter[cpu] = NULL;
>  		}
>  	}
> +
> +	return cnt;
>  }
>  
>  /*
> @@ -109,6 +120,7 @@ static int kdb_ftdump(int argc, const char **argv)
>  	int skip_lines = 0;
>  	long cpu_file;
>  	char *cp;
> +	int cnt;
>  
>  	if (argc > 2)
>  		return KDB_ARGCOUNT;
> @@ -129,7 +141,14 @@ static int kdb_ftdump(int argc, const char **argv)
>  	}
>  
>  	kdb_trap_printk++;
> -	ftrace_dump_buf(skip_lines, cpu_file);
> +
> +	/* A negative skip_lines means skip all but the last lines */
> +	if (skip_lines < 0) {
> +		cnt = ftrace_dump_buf(0, cpu_file, true);

Actually, you can add a function in trace.c that exposes
get_total_entries:

unsigned long trace_total_entries(struct trace_array *tr)
{
	unsigned long total, entries;

	if (!tr)
		tr = &global_trace;

	get_total_entries(tr->trace_buffer, &total, &entries);

	return entries;
}

and then do:
		cnt = trace_total_entries(NULL);

Don't modify ftrace_dump_buf()

-- Steve

> +		skip_lines = max(cnt + skip_lines, 0);
> +	}
> +
> +	ftrace_dump_buf(skip_lines, cpu_file, false);
>  	kdb_trap_printk--;
>  
>  	return 0;
> @@ -138,7 +157,8 @@ static int kdb_ftdump(int argc, const char **argv)
>  static __init int kdb_ftrace_register(void)
>  {
>  	kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
> -			    "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
> +			    "Dump ftrace log; -skip dumps last #lines", 0,
> +			    KDB_ENABLE_ALWAYS_SAFE);
>  	return 0;
>  }
>  


^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines
  2019-03-15 18:41 ` Steven Rostedt
@ 2019-03-15 19:55   ` Doug Anderson
  2019-03-15 20:12     ` Steven Rostedt
  0 siblings, 1 reply; 8+ messages in thread
From: Doug Anderson @ 2019-03-15 19:55 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: Ingo Molnar, Jason Wessel, Daniel Thompson, kgdb-bugreport,
	Brian Norris, LKML

Hi,

On Fri, Mar 15, 2019 at 11:41 AM Steven Rostedt <rostedt@goodmis.org> wrote:
>
> On Fri, 15 Mar 2019 10:45:28 -0700
> Douglas Anderson <dianders@chromium.org> wrote:
>
>
> > Changes in v3:
> > - Optimize counting as per Steven Rostedt.
> > - Down to 1 patch since patch #1 from v2 landed.
> >
> >  kernel/trace/trace_kdb.c | 38 +++++++++++++++++++++++++++++---------
> >  1 file changed, 29 insertions(+), 9 deletions(-)
> >
> > diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
> > index 810d78a8d14c..cc6ca6c0d6de 100644
> > --- a/kernel/trace/trace_kdb.c
> > +++ b/kernel/trace/trace_kdb.c
> > @@ -17,7 +17,7 @@
> >  #include "trace.h"
> >  #include "trace_output.h"
> >
> > -static void ftrace_dump_buf(int skip_lines, long cpu_file)
> > +static int ftrace_dump_buf(int skip_lines, long cpu_file, bool quiet)
>
> Not quite what I wanted you to do.
>
>
> >  {
> >       /* use static because iter can be a bit big for the stack */
> >       static struct trace_iterator iter;
> > @@ -39,8 +39,6 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
> >       /* don't look at user memory in panic mode */
> >       tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
> >
> > -     kdb_printf("Dumping ftrace buffer:\n");
> > -
> >       /* reset all but tr, trace, and overruns */
> >       memset(&iter.seq, 0,
> >                  sizeof(struct trace_iterator) -
> > @@ -55,6 +53,9 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
> >                                                cpu, GFP_ATOMIC);
> >                       ring_buffer_read_start(iter.buffer_iter[cpu]);
> >                       tracing_iter_reset(&iter, cpu);
> > +
> > +                     cnt +=
> > +                     ring_buffer_entries_cpu(iter.trace_buffer->buffer, cpu);
>
> BTW, 80 char limit is a guideline not a rule. Don't break a line to
> make it look worse. But that doesn't matter because this isn't the
> change I meant.

Yeah, I didn't like the look of it either but decided to match the
style of the function, which uses nearly the same solution for word
wrapping a few lines above.  :-)  I'm happy to just break 80
characters though...


> >               }
> >       } else {
> >               iter.cpu_file = cpu_file;
> > @@ -63,13 +64,21 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
> >                                                cpu_file, GFP_ATOMIC);
> >               ring_buffer_read_start(iter.buffer_iter[cpu_file]);
> >               tracing_iter_reset(&iter, cpu_file);
> > +
> > +             cnt += ring_buffer_entries_cpu(iter.trace_buffer->buffer,
> > +                                            cpu_file);
> >       }
> >
> > -     while (trace_find_next_entry_inc(&iter)) {
> > -             if (!cnt)
> > -                     kdb_printf("---------------------------------\n");
> > -             cnt++;
> > +     if (quiet)
> > +             goto out;
> > +
> > +     kdb_printf("Dumping ftrace buffer (skipping %d lines):\n",
> > +                skip_lines);
> > +
> > +     if (cnt)
> > +             kdb_printf("---------------------------------\n");
> >
> > +     while (trace_find_next_entry_inc(&iter)) {
> >               if (!skip_lines) {
> >                       print_trace_line(&iter);
> >                       trace_printk_seq(&iter.seq);
> > @@ -99,6 +108,8 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
> >                       iter.buffer_iter[cpu] = NULL;
> >               }
> >       }
> > +
> > +     return cnt;
> >  }
> >
> >  /*
> > @@ -109,6 +120,7 @@ static int kdb_ftdump(int argc, const char **argv)
> >       int skip_lines = 0;
> >       long cpu_file;
> >       char *cp;
> > +     int cnt;
> >
> >       if (argc > 2)
> >               return KDB_ARGCOUNT;
> > @@ -129,7 +141,14 @@ static int kdb_ftdump(int argc, const char **argv)
> >       }
> >
> >       kdb_trap_printk++;
> > -     ftrace_dump_buf(skip_lines, cpu_file);
> > +
> > +     /* A negative skip_lines means skip all but the last lines */
> > +     if (skip_lines < 0) {
> > +             cnt = ftrace_dump_buf(0, cpu_file, true);
>
> Actually, you can add a function in trace.c that exposes
> get_total_entries:
>
> unsigned long trace_total_entries(struct trace_array *tr)
> {
>         unsigned long total, entries;
>
>         if (!tr)
>                 tr = &global_trace;
>
>         get_total_entries(tr->trace_buffer, &total, &entries);
>
>         return entries;
> }
>
> and then do:
>                 cnt = trace_total_entries(NULL);

OK.  I guess I'll need to figure out how to add an argument to limit
it to just one CPU too since the kdb command allows you to specify a
single CPU or all CPUs.  I think the best way would mean adding an
argument to get_total_entries().  ...or I can just change the kdb
command to not allow specifying a CPU?  Which do you prefer?  I
personally haven't ever used the feature to just print the ftrace
buffer for a certain CPU so I'd be OK removing it.


> Don't modify ftrace_dump_buf()

I still kinda prefer modifying ftrace_dump_buf() just because it's
pretty important that the "count" we come up with match pretty exactly
the count that ftrace_dump_buf() will come up with.  My worry probably
comes from my lack of experience with the internals of ftrace but, for
instance, I see things like "tr->trace_flags &=
~TRACE_ITER_SYM_USEROBJ" in ftrace_dump_buf() and I worry that it will
affect the count.  ...but if you tell me that I need not worry about
things like that then I won't.  :-)


Thanks for your help!

-Doug

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines
  2019-03-15 19:55   ` Doug Anderson
@ 2019-03-15 20:12     ` Steven Rostedt
  2019-03-15 20:54       ` Doug Anderson
  0 siblings, 1 reply; 8+ messages in thread
From: Steven Rostedt @ 2019-03-15 20:12 UTC (permalink / raw)
  To: Doug Anderson
  Cc: Ingo Molnar, Jason Wessel, Daniel Thompson, kgdb-bugreport,
	Brian Norris, LKML

On Fri, 15 Mar 2019 12:55:31 -0700
Doug Anderson <dianders@chromium.org> wrote:

> >
> > Actually, you can add a function in trace.c that exposes
> > get_total_entries:
> >
> > unsigned long trace_total_entries(struct trace_array *tr)
> > {
> >         unsigned long total, entries;
> >
> >         if (!tr)
> >                 tr = &global_trace;
> >
> >         get_total_entries(tr->trace_buffer, &total, &entries);
> >
> >         return entries;
> > }
> >
> > and then do:
> >                 cnt = trace_total_entries(NULL);  
> 
> OK.  I guess I'll need to figure out how to add an argument to limit
> it to just one CPU too since the kdb command allows you to specify a
> single CPU or all CPUs.  I think the best way would mean adding an
> argument to get_total_entries().  ...or I can just change the kdb
> command to not allow specifying a CPU?  Which do you prefer?  I
> personally haven't ever used the feature to just print the ftrace
> buffer for a certain CPU so I'd be OK removing it.

I would make a get_total_entries_cpu() that contains the guts of
get_total_entries() which then calls get_total_entries_cpu()

static void
get_total_entries(struct trace_buffer *buf,
		  unsigned long *total, unsigned long *entries)
{
	unsigned long t, e;
	int cpu;

	*total = 0;
	*entries = 0;

	for_each_tracing_cpu(cpu) {
		get_total_entries_cpu(buf, &t, &e, cpu);
		*total += t;
		*entries += e;
	}
}
	
> 
> 
> > Don't modify ftrace_dump_buf()  
> 
> I still kinda prefer modifying ftrace_dump_buf() just because it's
> pretty important that the "count" we come up with match pretty exactly
> the count that ftrace_dump_buf() will come up with.  My worry probably
> comes from my lack of experience with the internals of ftrace but, for
> instance, I see things like "tr->trace_flags &=
> ~TRACE_ITER_SYM_USEROBJ" in ftrace_dump_buf() and I worry that it will
> affect the count.  ...but if you tell me that I need not worry about
> things like that then I won't.  :-)

Hmm, actually your method still won't work, because you are only
counting entries, not lines. The stack dumps are considered a single
line but will print multiple lines.

Not only that, perhaps you should break apart ftrace_dump_buf(),
because calling it twice (or doing what I suggested) won't stop tracing
in between, and the size of the buffer might change between the two
calls.

You need to move out:

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}


and

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

to disable tracing while you do this.

The get_total_entries() is the faster approach to get the count, but in
either case, the count should end up the same.

-- Steve

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines
  2019-03-15 20:12     ` Steven Rostedt
@ 2019-03-15 20:54       ` Doug Anderson
  2019-03-15 21:08         ` Steven Rostedt
  0 siblings, 1 reply; 8+ messages in thread
From: Doug Anderson @ 2019-03-15 20:54 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: Ingo Molnar, Jason Wessel, Daniel Thompson, kgdb-bugreport,
	Brian Norris, LKML

Hi,

On Fri, Mar 15, 2019 at 1:12 PM Steven Rostedt <rostedt@goodmis.org> wrote:
>
> On Fri, 15 Mar 2019 12:55:31 -0700
> Doug Anderson <dianders@chromium.org> wrote:
>
> > >
> > > Actually, you can add a function in trace.c that exposes
> > > get_total_entries:
> > >
> > > unsigned long trace_total_entries(struct trace_array *tr)
> > > {
> > >         unsigned long total, entries;
> > >
> > >         if (!tr)
> > >                 tr = &global_trace;
> > >
> > >         get_total_entries(tr->trace_buffer, &total, &entries);
> > >
> > >         return entries;
> > > }
> > >
> > > and then do:
> > >                 cnt = trace_total_entries(NULL);
> >
> > OK.  I guess I'll need to figure out how to add an argument to limit
> > it to just one CPU too since the kdb command allows you to specify a
> > single CPU or all CPUs.  I think the best way would mean adding an
> > argument to get_total_entries().  ...or I can just change the kdb
> > command to not allow specifying a CPU?  Which do you prefer?  I
> > personally haven't ever used the feature to just print the ftrace
> > buffer for a certain CPU so I'd be OK removing it.
>
> I would make a get_total_entries_cpu() that contains the guts of
> get_total_entries() which then calls get_total_entries_cpu()
>
> static void
> get_total_entries(struct trace_buffer *buf,
>                   unsigned long *total, unsigned long *entries)
> {
>         unsigned long t, e;
>         int cpu;
>
>         *total = 0;
>         *entries = 0;
>
>         for_each_tracing_cpu(cpu) {
>                 get_total_entries_cpu(buf, &t, &e, cpu);
>                 *total += t;
>                 *entries += e;
>         }
> }
>
> >
> >
> > > Don't modify ftrace_dump_buf()
> >
> > I still kinda prefer modifying ftrace_dump_buf() just because it's
> > pretty important that the "count" we come up with match pretty exactly
> > the count that ftrace_dump_buf() will come up with.  My worry probably
> > comes from my lack of experience with the internals of ftrace but, for
> > instance, I see things like "tr->trace_flags &=
> > ~TRACE_ITER_SYM_USEROBJ" in ftrace_dump_buf() and I worry that it will
> > affect the count.  ...but if you tell me that I need not worry about
> > things like that then I won't.  :-)
>
> Hmm, actually your method still wont work, because you are only
> counting entries not lines. The stack dumps are considered a single
> line but will print multiple lines.

LOL.  Back to v1 then?  v1 of the patch [1] was definitely consistent
even if it was very slow.  Specifically whatever was being counted by
ftrace_dump_buf() (entries or lines or something in between) was
definitely used to figure out how many to skip.


> Not only that, perhaps you should break apart ftrace_dump_buf(),
> because calling it twice (or doing what I suggested), wont stop tracing
> in between, and the size of the buffer might change between the two
> calls.
>
> You need to move out:
>
>         for_each_tracing_cpu(cpu) {
>                 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
>         }
>
>
> and
>
>         for_each_tracing_cpu(cpu) {
>                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
>         }
>
> to disable tracing while you do this.

Happy to do this in a v4.  I think it's very unlikely to matter
because we're in kdb and thus all the other CPUs are stopped and
interrupts are disabled.  ...so unless an NMI adds something to the
trace buffer in between the two calls the counts will be the same.
...but it certainly is cleaner.


> The get_total_entries() is the faster approach to get the count, but in
> either case, the count should end up the same.

If you're OK with going back to the super slow mechanism in v1 I can
do that and we can be guaranteed we're consistent.  Presumably it
can't be _that_ slow because we're going to use the same mechanism to
skip the lines later.

So, if you agree, I'll send out a v4 that looks like v1 except that it
disables / enables tracing directly in kdb_ftdump() so it stays
disabled for both calls.


[1] https://lkml.kernel.org/r/20190305233150.159633-1-dianders@chromium.org

-Doug

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines
  2019-03-15 20:54       ` Doug Anderson
@ 2019-03-15 21:08         ` Steven Rostedt
  2019-03-15 21:28           ` Doug Anderson
  0 siblings, 1 reply; 8+ messages in thread
From: Steven Rostedt @ 2019-03-15 21:08 UTC (permalink / raw)
  To: Doug Anderson
  Cc: Ingo Molnar, Jason Wessel, Daniel Thompson, kgdb-bugreport,
	Brian Norris, LKML

On Fri, 15 Mar 2019 13:54:11 -0700
Doug Anderson <dianders@chromium.org> wrote:

> > Hmm, actually your method still wont work, because you are only
> > counting entries not lines. The stack dumps are considered a single
> > line but will print multiple lines.  
> 
> LOL.  Back to v1 then?  v1 of the patch [1] was definitely consistent
> even if it was very slow.  Specifically whatever was being counted by
> ftrace_dump_buf() (entries or lines or something in between) was
> definitely used to figure out how many to skip.

You'll need to read the line itself. I don't see v1 giving a different
count than the get_total_entries() does.

> 
> 
> > Not only that, perhaps you should break apart ftrace_dump_buf(),
> > because calling it twice (or doing what I suggested), wont stop tracing
> > in between, and the size of the buffer might change between the two
> > calls.
> >
> > You need to move out:
> >
> >         for_each_tracing_cpu(cpu) {
> >                 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
> >         }
> >
> >
> > and
> >
> >         for_each_tracing_cpu(cpu) {
> >                 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
> >         }
> >
> > to disable tracing while you do this.  
> 
> Happy to do this in a v4.  I think it's very unlikely to matter
> because we're in kdb and thus all the other CPUs are stopped and
> interrupts are disabled.  ...so unless an NMI adds something to the
> trace buffer in between the two calls the counts will be the same.
> ...but it certainly is cleaner.

NMI's can indeed add to the trace.

> 
> 
> > The get_total_entries() is the faster approach to get the count, but in
> > either case, the count should end up the same.  
> 
> If you're OK with going back to the super slow mechanism in v1 I can
> do that and we can be guaranteed we're consistent.  Presumably it
> can't be _that_ slow because we're going to use the same mechanism to
> skip the lines later.
> 
> So, if you agree, I'll send out a v4 that looks like v1 except that it
> disables / enables tracing directly in kdb_ftdump() so it stays
> disabled for both calls.
> 
> 
> [1] https://lkml.kernel.org/r/20190305233150.159633-1-dianders@chromium.org
> 

But this part of the patch:

> -static void ftrace_dump_buf(int skip_lines, long cpu_file)
> +static int ftrace_dump_buf(int skip_lines, long cpu_file, bool quiet)
>  {
>  	/* use static because iter can be a bit big for the stack */
>  	static struct trace_iterator iter;
> @@ -39,7 +39,9 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
>  	/* don't look at user memory in panic mode */
>  	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
>  
> -	kdb_printf("Dumping ftrace buffer:\n");
> +	if (!quiet)
> +		kdb_printf("Dumping ftrace buffer (skipping %d lines):\n",
> +			   skip_lines);
>  
>  	/* reset all but tr, trace, and overruns */
>  	memset(&iter.seq, 0,
> @@ -66,25 +68,29 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
>  	}
>  
>  	while (trace_find_next_entry_inc(&iter)) {
> -		if (!cnt)
> -			kdb_printf("---------------------------------\n");
> -		cnt++;
> -
> -		if (!skip_lines) {
> -			print_trace_line(&iter);
> -			trace_printk_seq(&iter.seq);
> -		} else {
> -			skip_lines--;
> +		if (!quiet) {
> +			if (!cnt)
> +				kdb_printf("---------------------------------\n");
> +
> +			if (!skip_lines) {
> +				print_trace_line(&iter);
> +				trace_printk_seq(&iter.seq);
> +			} else {
> +				skip_lines--;

How do you know that trace_printk_seq() didn't produce more than one line?

If the event is a stack dump, you need to read the seq, and count the
number of '\n' that are added.

The cnt in this code is no different than the get_total_entries() that
I suggested.

Now the really ugly solution is to have:

	print_trace_line(&iter);

	if (quiet || skip_lines) {
		lines = count_all_newlines(&iter.seq);
		if (skip_lines) {
			skip_lines -= lines;
			if (skip_lines < 0)
				skip_lines = 0;
		}
		cnt += lines;
	} else {
		trace_printk_seq(&iter.seq);
	}

Where the count_all_newlines() needs to be created to read the seq
buffer and return the number of '\n' that are found.

-- Steve



> +			}
>  		}
> +		cnt++;
>  
>  		if (KDB_FLAG(CMD_INTERRUPT))
>  			goto out;
>  	}
>  
> -	if (!cnt)
> -		kdb_printf("   (ftrace buffer empty)\n");
> -	else
> -		kdb_printf("---------------------------------\n");
> +	if (!quiet) {
> +		if (!cnt)
> +			kdb_printf("   (ftrace buffer empty)\n");
> +		else
> +			kdb_printf("---------------------------------\n");
> +	}

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines
  2019-03-15 21:08         ` Steven Rostedt
@ 2019-03-15 21:28           ` Doug Anderson
  2019-03-15 21:33             ` Steven Rostedt
  0 siblings, 1 reply; 8+ messages in thread
From: Doug Anderson @ 2019-03-15 21:28 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: Ingo Molnar, Jason Wessel, Daniel Thompson, kgdb-bugreport,
	Brian Norris, LKML

Hi,

On Fri, Mar 15, 2019 at 2:08 PM Steven Rostedt <rostedt@goodmis.org> wrote:
> > > The get_total_entries() is the faster approach to get the count, but in
> > > either case, the count should end up the same.
> >
> > If you're OK with going back to the super slow mechanism in v1 I can
> > do that and we can be guaranteed we're consistent.  Presumably it
> > can't be _that_ slow because we're going to use the same mechanism to
> > skip the lines later.
> >
> > So, if you agree, I'll send out a v4 that looks like v1 except that it
> > disables / enables tracing directly in kdb_ftdump() so it stays
> > disabled for both calls.
> >
> >
> > [1] https://lkml.kernel.org/r/20190305233150.159633-1-dianders@chromium.org
> >
>
> But this part of the patch:
>
> > -static void ftrace_dump_buf(int skip_lines, long cpu_file)
> > +static int ftrace_dump_buf(int skip_lines, long cpu_file, bool quiet)
> >  {
> >       /* use static because iter can be a bit big for the stack */
> >       static struct trace_iterator iter;
> > @@ -39,7 +39,9 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
> >       /* don't look at user memory in panic mode */
> >       tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
> >
> > -     kdb_printf("Dumping ftrace buffer:\n");
> > +     if (!quiet)
> > +             kdb_printf("Dumping ftrace buffer (skipping %d lines):\n",
> > +                        skip_lines);
> >
> >       /* reset all but tr, trace, and overruns */
> >       memset(&iter.seq, 0,
> > @@ -66,25 +68,29 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
> >       }
> >
> >       while (trace_find_next_entry_inc(&iter)) {
> > -             if (!cnt)
> > -                     kdb_printf("---------------------------------\n");
> > -             cnt++;
> > -
> > -             if (!skip_lines) {
> > -                     print_trace_line(&iter);
> > -                     trace_printk_seq(&iter.seq);
> > -             } else {
> > -                     skip_lines--;
> > +             if (!quiet) {
> > +                     if (!cnt)
> > +                             kdb_printf("---------------------------------\n");
> > +
> > +                     if (!skip_lines) {
> > +                             print_trace_line(&iter);
> > +                             trace_printk_seq(&iter.seq);
> > +                     } else {
> > +                             skip_lines--;
>
> How do you know that trace_printk_seq() didn't produce more than one line?
>
> If the event is a stack dump, you need to read the seq, and count the
> number of '\n' that are added.
>
> The cnt in this code is no different than the get_total_entries() that
> I suggested.

I had a little bit of a hard time figuring out if print_trace_line()
plus trace_printk_seq() always printed one line or always printed one
entry.  I guess the point I was making was that in v1 of my patch it
wouldn't matter because the pseudo code looked like:

1. Count how many "things" would be printed, but don't print them.

2. Use math to figure out how many "things" to skip given that we want
to print the last N "things".

3. Skip the "things" the math told us to and then print the last N "things".

...but it sounds like it _must_ print one entry because we're looping
over trace_find_next_entry_inc().  That means that the existing "skip
lines" that predates my patch should actually be "skip entries".  I'd
be happy adjusting the help text (and local variable name) so that
"skip_lines" is instead "skip_entries".  I think that would avoid
confusion everywhere.  It doesn't change behavior but just documents
the existing behavior.


With that I can use your optimized path assuming you can confirm that
"tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ" doesn't affect how many
entries will be iterated over by trace_find_next_entry_inc().

-Doug

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines
  2019-03-15 21:28           ` Doug Anderson
@ 2019-03-15 21:33             ` Steven Rostedt
  0 siblings, 0 replies; 8+ messages in thread
From: Steven Rostedt @ 2019-03-15 21:33 UTC (permalink / raw)
  To: Doug Anderson
  Cc: Ingo Molnar, Jason Wessel, Daniel Thompson, kgdb-bugreport,
	Brian Norris, LKML

On Fri, 15 Mar 2019 14:28:39 -0700
Doug Anderson <dianders@chromium.org> wrote:

> I had a little bit of a hard time figuring out if print_trace_line()
> plus trace_printk_seq() always printed one line or always printed one
> entry.  I guess the point I was making was that in v1 of my patch it
> wouldn't matter because the pseudo code looked like:
> 
> 1. Count how many "things" would be printed, but don't print them.
> 
> 2. Use math to figure out how many "things" to skip given that we want
> to print the last N "things".
> 
> 3. Skip the "things" the math told us to and then print the last N "things".
> 
> ...but it sounds like it _must_ print one entry because we're looping
> over trace_find_next_entry_inc().  That means that the existing "skip
> lines" that predates my patch should actually be "skip entries".  I'd
> be happy adjusting the help text (and local variable name) so that
> "skip_lines" is instead "skip_entries".  I think that would avoid
> confusion everywhere.  It doesn't change behavior but just documents
> the existing behavior.

Sounds like a plan.

> 
> 
> With that I can use your optimized path assuming you can confirm that
> "tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ" doesn't affect how many
> entries will be iterated over by trace_find_next_entry_inc().

Here's the code:

	trace_seq_puts(s, "<user stack trace>\n"); <=== already prints a line regardless of SYM_USEROBJ.

	if (tr->trace_flags & TRACE_ITER_SYM_USEROBJ) {
		struct task_struct *task;
		/*
		 * we do the lookup on the thread group leader,
		 * since individual threads might have already quit!
		 */
		rcu_read_lock();
		task = find_task_by_vpid(field->tgid);
		if (task)
			mm = get_task_mm(task);
		rcu_read_unlock();
	}

-- Steve

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2019-03-15 21:33 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-03-15 17:45 [PATCH v3] tracing: kdb: Allow ftdump to skip all but the last few lines Douglas Anderson
2019-03-15 18:41 ` Steven Rostedt
2019-03-15 19:55   ` Doug Anderson
2019-03-15 20:12     ` Steven Rostedt
2019-03-15 20:54       ` Doug Anderson
2019-03-15 21:08         ` Steven Rostedt
2019-03-15 21:28           ` Doug Anderson
2019-03-15 21:33             ` Steven Rostedt

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.