All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] perf report: always honor callchain order for inlined nodes
@ 2017-05-16 21:54 Milian Wolff
  2017-05-17  4:26 ` Namhyung Kim
  0 siblings, 1 reply; 3+ messages in thread
From: Milian Wolff @ 2017-05-16 21:54 UTC (permalink / raw)
  To: Linux-kernel
  Cc: linux-perf-users, Milian Wolff, Arnaldo Carvalho de Melo,
	David Ahern, Namhyung Kim, Peter Zijlstra, Yao Jin

So far, the inlined nodes were only reversed when we built perf
against libbfd. If that was not available, the addr2line fallback
code path was missing the inline_list__reverse call.

Now this is done at the central place inside dso__parse_addr_inlines.

Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Yao Jin <yao.jin@linux.intel.com>
Signed-off-by: Milian Wolff <milian.wolff@kdab.com>
---
 tools/perf/util/srcline.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)

diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
index 62cf42c36955..8df6b29bf984 100644
--- a/tools/perf/util/srcline.c
+++ b/tools/perf/util/srcline.c
@@ -61,6 +61,14 @@ static int inline_list__append(char *filename, char *funcname, int line_nr,
 	return 0;
 }
 
+static void inline_list__reverse(struct inline_node *node)
+{
+	struct inline_list *ilist, *n;
+
+	list_for_each_entry_safe_reverse(ilist, n, &node->val, list)
+		list_move_tail(&ilist->list, &node->val);
+}
+
 #ifdef HAVE_LIBBFD_SUPPORT
 
 /*
@@ -200,14 +208,6 @@ static void addr2line_cleanup(struct a2l_data *a2l)
 
 #define MAX_INLINE_NEST 1024
 
-static void inline_list__reverse(struct inline_node *node)
-{
-	struct inline_list *ilist, *n;
-
-	list_for_each_entry_safe_reverse(ilist, n, &node->val, list)
-		list_move_tail(&ilist->list, &node->val);
-}
-
 static int addr2line(const char *dso_name, u64 addr,
 		     char **file, unsigned int *line, struct dso *dso,
 		     bool unwind_inlines, struct inline_node *node)
@@ -245,11 +245,6 @@ static int addr2line(const char *dso_name, u64 addr,
 					return 0;
 			}
 		}
-
-		if ((node != NULL) &&
-		    (callchain_param.order != ORDER_CALLEE)) {
-			inline_list__reverse(node);
-		}
 	}
 
 	if (a2l->found && a2l->filename) {
@@ -493,12 +488,17 @@ char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
 struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr)
 {
 	const char *dso_name;
+	struct inline_node *node;
 
 	dso_name = dso__name(dso);
 	if (dso_name == NULL)
 		return NULL;
 
-	return addr2inlines(dso_name, addr, dso);
+	node = addr2inlines(dso_name, addr, dso);
+	if (node && callchain_param.order != ORDER_CALLEE)
+		inline_list__reverse(node);
+
+	return node;
 }
 
 void inline_node__delete(struct inline_node *node)
-- 
2.13.0

^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] perf report: always honor callchain order for inlined nodes
  2017-05-16 21:54 [PATCH] perf report: always honor callchain order for inlined nodes Milian Wolff
@ 2017-05-17  4:26 ` Namhyung Kim
  2017-05-17  8:03   ` Milian Wolff
  0 siblings, 1 reply; 3+ messages in thread
From: Namhyung Kim @ 2017-05-17  4:26 UTC (permalink / raw)
  To: Milian Wolff
  Cc: Linux-kernel, linux-perf-users, Arnaldo Carvalho de Melo,
	David Ahern, Peter Zijlstra, Yao Jin, kernel-team

On Tue, May 16, 2017 at 11:54:22PM +0200, Milian Wolff wrote:
> So far, the inlined nodes were only reversed when we built perf
> against libbfd. If that was not available, the addr2line fallback
> code path was missing the inline_list__reverse call.
> 
> Now this is done at the central place inside dso__parse_addr_inlines.

What about changing inline_list__append() instead to honor callchain
order?

	if (ORDER_CALLEE)
		list_add_tail(...);
	else
		list_add(...);

Thanks,
Namhyung


> 
> Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
> Cc: David Ahern <dsahern@gmail.com>
> Cc: Namhyung Kim <namhyung@kernel.org>
> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
> Cc: Yao Jin <yao.jin@linux.intel.com>
> Signed-off-by: Milian Wolff <milian.wolff@kdab.com>
> ---
>  tools/perf/util/srcline.c | 28 ++++++++++++++--------------
>  1 file changed, 14 insertions(+), 14 deletions(-)
> 
> diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
> index 62cf42c36955..8df6b29bf984 100644
> --- a/tools/perf/util/srcline.c
> +++ b/tools/perf/util/srcline.c
> @@ -61,6 +61,14 @@ static int inline_list__append(char *filename, char *funcname, int line_nr,
>  	return 0;
>  }
>  
> +static void inline_list__reverse(struct inline_node *node)
> +{
> +	struct inline_list *ilist, *n;
> +
> +	list_for_each_entry_safe_reverse(ilist, n, &node->val, list)
> +		list_move_tail(&ilist->list, &node->val);
> +}
> +
>  #ifdef HAVE_LIBBFD_SUPPORT
>  
>  /*
> @@ -200,14 +208,6 @@ static void addr2line_cleanup(struct a2l_data *a2l)
>  
>  #define MAX_INLINE_NEST 1024
>  
> -static void inline_list__reverse(struct inline_node *node)
> -{
> -	struct inline_list *ilist, *n;
> -
> -	list_for_each_entry_safe_reverse(ilist, n, &node->val, list)
> -		list_move_tail(&ilist->list, &node->val);
> -}
> -
>  static int addr2line(const char *dso_name, u64 addr,
>  		     char **file, unsigned int *line, struct dso *dso,
>  		     bool unwind_inlines, struct inline_node *node)
> @@ -245,11 +245,6 @@ static int addr2line(const char *dso_name, u64 addr,
>  					return 0;
>  			}
>  		}
> -
> -		if ((node != NULL) &&
> -		    (callchain_param.order != ORDER_CALLEE)) {
> -			inline_list__reverse(node);
> -		}
>  	}
>  
>  	if (a2l->found && a2l->filename) {
> @@ -493,12 +488,17 @@ char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
>  struct inline_node *dso__parse_addr_inlines(struct dso *dso, u64 addr)
>  {
>  	const char *dso_name;
> +	struct inline_node *node;
>  
>  	dso_name = dso__name(dso);
>  	if (dso_name == NULL)
>  		return NULL;
>  
> -	return addr2inlines(dso_name, addr, dso);
> +	node = addr2inlines(dso_name, addr, dso);
> +	if (node && callchain_param.order != ORDER_CALLEE)
> +		inline_list__reverse(node);
> +
> +	return node;
>  }
>  
>  void inline_node__delete(struct inline_node *node)
> -- 
> 2.13.0
> 

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] perf report: always honor callchain order for inlined nodes
  2017-05-17  4:26 ` Namhyung Kim
@ 2017-05-17  8:03   ` Milian Wolff
  0 siblings, 0 replies; 3+ messages in thread
From: Milian Wolff @ 2017-05-17  8:03 UTC (permalink / raw)
  To: Namhyung Kim
  Cc: Linux-kernel, linux-perf-users, Arnaldo Carvalho de Melo,
	David Ahern, Peter Zijlstra, Yao Jin, kernel-team

[-- Attachment #1: Type: text/plain, Size: 751 bytes --]

On Wednesday, May 17, 2017 6:26:45 AM CEST Namhyung Kim wrote:
> On Tue, May 16, 2017 at 11:54:22PM +0200, Milian Wolff wrote:
> > So far, the inlined nodes were only reversed when we built perf
> > against libbfd. If that was not available, the addr2line fallback
> > code path was missing the inline_list__reverse call.
> > 
> > Now this is done at the central place inside dso__parse_addr_inlines.
> 
> What about changing inline_list__append() instead to honor callchain
> order?
> 
> 	if (ORDER_CALLEE)
> 		list_add_tail(...);
> 	else
> 		list_add(...);

good idea, I'll do that.

Cheers

-- 
Milian Wolff | milian.wolff@kdab.com | Software Engineer
KDAB (Deutschland) GmbH&Co KG, a KDAB Group company
Tel: +49-30-521325470
KDAB - The Qt Experts

[-- Attachment #2: smime.p7s --]
[-- Type: application/pkcs7-signature, Size: 5903 bytes --]

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2017-05-17  8:04 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-16 21:54 [PATCH] perf report: always honor callchain order for inlined nodes Milian Wolff
2017-05-17  4:26 ` Namhyung Kim
2017-05-17  8:03   ` Milian Wolff

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.