* [PATCH RESEND 1/5] perf diff: Fix to sort by baseline field by default
From: Namhyung Kim @ 2014-12-27  5:06 UTC (permalink / raw)
  To: Arnaldo Carvalho de Melo; +Cc: LKML, Ingo Molnar, Peter Zijlstra, Jiri Olsa

Currently perf diff does not add the baseline and delta (or other
compute) fields to the sort list, so the output is sorted by other
fields like the alphabetical order of DSO or symbol, as in the example
below.

Fix it by adding hpp formats for the fields and providing default
compare functions.

Before:

  $ perf diff
  # Event 'cycles'
  #
  # Baseline    Delta  Shared Object       Symbol
  # ........  .......  ..................  ...............................
  #
                       [bridge]            [k] ip_sabotage_in
                       [btrfs]             [k] __etree_search.constprop.47
       0.01%           [btrfs]             [k] btrfs_file_mmap
       0.01%   -0.01%  [btrfs]             [k] btrfs_getattr
                       [e1000e]            [k] e1000_watchdog
       0.00%           [kernel.vmlinux]    [k] PageHuge
       0.00%           [kernel.vmlinux]    [k] __acct_update_integrals
       0.00%           [kernel.vmlinux]    [k] __activate_page
                       [kernel.vmlinux]    [k] __alloc_fd
       0.02%   +0.02%  [kernel.vmlinux]    [k] __alloc_pages_nodemask
       ...

After:

  # Baseline    Delta  Shared Object       Symbol
  # ........  .......  ..................  ................................
  #
      24.73%   -4.62%  perf                [.] append_chain_children
       7.96%   -1.29%  perf                [.] dso__find_symbol
       6.97%   -2.07%  libc-2.20.so        [.] vfprintf
       4.61%   +0.88%  libc-2.20.so        [.] __fprintf_chk
       4.41%   +2.43%  perf                [.] sort__comm_cmp
       4.10%   -0.16%  perf                [.] comm__str
       4.03%   -0.93%  perf                [.] machine__findnew_thread_time
       3.82%   +3.09%  perf                [.] __hists__add_entry
       2.95%   -0.18%  perf                [.] sort__dso_cmp
       ...

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/builtin-diff.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index e787a4406d1f..318ab9c3f0ba 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
 	return __hist_entry__cmp_compute(p_left, p_right, c);
 }
 
+static int64_t
+hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+		    struct hist_entry *right __maybe_unused)
+{
+	return 0;
+}
+
+static int64_t
+hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+{
+	if (sort_compute)
+		return 0;
+
+	if (left->stat.period == right->stat.period)
+		return 0;
+	return left->stat.period > right->stat.period ? 1 : -1;
+}
+
+static int64_t
+hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+}
+
+static int64_t
+hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+}
+
+static int64_t
+hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+}
+
 static void insert_hist_entry_by_compute(struct rb_root *root,
 					 struct hist_entry *he,
 					 int c)
@@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx)
 	fmt->header = hpp__header;
 	fmt->width  = hpp__width;
 	fmt->entry  = hpp__entry_global;
+	fmt->cmp    = hist_entry__cmp_nop;
+	fmt->collapse = hist_entry__cmp_nop;
 
 	/* TODO more colors */
 	switch (idx) {
 	case PERF_HPP_DIFF__BASELINE:
 		fmt->color = hpp__color_baseline;
+		fmt->sort  = hist_entry__cmp_baseline;
 		break;
 	case PERF_HPP_DIFF__DELTA:
 		fmt->color = hpp__color_delta;
+		fmt->sort  = hist_entry__cmp_delta;
 		break;
 	case PERF_HPP_DIFF__RATIO:
 		fmt->color = hpp__color_ratio;
+		fmt->sort  = hist_entry__cmp_ratio;
 		break;
 	case PERF_HPP_DIFF__WEIGHTED_DIFF:
 		fmt->color = hpp__color_wdiff;
+		fmt->sort  = hist_entry__cmp_wdiff;
 		break;
 	default:
+		fmt->sort  = hist_entry__cmp_nop;
 		break;
 	}
 
 	init_header(d, dfmt);
 	perf_hpp__column_register(fmt);
+	perf_hpp__register_sort_field(fmt);
 }
 
 static void ui_init(void)
-- 
2.2.1



* [PATCH RESEND 2/5] perf diff: Get rid of hists__compute_resort()
From: Namhyung Kim @ 2014-12-27  5:06 UTC (permalink / raw)
  To: Arnaldo Carvalho de Melo; +Cc: LKML, Ingo Molnar, Peter Zijlstra, Jiri Olsa

The hists__compute_resort() function sorts the output based on the
given field/criteria.  This was done without the sort list, but now
that the field has been added to the sort list, we can do it with the
normal hists__output_resort() using the ->sort callback.
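
For reference, a minimal self-contained sketch of the mechanism this
relies on (structure and function names are invented for the example
and are not the actual perf code): the generic resort walks the
registered sort callbacks, and the first one returning non-zero decides
the order of two entries.

#include <stdio.h>
#include <stdint.h>

struct entry { double baseline, delta; };

struct fmt {
        int64_t (*sort)(const struct entry *l, const struct entry *r);
        const struct fmt *next;         /* stand-in for the sort list */
};

static int64_t cmp_baseline(const struct entry *l, const struct entry *r)
{
        if (l->baseline == r->baseline)
                return 0;
        return l->baseline > r->baseline ? 1 : -1;
}

static int64_t cmp_delta(const struct entry *l, const struct entry *r)
{
        if (l->delta == r->delta)
                return 0;
        return l->delta > r->delta ? 1 : -1;
}

/* What the generic resort boils down to: the first ->sort callback
 * that reports a difference decides the order of the two entries. */
static int64_t entries_cmp(const struct fmt *f,
                           const struct entry *l, const struct entry *r)
{
        int64_t cmp = 0;

        for (; f && !cmp; f = f->next)
                cmp = f->sort(l, r);
        return cmp;
}

int main(void)
{
        struct entry a = { 0.0, +0.02 }, b = { 0.0, -0.01 };
        struct fmt delta_fmt    = { cmp_delta, NULL };
        struct fmt baseline_fmt = { cmp_baseline, &delta_fmt };

        /* equal baselines, so the delta column breaks the tie */
        printf("%lld\n", (long long)entries_cmp(&baseline_fmt, &a, &b));
        return 0;
}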

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/builtin-diff.c | 59 +++--------------------------------------------
 1 file changed, 3 insertions(+), 56 deletions(-)

diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 318ab9c3f0ba..72c718e6549c 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -581,68 +581,15 @@ hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
 	return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
 }
 
-static void insert_hist_entry_by_compute(struct rb_root *root,
-					 struct hist_entry *he,
-					 int c)
-{
-	struct rb_node **p = &root->rb_node;
-	struct rb_node *parent = NULL;
-	struct hist_entry *iter;
-
-	while (*p != NULL) {
-		parent = *p;
-		iter = rb_entry(parent, struct hist_entry, rb_node);
-		if (hist_entry__cmp_compute(he, iter, c) < 0)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, root);
-}
-
-static void hists__compute_resort(struct hists *hists)
-{
-	struct rb_root *root;
-	struct rb_node *next;
-
-	if (sort__need_collapse)
-		root = &hists->entries_collapsed;
-	else
-		root = hists->entries_in;
-
-	hists->entries = RB_ROOT;
-	next = rb_first(root);
-
-	hists__reset_stats(hists);
-	hists__reset_col_len(hists);
-
-	while (next != NULL) {
-		struct hist_entry *he;
-
-		he = rb_entry(next, struct hist_entry, rb_node_in);
-		next = rb_next(&he->rb_node_in);
-
-		insert_hist_entry_by_compute(&hists->entries, he, compute);
-		hists__inc_stats(hists, he);
-
-		if (!he->filtered)
-			hists__calc_col_len(hists, he);
-	}
-}
-
 static void hists__process(struct hists *hists)
 {
 	if (show_baseline_only)
 		hists__baseline_only(hists);
 
-	if (sort_compute) {
+	if (sort_compute)
 		hists__precompute(hists);
-		hists__compute_resort(hists);
-	} else {
-		hists__output_resort(hists, NULL);
-	}
+
+	hists__output_resort(hists, NULL);
 
 	hists__fprintf(hists, true, 0, 0, 0, stdout);
 }
-- 
2.2.1



* [PATCH 3/5] perf diff: Print diff result more precisely
From: Namhyung Kim @ 2014-12-27  5:06 UTC (permalink / raw)
  To: Arnaldo Carvalho de Melo; +Cc: LKML, Ingo Molnar, Peter Zijlstra, Jiri Olsa

The current perf diff output is somewhat confusing since it sometimes
hides small results and sometimes there's no result at all.  So do not
hide small results (less than 0.01%) and print "N/A" if the baseline is
not recorded (for ratio and wdiff only).  A blank means the baseline is
available but its pairs are not.
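
To spell the rule out, here is a tiny self-contained sketch of the
display logic described above (illustrative only; the names, widths and
helper are made up, this is not the patch code):

#include <stdio.h>
#include <stdbool.h>

enum metric { DELTA, RATIO, WDIFF };

/*
 * Display rule described above (illustrative only):
 *  - no pair at all (the other file never saw this symbol): print blank
 *  - the entry is a dummy (no baseline) and the metric needs a
 *    baseline (ratio/wdiff): print "N/A"
 *  - otherwise print the value, even if it is tiny
 */
static void print_cell(bool has_pair, bool dummy, enum metric m, double val)
{
        if (!has_pair)
                printf("%8s", " ");
        else if (dummy && m != DELTA)
                printf("%8s", "N/A");
        else if (m == DELTA)
                printf("%+7.2f%%", val);
        else
                printf("%8.2f", val);
}

int main(void)
{
        print_cell(false, false, DELTA, 0.0);   /* blank: pair missing    */
        printf("|\n");
        print_cell(true,  true,  RATIO, 0.0);   /* N/A: no baseline       */
        printf("|\n");
        print_cell(true,  false, DELTA, 0.004); /* tiny delta still shown */
        printf("|\n");
        return 0;
}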

Before:

  # Baseline    Delta  Shared Object      Symbol
  # ........  .......  .................  .........................
  #
       ...
       0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
       0.01%           [kernel.kallsyms]  [k] scheduler_tick
       0.01%           [kernel.kallsyms]  [k] native_read_msr_safe
       0.00%           [kernel.kallsyms]  [k] __rcu_read_unlock
                       [kernel.kallsyms]  [k] _raw_spin_lock
               +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
                       [kernel.kallsyms]  [k] read_tsc

After:

  # Baseline    Delta  Shared Object      Symbol
  # ........  .......  .................  .........................
  #
       ...
       0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
       0.01%           [kernel.kallsyms]  [k] scheduler_tick
       0.01%           [kernel.kallsyms]  [k] native_read_msr_safe
       0.00%           [kernel.kallsyms]  [k] __rcu_read_unlock
               +0.01%  [kernel.kallsyms]  [k] _raw_spin_lock
               +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
               +0.01%  [kernel.kallsyms]  [k] read_tsc

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/builtin-diff.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 72c718e6549c..3f86737da2c4 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -788,7 +788,7 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
 	char pfmt[20] = " ";
 
 	if (!pair)
-		goto dummy_print;
+		goto no_print;
 
 	switch (comparison_method) {
 	case COMPUTE_DELTA:
@@ -797,8 +797,6 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
 		else
 			diff = compute_delta(he, pair);
 
-		if (fabs(diff) < 0.01)
-			goto dummy_print;
 		scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1);
 		return percent_color_snprintf(hpp->buf, hpp->size,
 					pfmt, diff);
@@ -830,6 +828,9 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
 	}
 dummy_print:
 	return scnprintf(hpp->buf, hpp->size, "%*s",
+			dfmt->header_width, "N/A");
+no_print:
+	return scnprintf(hpp->buf, hpp->size, "%*s",
 			dfmt->header_width, pfmt);
 }
 
@@ -879,14 +880,15 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
 		else
 			diff = compute_delta(he, pair);
 
-		if (fabs(diff) >= 0.01)
-			scnprintf(buf, size, "%+4.2F%%", diff);
+		scnprintf(buf, size, "%+4.2F%%", diff);
 		break;
 
 	case PERF_HPP_DIFF__RATIO:
 		/* No point for ratio number if we are dummy.. */
-		if (he->dummy)
+		if (he->dummy) {
+			scnprintf(buf, size, "N/A");
 			break;
+		}
 
 		if (pair->diff.computed)
 			ratio = pair->diff.period_ratio;
@@ -899,8 +901,10 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
 
 	case PERF_HPP_DIFF__WEIGHTED_DIFF:
 		/* No point for wdiff number if we are dummy.. */
-		if (he->dummy)
+		if (he->dummy) {
+			scnprintf(buf, size, "N/A");
 			break;
+		}
 
 		if (pair->diff.computed)
 			wdiff = pair->diff.wdiff;
-- 
2.2.1



* [PATCH 4/5] perf diff: Fix output ordering to honor next column
From: Namhyung Kim @ 2014-12-27  5:06 UTC (permalink / raw)
  To: Arnaldo Carvalho de Melo; +Cc: LKML, Ingo Molnar, Peter Zijlstra, Jiri Olsa

When perf diff prints its output, it sorts the entries using the
baseline field by default, but entries which don't have a baseline are
not sorted properly.  This patch makes them sorted by the values of
the next column.
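
For illustration, a minimal self-contained sketch of what "sorted by
the values of the next column" means here - the structures and names
below are invented for the example and are not the perf code: each
Delta/N column compares the pair data from its own data file, and
entries missing that data sort after the rest.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NR_FILES 2                      /* number of non-baseline data files */

struct entry {
        double baseline;
        bool   has_pair[NR_FILES];      /* does data file i know this symbol? */
        double delta[NR_FILES];         /* delta against data file i */
};

/* Comparator for the Delta/<idx+1> column: missing pairs sort last. */
static int64_t cmp_delta_idx(const struct entry *l, const struct entry *r,
                             int idx)
{
        if (!l->has_pair[idx] && !r->has_pair[idx])
                return 0;
        if (!l->has_pair[idx] || !r->has_pair[idx])
                return l->has_pair[idx] ? 1 : -1;
        if (l->delta[idx] == r->delta[idx])
                return 0;
        return l->delta[idx] > r->delta[idx] ? 1 : -1;
}

int main(void)
{
        /* Two entries without baseline samples: before this change their
         * relative order was effectively arbitrary; now Delta/1 decides. */
        struct entry a = { 0.0, { true,  false }, { +0.01, 0.0 } };
        struct entry b = { 0.0, { false, true  }, { 0.0, +0.01 } };

        printf("%lld\n", (long long)cmp_delta_idx(&a, &b, 0));
        return 0;
}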

Before:

  # Baseline/0  Delta/1  Delta/2  Shared Object      Symbol
  # ..........  .......  .......  .................  ..........................................
  #
        32.75%   +0.28%   -0.83%  libc-2.20.so       [.] malloc
        31.50%   -0.74%   -0.23%  libc-2.20.so       [.] _int_free
        22.98%   +0.51%   +0.52%  libc-2.20.so       [.] _int_malloc
         5.70%   +0.28%   +0.30%  libc-2.20.so       [.] free
         4.38%   -0.21%   +0.25%  a.out              [.] main
         1.32%   -0.15%   +0.05%  a.out              [.] free@plt
         1.31%   +0.03%   -0.06%  a.out              [.] malloc@plt
         0.01%   -0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
         0.01%                    [kernel.kallsyms]  [k] scheduler_tick
         0.01%            -0.00%  [kernel.kallsyms]  [k] native_read_msr_safe
                          +0.01%  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
                 +0.01%   +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
                          +0.01%  [kernel.kallsyms]  [k] intel_pstate_timer_func
                 +0.01%           [kernel.kallsyms]  [k] perf_adjust_freq_unthr_context.part.82
                 +0.01%           [kernel.kallsyms]  [k] read_tsc
                          +0.01%  [kernel.kallsyms]  [k] timekeeping_update.constprop.8

After:

  # Baseline/0  Delta/1  Delta/2  Shared Object      Symbol
  # ..........  .......  .......  .................  ..........................................
  #
        32.75%   +0.28%   -0.83%  libc-2.20.so       [.] malloc
        31.50%   -0.74%   -0.23%  libc-2.20.so       [.] _int_free
        22.98%   +0.51%   +0.52%  libc-2.20.so       [.] _int_malloc
         5.70%   +0.28%   +0.30%  libc-2.20.so       [.] free
         4.38%   -0.21%   +0.25%  a.out              [.] main
         1.32%   -0.15%   +0.05%  a.out              [.] free@plt
         1.31%   +0.03%   -0.06%  a.out              [.] malloc@plt
         0.01%   -0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
         0.01%                    [kernel.kallsyms]  [k] scheduler_tick
         0.01%            -0.00%  [kernel.kallsyms]  [k] native_read_msr_safe
                 +0.01%   +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
                 +0.01%           [kernel.kallsyms]  [k] read_tsc
                 +0.01%           [kernel.kallsyms]  [k] perf_adjust_freq_unthr_context.part.82
                          +0.01%  [kernel.kallsyms]  [k] intel_pstate_timer_func
                          +0.01%  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
                          +0.01%  [kernel.kallsyms]  [k] timekeeping_update.constprop.8

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/builtin-diff.c | 60 +++++++++++++++++++++++++++--------------------
 1 file changed, 34 insertions(+), 26 deletions(-)

diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 3f86737da2c4..f490e24e9477 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -65,6 +65,7 @@ static bool show_period;
 static bool show_formula;
 static bool show_baseline_only;
 static unsigned int sort_compute;
+static unsigned int data_idx;
 
 static s64 compute_wdiff_w1;
 static s64 compute_wdiff_w2;
@@ -448,26 +449,30 @@ static void hists__precompute(struct hists *hists)
 	next = rb_first(root);
 	while (next != NULL) {
 		struct hist_entry *he, *pair;
+		struct data__file *d;
+		int i;
 
 		he   = rb_entry(next, struct hist_entry, rb_node_in);
 		next = rb_next(&he->rb_node_in);
 
-		pair = get_pair_data(he, &data__files[sort_compute]);
-		if (!pair)
-			continue;
+		data__for_each_file_new(i, d) {
+			pair = get_pair_data(he, d);
+			if (!pair)
+				continue;
 
-		switch (compute) {
-		case COMPUTE_DELTA:
-			compute_delta(he, pair);
-			break;
-		case COMPUTE_RATIO:
-			compute_ratio(he, pair);
-			break;
-		case COMPUTE_WEIGHTED_DIFF:
-			compute_wdiff(he, pair);
-			break;
-		default:
-			BUG_ON(1);
+			switch (compute) {
+			case COMPUTE_DELTA:
+				compute_delta(he, pair);
+				break;
+			case COMPUTE_RATIO:
+				compute_ratio(he, pair);
+				break;
+			case COMPUTE_WEIGHTED_DIFF:
+				compute_wdiff(he, pair);
+				break;
+			default:
+				BUG_ON(1);
+			}
 		}
 	}
 }
@@ -517,7 +522,7 @@ __hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
 
 static int64_t
 hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
-			int c)
+			int c, int sort_idx)
 {
 	bool pairs_left  = hist_entry__has_pairs(left);
 	bool pairs_right = hist_entry__has_pairs(right);
@@ -529,8 +534,8 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
 	if (!pairs_left || !pairs_right)
 		return pairs_left ? -1 : 1;
 
-	p_left  = get_pair_data(left,  &data__files[sort_compute]);
-	p_right = get_pair_data(right, &data__files[sort_compute]);
+	p_left  = get_pair_data(left,  &data__files[sort_idx]);
+	p_right = get_pair_data(right, &data__files[sort_idx]);
 
 	if (!p_left && !p_right)
 		return 0;
@@ -555,8 +560,13 @@ hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
 static int64_t
 hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
 {
-	if (sort_compute)
-		return 0;
+	/*
+	 * This function will be called first for each entry to resort
+	 * output.  Next compare-functions use this idx to find their
+	 * data and increase it for next data so we need to initialize
+	 * it everytime.
+	 */
+	data_idx = 0;
 
 	if (left->stat.period == right->stat.period)
 		return 0;
@@ -566,19 +576,19 @@ hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
 static int64_t
 hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
 {
-	return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+	return hist_entry__cmp_compute(right, left, COMPUTE_DELTA, ++data_idx);
 }
 
 static int64_t
 hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
 {
-	return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+	return hist_entry__cmp_compute(right, left, COMPUTE_RATIO, ++data_idx);
 }
 
 static int64_t
 hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
 {
-	return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+	return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, ++data_idx);
 }
 
 static void hists__process(struct hists *hists)
@@ -586,9 +596,7 @@ static void hists__process(struct hists *hists)
 	if (show_baseline_only)
 		hists__baseline_only(hists);
 
-	if (sort_compute)
-		hists__precompute(hists);
-
+	hists__precompute(hists);
 	hists__output_resort(hists, NULL);
 
 	hists__fprintf(hists, true, 0, 0, 0, stdout);
-- 
2.2.1



* [PATCH 5/5] perf diff: Fix -o/--order option behavior
From: Namhyung Kim @ 2014-12-27  5:06 UTC (permalink / raw)
  To: Arnaldo Carvalho de Melo; +Cc: LKML, Ingo Molnar, Peter Zijlstra, Jiri Olsa

The prior change fixes the default output ordering of each column, but
it breaks the -o/--order option.  This patch prepends a new hpp fmt
struct to the sort list but not to the output field list, so that it
can affect the ordering without adding a new output column.

The new hpp fmt uses its own compare functions which treat dummy
entries (which have no baseline) a little differently - the delta field
can be computed without the baseline but the others (ratio and wdiff)
cannot.

The new output will look like below:

  $ perf diff -o 2 perf.data.{old,cur,new}
  ...
  # Baseline/0  Delta/1  Delta/2  Shared Object      Symbol
  # ..........  .......  .......  .................  ..........................................
        22.98%   +0.51%   +0.52%  libc-2.20.so       [.] _int_malloc
         5.70%   +0.28%   +0.30%  libc-2.20.so       [.] free
         4.38%   -0.21%   +0.25%  a.out              [.] main
         1.32%   -0.15%   +0.05%  a.out              [.] free@plt
                          +0.01%  [kernel.kallsyms]  [k] intel_pstate_timer_func
                          +0.01%  [kernel.kallsyms]  [k] _raw_spin_lock_irqsave
                          +0.01%  [kernel.kallsyms]  [k] timekeeping_update.constprop.8
                 +0.01%   +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
         0.01%            -0.00%  [kernel.kallsyms]  [k] native_read_msr_safe
         0.01%   -0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
         1.31%   +0.03%   -0.06%  a.out              [.] malloc@plt
        31.50%   -0.74%   -0.23%  libc-2.20.so       [.] _int_free
        32.75%   +0.28%   -0.83%  libc-2.20.so       [.] malloc
         0.01%                    [kernel.kallsyms]  [k] scheduler_tick
                 +0.01%           [kernel.kallsyms]  [k] read_tsc
                 +0.01%           [kernel.kallsyms]  [k] perf_adjust_freq_unthr_context.part.82

In the above example, the output is sorted by the 'Delta/2' column
first, then by 'Baseline/0' and finally by 'Delta/1'.
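
As an aside, a rough self-contained sketch of the "sort-only format"
idea (the list layout and helper names are invented for the example,
not the actual perf_hpp API): the new fmt is linked into the sort list
but not into the column list, so it becomes the primary sort key
without printing a column of its own.

#include <stdio.h>

struct fmt {
        const char *name;
        struct fmt *next_col;           /* output column list */
        struct fmt *next_sort;          /* sort key list */
};

static struct fmt *columns;
static struct fmt *sort_keys;

/* A normal column: shown in the output and used as a sort key. */
static void register_column(struct fmt *f)
{
        f->next_col = columns;
        columns = f;
        f->next_sort = sort_keys;
        sort_keys = f;
}

/* The -o/--order fmt: prepended to the sort keys only, so it becomes
 * the primary key but never prints a column of its own. */
static void register_sort_only(struct fmt *f)
{
        f->next_sort = sort_keys;
        sort_keys = f;
}

int main(void)
{
        struct fmt baseline = { "Baseline/0", NULL, NULL };
        struct fmt order    = { "Delta/2 (sort only)", NULL, NULL };

        register_column(&baseline);
        register_sort_only(&order);     /* e.g. perf diff -o 2 */

        for (struct fmt *f = sort_keys; f; f = f->next_sort)
                printf("sort key: %s\n", f->name);
        for (struct fmt *f = columns; f; f = f->next_col)
                printf("column  : %s\n", f->name);
        return 0;
}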

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 tools/perf/builtin-diff.c | 87 +++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 85 insertions(+), 2 deletions(-)

diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index f490e24e9477..8ccb48e77eab 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -551,6 +551,37 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
 }
 
 static int64_t
+hist_entry__cmp_compute_idx(struct hist_entry *left, struct hist_entry *right,
+			    int c, int sort_idx)
+{
+	struct hist_entry *p_right, *p_left;
+
+	p_left  = get_pair_data(left,  &data__files[sort_idx]);
+	p_right = get_pair_data(right, &data__files[sort_idx]);
+
+	if (!p_left && !p_right)
+		return 0;
+
+	if (!p_left || !p_right)
+		return p_left ? -1 : 1;
+
+	if (c != COMPUTE_DELTA) {
+		/*
+		 * The delta can be computed without the baseline, but
+		 * others are not.  Put those entries which have no
+		 * values below.
+		 */
+		if (left->dummy && right->dummy)
+			return 0;
+
+		if (left->dummy || right->dummy)
+			return left->dummy ? 1 : -1;
+	}
+
+	return __hist_entry__cmp_compute(p_left, p_right, c);
+}
+
+static int64_t
 hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
 		    struct hist_entry *right __maybe_unused)
 {
@@ -591,6 +622,27 @@ hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
 	return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF, ++data_idx);
 }
 
+static int64_t
+hist_entry__cmp_delta_idx(struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute_idx(right, left, COMPUTE_DELTA,
+					   sort_compute);
+}
+
+static int64_t
+hist_entry__cmp_ratio_idx(struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute_idx(right, left, COMPUTE_RATIO,
+					   sort_compute);
+}
+
+static int64_t
+hist_entry__cmp_wdiff_idx(struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute_idx(right, left, COMPUTE_WEIGHTED_DIFF,
+					   sort_compute);
+}
+
 static void hists__process(struct hists *hists)
 {
 	if (show_baseline_only)
@@ -1064,9 +1116,10 @@ static void data__hpp_register(struct data__file *d, int idx)
 	perf_hpp__register_sort_field(fmt);
 }
 
-static void ui_init(void)
+static int ui_init(void)
 {
 	struct data__file *d;
+	struct perf_hpp_fmt *fmt;
 	int i;
 
 	data__for_each_file(i, d) {
@@ -1096,6 +1149,35 @@ static void ui_init(void)
 			data__hpp_register(d, i ? PERF_HPP_DIFF__PERIOD :
 						  PERF_HPP_DIFF__PERIOD_BASELINE);
 	}
+
+	if (!sort_compute)
+		return 0;
+
+	fmt = zalloc(sizeof(*fmt));
+	if (fmt == NULL) {
+		pr_err("Memory allocation failed\n");
+		return -1;
+	}
+
+	fmt->cmp      = hist_entry__cmp_nop;
+	fmt->collapse = hist_entry__cmp_nop;
+
+	switch (compute) {
+	case COMPUTE_DELTA:
+		fmt->sort = hist_entry__cmp_delta_idx;
+		break;
+	case COMPUTE_RATIO:
+		fmt->sort = hist_entry__cmp_ratio_idx;
+		break;
+	case COMPUTE_WEIGHTED_DIFF:
+		fmt->sort = hist_entry__cmp_wdiff_idx;
+		break;
+	default:
+		BUG_ON(1);
+	}
+
+	list_add(&fmt->sort_list, &perf_hpp__sort_list);
+	return 0;
 }
 
 static int data_init(int argc, const char **argv)
@@ -1161,7 +1243,8 @@ int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
 	if (data_init(argc, argv) < 0)
 		return -1;
 
-	ui_init();
+	if (ui_init() < 0)
+		return -1;
 
 	sort__mode = SORT_MODE__DIFF;
 
-- 
2.2.1



* Re: [PATCH RESEND 2/5] perf diff: Get rid of hists__compute_resort()
From: Jiri Olsa @ 2015-01-04 16:33 UTC (permalink / raw)
  To: Namhyung Kim; +Cc: Arnaldo Carvalho de Melo, LKML, Ingo Molnar, Peter Zijlstra

On Sat, Dec 27, 2014 at 02:06:30PM +0900, Namhyung Kim wrote:
> The hists__compute_resort() is to sort output fields based on the
> given field/criteria.  This was done without the sort list but as we
> added the field to the sort list, we can do it with normal
> hists__output_resort() using the ->sort callback.
> 

Acked-by: Jiri Olsa <jolsa@kernel.org>

thanks,
jirka


* Re: [PATCH 3/5] perf diff: Print diff result more precisely
From: Jiri Olsa @ 2015-01-04 16:47 UTC (permalink / raw)
  To: Namhyung Kim; +Cc: Arnaldo Carvalho de Melo, LKML, Ingo Molnar, Peter Zijlstra

On Sat, Dec 27, 2014 at 02:06:31PM +0900, Namhyung Kim wrote:
> Current perf diff result is somewhat confusing since it sometimes hide
> small result and sometimes there's no result.  So do not hide small
> result (less than 0.01%) and print "N/A" if baseline is not
> recorded (for ratio and wdiff only).  Blank means the baseline is
> available but its pairs are not.

I think we should document this, especially when diff has different
behaviour from the rest of the computation.. :-\

anyway for the change itself:

Acked-by: Jiri Olsa <jolsa@kernel.org>

thanks,
jirka


* Re: [PATCH 4/5] perf diff: Fix output ordering to honor next column
From: Jiri Olsa @ 2015-01-04 18:16 UTC (permalink / raw)
  To: Namhyung Kim; +Cc: Arnaldo Carvalho de Melo, LKML, Ingo Molnar, Peter Zijlstra

On Sat, Dec 27, 2014 at 02:06:32PM +0900, Namhyung Kim wrote:

SNIP

>  	if (!pairs_left || !pairs_right)
>  		return pairs_left ? -1 : 1;
>  
> -	p_left  = get_pair_data(left,  &data__files[sort_compute]);
> -	p_right = get_pair_data(right, &data__files[sort_compute]);
> +	p_left  = get_pair_data(left,  &data__files[sort_idx]);
> +	p_right = get_pair_data(right, &data__files[sort_idx]);
>  
>  	if (!p_left && !p_right)
>  		return 0;
> @@ -555,8 +560,13 @@ hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
>  static int64_t
>  hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
>  {
> -	if (sort_compute)
> -		return 0;
> +	/*
> +	 * This function will be called first for each entry to resort
> +	 * output.  Next compare-functions use this idx to find their
> +	 * data and increase it for next data so we need to initialize
> +	 * it everytime.
> +	 */
> +	data_idx = 0;

hum, could we omit the global 'data_idx' variable usage by passing
'struct perf_hpp_fmt' into color fmt callbacks? (like hist_entry__cmp_delta..)

we could get the data_idx from 'struct diff_hpp_fmt'::idx
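
For illustration, a self-contained sketch of that idea - the struct
layout, names and simplified callback signature below are assumptions
for the example, not copied from the perf sources: the callback gets
the generic fmt pointer and uses container_of() to reach the enclosing
diff-specific struct and its index, instead of a global variable.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* Generic per-column format, as it would be passed to the callback. */
struct hpp_fmt {
        int64_t (*sort)(struct hpp_fmt *fmt);   /* entries omitted for brevity */
};

/* diff-specific wrapper that embeds it and remembers its data file index. */
struct diff_fmt {
        struct hpp_fmt  fmt;
        int             idx;
};

static int64_t cmp_delta(struct hpp_fmt *fmt)
{
        struct diff_fmt *dfmt = container_of(fmt, struct diff_fmt, fmt);

        /* use dfmt->idx to pick the right data file instead of a global */
        return dfmt->idx;
}

int main(void)
{
        struct diff_fmt d = { { cmp_delta }, 2 };

        printf("column compares against data file %lld\n",
               (long long)d.fmt.sort(&d.fmt));
        return 0;
}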

jirka


* Re: [PATCH 5/5] perf diff: Fix -o/--order option behavior
From: Jiri Olsa @ 2015-01-04 18:26 UTC (permalink / raw)
  To: Namhyung Kim; +Cc: Arnaldo Carvalho de Melo, LKML, Ingo Molnar, Peter Zijlstra

On Sat, Dec 27, 2014 at 02:06:33PM +0900, Namhyung Kim wrote:

SNIP

> +		return 0;
> +
> +	fmt = zalloc(sizeof(*fmt));
> +	if (fmt == NULL) {
> +		pr_err("Memory allocation failed\n");
> +		return -1;
> +	}
> +
> +	fmt->cmp      = hist_entry__cmp_nop;
> +	fmt->collapse = hist_entry__cmp_nop;
> +
> +	switch (compute) {
> +	case COMPUTE_DELTA:
> +		fmt->sort = hist_entry__cmp_delta_idx;
> +		break;
> +	case COMPUTE_RATIO:
> +		fmt->sort = hist_entry__cmp_ratio_idx;
> +		break;
> +	case COMPUTE_WEIGHTED_DIFF:
> +		fmt->sort = hist_entry__cmp_wdiff_idx;
> +		break;
> +	default:
> +		BUG_ON(1);
> +	}
> +
> +	list_add(&fmt->sort_list, &perf_hpp__sort_list);
> +	return 0;

so the first 'fmt' which gets to sorting is the one for
data__files[sort_idx] file, that sounds good..

but as the sorting goes through all the perf_hpp__sort_list list,
it will hit the 'sort_idx' data again.. should you disable sort
function for its 'fmt' then?

jirka


* Re: [PATCH 4/5] perf diff: Fix output ordering to honor next column
From: Namhyung Kim @ 2015-01-07  7:42 UTC (permalink / raw)
  To: Jiri Olsa; +Cc: Arnaldo Carvalho de Melo, LKML, Ingo Molnar, Peter Zijlstra

Hi Jiri,

On Sun, Jan 04, 2015 at 07:16:40PM +0100, Jiri Olsa wrote:
> On Sat, Dec 27, 2014 at 02:06:32PM +0900, Namhyung Kim wrote:
> 
> SNIP
> 
> >  	if (!pairs_left || !pairs_right)
> >  		return pairs_left ? -1 : 1;
> >  
> > -	p_left  = get_pair_data(left,  &data__files[sort_compute]);
> > -	p_right = get_pair_data(right, &data__files[sort_compute]);
> > +	p_left  = get_pair_data(left,  &data__files[sort_idx]);
> > +	p_right = get_pair_data(right, &data__files[sort_idx]);
> >  
> >  	if (!p_left && !p_right)
> >  		return 0;
> > @@ -555,8 +560,13 @@ hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
> >  static int64_t
> >  hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
> >  {
> > -	if (sort_compute)
> > -		return 0;
> > +	/*
> > +	 * This function will be called first for each entry to resort
> > +	 * output.  Next compare-functions use this idx to find their
> > +	 * data and increase it for next data so we need to initialize
> > +	 * it everytime.
> > +	 */
> > +	data_idx = 0;
> 
> hum, could we omit the global 'data_idx' variable usage by passing
> 'struct perf_hpp_fmt' into color fmt callbacks? (like hist_entry__cmp_delta..)
> 
> we could get the data_idx from 'struct diff_hpp_fmt'::idx

Right, I thought that too.  But it requires adding a '__maybe_unused
fmt' argument to all callbacks so I just decided to use a quick
solution.  I'll convert it to pass the fmt argument as that will make
possible future changes easier.

Thanks,
Namhyung


* Re: [PATCH 5/5] perf diff: Fix -o/--order option behavior
From: Namhyung Kim @ 2015-01-07  7:47 UTC (permalink / raw)
  To: Jiri Olsa; +Cc: Arnaldo Carvalho de Melo, LKML, Ingo Molnar, Peter Zijlstra

On Sun, Jan 04, 2015 at 07:26:56PM +0100, Jiri Olsa wrote:
> On Sat, Dec 27, 2014 at 02:06:33PM +0900, Namhyung Kim wrote:
> 
> SNIP
> 
> > +		return 0;
> > +
> > +	fmt = zalloc(sizeof(*fmt));
> > +	if (fmt == NULL) {
> > +		pr_err("Memory allocation failed\n");
> > +		return -1;
> > +	}
> > +
> > +	fmt->cmp      = hist_entry__cmp_nop;
> > +	fmt->collapse = hist_entry__cmp_nop;
> > +
> > +	switch (compute) {
> > +	case COMPUTE_DELTA:
> > +		fmt->sort = hist_entry__cmp_delta_idx;
> > +		break;
> > +	case COMPUTE_RATIO:
> > +		fmt->sort = hist_entry__cmp_ratio_idx;
> > +		break;
> > +	case COMPUTE_WEIGHTED_DIFF:
> > +		fmt->sort = hist_entry__cmp_wdiff_idx;
> > +		break;
> > +	default:
> > +		BUG_ON(1);
> > +	}
> > +
> > +	list_add(&fmt->sort_list, &perf_hpp__sort_list);
> > +	return 0;
> 
> so the first 'fmt' which gets to sorting is the one for
> data__files[sort_idx] file, that sounds good..
> 
> but as the sorting goes through all the perf_hpp__sort_list list,
> it will hit the 'sort_idx' data again.. should you disable sort
> function for its 'fmt' then?

Do you really think it's needed?

Yes, it'll be called twice but I think it's a relatively rare case as
most entries will be sorted by the sort_idx and baseline columns.

Thanks,
Namhyung


* Re: [PATCH 5/5] perf diff: Fix -o/--order option behavior
From: Jiri Olsa @ 2015-01-07  8:10 UTC (permalink / raw)
  To: Namhyung Kim; +Cc: Arnaldo Carvalho de Melo, LKML, Ingo Molnar, Peter Zijlstra

On Wed, Jan 07, 2015 at 04:47:06PM +0900, Namhyung Kim wrote:
> On Sun, Jan 04, 2015 at 07:26:56PM +0100, Jiri Olsa wrote:
> > On Sat, Dec 27, 2014 at 02:06:33PM +0900, Namhyung Kim wrote:
> > 
> > SNIP
> > 
> > > +		return 0;
> > > +
> > > +	fmt = zalloc(sizeof(*fmt));
> > > +	if (fmt == NULL) {
> > > +		pr_err("Memory allocation failed\n");
> > > +		return -1;
> > > +	}
> > > +
> > > +	fmt->cmp      = hist_entry__cmp_nop;
> > > +	fmt->collapse = hist_entry__cmp_nop;
> > > +
> > > +	switch (compute) {
> > > +	case COMPUTE_DELTA:
> > > +		fmt->sort = hist_entry__cmp_delta_idx;
> > > +		break;
> > > +	case COMPUTE_RATIO:
> > > +		fmt->sort = hist_entry__cmp_ratio_idx;
> > > +		break;
> > > +	case COMPUTE_WEIGHTED_DIFF:
> > > +		fmt->sort = hist_entry__cmp_wdiff_idx;
> > > +		break;
> > > +	default:
> > > +		BUG_ON(1);
> > > +	}
> > > +
> > > +	list_add(&fmt->sort_list, &perf_hpp__sort_list);
> > > +	return 0;
> > 
> > so the first 'fmt' which gets to sorting is the one for
> > data__files[sort_idx] file, that sounds good..
> > 
> > but as the sorting goes through all the perf_hpp__sort_list list,
> > it will hit the 'sort_idx' data again.. should you disable sort
> > function for its 'fmt' then?
> 
> Do you really think it's needed?
> 
> Yes, it'll be called twice but I think it's a relatively rare case as
> most entries will be sorted by the sort_idx and baseline columns.

I did not think it through completely.. that's why I asked ;-)

but I think you're right and it's not a big deal, maybe just
add some comment that this is happening and we don't think
it's too bad.. or something like that ;-)

thanks,
jirka


* Re: [PATCH 5/5] perf diff: Fix -o/--order option behavior
From: Namhyung Kim @ 2015-01-07 13:31 UTC (permalink / raw)
  To: Jiri Olsa; +Cc: Arnaldo Carvalho de Melo, LKML, Ingo Molnar, Peter Zijlstra

On Wed, Jan 7, 2015 at 5:10 PM, Jiri Olsa <jolsa@redhat.com> wrote:
> On Wed, Jan 07, 2015 at 04:47:06PM +0900, Namhyung Kim wrote:
>> On Sun, Jan 04, 2015 at 07:26:56PM +0100, Jiri Olsa wrote:
>> > On Sat, Dec 27, 2014 at 02:06:33PM +0900, Namhyung Kim wrote:
>> >
>> > SNIP
>> >
>> > > +         return 0;
>> > > +
>> > > + fmt = zalloc(sizeof(*fmt));
>> > > + if (fmt == NULL) {
>> > > +         pr_err("Memory allocation failed\n");
>> > > +         return -1;
>> > > + }
>> > > +
>> > > + fmt->cmp      = hist_entry__cmp_nop;
>> > > + fmt->collapse = hist_entry__cmp_nop;
>> > > +
>> > > + switch (compute) {
>> > > + case COMPUTE_DELTA:
>> > > +         fmt->sort = hist_entry__cmp_delta_idx;
>> > > +         break;
>> > > + case COMPUTE_RATIO:
>> > > +         fmt->sort = hist_entry__cmp_ratio_idx;
>> > > +         break;
>> > > + case COMPUTE_WEIGHTED_DIFF:
>> > > +         fmt->sort = hist_entry__cmp_wdiff_idx;
>> > > +         break;
>> > > + default:
>> > > +         BUG_ON(1);
>> > > + }
>> > > +
>> > > + list_add(&fmt->sort_list, &perf_hpp__sort_list);
>> > > + return 0;
>> >
>> > so the first 'fmt' which gets to sorting is the one for
>> > data__files[sort_idx] file, that sounds good..
>> >
>> > but as the sorting goes through all the perf_hpp__sort_list list,
>> > it will hit the 'sort_idx' data again.. should you disable sort
>> > function for its 'fmt' then?
>>
>> Do you really think it's needed?
>>
>> Yes, it'll be called twice but I think it's a relatively rare case as
>> most entries will be sorted by the sort_idx and baseline columns.
>
> I did not think it through completely.. thats why I asked ;-)
>
> but I think you're right and it's not a big deal, maybe just
> add some comment that this is happening and we dont think
> it's too bad.. or something like that ;-)

OK, I'll add the comment.

Thanks,
Namhyung


* Re: [PATCH 3/5] perf diff: Print diff result more precisely
From: Namhyung Kim @ 2015-01-07 14:02 UTC (permalink / raw)
  To: Jiri Olsa; +Cc: Arnaldo Carvalho de Melo, LKML, Ingo Molnar, Peter Zijlstra

On Mon, Jan 5, 2015 at 1:47 AM, Jiri Olsa <jolsa@redhat.com> wrote:
> On Sat, Dec 27, 2014 at 02:06:31PM +0900, Namhyung Kim wrote:
>> Current perf diff result is somewhat confusing since it sometimes hide
>> small result and sometimes there's no result.  So do not hide small
>> result (less than 0.01%) and print "N/A" if baseline is not
>> recorded (for ratio and wdiff only).  Blank means the baseline is
>> available but its pairs are not.
>
> I think we should document this, especialy when diff has different
> behaviour from the rest of the computation.. :-\

Well, probably I'm missing something but I don't know what needs to be
documented.  Currently perf diff doesn't show small results, so it's
hard to distinguish entries that have a small result from those that
have none.  But to sort the output properly, it needs to show the
results, and I think it's natural not to hide the result from the user.


>
> anyway for the change itself:
>
> Acked-by: Jiri Olsa <jolsa@kernel.org>

Thanks,
Namhyung


* [tip:perf/urgent] perf diff: Fix to sort by baseline field by default
From: tip-bot for Namhyung Kim @ 2015-01-08  9:53 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: a.p.zijlstra, linux-kernel, mingo, acme, namhyung, tglx, hpa, jolsa

Commit-ID:  e7024fc3783317608b8e07048116a72a7d1cd26d
Gitweb:     http://git.kernel.org/tip/e7024fc3783317608b8e07048116a72a7d1cd26d
Author:     Namhyung Kim <namhyung@kernel.org>
AuthorDate: Sat, 27 Dec 2014 14:06:29 +0900
Committer:  Arnaldo Carvalho de Melo <acme@redhat.com>
CommitDate: Fri, 2 Jan 2015 23:27:18 -0300

perf diff: Fix to sort by baseline field by default

Currently perf diff does not add the baseline and delta (or other
compute) fields to the sort list, so the output is sorted by other
fields like the alphabetical order of DSO or symbol, as in the example
below.

Fix it by adding hpp formats for the fields and providing default
compare functions.

Before:

  $ perf diff
  # Event 'cycles'
  #
  # Baseline    Delta  Shared Object       Symbol
  # ........  .......  ..................  ...............................
  #
                       [bridge]            [k] ip_sabotage_in
                       [btrfs]             [k] __etree_search.constprop.47
       0.01%           [btrfs]             [k] btrfs_file_mmap
       0.01%   -0.01%  [btrfs]             [k] btrfs_getattr
                       [e1000e]            [k] e1000_watchdog
       0.00%           [kernel.vmlinux]    [k] PageHuge
       0.00%           [kernel.vmlinux]    [k] __acct_update_integrals
       0.00%           [kernel.vmlinux]    [k] __activate_page
                       [kernel.vmlinux]    [k] __alloc_fd
       0.02%   +0.02%  [kernel.vmlinux]    [k] __alloc_pages_nodemask
       ...

After:

  # Baseline    Delta  Shared Object       Symbol
  # ........  .......  ..................  ................................
  #
      24.73%   -4.62%  perf                [.] append_chain_children
       7.96%   -1.29%  perf                [.] dso__find_symbol
       6.97%   -2.07%  libc-2.20.so        [.] vfprintf
       4.61%   +0.88%  libc-2.20.so        [.] __fprintf_chk
       4.41%   +2.43%  perf                [.] sort__comm_cmp
       4.10%   -0.16%  perf                [.] comm__str
       4.03%   -0.93%  perf                [.] machine__findnew_thread_time
       3.82%   +3.09%  perf                [.] __hists__add_entry
       2.95%   -0.18%  perf                [.] sort__dso_cmp
       ...

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1419656793-32756-1-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
 tools/perf/builtin-diff.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 44 insertions(+)

diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 303c1e1..1fd96c1 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
 	return __hist_entry__cmp_compute(p_left, p_right, c);
 }
 
+static int64_t
+hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+		    struct hist_entry *right __maybe_unused)
+{
+	return 0;
+}
+
+static int64_t
+hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+{
+	if (sort_compute)
+		return 0;
+
+	if (left->stat.period == right->stat.period)
+		return 0;
+	return left->stat.period > right->stat.period ? 1 : -1;
+}
+
+static int64_t
+hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+}
+
+static int64_t
+hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+}
+
+static int64_t
+hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+{
+	return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+}
+
 static void insert_hist_entry_by_compute(struct rb_root *root,
 					 struct hist_entry *he,
 					 int c)
@@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx)
 	fmt->header = hpp__header;
 	fmt->width  = hpp__width;
 	fmt->entry  = hpp__entry_global;
+	fmt->cmp    = hist_entry__cmp_nop;
+	fmt->collapse = hist_entry__cmp_nop;
 
 	/* TODO more colors */
 	switch (idx) {
 	case PERF_HPP_DIFF__BASELINE:
 		fmt->color = hpp__color_baseline;
+		fmt->sort  = hist_entry__cmp_baseline;
 		break;
 	case PERF_HPP_DIFF__DELTA:
 		fmt->color = hpp__color_delta;
+		fmt->sort  = hist_entry__cmp_delta;
 		break;
 	case PERF_HPP_DIFF__RATIO:
 		fmt->color = hpp__color_ratio;
+		fmt->sort  = hist_entry__cmp_ratio;
 		break;
 	case PERF_HPP_DIFF__WEIGHTED_DIFF:
 		fmt->color = hpp__color_wdiff;
+		fmt->sort  = hist_entry__cmp_wdiff;
 		break;
 	default:
+		fmt->sort  = hist_entry__cmp_nop;
 		break;
 	}
 
 	init_header(d, dfmt);
 	perf_hpp__column_register(fmt);
+	perf_hpp__register_sort_field(fmt);
 }
 
 static void ui_init(void)


* [tip:perf/core] perf diff: Get rid of hists__compute_resort()
From: tip-bot for Namhyung Kim @ 2015-01-28 15:06 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: jolsa, jolsa, tglx, acme, mingo, namhyung, linux-kernel,
	a.p.zijlstra, hpa

Commit-ID:  38259a170dfab4865968b592ae90b373e9f7d5b5
Gitweb:     http://git.kernel.org/tip/38259a170dfab4865968b592ae90b373e9f7d5b5
Author:     Namhyung Kim <namhyung@kernel.org>
AuthorDate: Sat, 27 Dec 2014 14:06:30 +0900
Committer:  Arnaldo Carvalho de Melo <acme@redhat.com>
CommitDate: Wed, 21 Jan 2015 13:24:33 -0300

perf diff: Get rid of hists__compute_resort()

The hists__compute_resort() function sorts the output based on the
given field/criteria.  This was done without the sort list, but now
that the field has been added to the sort list, we can do it with the
normal hists__output_resort() using the ->sort callback.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1419656793-32756-2-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
 tools/perf/builtin-diff.c | 59 +++--------------------------------------------
 1 file changed, 3 insertions(+), 56 deletions(-)

diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 318ab9c..72c718e 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -581,68 +581,15 @@ hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
 	return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
 }
 
-static void insert_hist_entry_by_compute(struct rb_root *root,
-					 struct hist_entry *he,
-					 int c)
-{
-	struct rb_node **p = &root->rb_node;
-	struct rb_node *parent = NULL;
-	struct hist_entry *iter;
-
-	while (*p != NULL) {
-		parent = *p;
-		iter = rb_entry(parent, struct hist_entry, rb_node);
-		if (hist_entry__cmp_compute(he, iter, c) < 0)
-			p = &(*p)->rb_left;
-		else
-			p = &(*p)->rb_right;
-	}
-
-	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, root);
-}
-
-static void hists__compute_resort(struct hists *hists)
-{
-	struct rb_root *root;
-	struct rb_node *next;
-
-	if (sort__need_collapse)
-		root = &hists->entries_collapsed;
-	else
-		root = hists->entries_in;
-
-	hists->entries = RB_ROOT;
-	next = rb_first(root);
-
-	hists__reset_stats(hists);
-	hists__reset_col_len(hists);
-
-	while (next != NULL) {
-		struct hist_entry *he;
-
-		he = rb_entry(next, struct hist_entry, rb_node_in);
-		next = rb_next(&he->rb_node_in);
-
-		insert_hist_entry_by_compute(&hists->entries, he, compute);
-		hists__inc_stats(hists, he);
-
-		if (!he->filtered)
-			hists__calc_col_len(hists, he);
-	}
-}
-
 static void hists__process(struct hists *hists)
 {
 	if (show_baseline_only)
 		hists__baseline_only(hists);
 
-	if (sort_compute) {
+	if (sort_compute)
 		hists__precompute(hists);
-		hists__compute_resort(hists);
-	} else {
-		hists__output_resort(hists, NULL);
-	}
+
+	hists__output_resort(hists, NULL);
 
 	hists__fprintf(hists, true, 0, 0, 0, stdout);
 }


* [tip:perf/core] perf diff: Print diff result more precisely
From: tip-bot for Namhyung Kim @ 2015-01-28 15:07 UTC (permalink / raw)
  To: linux-tip-commits
  Cc: a.p.zijlstra, jolsa, mingo, linux-kernel, acme, tglx, jolsa,
	namhyung, hpa

Commit-ID:  ec3d07cb630da5da3ccfdf2b2f5472cadedb9470
Gitweb:     http://git.kernel.org/tip/ec3d07cb630da5da3ccfdf2b2f5472cadedb9470
Author:     Namhyung Kim <namhyung@kernel.org>
AuthorDate: Sat, 27 Dec 2014 14:06:31 +0900
Committer:  Arnaldo Carvalho de Melo <acme@redhat.com>
CommitDate: Wed, 21 Jan 2015 13:24:34 -0300

perf diff: Print diff result more precisely

The current perf diff output is somewhat confusing since it sometimes
hides small results and sometimes there's no result at all.  So do not
hide small results (less than 0.01%) and print "N/A" if the baseline is
not recorded (for ratio and wdiff only).  A blank means the baseline is
available but its pairs are not.

Before:

  # Baseline    Delta  Shared Object      Symbol
  # ........  .......  .................  .........................
  #
       ...
       0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
       0.01%           [kernel.kallsyms]  [k] scheduler_tick
       0.01%           [kernel.kallsyms]  [k] native_read_msr_safe
       0.00%           [kernel.kallsyms]  [k] __rcu_read_unlock
                       [kernel.kallsyms]  [k] _raw_spin_lock
               +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
                       [kernel.kallsyms]  [k] read_tsc

After:

  # Baseline    Delta  Shared Object      Symbol
  # ........  .......  .................  .........................
  #
       ...
       0.01%   -0.01%  [kernel.kallsyms]  [k] native_write_msr_safe
       0.01%           [kernel.kallsyms]  [k] scheduler_tick
       0.01%           [kernel.kallsyms]  [k] native_read_msr_safe
       0.00%           [kernel.kallsyms]  [k] __rcu_read_unlock
               +0.01%  [kernel.kallsyms]  [k] _raw_spin_lock
               +0.01%  [kernel.kallsyms]  [k] apic_timer_interrupt
               +0.01%  [kernel.kallsyms]  [k] read_tsc

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1419656793-32756-3-git-send-email-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
---
 tools/perf/builtin-diff.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 72c718e..3f86737 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -788,7 +788,7 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
 	char pfmt[20] = " ";
 
 	if (!pair)
-		goto dummy_print;
+		goto no_print;
 
 	switch (comparison_method) {
 	case COMPUTE_DELTA:
@@ -797,8 +797,6 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
 		else
 			diff = compute_delta(he, pair);
 
-		if (fabs(diff) < 0.01)
-			goto dummy_print;
 		scnprintf(pfmt, 20, "%%%+d.2f%%%%", dfmt->header_width - 1);
 		return percent_color_snprintf(hpp->buf, hpp->size,
 					pfmt, diff);
@@ -830,6 +828,9 @@ static int __hpp__color_compare(struct perf_hpp_fmt *fmt,
 	}
 dummy_print:
 	return scnprintf(hpp->buf, hpp->size, "%*s",
+			dfmt->header_width, "N/A");
+no_print:
+	return scnprintf(hpp->buf, hpp->size, "%*s",
 			dfmt->header_width, pfmt);
 }
 
@@ -879,14 +880,15 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
 		else
 			diff = compute_delta(he, pair);
 
-		if (fabs(diff) >= 0.01)
-			scnprintf(buf, size, "%+4.2F%%", diff);
+		scnprintf(buf, size, "%+4.2F%%", diff);
 		break;
 
 	case PERF_HPP_DIFF__RATIO:
 		/* No point for ratio number if we are dummy.. */
-		if (he->dummy)
+		if (he->dummy) {
+			scnprintf(buf, size, "N/A");
 			break;
+		}
 
 		if (pair->diff.computed)
 			ratio = pair->diff.period_ratio;
@@ -899,8 +901,10 @@ hpp__entry_pair(struct hist_entry *he, struct hist_entry *pair,
 
 	case PERF_HPP_DIFF__WEIGHTED_DIFF:
 		/* No point for wdiff number if we are dummy.. */
-		if (he->dummy)
+		if (he->dummy) {
+			scnprintf(buf, size, "N/A");
 			break;
+		}
 
 		if (pair->diff.computed)
 			wdiff = pair->diff.wdiff;

