* [PATCH V2 01/13] perf/core: Add new branch sample type for LBR TOS
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-22 9:39 ` Peter Zijlstra
2019-10-21 20:03 ` [PATCH V2 02/13] perf/x86/intel: Output LBR TOS information kan.liang
` (11 subsequent siblings)
12 siblings, 1 reply; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
In LBR call stack mode, the depth of the reconstructed LBR call stack
is limited to the number of LBR registers. With the LBR Top-of-Stack
(TOS) information, the perf tool can stitch the stacks of two samples,
so the reconstructed LBR call stack is no longer bound by the HW
limitation.
Add a new branch sample type to retrieve the LBR TOS.
The TOS information is dumped into the PERF_SAMPLE_BRANCH_STACK output
only when the new branch sample type is set.
The perf tool should check attr.branch_sample_type and apply the
corresponding format to PERF_SAMPLE_BRANCH_STACK samples. Otherwise,
some use cases may break. For example, users may parse a perf.data
file which includes the new branch sample type with an old version of
the perf tool (without the check), and probably get incorrect
information without any warning.
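For illustration, a minimal user-space sketch of requesting the TOS
(the event setup around the new bit is an assumption for illustration;
only PERF_SAMPLE_BRANCH_LBR_TOS and the appended u64 tos come from this
patch):

	struct perf_event_attr attr = {
		.type			= PERF_TYPE_HARDWARE,
		.config			= PERF_COUNT_HW_CPU_CYCLES,
		.sample_type		= PERF_SAMPLE_BRANCH_STACK,
		.branch_sample_type	= PERF_SAMPLE_BRANCH_USER |
					  PERF_SAMPLE_BRANCH_CALL_STACK |
					  PERF_SAMPLE_BRANCH_LBR_TOS,
	};

	/*
	 * The PERF_SAMPLE_BRANCH_STACK payload then becomes:
	 *	u64 nr;
	 *	{ u64 from, to, flags } lbr[nr];
	 *	u64 tos;	<- present only because LBR_TOS is set
	 */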
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
include/linux/perf_event.h | 4 ++++
include/uapi/linux/perf_event.h | 10 +++++++++-
kernel/events/core.c | 10 ++++++++++
3 files changed, 23 insertions(+), 1 deletion(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 61448c19a132..0cebc8ec44fa 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -972,6 +972,10 @@ struct perf_sample_data {
u64 stack_user_size;
u64 phys_addr;
+
+ /* PMU specific data */
+ u64 lbr_tos;
+
} ____cacheline_aligned;
/* default value for data source */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index bb7b271397a6..b1f022190571 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -180,6 +180,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */
+ PERF_SAMPLE_BRANCH_LBR_TOS_SHIFT = 17, /* save LBR TOS */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -207,6 +209,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_TYPE_SAVE =
1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_LBR_TOS = 1U << PERF_SAMPLE_BRANCH_LBR_TOS_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -849,7 +853,11 @@ enum perf_event_type {
* char data[size];}&& PERF_SAMPLE_RAW
*
* { u64 nr;
- * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+ * { u64 from, to, flags } lbr[nr];
+ *
+ * # only available if PERF_SAMPLE_BRANCH_LBR_TOS is set
+ * u64 tos;
+ * } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 275eae05af20..3c1f88352404 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6318,6 +6318,11 @@ static void perf_output_read(struct perf_output_handle *handle,
perf_output_read_one(handle, event, enabled, running);
}
+static inline bool perf_sample_save_lbr_tos(struct perf_event *event)
+{
+ return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_LBR_TOS;
+}
+
void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
@@ -6407,6 +6412,8 @@ void perf_output_sample(struct perf_output_handle *handle,
perf_output_put(handle, data->br_stack->nr);
perf_output_copy(handle, data->br_stack->entries, size);
+ if (perf_sample_save_lbr_tos(event))
+ perf_output_put(handle, data->lbr_tos);
} else {
/*
* we always store at least the value of nr
@@ -6595,6 +6602,9 @@ void perf_prepare_sample(struct perf_event_header *header,
size += data->br_stack->nr
* sizeof(struct perf_branch_entry);
}
+ if (perf_sample_save_lbr_tos(event))
+ size += sizeof(u64);
+
header->size += size;
}
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* Re: [PATCH V2 01/13] perf/core: Add new branch sample type for LBR TOS
2019-10-21 20:03 ` [PATCH V2 01/13] perf/core: Add new branch sample type for LBR TOS kan.liang
@ 2019-10-22 9:39 ` Peter Zijlstra
2019-10-22 15:39 ` Liang, Kan
0 siblings, 1 reply; 16+ messages in thread
From: Peter Zijlstra @ 2019-10-22 9:39 UTC (permalink / raw)
To: kan.liang
Cc: acme, mingo, linux-kernel, jolsa, namhyung, vitaly.slobodskoy,
pavel.gerasimov, ak, eranian
On Mon, Oct 21, 2019 at 01:03:02PM -0700, kan.liang@linux.intel.com wrote:
> From: Kan Liang <kan.liang@linux.intel.com>
>
> In LBR call stack mode, the depth of the reconstructed LBR call stack
> is limited to the number of LBR registers. With the LBR Top-of-Stack
> (TOS) information, the perf tool can stitch the stacks of two samples,
> so the reconstructed LBR call stack is no longer bound by the HW
> limitation.
>
> Add a new branch sample type to retrieve the LBR TOS.
>
> The TOS information is dumped into the PERF_SAMPLE_BRANCH_STACK output
> only when the new branch sample type is set.
> The perf tool should check attr.branch_sample_type and apply the
> corresponding format to PERF_SAMPLE_BRANCH_STACK samples. Otherwise,
> some use cases may break. For example, users may parse a perf.data
> file which includes the new branch sample type with an old version of
> the perf tool (without the check), and probably get incorrect
> information without any warning.
>
> Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
> ---
> include/linux/perf_event.h | 4 ++++
> include/uapi/linux/perf_event.h | 10 +++++++++-
> kernel/events/core.c | 10 ++++++++++
> 3 files changed, 23 insertions(+), 1 deletion(-)
>
> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
> index 61448c19a132..0cebc8ec44fa 100644
> --- a/include/linux/perf_event.h
> +++ b/include/linux/perf_event.h
> @@ -972,6 +972,10 @@ struct perf_sample_data {
> u64 stack_user_size;
>
> u64 phys_addr;
> +
> + /* PMU specific data */
> + u64 lbr_tos;
> +
> } ____cacheline_aligned;
Last time you put this in perf_branch_stack, that was a much better
place. Can't this work now?
^ permalink raw reply [flat|nested] 16+ messages in thread
* Re: [PATCH V2 01/13] perf/core: Add new branch sample type for LBR TOS
2019-10-22 9:39 ` Peter Zijlstra
@ 2019-10-22 15:39 ` Liang, Kan
0 siblings, 0 replies; 16+ messages in thread
From: Liang, Kan @ 2019-10-22 15:39 UTC (permalink / raw)
To: Peter Zijlstra
Cc: acme, mingo, linux-kernel, jolsa, namhyung, vitaly.slobodskoy,
pavel.gerasimov, ak, eranian
On 10/22/2019 5:39 AM, Peter Zijlstra wrote:
> On Mon, Oct 21, 2019 at 01:03:02PM -0700, kan.liang@linux.intel.com wrote:
>> From: Kan Liang <kan.liang@linux.intel.com>
>>
>> In LBR call stack mode, the depth of the reconstructed LBR call stack
>> is limited to the number of LBR registers. With the LBR Top-of-Stack
>> (TOS) information, the perf tool can stitch the stacks of two samples,
>> so the reconstructed LBR call stack is no longer bound by the HW
>> limitation.
>>
>> Add a new branch sample type to retrieve the LBR TOS.
>>
>> The TOS information is dumped into the PERF_SAMPLE_BRANCH_STACK output
>> only when the new branch sample type is set.
>> The perf tool should check attr.branch_sample_type and apply the
>> corresponding format to PERF_SAMPLE_BRANCH_STACK samples. Otherwise,
>> some use cases may break. For example, users may parse a perf.data
>> file which includes the new branch sample type with an old version of
>> the perf tool (without the check), and probably get incorrect
>> information without any warning.
>>
>> Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
>> ---
>> include/linux/perf_event.h | 4 ++++
>> include/uapi/linux/perf_event.h | 10 +++++++++-
>> kernel/events/core.c | 10 ++++++++++
>> 3 files changed, 23 insertions(+), 1 deletion(-)
>>
>> diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
>> index 61448c19a132..0cebc8ec44fa 100644
>> --- a/include/linux/perf_event.h
>> +++ b/include/linux/perf_event.h
>> @@ -972,6 +972,10 @@ struct perf_sample_data {
>> u64 stack_user_size;
>>
>> u64 phys_addr;
>> +
>> + /* PMU specific data */
>> + u64 lbr_tos;
>> +
>> } ____cacheline_aligned;
>
> Last time you put this in perf_branch_stack, that was a much better
> place. Can't this work now?
It should still work.
I just thought that perf_branch_stack is a generic structure for
branches, while the TOS is Intel specific for the LBR call stack, so it
seemed better to move it out. Also, I wanted to keep it consistent with
the perf tool's struct branch_stack. But those are not big deals.
I will move tos to perf_branch_stack in V3 as below.
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 0cebc8ec44fa..c8bf40e608b6 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -92,6 +92,7 @@ struct perf_raw_record {
/*
* branch stack layout:
* nr: number of taken branches stored in entries[]
+ * tos: Top-of-Stack (TOS) information. PMU specific data.
*
* Note that nr can vary from sample to sample
* branches (to, from) are stored from most recent
@@ -100,6 +101,7 @@ struct perf_raw_record {
*/
struct perf_branch_stack {
__u64 nr;
+ __u64 tos; /* PMU specific data */
struct perf_branch_entry entries[0];
};
Thanks,
Kan
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 02/13] perf/x86/intel: Output LBR TOS information
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
2019-10-21 20:03 ` [PATCH V2 01/13] perf/core: Add new branch sample type for LBR TOS kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 03/13] perf tools: Support new branch sample type for LBR TOS kan.liang
` (10 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
A new branch sample type was introduced to request the LBR Top-of-Stack
(TOS) information.
For non-adaptive PEBS and non-PEBS, the TOS information can be directly
retrieved from the TOS MSR read in intel_pmu_lbr_read().
For adaptive PEBS, the LBR information stored in the PEBS record doesn't
include the TOS. For single PEBS, the TOS can still be read directly
from the MSR, because the PMI is triggered immediately after the PEBS
record is written, so the TOS MSR is still unchanged.
For large PEBS, the TOS MSR holds a stale value. Set the TOS to -1ULL
to indicate that the information is not available.
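Consumers must therefore treat -1ULL as "TOS not available". A minimal
sketch of the consumer-side check (the perf_sample field follows the
tooling patch later in this series):

	if (sample->lbr_tos == (u64)-1ULL) {
		/*
		 * Large PEBS: the TOS MSR was stale when the record was
		 * written, so the physical LBR indices cannot be
		 * recovered and TOS-based processing must be skipped.
		 */
	}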
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
arch/x86/events/intel/core.c | 4 +++-
arch/x86/events/intel/ds.c | 5 ++++-
arch/x86/events/intel/lbr.c | 9 +++++++++
arch/x86/events/perf_event.h | 1 +
4 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 27ee47a7be66..d9e5611b0282 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2411,8 +2411,10 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
perf_sample_data_init(&data, 0, event->hw.last_period);
- if (has_branch_stack(event))
+ if (has_branch_stack(event)) {
data.br_stack = &cpuc->lbr_stack;
+ data.lbr_tos = cpuc->lbr_tos;
+ }
if (perf_event_overflow(event, &data, regs))
x86_pmu_stop(event, 0);
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index ce83950036c5..29355175fdea 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1474,8 +1474,10 @@ static void setup_pebs_fixed_sample_data(struct perf_event *event,
event->attr.use_clockid == 0)
data->time = native_sched_clock_from_tsc(pebs->tsc);
- if (has_branch_stack(event))
+ if (has_branch_stack(event)) {
data->br_stack = &cpuc->lbr_stack;
+ data->lbr_tos = cpuc->lbr_tos;
+ }
}
static void adaptive_pebs_save_regs(struct pt_regs *regs,
@@ -1602,6 +1604,7 @@ static void setup_pebs_adaptive_sample_data(struct perf_event *event,
if (has_branch_stack(event)) {
intel_pmu_store_pebs_lbrs(lbr);
data->br_stack = &cpuc->lbr_stack;
+ data->lbr_tos = cpuc->lbr_tos;
}
}
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index ea54634eabf3..39412b819290 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -562,6 +562,7 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc)
cpuc->lbr_entries[i].reserved = 0;
}
cpuc->lbr_stack.nr = i;
+ cpuc->lbr_tos = tos;
}
/*
@@ -657,6 +658,7 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
out++;
}
cpuc->lbr_stack.nr = out;
+ cpuc->lbr_tos = tos;
}
void intel_pmu_lbr_read(void)
@@ -1097,6 +1099,13 @@ void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr)
int i;
cpuc->lbr_stack.nr = x86_pmu.lbr_nr;
+
+ /* Cannot get TOS for large PEBS */
+ if (cpuc->n_pebs == cpuc->n_large_pebs)
+ cpuc->lbr_tos = -1ULL;
+ else
+ cpuc->lbr_tos = intel_pmu_lbr_tos();
+
for (i = 0; i < x86_pmu.lbr_nr; i++) {
u64 info = lbr->lbr[i].info;
struct perf_branch_entry *e = &cpuc->lbr_entries[i];
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ecacfbf4ebc1..deaf3935f627 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -234,6 +234,7 @@ struct cpu_hw_events {
struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
struct er_account *lbr_sel;
u64 br_sel;
+ u64 lbr_tos;
struct x86_perf_task_context *last_task_ctx;
int last_log_id;
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 03/13] perf tools: Support new branch sample type for LBR TOS
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
2019-10-21 20:03 ` [PATCH V2 01/13] perf/core: Add new branch sample type for LBR TOS kan.liang
2019-10-21 20:03 ` [PATCH V2 02/13] perf/x86/intel: Output LBR TOS information kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 04/13] perf header: Add check for event attr kan.liang
` (9 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
Support the new branch sample type for the LBR TOS.
Enable LBR_TOS by default in LBR call stack mode. If the kernel doesn't
support the sample type, switch it off.
Add a new branch option "tos" for the new branch sample type.
Set tos to -1ULL if the LBR TOS information is unavailable.
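Illustrative invocations (the "tos" option is the one added by this
patch; the rest is standard perf usage):

	# LBR call stack mode: PERF_SAMPLE_BRANCH_LBR_TOS is now set by
	# default, and silently dropped on kernels without the feature
	perf record --call-graph lbr -- ./workload

	# request the TOS explicitly via the new branch option
	perf record -j any,u,tos -- ./workload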
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
tools/include/uapi/linux/perf_event.h | 10 +++++++++-
tools/perf/util/event.h | 1 +
tools/perf/util/evsel.c | 20 +++++++++++++++++---
tools/perf/util/evsel.h | 6 ++++++
tools/perf/util/parse-branch-options.c | 1 +
tools/perf/util/perf_event_attr_fprintf.c | 1 +
6 files changed, 35 insertions(+), 4 deletions(-)
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index bb7b271397a6..b1f022190571 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -180,6 +180,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */
+ PERF_SAMPLE_BRANCH_LBR_TOS_SHIFT = 17, /* save LBR TOS */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -207,6 +209,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_TYPE_SAVE =
1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_LBR_TOS = 1U << PERF_SAMPLE_BRANCH_LBR_TOS_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -849,7 +853,11 @@ enum perf_event_type {
* char data[size];}&& PERF_SAMPLE_RAW
*
* { u64 nr;
- * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+ * { u64 from, to, flags } lbr[nr];
+ *
+ * # only available if PERF_SAMPLE_BRANCH_LBR_TOS is set
+ * u64 tos;
+ * } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index a0a0c91cde4a..98794758546b 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -130,6 +130,7 @@ struct perf_sample {
u32 raw_size;
u64 data_src;
u64 phys_addr;
+ u64 lbr_tos;
u32 flags;
u16 insn_len;
u8 cpumode;
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index abc7fda4a0fe..6b91897e4ca2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -712,7 +712,8 @@ static void __perf_evsel__config_callchain(struct evsel *evsel,
attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
PERF_SAMPLE_BRANCH_CALL_STACK |
PERF_SAMPLE_BRANCH_NO_CYCLES |
- PERF_SAMPLE_BRANCH_NO_FLAGS;
+ PERF_SAMPLE_BRANCH_NO_FLAGS |
+ PERF_SAMPLE_BRANCH_LBR_TOS;
}
} else
pr_warning("Cannot use LBR callstack with branch stack. "
@@ -763,7 +764,8 @@ perf_evsel__reset_callgraph(struct evsel *evsel,
if (param->record_mode == CALLCHAIN_LBR) {
perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
- PERF_SAMPLE_BRANCH_CALL_STACK);
+ PERF_SAMPLE_BRANCH_CALL_STACK |
+ PERF_SAMPLE_BRANCH_LBR_TOS);
}
if (param->record_mode == CALLCHAIN_DWARF) {
perf_evsel__reset_sample_bit(evsel, REGS_USER);
@@ -1641,6 +1643,8 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
evsel->core.attr.ksymbol = 0;
if (perf_missing_features.bpf)
evsel->core.attr.bpf_event = 0;
+ if (perf_missing_features.lbr_tos)
+ evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_LBR_TOS;
retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->core.attr.sample_id_all = 0;
@@ -1752,7 +1756,12 @@ int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
* Must probe features in the order they were added to the
* perf_event_attr interface.
*/
- if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
+ if (!perf_missing_features.lbr_tos &&
+ (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_LBR_TOS)) {
+ perf_missing_features.lbr_tos = true;
+ pr_debug2("switching off LBR TOS support\n");
+ goto fallback_missing_features;
+ } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
perf_missing_features.aux_output = true;
pr_debug2("Kernel has no attr.aux_output support, bailing out\n");
goto out_close;
@@ -2126,6 +2135,11 @@ int perf_evsel__parse_sample(struct evsel *evsel, union perf_event *event,
sz = data->branch_stack->nr * sizeof(struct branch_entry);
OVERFLOW_CHECK(array, sz, max_size);
array = (void *)array + sz;
+
+ if (perf_evsel__has_lbr_tos(evsel))
+ data->lbr_tos = *array++;
+ else
+ data->lbr_tos = -1ULL;
}
if (type & PERF_SAMPLE_REGS_USER) {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index ddc5ee6f6592..52f8a5401361 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -115,6 +115,7 @@ struct perf_missing_features {
bool ksymbol;
bool bpf;
bool aux_output;
+ bool lbr_tos;
};
extern struct perf_missing_features perf_missing_features;
@@ -377,6 +378,11 @@ for ((_evsel) = _leader; \
(_evsel) && (_evsel)->leader == (_leader); \
(_evsel) = list_entry((_evsel)->core.node.next, struct evsel, core.node))
+static inline bool perf_evsel__has_lbr_tos(const struct evsel *evsel)
+{
+ return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_LBR_TOS;
+}
+
static inline bool perf_evsel__has_branch_callstack(const struct evsel *evsel)
{
return evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK;
diff --git a/tools/perf/util/parse-branch-options.c b/tools/perf/util/parse-branch-options.c
index bb4aa88c50a8..1425c3525f17 100644
--- a/tools/perf/util/parse-branch-options.c
+++ b/tools/perf/util/parse-branch-options.c
@@ -32,6 +32,7 @@ static const struct branch_mode branch_modes[] = {
BRANCH_OPT("call", PERF_SAMPLE_BRANCH_CALL),
BRANCH_OPT("save_type", PERF_SAMPLE_BRANCH_TYPE_SAVE),
BRANCH_OPT("stack", PERF_SAMPLE_BRANCH_CALL_STACK),
+ BRANCH_OPT("tos", PERF_SAMPLE_BRANCH_LBR_TOS),
BRANCH_END
};
diff --git a/tools/perf/util/perf_event_attr_fprintf.c b/tools/perf/util/perf_event_attr_fprintf.c
index d4ad3f04923a..5c4047e7cc8d 100644
--- a/tools/perf/util/perf_event_attr_fprintf.c
+++ b/tools/perf/util/perf_event_attr_fprintf.c
@@ -50,6 +50,7 @@ static void __p_branch_sample_type(char *buf, size_t size, u64 value)
bit_name(ABORT_TX), bit_name(IN_TX), bit_name(NO_TX),
bit_name(COND), bit_name(CALL_STACK), bit_name(IND_JUMP),
bit_name(CALL), bit_name(NO_FLAGS), bit_name(NO_CYCLES),
+ bit_name(LBR_TOS),
{ .name = NULL, }
};
#undef bit_name
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 04/13] perf header: Add check for event attr
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (2 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 03/13] perf tools: Support new branch sample type for LBR TOS kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 05/13] perf pmu: Add support for PMU capabilities kan.liang
` (8 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
A perf.data file may be generated by a newer version of the perf tool,
which supports new input bits in attr, e.g. a new bit for
branch_sample_type.
That perf.data file may later be parsed by an older version of the perf
tool, which may parse it incorrectly, and there is no warning message
for this case.
The current perf header never checks for unknown input bits in attr.
When reading the event desc from the header, check the stored event
attr. The reserved bits, sample type, read format and branch sample
type are checked.
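For example, a perf tool built before this series has
PERF_SAMPLE_BRANCH_MAX at bit 17, so a perf.data carrying the new
LBR_TOS bit (bit 17) now trips the branch sample type check instead of
being silently misparsed:

	/* non-zero for any bit this tool does not know about */
	if (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX - 1)) {
		/*
		 * e.g. BIT(17) is set: warn "Unknown branch sample
		 * type" and reject the file, instead of misreading the
		 * extra u64 tos in every BRANCH_STACK sample.
		 */
	}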
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
tools/perf/util/header.c | 38 ++++++++++++++++++++++++++++++++++++++
1 file changed, 38 insertions(+)
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 86d9396cb131..6c51404fbeef 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1597,6 +1597,41 @@ static void free_event_desc(struct evsel *events)
free(events);
}
+static bool perf_attr_check(struct perf_event_attr *attr)
+{
+ if (attr->__reserved_1) {
+ pr_warning("Unexpected reserved bits (0x%x) are detected. "
+ "Please update perf tool.\n",
+ attr->__reserved_1);
+ return false;
+ }
+
+ if (attr->sample_type & ~(PERF_SAMPLE_MAX-1)) {
+ pr_warning("Unknown sample type (0x%llx) is detected. "
+ "Please update perf tool.\n",
+ attr->sample_type);
+ return false;
+ }
+
+ if (attr->read_format & ~(PERF_FORMAT_MAX-1)) {
+ pr_warning("Unknown read format (0x%llx) is detected. "
+ "Please update perf tool.\n",
+ attr->read_format);
+ return false;
+ }
+
+ if ((attr->sample_type & PERF_SAMPLE_BRANCH_STACK) &&
+ (attr->branch_sample_type & ~(PERF_SAMPLE_BRANCH_MAX-1))) {
+ pr_warning("Unknown branch sample type (0x%llx) is detected. "
+ "Please update perf tool.\n",
+ attr->branch_sample_type);
+
+ return false;
+ }
+
+ return true;
+}
+
static struct evsel *read_event_desc(struct feat_fd *ff)
{
struct evsel *evsel, *events = NULL;
@@ -1641,6 +1676,9 @@ static struct evsel *read_event_desc(struct feat_fd *ff)
memcpy(&evsel->core.attr, buf, msz);
+ if (!perf_attr_check(&evsel->core.attr))
+ goto error;
+
if (do_read_u32(ff, &nr))
goto error;
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 05/13] perf pmu: Add support for PMU capabilities
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (3 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 04/13] perf header: Add check for event attr kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 06/13] perf header: Support CPU " kan.liang
` (7 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
The PMU capabilities information, which is located at
/sys/bus/event_source/devices/<dev>/caps, is required by the perf tool.
For example, the max LBR information is required to stitch the LBR call
stack.
Add perf_pmu__caps_parse() to parse the PMU capabilities information
and store it in a list.
Add perf_pmu__scan_caps() to scan the capabilities one by one.
The following patch will store the capabilities information in the perf
header.
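A minimal usage sketch (the printing is illustrative; the API is as
added below):

	struct perf_pmu *pmu = perf_pmu__find("cpu");
	struct perf_pmu_caps *caps = NULL;

	if (pmu && perf_pmu__caps_parse(pmu) > 0) {
		while ((caps = perf_pmu__scan_caps(pmu, caps)))
			pr_debug("cap: %s=%s\n", caps->name, caps->value);
	}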
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
tools/perf/util/pmu.c | 87 +++++++++++++++++++++++++++++++++++++++++++
tools/perf/util/pmu.h | 12 ++++++
2 files changed, 99 insertions(+)
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 5608da82ad23..d3dc9d4f9479 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -847,6 +847,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
INIT_LIST_HEAD(&pmu->format);
INIT_LIST_HEAD(&pmu->aliases);
+ INIT_LIST_HEAD(&pmu->caps);
list_splice(&format, &pmu->format);
list_splice(&aliases, &pmu->aliases);
list_add_tail(&pmu->list, &pmus);
@@ -1552,3 +1553,89 @@ int perf_pmu__scan_file(struct perf_pmu *pmu, const char *name, const char *fmt,
va_end(args);
return ret;
}
+
+static int perf_pmu__new_caps(struct list_head *list, char *name, char *value)
+{
+ struct perf_pmu_caps *caps;
+
+ caps = zalloc(sizeof(*caps));
+ if (!caps)
+ return -ENOMEM;
+
+ caps->name = strdup(name);
+ caps->value = strndup(value, strlen(value) - 1);
+ list_add_tail(&caps->list, list);
+ return 0;
+}
+
+/*
+ * Reading/parsing the given pmu capabilities, which should be located at:
+ * /sys/bus/event_source/devices/<dev>/caps as sysfs group attributes.
+ * Return the number of capabilities
+ */
+int perf_pmu__caps_parse(struct perf_pmu *pmu)
+{
+ struct stat st;
+ char caps_path[PATH_MAX];
+ const char *sysfs = sysfs__mountpoint();
+ DIR *caps_dir;
+ struct dirent *evt_ent;
+ int nr_caps = 0;
+
+ if (!sysfs)
+ return -1;
+
+ snprintf(caps_path, PATH_MAX,
+ "%s" EVENT_SOURCE_DEVICE_PATH "%s/caps", sysfs, pmu->name);
+
+ if (stat(caps_path, &st) < 0)
+ return 0; /* no error if caps does not exist */
+
+ caps_dir = opendir(caps_path);
+ if (!caps_dir)
+ return -EINVAL;
+
+ while ((evt_ent = readdir(caps_dir)) != NULL) {
+ char *name = evt_ent->d_name;
+ char path[PATH_MAX];
+ char value[128];
+ FILE *file;
+
+ if (!strcmp(name, ".") || !strcmp(name, ".."))
+ continue;
+
+ snprintf(path, PATH_MAX, "%s/%s", caps_path, name);
+
+ file = fopen(path, "r");
+ if (!file)
+ break;
+
+ if (!fgets(value, sizeof(value), file) ||
+ (perf_pmu__new_caps(&pmu->caps, name, value) < 0)) {
+ fclose(file);
+ break;
+ }
+
+ nr_caps++;
+ fclose(file);
+ }
+
+ closedir(caps_dir);
+
+ return nr_caps;
+}
+
+struct perf_pmu_caps *perf_pmu__scan_caps(struct perf_pmu *pmu,
+ struct perf_pmu_caps *caps)
+{
+ if (!pmu)
+ return NULL;
+
+ if (!caps)
+ caps = list_prepare_entry(caps, &pmu->caps, list);
+
+ list_for_each_entry_continue(caps, &pmu->caps, list)
+ return caps;
+
+ return NULL;
+}
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index f36ade6df76d..5ded4e3e28e4 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -21,6 +21,12 @@ enum {
struct perf_event_attr;
+struct perf_pmu_caps {
+ char *name;
+ char *value;
+ struct list_head list;
+};
+
struct perf_pmu {
char *name;
__u32 type;
@@ -31,6 +37,7 @@ struct perf_pmu {
struct perf_cpu_map *cpus;
struct list_head format; /* HEAD struct perf_pmu_format -> list */
struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
+ struct list_head caps; /* HEAD struct perf_pmu_caps -> list */
struct list_head list; /* ELEM */
};
@@ -98,4 +105,9 @@ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
+int perf_pmu__caps_parse(struct perf_pmu *pmu);
+
+struct perf_pmu_caps *perf_pmu__scan_caps(struct perf_pmu *pmu,
+ struct perf_pmu_caps *caps);
+
#endif /* __PMU_H */
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 06/13] perf header: Support CPU PMU capabilities
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (4 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 05/13] perf pmu: Add support for PMU capabilities kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 07/13] perf machine: Refine the function for LBR call stack reconstruction kan.liang
` (6 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
To stitch the LBR call stack, the max LBR information is required, so
the CPU PMU capabilities information has to be stored in the perf
header.
Add a new feature, HEADER_CPU_PMU_CAPS, for CPU PMU capabilities.
Retrieve all CPU PMU capabilities, not just the max LBR information.
Add a variable, max_branches, to facilitate future usage.
The CPU PMU capabilities information is only useful in LBR call stack
mode. Clear the feature for perf stat and other perf record modes.
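Once recorded, the stored capabilities can be read back from the
header (illustrative session; the capabilities line matches the
documentation example added below):

	$ perf record --call-graph lbr -- ./workload
	$ perf report --header-only
	...
	# cpu pmu capabilities: branches=32, max_precise=3, pmu_name=icelake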
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
.../Documentation/perf.data-file-format.txt | 16 +++
tools/perf/builtin-record.c | 3 +
tools/perf/builtin-stat.c | 1 +
tools/perf/util/env.h | 3 +
tools/perf/util/header.c | 110 ++++++++++++
tools/perf/util/header.h | 1 +
6 files changed, 134 insertions(+)
diff --git a/tools/perf/Documentation/perf.data-file-format.txt b/tools/perf/Documentation/perf.data-file-format.txt
index b0152e1095c5..b6472e463284 100644
--- a/tools/perf/Documentation/perf.data-file-format.txt
+++ b/tools/perf/Documentation/perf.data-file-format.txt
@@ -373,6 +373,22 @@ struct {
Indicates that trace contains records of PERF_RECORD_COMPRESSED type
that have perf_events records in compressed form.
+ HEADER_CPU_PMU_CAPS = 28,
+
+ A list of cpu PMU capabilities. The format of data is as below.
+
+struct {
+ u32 nr_cpu_pmu_caps;
+ {
+ char name[];
+ char value[];
+ } [nr_cpu_pmu_caps]
+};
+
+
+Example:
+ cpu pmu capabilities: branches=32, max_precise=3, pmu_name=icelake
+
other bits are reserved and should be ignored for now
HEADER_FEAT_BITS = 256,
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 23332861de6e..fbbeb1e625ef 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1057,6 +1057,9 @@ static void record__init_features(struct record *rec)
if (!record__comp_enabled(rec))
perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
+ if (!callchain_param.enabled || (callchain_param.record_mode != CALLCHAIN_LBR))
+ perf_header__clear_feat(&session->header, HEADER_CPU_PMU_CAPS);
+
perf_header__clear_feat(&session->header, HEADER_STAT);
}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 468fc49420ce..26bb9794e95a 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1418,6 +1418,7 @@ static void init_features(struct perf_session *session)
perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
+ perf_header__clear_feat(&session->header, HEADER_CPU_PMU_CAPS);
}
static int __cmd_record(int argc, const char **argv)
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index a3059dc1abe5..dae64e20b280 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -48,6 +48,7 @@ struct perf_env {
char *cpuid;
unsigned long long total_mem;
unsigned int msr_pmu_type;
+ unsigned int max_branches;
int nr_cmdline;
int nr_sibling_cores;
@@ -57,12 +58,14 @@ struct perf_env {
int nr_memory_nodes;
int nr_pmu_mappings;
int nr_groups;
+ int nr_cpu_pmu_caps;
char *cmdline;
const char **cmdline_argv;
char *sibling_cores;
char *sibling_dies;
char *sibling_threads;
char *pmu_mappings;
+ char *cpu_pmu_caps;
struct cpu_topology_map *cpu;
struct cpu_cache_level *caches;
int caches_cnt;
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 6c51404fbeef..16274ffd875e 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1402,6 +1402,39 @@ static int write_compressed(struct feat_fd *ff __maybe_unused,
return do_write(ff, &(ff->ph->env.comp_mmap_len), sizeof(ff->ph->env.comp_mmap_len));
}
+static int write_cpu_pmu_caps(struct feat_fd *ff,
+ struct evlist *evlist __maybe_unused)
+{
+ struct perf_pmu_caps *caps = NULL;
+ struct perf_pmu *cpu_pmu;
+ int nr_caps;
+ int ret;
+
+ cpu_pmu = perf_pmu__find("cpu");
+ if (!cpu_pmu)
+ return -ENOENT;
+
+ nr_caps = perf_pmu__caps_parse(cpu_pmu);
+ if (nr_caps < 0)
+ return nr_caps;
+
+ ret = do_write(ff, &nr_caps, sizeof(nr_caps));
+ if (ret < 0)
+ return ret;
+
+ while ((caps = perf_pmu__scan_caps(cpu_pmu, caps))) {
+ ret = do_write_string(ff, caps->name);
+ if (ret < 0)
+ return ret;
+
+ ret = do_write_string(ff, caps->value);
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
static void print_hostname(struct feat_fd *ff, FILE *fp)
{
fprintf(fp, "# hostname : %s\n", ff->ph->env.hostname);
@@ -1817,6 +1850,28 @@ static void print_compressed(struct feat_fd *ff, FILE *fp)
ff->ph->env.comp_level, ff->ph->env.comp_ratio);
}
+static void print_cpu_pmu_caps(struct feat_fd *ff, FILE *fp)
+{
+ const char *delimiter = "# cpu pmu capabilities: ";
+ char *str;
+ u32 nr_caps;
+
+ nr_caps = ff->ph->env.nr_cpu_pmu_caps;
+ if (!nr_caps) {
+ fprintf(fp, "# cpu pmu capabilities: not available\n");
+ return;
+ }
+
+ str = ff->ph->env.cpu_pmu_caps;
+ while (nr_caps--) {
+ fprintf(fp, "%s%s", delimiter, str);
+ delimiter = ", ";
+ str += strlen(str) + 1;
+ }
+
+ fprintf(fp, "\n");
+}
+
static void print_pmu_mappings(struct feat_fd *ff, FILE *fp)
{
const char *delimiter = "# pmu mappings: ";
@@ -2854,6 +2909,60 @@ static int process_compressed(struct feat_fd *ff,
return 0;
}
+static int process_cpu_pmu_caps(struct feat_fd *ff,
+ void *data __maybe_unused)
+{
+ char *name, *value;
+ struct strbuf sb;
+ u32 nr_caps;
+
+ if (do_read_u32(ff, &nr_caps))
+ return -1;
+
+ if (!nr_caps) {
+ pr_debug("cpu pmu capabilities not available\n");
+ return 0;
+ }
+
+ ff->ph->env.nr_cpu_pmu_caps = nr_caps;
+
+ if (strbuf_init(&sb, 128) < 0)
+ return -1;
+
+ while (nr_caps--) {
+ name = do_read_string(ff);
+ if (!name)
+ goto error;
+
+ value = do_read_string(ff);
+ if (!value)
+ goto free_name;
+
+ if (strbuf_addf(&sb, "%s=%s", name, value) < 0)
+ goto free_value;
+
+ /* include a NULL character at the end */
+ if (strbuf_add(&sb, "", 1) < 0)
+ goto free_value;
+
+ if (!strcmp(name, "branches"))
+ ff->ph->env.max_branches = atoi(value);
+
+ free(value);
+ free(name);
+ }
+ ff->ph->env.cpu_pmu_caps = strbuf_detach(&sb, NULL);
+ return 0;
+
+free_value:
+ free(value);
+free_name:
+ free(name);
+error:
+ strbuf_release(&sb);
+ return -1;
+}
+
#define FEAT_OPR(n, func, __full_only) \
[HEADER_##n] = { \
.name = __stringify(n), \
@@ -2911,6 +3020,7 @@ const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE] = {
FEAT_OPR(BPF_PROG_INFO, bpf_prog_info, false),
FEAT_OPR(BPF_BTF, bpf_btf, false),
FEAT_OPR(COMPRESSED, compressed, false),
+ FEAT_OPR(CPU_PMU_CAPS, cpu_pmu_caps, false),
};
struct header_print_data {
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index ca53a929e9fd..ae8a7108f52b 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -43,6 +43,7 @@ enum {
HEADER_BPF_PROG_INFO,
HEADER_BPF_BTF,
HEADER_COMPRESSED,
+ HEADER_CPU_PMU_CAPS,
HEADER_LAST_FEATURE,
HEADER_FEAT_BITS = 256,
};
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 07/13] perf machine: Refine the function for LBR call stack reconstruction
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (5 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 06/13] perf header: Support CPU " kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 08/13] perf tools: Stitch LBR call stack kan.liang
` (5 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
LBR only collects the user call stack. To reconstruct a call stack,
both the kernel call stack and the user call stack are required. The
function resolve_lbr_callchain_sample() mixes the kernel call stack and
the user call stack. Now, with the help of the TOS, the perf tool can
reconstruct a more complete call stack by adding some of the user call
stack from the previous sample. However, the current implementation is
hard to extend to support that.
Abstract two new functions to resolve the user call stack and the
kernel call stack respectively.
No functional changes.
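Schematically, the resolution order is now explicit (a condensed view
of the code below, with arguments elided; not new logic):

	if (callchain_param.order == ORDER_CALLEE) {
		/* kernel ips first (chain->ips[0..i]), then LBR entries */
		lbr_callchain_add_kernel_ip(..., callee = true, end = i);
		lbr_callchain_add_lbr_ip(..., callee = true);
	} else {
		/* caller order: LBR entries first, then kernel ips */
		lbr_callchain_add_lbr_ip(..., callee = false);
		lbr_callchain_add_kernel_ip(..., callee = false, end = i);
	}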
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
tools/perf/util/machine.c | 186 ++++++++++++++++++++++++--------------
1 file changed, 119 insertions(+), 67 deletions(-)
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 70a9f8716a4b..e3e516e30093 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2183,6 +2183,96 @@ static int remove_loops(struct branch_entry *l, int nr,
return nr;
}
+
+static int lbr_callchain_add_kernel_ip(struct thread *thread,
+ struct callchain_cursor *cursor,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ bool callee, int end)
+{
+ struct ip_callchain *chain = sample->callchain;
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ int err, i;
+
+ if (callee) {
+ for (i = 0; i < end + 1; i++) {
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, chain->ips[i],
+ false, NULL, NULL, 0);
+ if (err)
+ return err;
+ }
+ } else {
+ for (i = end; i >= 0; i--) {
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, chain->ips[i],
+ false, NULL, NULL, 0);
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int lbr_callchain_add_lbr_ip(struct thread *thread,
+ struct callchain_cursor *cursor,
+ struct perf_sample *sample,
+ struct symbol **parent,
+ struct addr_location *root_al,
+ bool callee)
+{
+ struct branch_stack *lbr_stack = sample->branch_stack;
+ u8 cpumode = PERF_RECORD_MISC_USER;
+ int lbr_nr = lbr_stack->nr;
+ struct branch_flags *flags;
+ u64 ip, branch_from = 0;
+ int err, i;
+
+ if (callee) {
+ ip = lbr_stack->entries[0].to;
+ flags = &lbr_stack->entries[0].flags;
+ branch_from = lbr_stack->entries[0].from;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL, branch_from);
+ if (err)
+ return err;
+
+ for (i = 0; i < lbr_nr; i++) {
+ ip = lbr_stack->entries[i].from;
+ flags = &lbr_stack->entries[i].flags;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL, branch_from);
+ if (err)
+ return err;
+ }
+ } else {
+ for (i = lbr_nr - 1; i >= 0; i--) {
+ ip = lbr_stack->entries[i].from;
+ flags = &lbr_stack->entries[i].flags;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL, branch_from);
+ if (err)
+ return err;
+ }
+
+ ip = lbr_stack->entries[0].to;
+ flags = &lbr_stack->entries[0].flags;
+ branch_from = lbr_stack->entries[0].from;
+ err = add_callchain_ip(thread, cursor, parent,
+ root_al, &cpumode, ip,
+ true, flags, NULL, branch_from);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
/*
* Resolve LBR callstack chain sample
* Return:
@@ -2198,82 +2288,44 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
int max_stack)
{
struct ip_callchain *chain = sample->callchain;
- int chain_nr = min(max_stack, (int)chain->nr), i;
- u8 cpumode = PERF_RECORD_MISC_USER;
- u64 ip, branch_from = 0;
+ int chain_nr = min(max_stack, (int)chain->nr);
+ int i, err;
for (i = 0; i < chain_nr; i++) {
if (chain->ips[i] == PERF_CONTEXT_USER)
break;
}
- /* LBR only affects the user callchain */
- if (i != chain_nr) {
- struct branch_stack *lbr_stack = sample->branch_stack;
- int lbr_nr = lbr_stack->nr, j, k;
- bool branch;
- struct branch_flags *flags;
- /*
- * LBR callstack can only get user call chain.
- * The mix_chain_nr is kernel call chain
- * number plus LBR user call chain number.
- * i is kernel call chain number,
- * 1 is PERF_CONTEXT_USER,
- * lbr_nr + 1 is the user call chain number.
- * For details, please refer to the comments
- * in callchain__printf
- */
- int mix_chain_nr = i + 1 + lbr_nr + 1;
-
- for (j = 0; j < mix_chain_nr; j++) {
- int err;
- branch = false;
- flags = NULL;
-
- if (callchain_param.order == ORDER_CALLEE) {
- if (j < i + 1)
- ip = chain->ips[j];
- else if (j > i + 1) {
- k = j - i - 2;
- ip = lbr_stack->entries[k].from;
- branch = true;
- flags = &lbr_stack->entries[k].flags;
- } else {
- ip = lbr_stack->entries[0].to;
- branch = true;
- flags = &lbr_stack->entries[0].flags;
- branch_from =
- lbr_stack->entries[0].from;
- }
- } else {
- if (j < lbr_nr) {
- k = lbr_nr - j - 1;
- ip = lbr_stack->entries[k].from;
- branch = true;
- flags = &lbr_stack->entries[k].flags;
- }
- else if (j > lbr_nr)
- ip = chain->ips[i + 1 - (j - lbr_nr)];
- else {
- ip = lbr_stack->entries[0].to;
- branch = true;
- flags = &lbr_stack->entries[0].flags;
- branch_from =
- lbr_stack->entries[0].from;
- }
- }
+ /*
+ * LBR only affects the user callchain.
+ * Fall back if there is no user callchain.
+ */
+ if (i == chain_nr)
+ return 0;
- err = add_callchain_ip(thread, cursor, parent,
- root_al, &cpumode, ip,
- branch, flags, NULL,
- branch_from);
- if (err)
- return (err < 0) ? err : 0;
- }
- return 1;
+ if (callchain_param.order == ORDER_CALLEE) {
+ err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
+ parent, root_al, true, i);
+ if (err)
+ goto error;
+ err = lbr_callchain_add_lbr_ip(thread, cursor, sample,
+ parent, root_al, true);
+ if (err)
+ goto error;
+ } else {
+ err = lbr_callchain_add_lbr_ip(thread, cursor, sample,
+ parent, root_al, false);
+ if (err)
+ goto error;
+ err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
+ parent, root_al, false, i);
+ if (err)
+ goto error;
}
- return 0;
+ return 1;
+error:
+ return (err < 0) ? err : 0;
}
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 08/13] perf tools: Stitch LBR call stack
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (6 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 07/13] perf machine: Refine the function for LBR call stack reconstruction kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 09/13] perf report: Add option to enable the LBR stitching approach kan.liang
` (4 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
In LBR call stack mode, the depth of the reconstructed LBR call stack
is limited to the number of LBR registers.
For example, on Skylake, the depth of the reconstructed LBR call stack
is always <= 32.
# To display the perf.data header info, please use
# --header/--header-only options.
#
#
# Total Lost Samples: 0
#
# Samples: 6K of event 'cycles'
# Event count (approx.): 6487119731
#
# Children Self Command Shared Object Symbol
# ........ ........ ............... ..................
# ................................
99.97% 99.97% tchain_edit tchain_edit [.] f43
|
--99.64%--f11
f12
f13
f14
f15
f16
f17
f18
f19
f20
f21
f22
f23
f24
f25
f26
f27
f28
f29
f30
f31
f32
f33
f34
f35
f36
f37
f38
f39
f40
f41
f42
f43
For a call stack deeper than the LBR limit, HW will overwrite the LBR
register holding the oldest branch. Only partial call stacks can be
reconstructed.
However, the overwritten LBRs may still be retrieved from the previous
sample: at that moment, HW hadn't overwritten the LBR registers yet.
Perf tools can stitch those overwritten LBRs onto the current call
stack to get a more complete call stack.
To determine if LBRs can be stitched, perf tools need to compare the
current sample with the previous sample:
- They should have identical LBR records (same from, to and flags
values, and the same physical index of LBR registers).
- The search starts from the base-of-stack of the current sample (see
the worked example below).
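A worked example of the index arithmetic in has_stitched_lbr() below
(numbers chosen for illustration):

	/*
	 * max_lbr = 32; current sample: nr = 10, lbr_tos = 5.
	 * Physical index of the current base-of-stack:
	 *	cur_base = max_lbr - cur_stack->nr + cur->lbr_tos + 1
	 *		 = 32 - 10 + 5 + 1 = 28
	 *
	 * Previous sample: lbr_tos = 3. Since 3 < cur_base, the ring
	 * buffer wrapped around:
	 *	distance = max_lbr + prev->lbr_tos - cur_base
	 *		 = 32 + 3 - 28 = 7
	 *
	 * If prev_stack->nr < distance + 1 = 8, nothing can be
	 * stitched. Otherwise compare prev_stack->entries[7] against
	 * cur_stack->entries[9], walking both indices down while the
	 * records stay identical.
	 */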
In struct lbr_stitch, add 'prev_sample' to save the previous sample,
and 'prev_lbr_cursor' to save all LBR cursor nodes from the previous
sample. Once perf decides to stitch the previous LBRs, the
corresponding LBR cursor nodes are copied to 'lists', which tracks the
LBR cursor nodes that are going to be stitched.
When the stitching is over, the nodes are not freed immediately. They
are moved to 'free_lists', so the next stitching may reuse the space.
Both 'lists' and 'free_lists' are freed when all samples have been
processed.
The 'lbr_stitch_enable' flag indicates whether the LBR stitching
approach is enabled; it is disabled by default. The following patch
will introduce a new option to enable it.
This is because:
- The stitching approach is based on LBR call stack technology. The
known limitations of LBR call stack technology still apply, e.g.
exception handling such as setjmp/longjmp will have calls/returns that
do not match.
- This approach is not foolproof. There can be cases where it creates
incorrect call stacks from incorrect matches. There is no attempt to
validate any matches in another way.
However, in many common cases with call stack overflows, it can
recreate better call stacks than the default LBR call stack output. So
if there are problems with LBR overflows, this is a possible
workaround.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
tools/perf/util/branch.h | 5 +-
tools/perf/util/callchain.h | 12 +-
tools/perf/util/machine.c | 232 +++++++++++++++++++++++++++++++++++-
tools/perf/util/thread.c | 2 +
tools/perf/util/thread.h | 34 ++++++
5 files changed, 280 insertions(+), 5 deletions(-)
diff --git a/tools/perf/util/branch.h b/tools/perf/util/branch.h
index 88e00d268f6f..749fce3675b6 100644
--- a/tools/perf/util/branch.h
+++ b/tools/perf/util/branch.h
@@ -34,7 +34,10 @@ struct branch_info {
struct branch_entry {
u64 from;
u64 to;
- struct branch_flags flags;
+ union {
+ struct branch_flags flags;
+ u64 flags_value;
+ };
};
struct branch_stack {
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 83398e5bbe4b..9708b8640946 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -149,7 +149,17 @@ struct callchain_cursor_node {
u64 branch_from;
int nr_loop_iter;
u64 iter_cycles;
- struct callchain_cursor_node *next;
+ union {
+ struct callchain_cursor_node *next;
+
+ /* Indicate valid cursor node for LBR stitch */
+ bool valid;
+ };
+};
+
+struct stitch_list {
+ struct list_head node;
+ struct callchain_cursor_node cursor;
};
struct callchain_cursor {
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index e3e516e30093..dd8c764bc9ae 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -2216,6 +2216,31 @@ static int lbr_callchain_add_kernel_ip(struct thread *thread,
return 0;
}
+static void save_lbr_cursor_node(struct thread *thread,
+ struct callchain_cursor *cursor,
+ int idx)
+{
+ struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+
+ if (!lbr_stitch)
+ return;
+
+ if (cursor->pos == cursor->nr) {
+ lbr_stitch->prev_lbr_cursor[idx].valid = false;
+ return;
+ }
+
+ if (!cursor->curr)
+ cursor->curr = cursor->first;
+ else
+ cursor->curr = cursor->curr->next;
+ memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
+ sizeof(struct callchain_cursor_node));
+
+ lbr_stitch->prev_lbr_cursor[idx].valid = true;
+ cursor->pos++;
+}
+
static int lbr_callchain_add_lbr_ip(struct thread *thread,
struct callchain_cursor *cursor,
struct perf_sample *sample,
@@ -2230,6 +2255,21 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
u64 ip, branch_from = 0;
int err, i;
+ /*
+ * The curr and pos are not used in the writing session. They are
+ * cleared in callchain_cursor_commit() when the writing session is
+ * closed. Use curr and pos to track the current cursor node.
+ */
+ if (thread->lbr_stitch) {
+ cursor->curr = NULL;
+ cursor->pos = cursor->nr;
+ if (cursor->nr) {
+ cursor->curr = cursor->first;
+ for (i = 0; i < (int)(cursor->nr - 1); i++)
+ cursor->curr = cursor->curr->next;
+ }
+ }
+
if (callee) {
ip = lbr_stack->entries[0].to;
flags = &lbr_stack->entries[0].flags;
@@ -2240,6 +2280,20 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
if (err)
return err;
+ /*
+ * The number of cursor nodes increases.
+ * Move the current cursor node, but there is no need to save the
+ * cursor node for entry 0: it's impossible to stitch the whole LBRs
+ * of the previous sample.
+ */
+ if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
+ if (!cursor->curr)
+ cursor->curr = cursor->first;
+ else
+ cursor->curr = cursor->curr->next;
+ cursor->pos++;
+ }
+
for (i = 0; i < lbr_nr; i++) {
ip = lbr_stack->entries[i].from;
flags = &lbr_stack->entries[i].flags;
@@ -2248,6 +2302,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
true, flags, NULL, branch_from);
if (err)
return err;
+ save_lbr_cursor_node(thread, cursor, i);
}
} else {
for (i = lbr_nr - 1; i >= 0; i--) {
@@ -2258,6 +2313,7 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
true, flags, NULL, branch_from);
if (err)
return err;
+ save_lbr_cursor_node(thread, cursor, i);
}
ip = lbr_stack->entries[0].to;
@@ -2273,6 +2329,145 @@ static int lbr_callchain_add_lbr_ip(struct thread *thread,
return 0;
}
+static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
+ struct callchain_cursor *cursor)
+{
+ struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct stitch_list *stitch_node;
+ int err;
+
+ struct callchain_cursor_node *cnode;
+
+ list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
+ cnode = &stitch_node->cursor;
+
+ err = callchain_cursor_append(cursor, cnode->ip, cnode->map,
+ cnode->sym, cnode->branch,
+ &cnode->branch_flags,
+ cnode->nr_loop_iter,
+ cnode->iter_cycles,
+ cnode->branch_from,
+ cnode->srcline);
+ if (err)
+ return err;
+
+ }
+ return 0;
+}
+
+static struct stitch_list *get_stitch_node(struct thread *thread)
+{
+ struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct stitch_list *stitch_node;
+
+ if (!list_empty(&lbr_stitch->free_lists)) {
+ stitch_node = list_first_entry(&lbr_stitch->free_lists,
+ struct stitch_list, node);
+ list_del(&stitch_node->node);
+
+ return stitch_node;
+ }
+
+ return malloc(sizeof(struct stitch_list));
+}
+
+static bool has_stitched_lbr(struct thread *thread,
+ struct perf_sample *cur,
+ struct perf_sample *prev,
+ unsigned int max_lbr,
+ bool callee)
+{
+ struct branch_stack *cur_stack = cur->branch_stack;
+ struct branch_stack *prev_stack = prev->branch_stack;
+ struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ int i, j, nr_identical_branches = 0;
+ struct stitch_list *stitch_node;
+ u64 cur_base, distance;
+
+ if (!cur_stack || !prev_stack)
+ return false;
+
+ /* Find the physical index of the base-of-stack for current sample. */
+ cur_base = max_lbr - cur_stack->nr + cur->lbr_tos + 1;
+
+ distance = (prev->lbr_tos > cur_base) ? (prev->lbr_tos - cur_base) :
+ (max_lbr + prev->lbr_tos - cur_base);
+ /* Previous sample has shorter stack. Nothing can be stitched. */
+ if (distance + 1 > prev_stack->nr)
+ return false;
+
+ /*
+ * Check if there are identical LBRs between two samples.
+ * Identicall LBRs must have same from, to and flags values. Also,
+ * they have to be saved in the same LBR registers (same physical
+ * index).
+ *
+ * Starts from the base-of-stack of current sample.
+ */
+ for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
+ if ((prev_stack->entries[i].from != cur_stack->entries[j].from) ||
+ (prev_stack->entries[i].to != cur_stack->entries[j].to) ||
+ (prev_stack->entries[i].flags_value != cur_stack->entries[j].flags_value))
+ break;
+
+ nr_identical_branches++;
+ }
+
+ if (!nr_identical_branches)
+ return false;
+
+ /*
+ * Save the LBRs between the base-of-stack of previous sample
+ * and the base-of-stack of current sample into lbr_stitch->lists.
+ * These LBRs will be stitched later.
+ */
+ for (i = prev_stack->nr - 1; i > (int)distance; i--) {
+
+ if (!lbr_stitch->prev_lbr_cursor[i].valid)
+ continue;
+
+ stitch_node = get_stitch_node(thread);
+ if (!stitch_node)
+ return false;
+
+ memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
+ sizeof(struct callchain_cursor_node));
+
+ if (callee)
+ list_add(&stitch_node->node, &lbr_stitch->lists);
+ else
+ list_add_tail(&stitch_node->node, &lbr_stitch->lists);
+ }
+
+ return true;
+}
+
+static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
+{
+ if (thread->lbr_stitch)
+ return true;
+
+ thread->lbr_stitch = calloc(1, sizeof(struct lbr_stitch));
+ if (!thread->lbr_stitch)
+ goto err;
+
+ thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
+ if (!thread->lbr_stitch->prev_lbr_cursor)
+ goto free_lbr_stitch;
+
+ INIT_LIST_HEAD(&thread->lbr_stitch->lists);
+ INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
+
+ return true;
+
+free_lbr_stitch:
+ free(thread->lbr_stitch);
+ thread->lbr_stitch = NULL;
+err:
+ pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
+ thread->lbr_stitch_enable = false;
+ return false;
+}
/*
* Resolve LBR callstack chain sample
* Return:
@@ -2285,10 +2480,14 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
struct perf_sample *sample,
struct symbol **parent,
struct addr_location *root_al,
- int max_stack)
+ int max_stack,
+ unsigned int max_lbr)
{
struct ip_callchain *chain = sample->callchain;
int chain_nr = min(max_stack, (int)chain->nr);
+ bool callee = (callchain_param.order == ORDER_CALLEE);
+ struct lbr_stitch *lbr_stitch;
+ bool stitched_lbr = false;
int i, err;
for (i = 0; i < chain_nr; i++) {
@@ -2303,7 +2502,21 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
if (i == chain_nr)
return 0;
- if (callchain_param.order == ORDER_CALLEE) {
+ if (thread->lbr_stitch_enable && sample->lbr_tos != (-1ULL) &&
+ (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
+ lbr_stitch = thread->lbr_stitch;
+
+ stitched_lbr = has_stitched_lbr(thread, sample,
+ &lbr_stitch->prev_sample,
+ max_lbr, callee);
+ if (!stitched_lbr) {
+ list_replace_init(&lbr_stitch->lists,
+ &lbr_stitch->free_lists);
+ }
+ memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
+ }
+
+ if (callee) {
err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
parent, root_al, true, i);
if (err)
@@ -2312,7 +2525,17 @@ static int resolve_lbr_callchain_sample(struct thread *thread,
parent, root_al, true);
if (err)
goto error;
+ if (stitched_lbr) {
+ err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
+ if (err)
+ goto error;
+ }
} else {
+ if (stitched_lbr) {
+ err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
+ if (err)
+ goto error;
+ }
err = lbr_callchain_add_lbr_ip(thread, cursor, sample,
parent, root_al, false);
if (err)
@@ -2369,8 +2592,11 @@ static int thread__resolve_callchain_sample(struct thread *thread,
chain_nr = chain->nr;
if (perf_evsel__has_branch_callstack(evsel)) {
+ struct perf_env *env = perf_evsel__env(evsel);
+
err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
- root_al, max_stack);
+ root_al, max_stack,
+ !env ? 0 : env->max_branches);
if (err)
return (err < 0) ? err : 0;
}
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index b64e9e049636..d3d8758e273e 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -47,6 +47,7 @@ struct thread *thread__new(pid_t pid, pid_t tid)
thread->tid = tid;
thread->ppid = -1;
thread->cpu = -1;
+ thread->lbr_stitch_enable = false;
INIT_LIST_HEAD(&thread->namespaces_list);
INIT_LIST_HEAD(&thread->comm_list);
init_rwsem(&thread->namespaces_lock);
@@ -110,6 +111,7 @@ void thread__delete(struct thread *thread)
exit_rwsem(&thread->namespaces_lock);
exit_rwsem(&thread->comm_lock);
+ thread__free_stitch_list(thread);
free(thread);
}
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 51bdb9a7af7f..e6740ca37091 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -13,6 +13,8 @@
#include <strlist.h>
#include <intlist.h>
#include "rwsem.h"
+#include "event.h"
+#include "callchain.h"
struct addr_location;
struct map;
@@ -20,6 +22,13 @@ struct perf_record_namespaces;
struct thread_stack;
struct unwind_libunwind_ops;
+struct lbr_stitch {
+ struct list_head lists;
+ struct list_head free_lists;
+ struct perf_sample prev_sample;
+ struct callchain_cursor_node *prev_lbr_cursor;
+};
+
struct thread {
union {
struct rb_node rb_node;
@@ -46,6 +55,10 @@ struct thread {
struct srccode_state srccode_state;
bool filter;
int filter_entry_depth;
+
+ /* LBR call stack stitch */
+ bool lbr_stitch_enable;
+ struct lbr_stitch *lbr_stitch;
};
struct machine;
@@ -142,4 +155,25 @@ static inline bool thread__is_filtered(struct thread *thread)
return false;
}
+static inline void thread__free_stitch_list(struct thread *thread)
+{
+ struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
+ struct stitch_list *pos, *tmp;
+
+ if (!lbr_stitch)
+ return;
+
+ list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
+ list_del_init(&pos->node);
+ free(pos);
+ }
+
+ list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
+ list_del_init(&pos->node);
+ free(pos);
+ }
+ free(lbr_stitch->prev_lbr_cursor);
+ free(thread->lbr_stitch);
+}
+
#endif /* __PERF_THREAD_H */
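For reference, thread__free_stitch_list() above walks lists of
struct stitch_list, which is not defined in this hunk; it is
introduced elsewhere in this series (util/callchain.h, patch 08).
A minimal sketch of the assumed layout:

	/* Assumed definition; introduced by patch 08 in util/callchain.h. */
	struct stitch_list {
		struct list_head		node;	/* links into lists/free_lists */
		struct callchain_cursor_node	cursor;	/* saved LBR callchain entry */
	};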
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 09/13] perf report: Add option to enable the LBR stitching approach
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (7 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 08/13] perf tools: Stitch LBR call stack kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 10/13] perf script: " kan.liang
` (3 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
With the LBR stitching approach, the reconstructed LBR call stack
can break the HW limitation. However, it may reconstruct invalid call
stacks in some cases, e.g. exception handling such as setjmp/longjmp.
Also, it may impact the processing time, especially when the number of
samples with stitched LBRs is huge.
Add an option to enable the approach.
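For example, the report below can be produced with a session along
these lines (the exact commands and workload invocation are assumed
for illustration):
$perf record --call-graph lbr ./tchain_edit
$perf report --stitch-lbr --stdio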
# To display the perf.data header info, please use
# --header/--header-only options.
#
#
# Total Lost Samples: 0
#
# Samples: 6K of event 'cycles'
# Event count (approx.): 6492797701
#
# Children Self Command Shared Object Symbol
# ........ ........ ............... ..................
# .................................
#
99.99% 99.99% tchain_edit tchain_edit [.] f43
|
---main
f1
f2
f3
f4
f5
f6
f7
f8
f9
f10
f11
f12
f13
f14
f15
f16
f17
f18
f19
f20
f21
f22
f23
f24
f25
f26
f27
f28
f29
f30
f31
|
--99.65%--f32
f33
f34
f35
f36
f37
f38
f39
f40
f41
f42
f43
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
tools/perf/Documentation/perf-report.txt | 11 +++++++++++
tools/perf/builtin-report.c | 6 ++++++
2 files changed, 17 insertions(+)
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index 7315f155803f..c0651b68ed5d 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -476,6 +476,17 @@ include::itrace.txt[]
This option extends the perf report to show reference callgraphs,
which are collected by a reference event, for events with no callgraph.
+--stitch-lbr::
+ Show callgraph with stitched LBRs, which may produce a more
+ complete callgraph. The perf.data file must have been obtained
+ using perf record --call-graph lbr.
+ Disabled by default. In common cases with call stack overflows,
+ it can recreate better call stacks than the default lbr call stack
+ output. But this approach is not foolproof. There can be cases
+ where it creates incorrect call stacks from incorrect matches.
+ Known limitations include exception handling such as
+ setjmp/longjmp, whose calls/returns will not match.
+
--socket-filter::
Only report the samples on the processor socket that match with this filter
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index aae0e57c60fb..0d4275a46645 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -83,6 +83,7 @@ struct report {
bool header_only;
bool nonany_branch_mode;
bool group_set;
+ bool stitch_lbr;
int max_stack;
struct perf_read_values show_threads_values;
struct annotation_options annotation_opts;
@@ -263,6 +264,9 @@ static int process_sample_event(struct perf_tool *tool,
return -1;
}
+ if (rep->stitch_lbr)
+ al.thread->lbr_stitch_enable = true;
+
if (symbol_conf.hide_unresolved && al.sym == NULL)
goto out_put;
@@ -1183,6 +1187,8 @@ int cmd_report(int argc, const char **argv)
"Show full source file name path for source lines"),
OPT_BOOLEAN(0, "show-ref-call-graph", &symbol_conf.show_ref_callgraph,
"Show callgraph from reference event"),
+ OPT_BOOLEAN(0, "stitch-lbr", &report.stitch_lbr,
+ "Enable LBR callgraph stitching approach"),
OPT_INTEGER(0, "socket-filter", &report.socket_filter,
"only show processor socket that match with this filter"),
OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 10/13] perf script: Add option to enable the LBR stitching approach
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (8 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 09/13] perf report: Add option to enable the LBR stitching approach kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 11/13] perf top: " kan.liang
` (2 subsequent siblings)
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
With the LBR stitching approach, the reconstructed LBR call stack
can break the HW limitation. However, it may reconstruct invalid call
stacks in some cases, e.g. exception handling such as setjmp/longjmp.
Also, it may impact the processing time, especially when the number of
samples with stitched LBRs is huge.
Add an option to enable the approach.
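A hypothetical session (./workload is a placeholder; the perf.data
must come from perf record --call-graph lbr):
$perf record --call-graph lbr ./workload
$perf script --stitch-lbr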
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
tools/perf/Documentation/perf-script.txt | 11 +++++++++++
tools/perf/builtin-script.c | 6 ++++++
2 files changed, 17 insertions(+)
diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt
index 2599b057e47b..472f20f1e479 100644
--- a/tools/perf/Documentation/perf-script.txt
+++ b/tools/perf/Documentation/perf-script.txt
@@ -426,6 +426,17 @@ include::itrace.txt[]
--show-on-off-events::
Show the --switch-on/off events too.
+--stitch-lbr::
+ Show callgraph with stitched LBRs, which may produce a more
+ complete callgraph. The perf.data file must have been obtained
+ using perf record --call-graph lbr.
+ Disabled by default. In common cases with call stack overflows,
+ it can recreate better call stacks than the default lbr call stack
+ output. But this approach is not foolproof. There can be cases
+ where it creates incorrect call stacks from incorrect matches.
+ Known limitations include exception handling such as
+ setjmp/longjmp, whose calls/returns will not match.
+
SEE ALSO
--------
linkperf:perf-record[1], linkperf:perf-script-perl[1],
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 67be8d31afab..0fc4d07864d1 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1641,6 +1641,7 @@ struct perf_script {
bool show_bpf_events;
bool allocated;
bool per_event_dump;
+ bool stitch_lbr;
struct evswitch evswitch;
struct perf_cpu_map *cpus;
struct perf_thread_map *threads;
@@ -1867,6 +1868,9 @@ static void process_event(struct perf_script *script,
if (PRINT_FIELD(IP)) {
struct callchain_cursor *cursor = NULL;
+ if (script->stitch_lbr)
+ al->thread->lbr_stitch_enable = true;
+
if (symbol_conf.use_callchain && sample->callchain &&
thread__resolve_callchain(al->thread, &callchain_cursor, evsel,
sample, NULL, NULL, scripting_max_stack) == 0)
@@ -3556,6 +3560,8 @@ int cmd_script(int argc, const char **argv)
"file", "file saving guest os /proc/kallsyms"),
OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
"file", "file saving guest os /proc/modules"),
+ OPT_BOOLEAN('\0', "stitch-lbr", &script.stitch_lbr,
+ "Enable LBR callgraph stitching approach"),
OPTS_EVSWITCH(&script.evswitch),
OPT_END()
};
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 11/13] perf top: Add option to enable the LBR stitching approach
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (9 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 10/13] perf script: " kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [PATCH V2 12/13] perf c2c: " kan.liang
2019-10-21 20:03 ` [RFC PATCH V2 13/13] perf hist: Add fast path for duplicate entries check kan.liang
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
With the LBR stitching approach, the reconstructed LBR call stack
can break the HW limitation. However, it may reconstruct invalid call
stacks in some cases, e.g. exception handling such as setjmp/longjmp.
Also, it may impact the processing time, especially when the number of
samples with stitched LBRs is huge.
Add an option to enable the approach.
The option must be used with --call-graph lbr.
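For example (invocation assumed for illustration):
$perf top --call-graph lbr --stitch-lbr
Without --call-graph lbr, perf top now exits with an error, as
enforced by the check added below.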
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
tools/perf/Documentation/perf-top.txt | 9 +++++++++
tools/perf/builtin-top.c | 11 +++++++++++
tools/perf/util/top.h | 1 +
3 files changed, 21 insertions(+)
diff --git a/tools/perf/Documentation/perf-top.txt b/tools/perf/Documentation/perf-top.txt
index 5596129a71cf..80b57f942a86 100644
--- a/tools/perf/Documentation/perf-top.txt
+++ b/tools/perf/Documentation/perf-top.txt
@@ -304,6 +304,15 @@ Default is to monitor all CPUS.
go straight to the histogram browser, just like 'perf top' with no events
explicitly specified does.
+--stitch-lbr::
+ Show callgraph with stitched LBRs, which may produce a more complete
+ callgraph. The option must be used with --call-graph lbr recording.
+ Disabled by default. In common cases with call stack overflows,
+ it can recreate better call stacks than the default lbr call stack
+ output. But this approach is not foolproof. There can be cases
+ where it creates incorrect call stacks from incorrect matches.
+ Known limitations include exception handling such as
+ setjmp/longjmp, whose calls/returns will not match.
INTERACTIVE PROMPTING KEYS
--------------------------
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 611d03030abc..539670377e0f 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -33,6 +33,7 @@
#include "util/map.h"
#include "util/mmap.h"
#include "util/session.h"
+#include "util/thread.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/top.h"
@@ -764,6 +765,9 @@ static void perf_event__process_sample(struct perf_tool *tool,
if (machine__resolve(machine, &al, sample) < 0)
return;
+ if (top->stitch_lbr)
+ al.thread->lbr_stitch_enable = true;
+
if (!machine->kptr_restrict_warned &&
symbol_conf.kptr_restrict &&
al.cpumode == PERF_RECORD_MISC_KERNEL) {
@@ -1537,6 +1541,8 @@ int cmd_top(int argc, const char **argv)
"number of thread to run event synthesize"),
OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
"Record namespaces events"),
+ OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
+ "Enable LBR callgraph stitching approach"),
OPTS_EVSWITCH(&top.evswitch),
OPT_END()
};
@@ -1599,6 +1605,11 @@ int cmd_top(int argc, const char **argv)
}
}
+ if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
+ pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n");
+ goto out_delete_evlist;
+ }
+
if (opts->branch_stack && callchain_param.enabled)
symbol_conf.show_branchflag_count = true;
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h
index f117d4f4821e..45dc84ddff37 100644
--- a/tools/perf/util/top.h
+++ b/tools/perf/util/top.h
@@ -36,6 +36,7 @@ struct perf_top {
bool use_tui, use_stdio;
bool vmlinux_warned;
bool dump_symtab;
+ bool stitch_lbr;
struct hist_entry *sym_filter_entry;
struct evsel *sym_evsel;
struct perf_session *session;
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [PATCH V2 12/13] perf c2c: Add option to enable the LBR stitching approach
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (10 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 11/13] perf top: " kan.liang
@ 2019-10-21 20:03 ` kan.liang
2019-10-21 20:03 ` [RFC PATCH V2 13/13] perf hist: Add fast path for duplicate entries check kan.liang
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
With the LBR stitching approach, the reconstructed LBR call stack
can break the HW limitation. However, it may reconstruct invalid call
stacks in some cases, e.g. exception handling such as setjmp/longjmp.
Also, it may impact the processing time, especially when the number of
samples with stitched LBRs is huge.
Add an option to enable the approach.
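A hypothetical session (./workload is a placeholder; as the
documentation below notes, the data must come from perf c2c record
--call-graph lbr):
$perf c2c record --call-graph lbr ./workload
$perf c2c report --stitch-lbr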
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
tools/perf/Documentation/perf-c2c.txt | 11 +++++++++++
tools/perf/builtin-c2c.c | 6 ++++++
2 files changed, 17 insertions(+)
diff --git a/tools/perf/Documentation/perf-c2c.txt b/tools/perf/Documentation/perf-c2c.txt
index e6150f21267d..2133eb320cb0 100644
--- a/tools/perf/Documentation/perf-c2c.txt
+++ b/tools/perf/Documentation/perf-c2c.txt
@@ -111,6 +111,17 @@ REPORT OPTIONS
--display::
Switch to HITM type (rmt, lcl) to display and sort on. Total HITMs as default.
+--stitch-lbr::
+ Show callgraph with stitched LBRs, which may produce a more
+ complete callgraph. The perf.data file must have been obtained
+ using perf c2c record --call-graph lbr.
+ Disabled by default. In common cases with call stack overflows,
+ it can recreate better call stacks than the default lbr call stack
+ output. But this approach is not foolproof. There can be cases
+ where it creates incorrect call stacks from incorrect matches.
+ Known limitations include exception handling such as
+ setjmp/longjmp, whose calls/returns will not match.
+
C2C RECORD
----------
The perf c2c record command setup options related to HITM cacheline analysis
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 3542b6ab9813..c3658986c38a 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -95,6 +95,7 @@ struct perf_c2c {
bool use_stdio;
bool stats_only;
bool symbol_full;
+ bool stitch_lbr;
/* HITM shared clines stats */
struct c2c_stats hitm_stats;
@@ -273,6 +274,9 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
return -1;
}
+ if (c2c.stitch_lbr)
+ al.thread->lbr_stitch_enable = true;
+
ret = sample__resolve_callchain(sample, &callchain_cursor, NULL,
evsel, &al, sysctl_perf_event_max_stack);
if (ret)
@@ -2746,6 +2750,8 @@ static int perf_c2c__report(int argc, const char **argv)
OPT_STRING('c', "coalesce", &coalesce, "coalesce fields",
"coalesce fields: pid,tid,iaddr,dso"),
OPT_BOOLEAN('f', "force", &symbol_conf.force, "don't complain, do it"),
+ OPT_BOOLEAN(0, "stitch-lbr", &c2c.stitch_lbr,
+ "Enable LBR callgraph stitching approach"),
OPT_PARENT(c2c_options),
OPT_END()
};
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread
* [RFC PATCH V2 13/13] perf hist: Add fast path for duplicate entries check
2019-10-21 20:03 [PATCH V2 00/13] Stitch LBR call stack kan.liang
` (11 preceding siblings ...)
2019-10-21 20:03 ` [PATCH V2 12/13] perf c2c: " kan.liang
@ 2019-10-21 20:03 ` kan.liang
12 siblings, 0 replies; 16+ messages in thread
From: kan.liang @ 2019-10-21 20:03 UTC (permalink / raw)
To: peterz, acme, mingo, linux-kernel
Cc: jolsa, namhyung, vitaly.slobodskoy, pavel.gerasimov, ak, eranian,
Kan Liang
From: Kan Liang <kan.liang@linux.intel.com>
Perf checks for duplicate entries in a callchain before adding an
entry. However, the check is very slow, especially with a deep call
stack. Almost 50% of the elapsed time of perf report is spent on the
check when the call stack always has a depth of 32.
hist_entry__cmp() is used to compare the new entry with the old
entries. It goes through all the available sorts in the sort_list,
and calls the specific cmp of each sort, which is very slow.
Actually, for most cases, there are no duplicate entries in the
callchain. The symbols are usually different. It's much faster to do a
quick check for symbols first, and only do the full cmp when the
symbols are exactly the same.
The quick check only compares symbols, not the dso. Export
_sort__sym_cmp.
$perf record --call-graph lbr ./tchain_edit_64
Without the patch
$time perf report --stdio
real 0m21.142s
user 0m21.110s
sys 0m0.033s
With the patch
$time perf report --stdio
real 0m10.977s
user 0m10.948s
sys 0m0.027s
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
---
Other solution:
I'm not sure if the full check, hist_entry__cmp(), is ever needed here.
Can we just check the sym and dso for duplicate entries?
Using sort__sym_cmp() to replace hist_entry__cmp() may be an alternative
solution.
tools/perf/util/hist.c | 23 +++++++++++++++++++++++
tools/perf/util/sort.c | 2 +-
tools/perf/util/sort.h | 2 ++
3 files changed, 26 insertions(+), 1 deletion(-)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 679a1d75090c..94044b8f1b61 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1047,6 +1047,20 @@ iter_next_cumulative_entry(struct hist_entry_iter *iter,
return fill_callchain_info(al, node, iter->hide_unresolved);
}
+static bool
+hist_entry__fast__sym_diff(struct hist_entry *left,
+ struct hist_entry *right)
+{
+ struct symbol *sym_l = left->ms.sym;
+ struct symbol *sym_r = right->ms.sym;
+
+ if (!sym_l && !sym_r)
+ return left->ip != right->ip;
+
+ return !!_sort__sym_cmp(sym_l, sym_r);
+}
+
+
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
struct addr_location *al)
@@ -1072,6 +1086,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
};
int i;
struct callchain_cursor cursor;
+ bool fast = hists__has(he_tmp.hists, sym);
callchain_cursor_snapshot(&cursor, &callchain_cursor);
@@ -1082,6 +1097,14 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
* It's possible that it has cycles or recursive calls.
*/
for (i = 0; i < iter->curr; i++) {
+ /*
+ * For most cases, there are no duplicate entries in callchain.
+ * The symbols are usually different. Do a quick check for
+ * symbols first.
+ */
+ if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
+ continue;
+
if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
/* to avoid calling callback function */
iter->he = NULL;
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 43d1d410854a..8ccf4e44aa90 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -234,7 +234,7 @@ static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
return (int64_t)(right_ip - left_ip);
}
-static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
+int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
if (!sym_l || !sym_r)
return cmp_null(sym_l, sym_r);
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 5aff9542d9b7..d608b8a28a92 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -307,5 +307,7 @@ int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right);
int64_t
sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right);
+int64_t
+_sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r);
char *hist_entry__srcline(struct hist_entry *he);
#endif /* __PERF_SORT_H */
--
2.17.1
^ permalink raw reply related [flat|nested] 16+ messages in thread