* x86/perf: Improve sysfs enumeration for cpu pmu @ 2017-08-22 18:51 Andi Kleen 2017-08-22 18:52 ` [PATCH v1 1/2] x86/perf: Only show format attributes when supported Andi Kleen 2017-08-22 18:52 ` [PATCH v1 2/2] x86/perf: Export some PMU attributes in caps Andi Kleen 0 siblings, 2 replies; 9+ messages in thread From: Andi Kleen @ 2017-08-22 18:51 UTC (permalink / raw) To: peterz; +Cc: linux-kernel Some improvements for the sysfs enumeration for the cpu pmu to make it easier for user programs to discover what is supported. -Andi ^ permalink raw reply [flat|nested] 9+ messages in thread
* [PATCH v1 1/2] x86/perf: Only show format attributes when supported 2017-08-22 18:51 x86/perf: Improve sysfs enumeration for cpu pmu Andi Kleen @ 2017-08-22 18:52 ` Andi Kleen 2017-08-25 11:54 ` [tip:perf/core] perf/x86: " tip-bot for Andi Kleen 2017-08-22 18:52 ` [PATCH v1 2/2] x86/perf: Export some PMU attributes in caps Andi Kleen 1 sibling, 1 reply; 9+ messages in thread From: Andi Kleen @ 2017-08-22 18:52 UTC (permalink / raw) To: peterz; +Cc: linux-kernel, Andi Kleen From: Andi Kleen <ak@linux.intel.com> Only show the Intel format attributes in sysfs when the feature is actually supported with the current model numbers. This allows programs to probe what format attributes are available, and give a sensible error message to users if they are not. This handles near all cases for intel attributes since Nehalem, except the (obscure) case when the model number if known, but PEBS is disabled in PERF_CAPABILITIES. Signed-off-by: Andi Kleen <ak@linux.intel.com> --- arch/x86/events/intel/core.c | 48 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 7 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 98b0f0729527..82faeed30135 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3415,12 +3415,26 @@ static struct attribute *intel_arch3_formats_attr[] = { &format_attr_any.attr, &format_attr_inv.attr, &format_attr_cmask.attr, + NULL, +}; + +static struct attribute *hsw_format_attr[] = { &format_attr_in_tx.attr, &format_attr_in_tx_cp.attr, + &format_attr_offcore_rsp.attr, + &format_attr_ldlat.attr, + NULL +}; - &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */ - &format_attr_ldlat.attr, /* PEBS load latency */ - NULL, +static struct attribute *nhm_format_attr[] = { + &format_attr_offcore_rsp.attr, + &format_attr_ldlat.attr, + NULL +}; + +static struct attribute *slm_format_attr[] = { + &format_attr_offcore_rsp.attr, + NULL }; static struct attribute 
*skl_format_attr[] = { @@ -3795,6 +3809,7 @@ __init int intel_pmu_init(void) unsigned int unused; struct extra_reg *er; int version, i; + struct attribute **extra_attr = NULL; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { switch (boot_cpu_data.x86) { @@ -3905,6 +3920,7 @@ __init int intel_pmu_init(void) intel_pmu_pebs_data_source_nhm(); x86_add_quirk(intel_nehalem_quirk); + extra_attr = nhm_format_attr; pr_cont("Nehalem events, "); break; @@ -3940,6 +3956,7 @@ __init int intel_pmu_init(void) x86_pmu.extra_regs = intel_slm_extra_regs; x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.cpu_events = slm_events_attrs; + extra_attr = slm_format_attr; pr_cont("Silvermont events, "); break; @@ -3965,6 +3982,7 @@ __init int intel_pmu_init(void) x86_pmu.lbr_pt_coexist = true; x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.cpu_events = glm_events_attrs; + extra_attr = slm_format_attr; pr_cont("Goldmont events, "); break; @@ -3991,6 +4009,7 @@ __init int intel_pmu_init(void) x86_pmu.cpu_events = glm_events_attrs; /* Goldmont Plus has 4-wide pipeline */ event_attr_td_total_slots_scale_glm.event_str = "4"; + extra_attr = slm_format_attr; pr_cont("Goldmont plus events, "); break; @@ -4020,6 +4039,7 @@ __init int intel_pmu_init(void) X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); intel_pmu_pebs_data_source_nhm(); + extra_attr = nhm_format_attr; pr_cont("Westmere events, "); break; @@ -4056,6 +4076,8 @@ __init int intel_pmu_init(void) intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); + extra_attr = nhm_format_attr; + pr_cont("SandyBridge events, "); break; @@ -4090,6 +4112,8 @@ __init int intel_pmu_init(void) intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); + extra_attr = nhm_format_attr; + pr_cont("IvyBridge events, "); break; @@ -4118,6 +4142,8 @@ __init int intel_pmu_init(void) x86_pmu.get_event_constraints = 
hsw_get_event_constraints; x86_pmu.cpu_events = hsw_events_attrs; x86_pmu.lbr_double_abort = true; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? + hsw_format_attr : nhm_format_attr; pr_cont("Haswell events, "); break; @@ -4154,6 +4180,8 @@ __init int intel_pmu_init(void) x86_pmu.get_event_constraints = hsw_get_event_constraints; x86_pmu.cpu_events = hsw_events_attrs; x86_pmu.limit_period = bdw_limit_period; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? + hsw_format_attr : nhm_format_attr; pr_cont("Broadwell events, "); break; @@ -4172,7 +4200,7 @@ __init int intel_pmu_init(void) /* all extra regs are per-cpu when HT is on */ x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - + extra_attr = slm_format_attr; pr_cont("Knights Landing/Mill events, "); break; @@ -4203,9 +4231,9 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, - skl_format_attr); - WARN_ON(!x86_pmu.format_attrs); + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? + hsw_format_attr : nhm_format_attr; + extra_attr = merge_attr(extra_attr, skl_format_attr); x86_pmu.cpu_events = hsw_events_attrs; pr_cont("Skylake events, "); break; @@ -4226,6 +4254,12 @@ __init int intel_pmu_init(void) } } + if (version >= 2 && extra_attr) { + x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, + extra_attr); + WARN_ON(!x86_pmu.format_attrs); + } + if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) { WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC); -- 2.9.4 ^ permalink raw reply related [flat|nested] 9+ messages in thread
* [tip:perf/core] perf/x86: Only show format attributes when supported 2017-08-22 18:52 ` [PATCH v1 1/2] x86/perf: Only show format attributes when supported Andi Kleen @ 2017-08-25 11:54 ` tip-bot for Andi Kleen 0 siblings, 0 replies; 9+ messages in thread From: tip-bot for Andi Kleen @ 2017-08-25 11:54 UTC (permalink / raw) To: linux-tip-commits; +Cc: tglx, torvalds, linux-kernel, hpa, ak, mingo, peterz Commit-ID: a5df70c354c26e20d5fd8eb64517f724e97ef0b2 Gitweb: http://git.kernel.org/tip/a5df70c354c26e20d5fd8eb64517f724e97ef0b2 Author: Andi Kleen <ak@linux.intel.com> AuthorDate: Tue, 22 Aug 2017 11:52:00 -0700 Committer: Ingo Molnar <mingo@kernel.org> CommitDate: Fri, 25 Aug 2017 11:04:18 +0200 perf/x86: Only show format attributes when supported Only show the Intel format attributes in sysfs when the feature is actually supported with the current model numbers. This allows programs to probe what format attributes are available, and give a sensible error message to users if they are not. This handles near all cases for intel attributes since Nehalem, except the (obscure) case when the model number if known, but PEBS is disabled in PERF_CAPABILITIES. 
Signed-off-by: Andi Kleen <ak@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20170822185201.9261-2-andi@firstfloor.org Signed-off-by: Ingo Molnar <mingo@kernel.org> --- arch/x86/events/intel/core.c | 48 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 41 insertions(+), 7 deletions(-) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 6f34200..b00f135 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3415,12 +3415,26 @@ static struct attribute *intel_arch3_formats_attr[] = { &format_attr_any.attr, &format_attr_inv.attr, &format_attr_cmask.attr, + NULL, +}; + +static struct attribute *hsw_format_attr[] = { &format_attr_in_tx.attr, &format_attr_in_tx_cp.attr, + &format_attr_offcore_rsp.attr, + &format_attr_ldlat.attr, + NULL +}; - &format_attr_offcore_rsp.attr, /* XXX do NHM/WSM + SNB breakout */ - &format_attr_ldlat.attr, /* PEBS load latency */ - NULL, +static struct attribute *nhm_format_attr[] = { + &format_attr_offcore_rsp.attr, + &format_attr_ldlat.attr, + NULL +}; + +static struct attribute *slm_format_attr[] = { + &format_attr_offcore_rsp.attr, + NULL }; static struct attribute *skl_format_attr[] = { @@ -3795,6 +3809,7 @@ __init int intel_pmu_init(void) unsigned int unused; struct extra_reg *er; int version, i; + struct attribute **extra_attr = NULL; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { switch (boot_cpu_data.x86) { @@ -3906,6 +3921,7 @@ __init int intel_pmu_init(void) intel_pmu_pebs_data_source_nhm(); x86_add_quirk(intel_nehalem_quirk); x86_pmu.pebs_no_tlb = 1; + extra_attr = nhm_format_attr; pr_cont("Nehalem events, "); break; @@ -3941,6 +3957,7 @@ __init int intel_pmu_init(void) x86_pmu.extra_regs = intel_slm_extra_regs; x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.cpu_events = 
slm_events_attrs; + extra_attr = slm_format_attr; pr_cont("Silvermont events, "); break; @@ -3966,6 +3983,7 @@ __init int intel_pmu_init(void) x86_pmu.lbr_pt_coexist = true; x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.cpu_events = glm_events_attrs; + extra_attr = slm_format_attr; pr_cont("Goldmont events, "); break; @@ -3992,6 +4010,7 @@ __init int intel_pmu_init(void) x86_pmu.cpu_events = glm_events_attrs; /* Goldmont Plus has 4-wide pipeline */ event_attr_td_total_slots_scale_glm.event_str = "4"; + extra_attr = slm_format_attr; pr_cont("Goldmont plus events, "); break; @@ -4021,6 +4040,7 @@ __init int intel_pmu_init(void) X86_CONFIG(.event=0xb1, .umask=0x3f, .inv=1, .cmask=1); intel_pmu_pebs_data_source_nhm(); + extra_attr = nhm_format_attr; pr_cont("Westmere events, "); break; @@ -4057,6 +4077,8 @@ __init int intel_pmu_init(void) intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = X86_CONFIG(.event=0xb1, .umask=0x01, .inv=1, .cmask=1); + extra_attr = nhm_format_attr; + pr_cont("SandyBridge events, "); break; @@ -4091,6 +4113,8 @@ __init int intel_pmu_init(void) intel_perfmon_event_map[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = X86_CONFIG(.event=0x0e, .umask=0x01, .inv=1, .cmask=1); + extra_attr = nhm_format_attr; + pr_cont("IvyBridge events, "); break; @@ -4119,6 +4143,8 @@ __init int intel_pmu_init(void) x86_pmu.get_event_constraints = hsw_get_event_constraints; x86_pmu.cpu_events = hsw_events_attrs; x86_pmu.lbr_double_abort = true; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? + hsw_format_attr : nhm_format_attr; pr_cont("Haswell events, "); break; @@ -4155,6 +4181,8 @@ __init int intel_pmu_init(void) x86_pmu.get_event_constraints = hsw_get_event_constraints; x86_pmu.cpu_events = hsw_events_attrs; x86_pmu.limit_period = bdw_limit_period; + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
+ hsw_format_attr : nhm_format_attr; pr_cont("Broadwell events, "); break; @@ -4173,7 +4201,7 @@ __init int intel_pmu_init(void) /* all extra regs are per-cpu when HT is on */ x86_pmu.flags |= PMU_FL_HAS_RSP_1; x86_pmu.flags |= PMU_FL_NO_HT_SHARING; - + extra_attr = slm_format_attr; pr_cont("Knights Landing/Mill events, "); break; @@ -4204,9 +4232,9 @@ __init int intel_pmu_init(void) x86_pmu.hw_config = hsw_hw_config; x86_pmu.get_event_constraints = hsw_get_event_constraints; - x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, - skl_format_attr); - WARN_ON(!x86_pmu.format_attrs); + extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? + hsw_format_attr : nhm_format_attr; + extra_attr = merge_attr(extra_attr, skl_format_attr); x86_pmu.cpu_events = hsw_events_attrs; intel_pmu_pebs_data_source_skl( boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); @@ -4229,6 +4257,12 @@ __init int intel_pmu_init(void) } } + if (version >= 2 && extra_attr) { + x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, + extra_attr); + WARN_ON(!x86_pmu.format_attrs); + } + if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) { WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!", x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC); ^ permalink raw reply related [flat|nested] 9+ messages in thread
* [PATCH v1 2/2] x86/perf: Export some PMU attributes in caps 2017-08-22 18:51 x86/perf: Improve sysfs enumeration for cpu pmu Andi Kleen 2017-08-22 18:52 ` [PATCH v1 1/2] x86/perf: Only show format attributes when supported Andi Kleen @ 2017-08-22 18:52 ` Andi Kleen 2017-08-25 11:55 ` [tip:perf/core] perf/x86: Export some PMU attributes in caps/ directory tip-bot for Andi Kleen 1 sibling, 1 reply; 9+ messages in thread From: Andi Kleen @ 2017-08-22 18:52 UTC (permalink / raw) To: peterz; +Cc: linux-kernel, Andi Kleen From: Andi Kleen <ak@linux.intel.com> It can be difficult to figure out for user programs what features the x86 cpu pmu driver actually supports. Currently it requires grepping in dmesg, but dmesg is not always available. This adds a caps directory to /sys/devices/cpu, similar to the caps already used on intel_pt, which can be used to discover the available capabilities cleanly. Currently three capabilities are defined: - pmu_name Underlying CPU name known to the driver - max_precise Max precise level supported - branches Known depth of LBR. Example: % grep . 
/sys/devices/cpu/caps/* /sys/devices/cpu/caps/branches:32 /sys/devices/cpu/caps/max_precise:3 /sys/devices/cpu/caps/pmu_name:skylake % Signed-off-by: Andi Kleen <ak@linux.intel.com> --- arch/x86/events/core.c | 34 +++++++++++++++-------- arch/x86/events/intel/core.c | 66 +++++++++++++++++++++++++++++++++++++++++++- arch/x86/events/perf_event.h | 3 ++ 3 files changed, 91 insertions(+), 12 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index af12e294caed..d5f98095a155 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -487,22 +487,28 @@ static inline int precise_br_compat(struct perf_event *event) return m == b; } -int x86_pmu_hw_config(struct perf_event *event) +int x86_pmu_max_precise(void) { - if (event->attr.precise_ip) { - int precise = 0; + int precise = 0; + + /* Support for constant skid */ + if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { + precise++; - /* Support for constant skid */ - if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { + /* Support for IP fixup */ + if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) precise++; - /* Support for IP fixup */ - if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) - precise++; + if (x86_pmu.pebs_prec_dist) + precise++; + } + return precise; +} - if (x86_pmu.pebs_prec_dist) - precise++; - } +int x86_pmu_hw_config(struct perf_event *event) +{ + if (event->attr.precise_ip) { + int precise = x86_pmu_max_precise(); if (event->attr.precise_ip > precise) return -EOPNOTSUPP; @@ -1752,6 +1758,10 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) static struct attribute_group x86_pmu_attr_group; +static struct attribute_group x86_pmu_caps_group = { + .name = "caps", +}; + static int __init init_hw_perf_events(void) { struct x86_pmu_quirk *quirk; @@ -1798,6 +1808,7 @@ static int __init init_hw_perf_events(void) 0, x86_pmu.num_counters, 0, 0); x86_pmu_format_group.attrs = x86_pmu.format_attrs; + x86_pmu_caps_group.attrs = x86_pmu.caps_attrs; if 
(x86_pmu.event_attrs) x86_pmu_events_group.attrs = x86_pmu.event_attrs; @@ -2217,6 +2228,7 @@ static const struct attribute_group *x86_pmu_attr_groups[] = { &x86_pmu_attr_group, &x86_pmu_format_group, &x86_pmu_events_group, + &x86_pmu_caps_group, NULL, }; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 82faeed30135..a46bf78c0105 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3795,6 +3795,46 @@ static ssize_t freeze_on_smi_store(struct device *cdev, static DEVICE_ATTR_RW(freeze_on_smi); +static ssize_t branches_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); +} + +static DEVICE_ATTR_RO(branches); + +static struct attribute *lbr_attrs[] = { + &dev_attr_branches.attr, + NULL +}; + +static char pmu_name_str[30]; + +static ssize_t pmu_name_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str); +} + +static DEVICE_ATTR_RO(pmu_name); + +static ssize_t max_precise_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise()); +} + +static DEVICE_ATTR_RO(max_precise); + +static struct attribute *intel_pmu_caps_attrs[] = { + &dev_attr_pmu_name.attr, + &dev_attr_max_precise.attr, + NULL +}; + static struct attribute *intel_pmu_attrs[] = { &dev_attr_freeze_on_smi.attr, NULL, @@ -3810,6 +3850,7 @@ __init int intel_pmu_init(void) struct extra_reg *er; int version, i; struct attribute **extra_attr = NULL; + char *name; if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { switch (boot_cpu_data.x86) { @@ -3877,6 +3918,7 @@ __init int intel_pmu_init(void) switch (boot_cpu_data.x86_model) { case INTEL_FAM6_CORE_YONAH: pr_cont("Core events, "); + name = "core"; break; case INTEL_FAM6_CORE2_MEROM: @@ -3892,6 +3934,7 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = 
intel_core2_event_constraints; x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; pr_cont("Core2 events, "); + name = "core2"; break; case INTEL_FAM6_NEHALEM: @@ -3923,6 +3966,7 @@ __init int intel_pmu_init(void) extra_attr = nhm_format_attr; pr_cont("Nehalem events, "); + name = "nehalem"; break; case INTEL_FAM6_ATOM_PINEVIEW: @@ -3939,6 +3983,7 @@ __init int intel_pmu_init(void) x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_core2; pr_cont("Atom events, "); + name = "bonnell"; break; case INTEL_FAM6_ATOM_SILVERMONT1: @@ -3958,6 +4003,7 @@ __init int intel_pmu_init(void) x86_pmu.cpu_events = slm_events_attrs; extra_attr = slm_format_attr; pr_cont("Silvermont events, "); + name = "silvermont"; break; case INTEL_FAM6_ATOM_GOLDMONT: @@ -3984,6 +4030,7 @@ __init int intel_pmu_init(void) x86_pmu.cpu_events = glm_events_attrs; extra_attr = slm_format_attr; pr_cont("Goldmont events, "); + name = "goldmont"; break; case INTEL_FAM6_ATOM_GEMINI_LAKE: @@ -4011,6 +4058,7 @@ __init int intel_pmu_init(void) event_attr_td_total_slots_scale_glm.event_str = "4"; extra_attr = slm_format_attr; pr_cont("Goldmont plus events, "); + name = "goldmont_plus"; break; case INTEL_FAM6_WESTMERE: @@ -4041,6 +4089,7 @@ __init int intel_pmu_init(void) intel_pmu_pebs_data_source_nhm(); extra_attr = nhm_format_attr; pr_cont("Westmere events, "); + name = "westmere"; break; case INTEL_FAM6_SANDYBRIDGE: @@ -4079,6 +4128,7 @@ __init int intel_pmu_init(void) extra_attr = nhm_format_attr; pr_cont("SandyBridge events, "); + name = "sandybridge"; break; case INTEL_FAM6_IVYBRIDGE: @@ -4115,6 +4165,7 @@ __init int intel_pmu_init(void) extra_attr = nhm_format_attr; pr_cont("IvyBridge events, "); + name = "ivybridge"; break; @@ -4145,6 +4196,7 @@ __init int intel_pmu_init(void) extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? 
hsw_format_attr : nhm_format_attr; pr_cont("Haswell events, "); + name = "haswell"; break; case INTEL_FAM6_BROADWELL_CORE: @@ -4183,6 +4235,7 @@ __init int intel_pmu_init(void) extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; pr_cont("Broadwell events, "); + name = "broadwell"; break; case INTEL_FAM6_XEON_PHI_KNL: @@ -4202,6 +4255,7 @@ __init int intel_pmu_init(void) x86_pmu.flags |= PMU_FL_NO_HT_SHARING; extra_attr = slm_format_attr; pr_cont("Knights Landing/Mill events, "); + name = "knights-landing"; break; case INTEL_FAM6_SKYLAKE_MOBILE: @@ -4236,6 +4290,7 @@ __init int intel_pmu_init(void) extra_attr = merge_attr(extra_attr, skl_format_attr); x86_pmu.cpu_events = hsw_events_attrs; pr_cont("Skylake events, "); + name = "skylake"; break; default: @@ -4243,6 +4298,7 @@ __init int intel_pmu_init(void) case 1: x86_pmu.event_constraints = intel_v1_event_constraints; pr_cont("generic architected perfmon v1, "); + name = "generic_arch_v1"; break; default: /* @@ -4250,10 +4306,13 @@ __init int intel_pmu_init(void) */ x86_pmu.event_constraints = intel_gen_event_constraints; pr_cont("generic architected perfmon, "); + name = "generic_arch_v2+"; break; } } + snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name); + if (version >= 2 && extra_attr) { x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, extra_attr); @@ -4306,8 +4365,13 @@ __init int intel_pmu_init(void) x86_pmu.lbr_nr = 0; } - if (x86_pmu.lbr_nr) + x86_pmu.caps_attrs = intel_pmu_caps_attrs; + + if (x86_pmu.lbr_nr) { + x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs); pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); + } + /* * Access extra MSR may cause #GP under certain circumstances. * E.g. 
KVM doesn't support offcore event diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 476aec3a4cab..e3d38b1cda56 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -558,6 +558,7 @@ struct x86_pmu { int attr_rdpmc; struct attribute **format_attrs; struct attribute **event_attrs; + struct attribute **caps_attrs; ssize_t (*events_sysfs_show)(char *page, u64 config); struct attribute **cpu_events; @@ -741,6 +742,8 @@ int x86_reserve_hardware(void); void x86_release_hardware(void); +int x86_pmu_max_precise(void); + void hw_perf_lbr_event_destroy(struct perf_event *event); int x86_setup_perfctr(struct perf_event *event); -- 2.9.4 ^ permalink raw reply related [flat|nested] 9+ messages in thread
* [tip:perf/core] perf/x86: Export some PMU attributes in caps/ directory 2017-08-22 18:52 ` [PATCH v1 2/2] x86/perf: Export some PMU attributes in caps Andi Kleen @ 2017-08-25 11:55 ` tip-bot for Andi Kleen 2017-08-28 10:46 ` Peter Zijlstra 0 siblings, 1 reply; 9+ messages in thread From: tip-bot for Andi Kleen @ 2017-08-25 11:55 UTC (permalink / raw) To: linux-tip-commits; +Cc: torvalds, peterz, tglx, ak, hpa, linux-kernel, mingo Commit-ID: b00233b5306512a09e339d69ef5e390a77f2d302 Gitweb: http://git.kernel.org/tip/b00233b5306512a09e339d69ef5e390a77f2d302 Author: Andi Kleen <ak@linux.intel.com> AuthorDate: Tue, 22 Aug 2017 11:52:01 -0700 Committer: Ingo Molnar <mingo@kernel.org> CommitDate: Fri, 25 Aug 2017 11:04:20 +0200 perf/x86: Export some PMU attributes in caps/ directory It can be difficult to figure out for user programs what features the x86 CPU PMU driver actually supports. Currently it requires grepping in dmesg, but dmesg is not always available. This adds a caps directory to /sys/bus/event_source/devices/cpu/, similar to the caps already used on intel_pt, which can be used to discover the available capabilities cleanly. Three capabilities are defined: - pmu_name: Underlying CPU name known to the driver - max_precise: Max precise level supported - branches: Known depth of LBR. Example: % grep . 
/sys/bus/event_source/devices/cpu/caps/* /sys/bus/event_source/devices/cpu/caps/branches:32 /sys/bus/event_source/devices/cpu/caps/max_precise:3 /sys/bus/event_source/devices/cpu/caps/pmu_name:skylake Signed-off-by: Andi Kleen <ak@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/20170822185201.9261-3-andi@firstfloor.org Signed-off-by: Ingo Molnar <mingo@kernel.org> --- arch/x86/events/core.c | 34 +++++++++++++++-------- arch/x86/events/intel/core.c | 66 +++++++++++++++++++++++++++++++++++++++++++- arch/x86/events/perf_event.h | 3 ++ 3 files changed, 91 insertions(+), 12 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index af12e29..d5f9809 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -487,22 +487,28 @@ static inline int precise_br_compat(struct perf_event *event) return m == b; } -int x86_pmu_hw_config(struct perf_event *event) +int x86_pmu_max_precise(void) { - if (event->attr.precise_ip) { - int precise = 0; + int precise = 0; + + /* Support for constant skid */ + if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { + precise++; - /* Support for constant skid */ - if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) { + /* Support for IP fixup */ + if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) precise++; - /* Support for IP fixup */ - if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2) - precise++; + if (x86_pmu.pebs_prec_dist) + precise++; + } + return precise; +} - if (x86_pmu.pebs_prec_dist) - precise++; - } +int x86_pmu_hw_config(struct perf_event *event) +{ + if (event->attr.precise_ip) { + int precise = x86_pmu_max_precise(); if (event->attr.precise_ip > precise) return -EOPNOTSUPP; @@ -1752,6 +1758,10 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) static struct attribute_group 
x86_pmu_attr_group; +static struct attribute_group x86_pmu_caps_group = { + .name = "caps", +}; + static int __init init_hw_perf_events(void) { struct x86_pmu_quirk *quirk; @@ -1798,6 +1808,7 @@ static int __init init_hw_perf_events(void) 0, x86_pmu.num_counters, 0, 0); x86_pmu_format_group.attrs = x86_pmu.format_attrs; + x86_pmu_caps_group.attrs = x86_pmu.caps_attrs; if (x86_pmu.event_attrs) x86_pmu_events_group.attrs = x86_pmu.event_attrs; @@ -2217,6 +2228,7 @@ static const struct attribute_group *x86_pmu_attr_groups[] = { &x86_pmu_attr_group, &x86_pmu_format_group, &x86_pmu_events_group, + &x86_pmu_caps_group, NULL, }; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index b00f135..8fa2abd 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3795,6 +3795,46 @@ done: static DEVICE_ATTR_RW(freeze_on_smi); +static ssize_t branches_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu.lbr_nr); +} + +static DEVICE_ATTR_RO(branches); + +static struct attribute *lbr_attrs[] = { + &dev_attr_branches.attr, + NULL +}; + +static char pmu_name_str[30]; + +static ssize_t pmu_name_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%s\n", pmu_name_str); +} + +static DEVICE_ATTR_RO(pmu_name); + +static ssize_t max_precise_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise()); +} + +static DEVICE_ATTR_RO(max_precise); + +static struct attribute *intel_pmu_caps_attrs[] = { + &dev_attr_pmu_name.attr, + &dev_attr_max_precise.attr, + NULL +}; + static struct attribute *intel_pmu_attrs[] = { &dev_attr_freeze_on_smi.attr, NULL, @@ -3810,6 +3850,7 @@ __init int intel_pmu_init(void) struct extra_reg *er; int version, i; struct attribute **extra_attr = NULL; + char *name; if (!cpu_has(&boot_cpu_data, 
X86_FEATURE_ARCH_PERFMON)) { switch (boot_cpu_data.x86) { @@ -3877,6 +3918,7 @@ __init int intel_pmu_init(void) switch (boot_cpu_data.x86_model) { case INTEL_FAM6_CORE_YONAH: pr_cont("Core events, "); + name = "core"; break; case INTEL_FAM6_CORE2_MEROM: @@ -3892,6 +3934,7 @@ __init int intel_pmu_init(void) x86_pmu.event_constraints = intel_core2_event_constraints; x86_pmu.pebs_constraints = intel_core2_pebs_event_constraints; pr_cont("Core2 events, "); + name = "core2"; break; case INTEL_FAM6_NEHALEM: @@ -3924,6 +3967,7 @@ __init int intel_pmu_init(void) extra_attr = nhm_format_attr; pr_cont("Nehalem events, "); + name = "nehalem"; break; case INTEL_FAM6_ATOM_PINEVIEW: @@ -3940,6 +3984,7 @@ __init int intel_pmu_init(void) x86_pmu.pebs_constraints = intel_atom_pebs_event_constraints; x86_pmu.pebs_aliases = intel_pebs_aliases_core2; pr_cont("Atom events, "); + name = "bonnell"; break; case INTEL_FAM6_ATOM_SILVERMONT1: @@ -3959,6 +4004,7 @@ __init int intel_pmu_init(void) x86_pmu.cpu_events = slm_events_attrs; extra_attr = slm_format_attr; pr_cont("Silvermont events, "); + name = "silvermont"; break; case INTEL_FAM6_ATOM_GOLDMONT: @@ -3985,6 +4031,7 @@ __init int intel_pmu_init(void) x86_pmu.cpu_events = glm_events_attrs; extra_attr = slm_format_attr; pr_cont("Goldmont events, "); + name = "goldmont"; break; case INTEL_FAM6_ATOM_GEMINI_LAKE: @@ -4012,6 +4059,7 @@ __init int intel_pmu_init(void) event_attr_td_total_slots_scale_glm.event_str = "4"; extra_attr = slm_format_attr; pr_cont("Goldmont plus events, "); + name = "goldmont_plus"; break; case INTEL_FAM6_WESTMERE: @@ -4042,6 +4090,7 @@ __init int intel_pmu_init(void) intel_pmu_pebs_data_source_nhm(); extra_attr = nhm_format_attr; pr_cont("Westmere events, "); + name = "westmere"; break; case INTEL_FAM6_SANDYBRIDGE: @@ -4080,6 +4129,7 @@ __init int intel_pmu_init(void) extra_attr = nhm_format_attr; pr_cont("SandyBridge events, "); + name = "sandybridge"; break; case INTEL_FAM6_IVYBRIDGE: @@ -4116,6 +4166,7 @@ 
__init int intel_pmu_init(void) extra_attr = nhm_format_attr; pr_cont("IvyBridge events, "); + name = "ivybridge"; break; @@ -4146,6 +4197,7 @@ __init int intel_pmu_init(void) extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; pr_cont("Haswell events, "); + name = "haswell"; break; case INTEL_FAM6_BROADWELL_CORE: @@ -4184,6 +4236,7 @@ __init int intel_pmu_init(void) extra_attr = boot_cpu_has(X86_FEATURE_RTM) ? hsw_format_attr : nhm_format_attr; pr_cont("Broadwell events, "); + name = "broadwell"; break; case INTEL_FAM6_XEON_PHI_KNL: @@ -4203,6 +4256,7 @@ __init int intel_pmu_init(void) x86_pmu.flags |= PMU_FL_NO_HT_SHARING; extra_attr = slm_format_attr; pr_cont("Knights Landing/Mill events, "); + name = "knights-landing"; break; case INTEL_FAM6_SKYLAKE_MOBILE: @@ -4239,6 +4293,7 @@ __init int intel_pmu_init(void) intel_pmu_pebs_data_source_skl( boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X); pr_cont("Skylake events, "); + name = "skylake"; break; default: @@ -4246,6 +4301,7 @@ __init int intel_pmu_init(void) case 1: x86_pmu.event_constraints = intel_v1_event_constraints; pr_cont("generic architected perfmon v1, "); + name = "generic_arch_v1"; break; default: /* @@ -4253,10 +4309,13 @@ __init int intel_pmu_init(void) */ x86_pmu.event_constraints = intel_gen_event_constraints; pr_cont("generic architected perfmon, "); + name = "generic_arch_v2+"; break; } } + snprintf(pmu_name_str, sizeof pmu_name_str, "%s", name); + if (version >= 2 && extra_attr) { x86_pmu.format_attrs = merge_attr(intel_arch3_formats_attr, extra_attr); @@ -4309,8 +4368,13 @@ __init int intel_pmu_init(void) x86_pmu.lbr_nr = 0; } - if (x86_pmu.lbr_nr) + x86_pmu.caps_attrs = intel_pmu_caps_attrs; + + if (x86_pmu.lbr_nr) { + x86_pmu.caps_attrs = merge_attr(x86_pmu.caps_attrs, lbr_attrs); pr_cont("%d-deep LBR, ", x86_pmu.lbr_nr); + } + /* * Access extra MSR may cause #GP under certain circumstances. * E.g. 
KVM doesn't support offcore event diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 0f7dad8..9337589 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -558,6 +558,7 @@ struct x86_pmu { int attr_rdpmc; struct attribute **format_attrs; struct attribute **event_attrs; + struct attribute **caps_attrs; ssize_t (*events_sysfs_show)(char *page, u64 config); struct attribute **cpu_events; @@ -742,6 +743,8 @@ int x86_reserve_hardware(void); void x86_release_hardware(void); +int x86_pmu_max_precise(void); + void hw_perf_lbr_event_destroy(struct perf_event *event); int x86_setup_perfctr(struct perf_event *event); ^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [tip:perf/core] perf/x86: Export some PMU attributes in caps/ directory 2017-08-25 11:55 ` [tip:perf/core] perf/x86: Export some PMU attributes in caps/ directory tip-bot for Andi Kleen @ 2017-08-28 10:46 ` Peter Zijlstra 2017-08-28 11:01 ` Borislav Petkov ` (2 more replies) 0 siblings, 3 replies; 9+ messages in thread From: Peter Zijlstra @ 2017-08-28 10:46 UTC (permalink / raw) To: mingo, linux-kernel, hpa, tglx, ak, torvalds Cc: linux-tip-commits, Borislav Petkov On Fri, Aug 25, 2017 at 04:55:03AM -0700, tip-bot for Andi Kleen wrote: > @@ -1798,6 +1808,7 @@ static int __init init_hw_perf_events(void) > 0, x86_pmu.num_counters, 0, 0); > > x86_pmu_format_group.attrs = x86_pmu.format_attrs; > + x86_pmu_caps_group.attrs = x86_pmu.caps_attrs; > > if (x86_pmu.event_attrs) > x86_pmu_events_group.attrs = x86_pmu.event_attrs; > @@ -2217,6 +2228,7 @@ static const struct attribute_group *x86_pmu_attr_groups[] = { > &x86_pmu_attr_group, > &x86_pmu_format_group, > &x86_pmu_events_group, > + &x86_pmu_caps_group, > NULL, > }; This generates: [ 1.421821] ------------[ cut here ]------------ [ 1.423424] WARNING: CPU: 1 PID: 1 at fs/sysfs/group.c:120 internal_create_group+0x277/0x2c0 [ 1.426453] Modules linked in: [ 1.427777] CPU: 1 PID: 1 Comm: swapper/0 Not tainted 4.13.0-rc7+ #2 [ 1.429538] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Debian-1.8.2-1 04/01/2014 [ 1.432218] task: ffff88007c138000 task.stack: ffffc90000008000 [ 1.433760] RIP: 0010:internal_create_group+0x277/0x2c0 [ 1.435187] RSP: 0018:ffffc9000000bd78 EFLAGS: 00010296 [ 1.436757] RAX: 000000000000003b RBX: 0000000000000003 RCX: 0000000000000000 [ 1.438461] RDX: 0000000000000001 RSI: ffffffff8109ff63 RDI: ffffffff8109ff63 [ 1.447453] RBP: ffffc9000000bdb0 R08: 0000000000000000 R09: 0000000000000102 [ 1.449227] R10: ffff88007c0e10d0 R11: 0000000081e22b01 R12: ffffffff81c10fc0 [ 1.450924] R13: 0000000000000000 R14: ffff88007b018aa0 R15: ffff88007b018810 [ 1.452718] FS: 0000000000000000(0000) 
GS:ffff88007ec40000(0000) knlGS:0000000000000000 [ 1.455053] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 1.456630] CR2: ffffc900001fc000 CR3: 0000000001c09000 CR4: 00000000000406e0 [ 1.458334] Call Trace: [ 1.459341] sysfs_create_groups+0x41/0x80 [ 1.460675] device_add+0x5ae/0x600 [ 1.461842] ? set_debug_rodata+0x17/0x17 [ 1.463042] pmu_dev_alloc+0x9a/0xf0 [ 1.464297] perf_event_sysfs_init+0x54/0x8d [ 1.465570] ? trace_event_define_fields_xdp_exception+0x87/0x87 [ 1.486645] do_one_initcall+0x52/0x190 [ 1.487872] ? set_debug_rodata+0x17/0x17 [ 1.489188] kernel_init_freeable+0x11e/0x1a1 [ 1.490800] ? rest_init+0xd0/0xd0 [ 1.492628] kernel_init+0xe/0x100 [ 1.494344] ret_from_fork+0x27/0x40 [ 1.496130] Code: 48 83 7a 20 00 0f 85 f5 fd ff ff 48 8b 02 48 8b 37 48 c7 c2 16 d4 9e 81 48 c7 c7 c8 08 9f 81 48 85 c0 48 0f 45 d0 e8 3a 17 ea ff <0f> ff b8 ea ff ff ff e9 ab fe ff ff 48 83 7f 30 00 0f 85 98 fd [ 1.501991] ---[ end trace aa30ea041c8942a2 ]--- When ran on !intel systems and: > +static ssize_t max_precise_show(struct device *cdev, > + struct device_attribute *attr, > + char *buf) > +{ > + return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise()); > +} > + > +static DEVICE_ATTR_RO(max_precise); is not intel specific at all.. Not very nice. Boris, could you give this a spin? --- Subject: perf/x86: Fix caps/ for !Intel Move the 'max_precise' capability into generic x86 code where it belongs. This fixes a sysfs splat on !Intel systems where we fail to set x86_pmu_caps_group.atts. 
Fixes: 22688d1c20f5 ("x86/perf: Export some PMU attributes in caps/ directory") Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> --- arch/x86/events/core.c | 33 ++++++++++++++++++++++++++++----- arch/x86/events/intel/core.c | 14 ++------------ 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d5f98095a155..73a6311c8baa 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1757,10 +1757,7 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) } static struct attribute_group x86_pmu_attr_group; - -static struct attribute_group x86_pmu_caps_group = { - .name = "caps", -}; +static struct attribute_group x86_pmu_caps_group; static int __init init_hw_perf_events(void) { @@ -1808,7 +1805,14 @@ static int __init init_hw_perf_events(void) 0, x86_pmu.num_counters, 0, 0); x86_pmu_format_group.attrs = x86_pmu.format_attrs; - x86_pmu_caps_group.attrs = x86_pmu.caps_attrs; + + if (x86_pmu.caps_attrs) { + struct attribute **tmp; + + tmp = merge_attr(x86_pmu_caps_group.attrs, x86_pmu.caps_attrs); + if (!WARN_ON(!tmp)) + x86_pmu_caps_group.attrs = tmp; + } if (x86_pmu.event_attrs) x86_pmu_events_group.attrs = x86_pmu.event_attrs; @@ -2224,6 +2228,25 @@ static struct attribute_group x86_pmu_attr_group = { .attrs = x86_pmu_attrs, }; +static ssize_t max_precise_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise()); +} + +static DEVICE_ATTR_RO(max_precise); + +static struct attribute *x86_pmu_caps_attrs[] = { + &dev_attr_max_precise.attr, + NULL +}; + +static struct attribute_group x86_pmu_caps_group = { + .name = "caps", + .attrs = x86_pmu_caps_attrs, +}; + static const struct attribute_group *x86_pmu_attr_groups[] = { &x86_pmu_attr_group, &x86_pmu_format_group, diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 8fa2abd9c8b6..829e89cfcee2 100644 --- 
a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3820,19 +3820,9 @@ static ssize_t pmu_name_show(struct device *cdev, static DEVICE_ATTR_RO(pmu_name); -static ssize_t max_precise_show(struct device *cdev, - struct device_attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise()); -} - -static DEVICE_ATTR_RO(max_precise); - static struct attribute *intel_pmu_caps_attrs[] = { - &dev_attr_pmu_name.attr, - &dev_attr_max_precise.attr, - NULL + &dev_attr_pmu_name.attr, + NULL }; static struct attribute *intel_pmu_attrs[] = { ^ permalink raw reply related [flat|nested] 9+ messages in thread
* Re: [tip:perf/core] perf/x86: Export some PMU attributes in caps/ directory 2017-08-28 10:46 ` Peter Zijlstra @ 2017-08-28 11:01 ` Borislav Petkov 2017-08-28 19:29 ` Andi Kleen 2017-08-29 14:22 ` [tip:perf/core] perf/x86: Fix caps/ for !Intel tip-bot for Peter Zijlstra 2 siblings, 0 replies; 9+ messages in thread From: Borislav Petkov @ 2017-08-28 11:01 UTC (permalink / raw) To: Peter Zijlstra Cc: mingo, linux-kernel, hpa, tglx, ak, torvalds, linux-tip-commits On Mon, Aug 28, 2017 at 12:46:50PM +0200, Peter Zijlstra wrote: > Boris, could you give this a spin? > > --- > Subject: perf/x86: Fix caps/ for !Intel > > Move the 'max_precise' capability into generic x86 code where it > belongs. This fixes a sysfs splat on !Intel systems where we fail to set > x86_pmu_caps_group.atts. > > Fixes: 22688d1c20f5 ("x86/perf: Export some PMU attributes in caps/ directory") > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reported-and-tested-by: Borislav Petkov <bp@suse.de> -- Regards/Gruss, Boris. Good mailing practices for 400: avoid top-posting and trim the reply. ^ permalink raw reply [flat|nested] 9+ messages in thread
* Re: [tip:perf/core] perf/x86: Export some PMU attributes in caps/ directory 2017-08-28 10:46 ` Peter Zijlstra 2017-08-28 11:01 ` Borislav Petkov @ 2017-08-28 19:29 ` Andi Kleen 2017-08-29 14:22 ` [tip:perf/core] perf/x86: Fix caps/ for !Intel tip-bot for Peter Zijlstra 2 siblings, 0 replies; 9+ messages in thread From: Andi Kleen @ 2017-08-28 19:29 UTC (permalink / raw) To: Peter Zijlstra Cc: mingo, linux-kernel, hpa, tglx, torvalds, linux-tip-commits, Borislav Petkov > is not intel specific at all.. > > Not very nice. > > Boris, could you give this a spin? Thanks for fixing. I guess I could also just have removed the warning, but your patch is better. Reviewed-by: Andi Kleen <ak@linux.intel.com> -Andi ^ permalink raw reply [flat|nested] 9+ messages in thread
* [tip:perf/core] perf/x86: Fix caps/ for !Intel 2017-08-28 10:46 ` Peter Zijlstra 2017-08-28 11:01 ` Borislav Petkov 2017-08-28 19:29 ` Andi Kleen @ 2017-08-29 14:22 ` tip-bot for Peter Zijlstra 2 siblings, 0 replies; 9+ messages in thread From: tip-bot for Peter Zijlstra @ 2017-08-29 14:22 UTC (permalink / raw) To: linux-tip-commits Cc: mingo, tglx, linux-kernel, ak, peterz, torvalds, hpa, bp Commit-ID: 5da382eb6ea37e2c49ef521c636d73f6ecc3fa81 Gitweb: http://git.kernel.org/tip/5da382eb6ea37e2c49ef521c636d73f6ecc3fa81 Author: Peter Zijlstra <peterz@infradead.org> AuthorDate: Mon, 28 Aug 2017 12:46:50 +0200 Committer: Ingo Molnar <mingo@kernel.org> CommitDate: Tue, 29 Aug 2017 15:09:25 +0200 perf/x86: Fix caps/ for !Intel Move the 'max_precise' capability into generic x86 code where it belongs. This fixes a sysfs splat on !Intel systems where we fail to set x86_pmu_caps_group.atts. Reported-and-tested-by: Borislav Petkov <bp@suse.de> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Reviewed-by: Andi Kleen <ak@linux.intel.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: hpa@zytor.com Fixes: 22688d1c20f5 ("x86/perf: Export some PMU attributes in caps/ directory") Link: http://lkml.kernel.org/r/20170828104650.2u3rsim4jafyjzv2@hirez.programming.kicks-ass.net Signed-off-by: Ingo Molnar <mingo@kernel.org> --- arch/x86/events/core.c | 33 ++++++++++++++++++++++++++++----- arch/x86/events/intel/core.c | 14 ++------------ 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index d5f9809..73a6311 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1757,10 +1757,7 @@ ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event) } static struct attribute_group x86_pmu_attr_group; - -static struct attribute_group x86_pmu_caps_group = { - .name = "caps", -}; +static struct attribute_group 
x86_pmu_caps_group; static int __init init_hw_perf_events(void) { @@ -1808,7 +1805,14 @@ static int __init init_hw_perf_events(void) 0, x86_pmu.num_counters, 0, 0); x86_pmu_format_group.attrs = x86_pmu.format_attrs; - x86_pmu_caps_group.attrs = x86_pmu.caps_attrs; + + if (x86_pmu.caps_attrs) { + struct attribute **tmp; + + tmp = merge_attr(x86_pmu_caps_group.attrs, x86_pmu.caps_attrs); + if (!WARN_ON(!tmp)) + x86_pmu_caps_group.attrs = tmp; + } if (x86_pmu.event_attrs) x86_pmu_events_group.attrs = x86_pmu.event_attrs; @@ -2224,6 +2228,25 @@ static struct attribute_group x86_pmu_attr_group = { .attrs = x86_pmu_attrs, }; +static ssize_t max_precise_show(struct device *cdev, + struct device_attribute *attr, + char *buf) +{ + return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise()); +} + +static DEVICE_ATTR_RO(max_precise); + +static struct attribute *x86_pmu_caps_attrs[] = { + &dev_attr_max_precise.attr, + NULL +}; + +static struct attribute_group x86_pmu_caps_group = { + .name = "caps", + .attrs = x86_pmu_caps_attrs, +}; + static const struct attribute_group *x86_pmu_attr_groups[] = { &x86_pmu_attr_group, &x86_pmu_format_group, diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 8fa2abd..829e89c 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3820,19 +3820,9 @@ static ssize_t pmu_name_show(struct device *cdev, static DEVICE_ATTR_RO(pmu_name); -static ssize_t max_precise_show(struct device *cdev, - struct device_attribute *attr, - char *buf) -{ - return snprintf(buf, PAGE_SIZE, "%d\n", x86_pmu_max_precise()); -} - -static DEVICE_ATTR_RO(max_precise); - static struct attribute *intel_pmu_caps_attrs[] = { - &dev_attr_pmu_name.attr, - &dev_attr_max_precise.attr, - NULL + &dev_attr_pmu_name.attr, + NULL }; static struct attribute *intel_pmu_attrs[] = { ^ permalink raw reply related [flat|nested] 9+ messages in thread
end of thread, other threads:[~2017-08-29 14:26 UTC | newest] Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed) -- links below jump to the message on this page -- 2017-08-22 18:51 x86/perf: Improve sysfs enumeration for cpu pmu Andi Kleen 2017-08-22 18:52 ` [PATCH v1 1/2] x86/perf: Only show format attributes when supported Andi Kleen 2017-08-25 11:54 ` [tip:perf/core] perf/x86: " tip-bot for Andi Kleen 2017-08-22 18:52 ` [PATCH v1 2/2] x86/perf: Export some PMU attributes in caps Andi Kleen 2017-08-25 11:55 ` [tip:perf/core] perf/x86: Export some PMU attributes in caps/ directory tip-bot for Andi Kleen 2017-08-28 10:46 ` Peter Zijlstra 2017-08-28 11:01 ` Borislav Petkov 2017-08-28 19:29 ` Andi Kleen 2017-08-29 14:22 ` [tip:perf/core] perf/x86: Fix caps/ for !Intel tip-bot for Peter Zijlstra
This is a public inbox, see mirroring instructions for how to clone and mirror all data and code used for this inbox; as well as URLs for NNTP newsgroup(s).