From: kan.liang@linux.intel.com
To: peterz@infradead.org, acme@kernel.org, mingo@kernel.org,
	linux-kernel@vger.kernel.org
Cc: tglx@linutronix.de, bp@alien8.de, namhyung@kernel.org,
	jolsa@redhat.com, ak@linux.intel.com, yao.jin@linux.intel.com,
	alexander.shishkin@linux.intel.com, adrian.hunter@intel.com
Subject: [PATCH 31/49] perf stat: Hybrid evsel uses its own cpus
Date: Mon,  8 Feb 2021 07:25:28 -0800	[thread overview]
Message-ID: <1612797946-18784-32-git-send-email-kan.liang@linux.intel.com> (raw)
In-Reply-To: <1612797946-18784-1-git-send-email-kan.liang@linux.intel.com>

From: Jin Yao <yao.jin@linux.intel.com>

On a hybrid platform, atom events can only be enabled on atom CPUs and core
events can only be enabled on core CPUs. So a hybrid event can only be
enabled on its own CPUs.

But the problem with current perf is that the cpus for an evsel (obtained
via PMU sysfs) have been merged into evsel_list->core.all_cpus, which may
end up covering all CPUs.

So we need a way to let a hybrid event use only its own CPUs.

The idea is to create a new helper, evlist__invalidate_all_cpus, to
invalidate evsel_list->core.all_cpus (replacing it with a map whose single
entry is -1), so that evlist__for_each_cpu returns cpu -1 for a hybrid
evsel. When cpu is -1, the hybrid evsel falls back to its own cpus.

We will see the following code piece in the patch:

if (cpu == -1 && !evlist->thread_mode)
        evsel__enable_cpus(pos);

It lets the event be enabled only on the event's own cpus.
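
For background, a hybrid evsel's cpus come from the PMU's sysfs "cpus"
file, e.g. /sys/devices/cpu_core/cpus or /sys/devices/cpu_atom/cpus. A
minimal standalone sketch of reading that mask (illustration only, not
perf code; pmu_read_cpus is a made-up helper):

#include <stdio.h>
#include <string.h>

/* Read the raw cpu list (e.g. "0-15") that a PMU exports via sysfs. */
static int pmu_read_cpus(const char *pmu_name, char *buf, size_t sz)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/devices/%s/cpus", pmu_name);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* PMU absent or no "cpus" attribute */
	if (!fgets(buf, sz, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
	return 0;
}

With this patch applied, a command such as

  perf stat -e cpu_core/cycles/,cpu_atom/cycles/ -a sleep 1

should open each event only on the CPUs its own PMU reports, rather than
on evsel_list->core.all_cpus.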

Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
---
 tools/perf/builtin-stat.c | 37 ++++++++++++++++++++++--
 tools/perf/util/evlist.c  | 72 ++++++++++++++++++++++++++++++++++++++++++++---
 tools/perf/util/evlist.h  |  4 +++
 tools/perf/util/evsel.h   |  8 ++++++
 4 files changed, 115 insertions(+), 6 deletions(-)
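
Note: the same three-way dispatch recurs in the read, enable, disable and
close paths below. Schematically (a simplified sketch with placeholder
op_*() names, not literal code from this patch):

	if (cpu == -1 && !evlist->thread_mode)
		op_on_each_of_the_evsels_own_cpus(evsel);  /* hybrid */
	else if (evlist->thread_mode)
		op_on_cpu(evsel, 0);                       /* per-thread */
	else
		op_on_cpu(evsel, evsel->cpu_iter - 1);     /* per-cpu, unchanged */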

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index bc84b31..afb8789 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -392,6 +392,18 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
 	return 0;
 }
 
+static int read_counter_cpus(struct evsel *counter, struct timespec *rs)
+{
+	int cpu, nr_cpus, err = 0;
+	struct perf_cpu_map *cpus = evsel__cpus(counter);
+
+	nr_cpus = cpus ? cpus->nr : 1;
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		err = read_counter_cpu(counter, rs, cpu);
+
+	return err;
+}
+
 static int read_affinity_counters(struct timespec *rs)
 {
 	struct evsel *counter;
@@ -413,8 +425,14 @@ static int read_affinity_counters(struct timespec *rs)
 			if (evsel__cpu_iter_skip(counter, cpu))
 				continue;
 			if (!counter->err) {
-				counter->err = read_counter_cpu(counter, rs,
-								counter->cpu_iter - 1);
+				if (cpu == -1 && !evsel_list->thread_mode) {
+					counter->err = read_counter_cpus(counter, rs);
+				} else if (evsel_list->thread_mode) {
+					counter->err = read_counter_cpu(counter, rs, 0);
+				} else {
+					counter->err = read_counter_cpu(counter, rs,
+									counter->cpu_iter - 1);
+				}
 			}
 		}
 	}
@@ -747,6 +765,21 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
 	if (group)
 		evlist__set_leader(evsel_list);
 
+	/*
+	 * On a hybrid platform, the cpus for an evsel (via PMU sysfs) have
+	 * been merged into evsel_list->core.all_cpus. Use
+	 * evlist__invalidate_all_cpus to invalidate them so that
+	 * evlist__for_each_cpu returns cpu -1 for a hybrid evsel. If cpu is
+	 * -1, the hybrid evsel will use its own cpus.
+	 */
+	if (evlist__has_hybrid_events(evsel_list)) {
+		evlist__invalidate_all_cpus(evsel_list);
+		if (!target__has_cpu(&target) ||
+		    target__has_per_thread(&target)) {
+			evsel_list->thread_mode = true;
+		}
+	}
+
 	if (affinity__setup(&affinity) < 0)
 		return -1;
 
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 05363a7..626a0e7 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -375,7 +375,8 @@ bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
 bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
 {
 	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
-		ev->cpu_iter++;
+		if (cpu != -1)
+			ev->cpu_iter++;
 		return false;
 	}
 	return true;
@@ -404,6 +405,16 @@ static int evlist__is_enabled(struct evlist *evlist)
 	return false;
 }
 
+static void evsel__disable_cpus(struct evsel *evsel)
+{
+	int cpu, nr_cpus;
+	struct perf_cpu_map *cpus = evsel__cpus(evsel);
+
+	nr_cpus = cpus ? cpus->nr : 1;
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		evsel__disable_cpu(evsel, cpu);
+}
+
 static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 {
 	struct evsel *pos;
@@ -430,7 +441,12 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 					has_imm = true;
 				if (pos->immediate != imm)
 					continue;
-				evsel__disable_cpu(pos, pos->cpu_iter - 1);
+				if (cpu == -1 && !evlist->thread_mode)
+					evsel__disable_cpus(pos);
+				else if (evlist->thread_mode)
+					evsel__disable_cpu(pos, 0);
+				else
+					evsel__disable_cpu(pos, pos->cpu_iter - 1);
 			}
 		}
 		if (!has_imm)
@@ -466,6 +482,15 @@ void evlist__disable_evsel(struct evlist *evlist, char *evsel_name)
 	__evlist__disable(evlist, evsel_name);
 }
 
+static void evsel__enable_cpus(struct evsel *evsel)
+{
+	int cpu, nr_cpus;
+	struct perf_cpu_map *cpus = evsel__cpus(evsel);
+
+	nr_cpus = cpus ? cpus->nr : 1;
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		evsel__enable_cpu(evsel, cpu);
+}
 static void __evlist__enable(struct evlist *evlist, char *evsel_name)
 {
 	struct evsel *pos;
@@ -485,7 +510,12 @@ static void __evlist__enable(struct evlist *evlist, char *evsel_name)
 				continue;
 			if (!evsel__is_group_leader(pos) || !pos->core.fd)
 				continue;
-			evsel__enable_cpu(pos, pos->cpu_iter - 1);
+			if (cpu == -1 && !evlist->thread_mode)
+				evsel__enable_cpus(pos);
+			else if (evlist->thread_mode)
+				evsel__enable_cpu(pos, 0);
+			else
+				evsel__enable_cpu(pos, pos->cpu_iter - 1);
 		}
 	}
 	affinity__cleanup(&affinity);
@@ -1260,6 +1290,16 @@ void evlist__set_selected(struct evlist *evlist, struct evsel *evsel)
 	evlist->selected = evsel;
 }
 
+static void evsel__close_cpus(struct evsel *evsel)
+{
+	int cpu, nr_cpus;
+	struct perf_cpu_map *cpus = evsel__cpus(evsel);
+
+	nr_cpus = cpus ? cpus->nr : 1;
+	for (cpu = 0; cpu < nr_cpus; cpu++)
+		perf_evsel__close_cpu(&evsel->core, cpu);
+}
+
 void evlist__close(struct evlist *evlist)
 {
 	struct evsel *evsel;
@@ -1284,7 +1324,13 @@ void evlist__close(struct evlist *evlist)
 		evlist__for_each_entry_reverse(evlist, evsel) {
 			if (evsel__cpu_iter_skip(evsel, cpu))
 			    continue;
-			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
+
+			if (cpu == -1 && !evlist->thread_mode)
+				evsel__close_cpus(evsel);
+			else if (evlist->thread_mode)
+				perf_evsel__close_cpu(&evsel->core, 0);
+			else
+				perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
 		}
 	}
 	affinity__cleanup(&affinity);
@@ -2010,3 +2056,21 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
 	}
 	return NULL;
 }
+
+bool evlist__has_hybrid_events(struct evlist *evlist)
+{
+	struct evsel *evsel;
+
+	evlist__for_each_entry(evlist, evsel) {
+		if (evsel__is_hybrid_event(evsel))
+			return true;
+	}
+
+	return false;
+}
+
+void evlist__invalidate_all_cpus(struct evlist *evlist)
+{
+	perf_cpu_map__put(evlist->core.all_cpus);
+	evlist->core.all_cpus = perf_cpu_map__empty_new(1);
+}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 1aae758..9741df4 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -52,6 +52,7 @@ struct evlist {
 	struct perf_evlist core;
 	int		 nr_groups;
 	bool		 enabled;
+	bool		 thread_mode;
 	int		 id_pos;
 	int		 is_pos;
 	u64		 combined_sample_type;
@@ -353,4 +354,7 @@ int evlist__ctlfd_ack(struct evlist *evlist);
 #define EVLIST_DISABLED_MSG "Events disabled\n"
 
 struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
+void evlist__invalidate_all_cpus(struct evlist *evlist);
+
+bool evlist__has_hybrid_events(struct evlist *evlist);
 #endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 5c161a2..4eb054a 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -7,9 +7,11 @@
 #include <sys/types.h>
 #include <linux/perf_event.h>
 #include <linux/types.h>
+#include <string.h>
 #include <internal/evsel.h>
 #include <perf/evsel.h>
 #include "symbol_conf.h"
+#include "pmu.h"
 #include <internal/cpumap.h>
 
 struct bpf_object;
@@ -427,4 +429,10 @@ static inline bool evsel__is_dummy_event(struct evsel *evsel)
 struct perf_env *evsel__env(struct evsel *evsel);
 
 int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
+
+static inline bool evsel__is_hybrid_event(struct evsel *evsel)
+{
+	return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name);
+}
+
 #endif /* __PERF_EVSEL_H */
-- 
2.7.4

