* [PATCH v2 1/4] perf record: allocate affinity masks
2018-12-13 7:07 [PATCH v2 0/4] Reduce NUMA related overhead in perf record profiling on large server systems Alexey Budankov
@ 2018-12-13 7:18 ` Alexey Budankov
2018-12-13 7:19 ` [PATCH v2 2/4] perf record: bind the AIO user space buffers to nodes Alexey Budankov
` (2 subsequent siblings)
3 siblings, 0 replies; 10+ messages in thread
From: Alexey Budankov @ 2018-12-13 7:18 UTC
To: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Allocate the affinity option and affinity masks for the mmap
data buffers and the record thread, and initialize the allocated
objects.
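For reference, the masks below use the standard glibc cpu_set_t API;
a minimal standalone sketch of that pattern (illustrative only, not
part of the patch):

  #define _GNU_SOURCE
  #include <sched.h>

  int main(void)
  {
      cpu_set_t mask;

      CPU_ZERO(&mask);      /* start with an empty mask */
      CPU_SET(0, &mask);    /* add cpu 0 to the mask */
      /* bind the calling thread (pid 0) to the cpus in the mask */
      return sched_setaffinity(0, sizeof(mask), &mask);
  }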
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
Changes in v2:
- made debug affinity mode message user friendly
- converted affinity mode defines to enum values
---
tools/perf/builtin-record.c | 13 ++++++++++++-
tools/perf/perf.h | 8 ++++++++
tools/perf/util/evlist.c | 6 +++---
tools/perf/util/evlist.h | 2 +-
tools/perf/util/mmap.c | 2 ++
tools/perf/util/mmap.h | 3 ++-
6 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 882285fb9f64..b26febb54d01 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -81,12 +81,17 @@ struct record {
bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
+ cpu_set_t affinity_mask;
};
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
+static const char* affinity_tags[PERF_AFFINITY_EOF] = {
+ "SYS", "NODE", "CPU"
+};
+
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
@@ -533,7 +538,8 @@ static int record__mmap_evlist(struct record *rec,
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
+ opts->auxtrace_snapshot_mode,
+ opts->nr_cblocks, opts->affinity) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -1980,6 +1986,9 @@ int cmd_record(int argc, const char **argv)
# undef REASON
#endif
+ CPU_ZERO(&rec->affinity_mask);
+ rec->opts.affinity = PERF_AFFINITY_SYS;
+
rec->evlist = perf_evlist__new();
if (rec->evlist == NULL)
return -ENOMEM;
@@ -2143,6 +2152,8 @@ int cmd_record(int argc, const char **argv)
if (verbose > 0)
pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+ pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
+
err = __cmd_record(&record, argc, argv);
out:
perf_evlist__delete(rec->evlist);
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 388c6dd128b8..69f54529d81f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -83,6 +83,14 @@ struct record_opts {
clockid_t clockid;
u64 clockid_res_ns;
int nr_cblocks;
+ int affinity;
+};
+
+enum perf_affinity {
+ PERF_AFFINITY_SYS = 0,
+ PERF_AFFINITY_NODE,
+ PERF_AFFINITY_CPU,
+ PERF_AFFINITY_EOF
};
struct option;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index e90575192209..60e825be944a 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1018,7 +1018,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks)
+ bool auxtrace_overwrite, int nr_cblocks, int affinity)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@@ -1028,7 +1028,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks };
+ struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1060,7 +1060,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 868294491194..72728d7f4432 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -162,7 +162,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks);
+ bool auxtrace_overwrite, int nr_cblocks, int affinity);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 8fc39311a30d..e68ba754a8e2 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -343,6 +343,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->fd = fd;
map->cpu = cpu;
+ CPU_ZERO(&map->affinity_mask);
+
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index aeb6942fdb00..e566c19b242b 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -38,6 +38,7 @@ struct perf_mmap {
int nr_cblocks;
} aio;
#endif
+ cpu_set_t affinity_mask;
};
/*
@@ -69,7 +70,7 @@ enum bkw_mmap_state {
};
struct mmap_params {
- int prot, mask, nr_cblocks;
+ int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
};
* [PATCH v2 2/4] perf record: bind the AIO user space buffers to nodes
2018-12-13 7:07 [PATCH v2 0/4] Reduce NUMA related overhead in perf record profiling on large server systems Alexey Budankov
2018-12-13 7:18 ` [PATCH v2 1/4] perf record: allocate affinity masks Alexey Budankov
@ 2018-12-13 7:19 ` Alexey Budankov
2018-12-13 7:20 ` [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers Alexey Budankov
2018-12-13 7:20 ` [PATCH v2 4/4] perf record: implement --affinity=node|cpu option Alexey Budankov
3 siblings, 0 replies; 10+ messages in thread
From: Alexey Budankov @ 2018-12-13 7:19 UTC
To: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Allocate AIO user space buffers and bind them to the memory
nodes that the mmap kernel buffers are bound to.
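The binding follows the usual mbind(2)-on-anonymous-mapping pattern;
a minimal standalone sketch under the same single-node assumption
(illustrative helper name, needs libnuma's numaif.h, most error
handling elided):

  #define _GNU_SOURCE
  #include <sys/mman.h>
  #include <numaif.h>        /* mbind(), MPOL_BIND; link with -lnuma */

  static void *alloc_buf_on_node(size_t len, int node)
  {
      unsigned long node_mask = 1UL << node;
      void *data = mmap(NULL, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

      if (data == MAP_FAILED)
          return NULL;
      /* bind the pages of [data, data + len) to the given node */
      if (mbind(data, len, MPOL_BIND, &node_mask,
                sizeof(node_mask) * 8, 0)) {
          /* binding failed; the buffer is still usable, just unbound */
      }
      return data;
  }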
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
Changes in v2:
- implemented perf_mmap__aio_alloc, perf_mmap__aio_free, perf_mmap__aio_bind
and put HAVE_LIBNUMA_SUPPORT #ifdefs in there
---
tools/perf/util/mmap.c | 49 ++++++++++++++++++++++++++++++++++++++++--
1 file changed, 47 insertions(+), 2 deletions(-)
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index e68ba754a8e2..742fa9a8e498 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -10,6 +10,9 @@
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
+#ifdef HAVE_LIBNUMA_SUPPORT
+#include <numaif.h>
+#endif
#include "debug.h"
#include "event.h"
#include "mmap.h"
@@ -154,6 +157,46 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
}
#ifdef HAVE_AIO_SUPPORT
+
+#ifdef HAVE_LIBNUMA_SUPPORT
+static void perf_mmap__aio_alloc(void **data, size_t len)
+{
+ *data = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+}
+
+static void perf_mmap__aio_free(void **data, size_t len)
+{
+ munmap(*data, len);
+ *data = NULL;
+}
+
+static void perf_mmap__aio_bind(void *data, size_t len, int cpu, int affinity)
+{
+ if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
+ unsigned long node_mask = 1UL << cpu__get_node(cpu);
+ if (mbind(data, len, MPOL_BIND, &node_mask, 1, 0)) {
+ pr_debug2("failed to bind [%p-%p] to node %d\n",
+ data, data + len, cpu__get_node(cpu));
+ }
+ }
+}
+#else
+static void perf_mmap__aio_alloc(void **data, size_t len)
+{
+ *data = malloc(len);
+}
+
+static void perf_mmap__aio_free(void **data, size_t len __maybe_unused)
+{
+ zfree(data);
+}
+
+static void perf_mmap__aio_bind(void *data __maybe_unused, size_t len __maybe_unused,
+ int cpu __maybe_unused, int affinity __maybe_unused)
+{
+}
+#endif
+
static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
int delta_max, i, prio;
@@ -177,11 +220,13 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
}
delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
for (i = 0; i < map->aio.nr_cblocks; ++i) {
- map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
+ size_t mmap_len = perf_mmap__mmap_len(map);
+ perf_mmap__aio_alloc(&(map->aio.data[i]), mmap_len);
if (!map->aio.data[i]) {
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
+ perf_mmap__aio_bind(map->aio.data[i], mmap_len, map->cpu, mp->affinity);
/*
* Use cblock.aio_fildes value different from -1
* to denote started aio write operation on the
@@ -210,7 +255,7 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
int i;
for (i = 0; i < map->aio.nr_cblocks; ++i)
- zfree(&map->aio.data[i]);
+ perf_mmap__aio_free(&(map->aio.data[i]), perf_mmap__mmap_len(map));
if (map->aio.data)
zfree(&map->aio.data);
zfree(&map->aio.cblocks);
* [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers
2018-12-13 7:07 [PATCH v2 0/4] Reduce NUMA related overhead in perf record profiling on large server systems Alexey Budankov
2018-12-13 7:18 ` [PATCH v2 1/4] perf record: allocate affinity masks Alexey Budankov
2018-12-13 7:19 ` [PATCH v2 2/4] perf record: bind the AIO user space buffers to nodes Alexey Budankov
@ 2018-12-13 7:20 ` Alexey Budankov
2018-12-13 7:20 ` [PATCH v2 4/4] perf record: implement --affinity=node|cpu option Alexey Budankov
3 siblings, 0 replies; 10+ messages in thread
From: Alexey Budankov @ 2018-12-13 7:20 UTC
To: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Build node cpu masks for the mmap data buffers. Apply the node
cpu masks to the tool thread every time it references data
buffers across nodes or across cpus.
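The migration itself is plain sched_setaffinity(2) applied to the
calling thread whenever the target mask differs; a minimal sketch of
that pattern (illustrative helper name, error handling elided):

  #define _GNU_SOURCE
  #include <sched.h>
  #include <string.h>

  /* move the tool thread onto the buffer's cpus if not already there */
  static void adjust_affinity(cpu_set_t *cur, const cpu_set_t *target)
  {
      if (CPU_EQUAL(cur, target))
          return;
      memcpy(cur, target, sizeof(*cur));
      sched_setaffinity(0 /* calling thread */, sizeof(*cur), cur);
  }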
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
Changes in v2:
- separated AIO buffers binding to patch 2/4
---
tools/perf/builtin-record.c | 9 +++++++++
tools/perf/util/evlist.c | 6 +++++-
tools/perf/util/mmap.c | 12 ++++++++++++
tools/perf/util/mmap.h | 1 +
4 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index b26febb54d01..eea96794ee45 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
+ if (opts->affinity != PERF_AFFINITY_SYS)
+ cpu__setup_cpunode_map();
+
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode,
@@ -755,6 +758,12 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
struct perf_mmap *map = &maps[i];
if (map->base) {
+ if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+ !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
+ CPU_ZERO(&rec->affinity_mask);
+ CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
+ sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+ }
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) != 0) {
rc = -1;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 60e825be944a..5ca5bb5ea0db 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1028,7 +1028,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
+ struct mmap_params mp = {
+ .nr_cblocks = nr_cblocks,
+ .affinity = affinity,
+ .cpu_map = cpus
+ };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 742fa9a8e498..a2095e4eda4b 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
+ int c, nr_cpus, node;
/*
* The last one will be done at perf_mmap__consume(), so that we
* make sure we don't prevent tools from consuming every last event in
@@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->cpu = cpu;
CPU_ZERO(&map->affinity_mask);
+ if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
+ nr_cpus = cpu_map__nr(mp->cpu_map);
+ node = cpu__get_node(map->cpu);
+ for (c = 0; c < nr_cpus; c++) {
+ if (cpu__get_node(c) == node) {
+ CPU_SET(c, &map->affinity_mask);
+ }
+ }
+ } else if (mp->affinity == PERF_AFFINITY_CPU) {
+ CPU_SET(map->cpu, &map->affinity_mask);
+ }
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index e566c19b242b..b3f724fad22e 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -72,6 +72,7 @@ enum bkw_mmap_state {
struct mmap_params {
int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
+ const struct cpu_map *cpu_map;
};
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
* [PATCH v2 4/4] perf record: implement --affinity=node|cpu option
2018-12-13 7:07 [PATCH v2 0/4] Reduce NUMA related overhead in perf record profiling on large server systems Alexey Budankov
` (2 preceding siblings ...)
2018-12-13 7:20 ` [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers Alexey Budankov
@ 2018-12-13 7:20 ` Alexey Budankov
3 siblings, 0 replies; 10+ messages in thread
From: Alexey Budankov @ 2018-12-13 7:20 UTC
To: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Implement the --affinity=node|cpu option for record mode; the
default remains the system affinity mask, which lets the tool
thread bounce between nodes.
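With the option in place, usage would look like this (hypothetical
command lines):

  # pin the trace reading thread to the NUMA node of the mmap buffer it drains
  $ perf record --affinity=node -a -- sleep 10

  # pin the trace reading thread to the cpu of the mmap buffer it drains
  $ perf record --affinity=cpu -a -- sleep 10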
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
tools/perf/Documentation/perf-record.txt | 5 +++++
tools/perf/builtin-record.c | 18 ++++++++++++++++++
2 files changed, 23 insertions(+)
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index d232b13ea713..efb839784f32 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -440,6 +440,11 @@ Use <n> control blocks in asynchronous (Posix AIO) trace writing mode (default:
Asynchronous mode is supported only when linking Perf tool with libc library
providing implementation for Posix AIO API.
+--affinity=mode::
+Set affinity mask of trace reading thread according to the policy defined by 'mode' value:
+ node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
+ cpu - thread affinity mask is set to cpu of the processed mmap buffer
+
--all-kernel::
Configure all used events to run in kernel space.
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index eea96794ee45..57dc3a45d16f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1653,6 +1653,21 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
ui__warning("unknown clockid %s, check man page\n", ostr);
return -1;
}
+static int record__parse_affinity(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+
+ if (!unset) {
+ if (str) {
+ if (!strcasecmp(str, "node"))
+ opts->affinity = PERF_AFFINITY_NODE;
+ else if (!strcasecmp(str, "cpu"))
+ opts->affinity = PERF_AFFINITY_CPU;
+ }
+ }
+
+ return 0;
+}
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
@@ -1961,6 +1976,9 @@ static struct option __record_options[] = {
&nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
record__aio_parse),
#endif
+ OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
+ "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
+ record__parse_affinity),
OPT_END()
};
* [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers
2018-12-24 12:11 [PATCH v2 0/4] Reduce NUMA related overhead in perf record profiling on large server systems Alexey Budankov
@ 2018-12-24 12:27 ` Alexey Budankov
2019-01-01 21:39 ` Jiri Olsa
2019-01-01 21:39 ` Jiri Olsa
0 siblings, 2 replies; 10+ messages in thread
From: Alexey Budankov @ 2018-12-24 12:27 UTC
To: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Build node cpu masks for the mmap data buffers. Apply the node
cpu masks to the tool thread every time it references data
buffers across nodes or across cpus.
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
Changes in v2:
- separated AIO buffers binding to patch 2/4
---
tools/perf/builtin-record.c | 9 +++++++++
tools/perf/util/evlist.c | 6 +++++-
tools/perf/util/mmap.c | 12 ++++++++++++
tools/perf/util/mmap.h | 1 +
4 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index b26febb54d01..eea96794ee45 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
+ if (opts->affinity != PERF_AFFINITY_SYS)
+ cpu__setup_cpunode_map();
+
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode,
@@ -755,6 +758,12 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
struct perf_mmap *map = &maps[i];
if (map->base) {
+ if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+ !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
+ CPU_ZERO(&rec->affinity_mask);
+ CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
+ sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+ }
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) != 0) {
rc = -1;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 60e825be944a..5ca5bb5ea0db 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1028,7 +1028,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
+ struct mmap_params mp = {
+ .nr_cblocks = nr_cblocks,
+ .affinity = affinity,
+ .cpu_map = cpus
+ };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 742fa9a8e498..a2095e4eda4b 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
+ int c, nr_cpus, node;
/*
* The last one will be done at perf_mmap__consume(), so that we
* make sure we don't prevent tools from consuming every last event in
@@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->cpu = cpu;
CPU_ZERO(&map->affinity_mask);
+ if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
+ nr_cpus = cpu_map__nr(mp->cpu_map);
+ node = cpu__get_node(map->cpu);
+ for (c = 0; c < nr_cpus; c++) {
+ if (cpu__get_node(c) == node) {
+ CPU_SET(c, &map->affinity_mask);
+ }
+ }
+ } else if (mp->affinity == PERF_AFFINITY_CPU) {
+ CPU_SET(map->cpu, &map->affinity_mask);
+ }
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index e566c19b242b..b3f724fad22e 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -72,6 +72,7 @@ enum bkw_mmap_state {
struct mmap_params {
int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
+ const struct cpu_map *cpu_map;
};
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
* Re: [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers
2018-12-24 12:27 ` [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers Alexey Budankov
@ 2019-01-01 21:39 ` Jiri Olsa
2019-01-09 9:13 ` Alexey Budankov
2019-01-01 21:39 ` Jiri Olsa
1 sibling, 1 reply; 10+ messages in thread
From: Jiri Olsa @ 2019-01-01 21:39 UTC
To: Alexey Budankov
Cc: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra,
Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
On Mon, Dec 24, 2018 at 03:27:17PM +0300, Alexey Budankov wrote:
SNIP
> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
> index 742fa9a8e498..a2095e4eda4b 100644
> --- a/tools/perf/util/mmap.c
> +++ b/tools/perf/util/mmap.c
> @@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
>
> int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
> {
> + int c, nr_cpus, node;
> /*
> * The last one will be done at perf_mmap__consume(), so that we
> * make sure we don't prevent tools from consuming every last event in
> @@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
> map->cpu = cpu;
>
> CPU_ZERO(&map->affinity_mask);
> + if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
> + nr_cpus = cpu_map__nr(mp->cpu_map);
> + node = cpu__get_node(map->cpu);
> + for (c = 0; c < nr_cpus; c++) {
> + if (cpu__get_node(c) == node) {
the 'c' is just an index here, I think you need to
use the real cpu value from the mp->cpu_map->map[c]
jirka
> + CPU_SET(c, &map->affinity_mask);
> + }
> + }
> + } else if (mp->affinity == PERF_AFFINITY_CPU) {
> + CPU_SET(map->cpu, &map->affinity_mask);
> + }
>
> if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
> &mp->auxtrace_mp, map->base, fd))
> diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
> index e566c19b242b..b3f724fad22e 100644
> --- a/tools/perf/util/mmap.h
> +++ b/tools/perf/util/mmap.h
> @@ -72,6 +72,7 @@ enum bkw_mmap_state {
> struct mmap_params {
> int prot, mask, nr_cblocks, affinity;
> struct auxtrace_mmap_params auxtrace_mp;
> + const struct cpu_map *cpu_map;
> };
>
> int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
* Re: [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers
2019-01-01 21:39 ` Jiri Olsa
@ 2019-01-09 9:13 ` Alexey Budankov
0 siblings, 0 replies; 10+ messages in thread
From: Alexey Budankov @ 2019-01-09 9:13 UTC
To: Jiri Olsa
Cc: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra,
Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Hi,
On 02.01.2019 0:39, Jiri Olsa wrote:
> On Mon, Dec 24, 2018 at 03:27:17PM +0300, Alexey Budankov wrote:
>
> SNIP
>
>> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
>> index 742fa9a8e498..a2095e4eda4b 100644
>> --- a/tools/perf/util/mmap.c
>> +++ b/tools/perf/util/mmap.c
>> @@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
>>
>> int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
>> {
>> + int c, nr_cpus, node;
>> /*
>> * The last one will be done at perf_mmap__consume(), so that we
>> * make sure we don't prevent tools from consuming every last event in
>> @@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
>> map->cpu = cpu;
>>
>> CPU_ZERO(&map->affinity_mask);
>> + if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
>> + nr_cpus = cpu_map__nr(mp->cpu_map);
>> + node = cpu__get_node(map->cpu);
>> + for (c = 0; c < nr_cpus; c++) {
>> + if (cpu__get_node(c) == node) {
>
> the 'c' is just an index here, I think you need to
> use the real cpu value from the mp->cpu_map->map[c]
Well, yes, mapping c index to online cpu index is more generic.
Corrected in v3. Thanks!
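Roughly like this (a sketch of the corrected mapping, not the exact
v3 hunk):

  nr_cpus = cpu_map__nr(mp->cpu_map);
  node = cpu__get_node(map->cpu);
  for (c = 0; c < nr_cpus; c++) {
      /* map the c index to the real online cpu number */
      int cpu = mp->cpu_map->map[c];

      if (cpu__get_node(cpu) == node)
          CPU_SET(cpu, &map->affinity_mask);
  }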
Alexey
>
> jirka
>
>> + CPU_SET(c, &map->affinity_mask);
>> + }
>> + }
>> + } else if (mp->affinity == PERF_AFFINITY_CPU) {
>> + CPU_SET(map->cpu, &map->affinity_mask);
>> + }
>>
>> if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
>> &mp->auxtrace_mp, map->base, fd))
>> diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
>> index e566c19b242b..b3f724fad22e 100644
>> --- a/tools/perf/util/mmap.h
>> +++ b/tools/perf/util/mmap.h
>> @@ -72,6 +72,7 @@ enum bkw_mmap_state {
>> struct mmap_params {
>> int prot, mask, nr_cblocks, affinity;
>> struct auxtrace_mmap_params auxtrace_mp;
>> + const struct cpu_map *cpu_map;
>> };
>>
>> int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
>
* Re: [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers
2018-12-24 12:27 ` [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers Alexey Budankov
2019-01-01 21:39 ` Jiri Olsa
@ 2019-01-01 21:39 ` Jiri Olsa
2019-01-09 9:14 ` Alexey Budankov
1 sibling, 1 reply; 10+ messages in thread
From: Jiri Olsa @ 2019-01-01 21:39 UTC
To: Alexey Budankov
Cc: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra,
Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
On Mon, Dec 24, 2018 at 03:27:17PM +0300, Alexey Budankov wrote:
>
> Build node cpu masks for the mmap data buffers. Apply the node
> cpu masks to the tool thread every time it references data
> buffers across nodes or across cpus.
>
> Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
> ---
> Changes in v2:
> - separated AIO buffers binding to patch 2/4
> ---
> tools/perf/builtin-record.c | 9 +++++++++
> tools/perf/util/evlist.c | 6 +++++-
> tools/perf/util/mmap.c | 12 ++++++++++++
> tools/perf/util/mmap.h | 1 +
> 4 files changed, 27 insertions(+), 1 deletion(-)
>
> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
> index b26febb54d01..eea96794ee45 100644
> --- a/tools/perf/builtin-record.c
> +++ b/tools/perf/builtin-record.c
> @@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
> struct record_opts *opts = &rec->opts;
> char msg[512];
>
> + if (opts->affinity != PERF_AFFINITY_SYS)
> + cpu__setup_cpunode_map();
> +
> if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
> opts->auxtrace_mmap_pages,
> opts->auxtrace_snapshot_mode,
> @@ -755,6 +758,12 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
> struct perf_mmap *map = &maps[i];
>
> if (map->base) {
> + if (rec->opts.affinity != PERF_AFFINITY_SYS &&
> + !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
> + CPU_ZERO(&rec->affinity_mask);
> + CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
> + sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
all this code depends on aio and LIBNUMA, let's keep it there then
also please add this and the affinity_mask setup code below to a function
thanks,
jirka
> + }
> if (!record__aio_enabled(rec)) {
> if (perf_mmap__push(map, rec, record__pushfn) != 0) {
> rc = -1;
> diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
> index 60e825be944a..5ca5bb5ea0db 100644
> --- a/tools/perf/util/evlist.c
> +++ b/tools/perf/util/evlist.c
> @@ -1028,7 +1028,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
> * Its value is decided by evsel's write_backward.
> * So &mp should not be passed through const pointer.
> */
> - struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
> + struct mmap_params mp = {
> + .nr_cblocks = nr_cblocks,
> + .affinity = affinity,
> + .cpu_map = cpus
> + };
>
> if (!evlist->mmap)
> evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
> index 742fa9a8e498..a2095e4eda4b 100644
> --- a/tools/perf/util/mmap.c
> +++ b/tools/perf/util/mmap.c
> @@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
>
> int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
> {
> + int c, nr_cpus, node;
> /*
> * The last one will be done at perf_mmap__consume(), so that we
> * make sure we don't prevent tools from consuming every last event in
> @@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
> map->cpu = cpu;
>
> CPU_ZERO(&map->affinity_mask);
> + if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
> + nr_cpus = cpu_map__nr(mp->cpu_map);
> + node = cpu__get_node(map->cpu);
> + for (c = 0; c < nr_cpus; c++) {
> + if (cpu__get_node(c) == node) {
> + CPU_SET(c, &map->affinity_mask);
> + }
> + }
> + } else if (mp->affinity == PERF_AFFINITY_CPU) {
> + CPU_SET(map->cpu, &map->affinity_mask);
> + }
>
> if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
> &mp->auxtrace_mp, map->base, fd))
> diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
> index e566c19b242b..b3f724fad22e 100644
> --- a/tools/perf/util/mmap.h
> +++ b/tools/perf/util/mmap.h
> @@ -72,6 +72,7 @@ enum bkw_mmap_state {
> struct mmap_params {
> int prot, mask, nr_cblocks, affinity;
> struct auxtrace_mmap_params auxtrace_mp;
> + const struct cpu_map *cpu_map;
> };
>
> int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
* Re: [PATCH v2 3/4] perf record: apply affinity masks when reading mmap buffers
2019-01-01 21:39 ` Jiri Olsa
@ 2019-01-09 9:14 ` Alexey Budankov
0 siblings, 0 replies; 10+ messages in thread
From: Alexey Budankov @ 2019-01-09 9:14 UTC
To: Jiri Olsa
Cc: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra,
Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Hi,
On 02.01.2019 0:39, Jiri Olsa wrote:
> On Mon, Dec 24, 2018 at 03:27:17PM +0300, Alexey Budankov wrote:
>>
>> Build node cpu masks for the mmap data buffers. Apply the node
>> cpu masks to the tool thread every time it references data
>> buffers across nodes or across cpus.
>>
>> Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
>> ---
>> Changes in v2:
>> - separated AIO buffers binding to patch 2/4
>> ---
>> tools/perf/builtin-record.c | 9 +++++++++
>> tools/perf/util/evlist.c | 6 +++++-
>> tools/perf/util/mmap.c | 12 ++++++++++++
>> tools/perf/util/mmap.h | 1 +
>> 4 files changed, 27 insertions(+), 1 deletion(-)
>>
>> diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
>> index b26febb54d01..eea96794ee45 100644
>> --- a/tools/perf/builtin-record.c
>> +++ b/tools/perf/builtin-record.c
>> @@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
>> struct record_opts *opts = &rec->opts;
>> char msg[512];
>>
>> + if (opts->affinity != PERF_AFFINITY_SYS)
>> + cpu__setup_cpunode_map();
>> +
>> if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
>> opts->auxtrace_mmap_pages,
>> opts->auxtrace_snapshot_mode,
>> @@ -755,6 +758,12 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
>> struct perf_mmap *map = &maps[i];
>>
>> if (map->base) {
>> + if (rec->opts.affinity != PERF_AFFINITY_SYS &&
>> + !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
>> + CPU_ZERO(&rec->affinity_mask);
>> + CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
>> + sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
>
> all this code depends on aio and LIBNUMA, let's keep it there then
Please note that thread migration improves performance for the serial case too:
                               BENCH REPORT BASED    ELAPSED TIME BASED
v4.20.0-rc5 (tip perf/core):
(current) SERIAL-SYS  / BASE : 1.27x (14.37/11.31), 1.29x (15.19/11.69)
          SERIAL-NODE / BASE : 1.15x (13.04/11.31), 1.17x (13.79/11.69)
          SERIAL-CPU  / BASE : 1.00x (11.32/11.31), 1.01x (11.89/11.69)
mbind() for AIO buffers is the only related adjustment.
>
> also please add this and the affinity_mask setup code below to a function
Separated the code into record__adjust_affinity() and perf_mmap__setup_affinity_mask() in v3.
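i.e. roughly like this (a sketch of the refactoring, not the exact
v3 code):

  static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
  {
      if (rec->opts.affinity != PERF_AFFINITY_SYS &&
          !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
          CPU_ZERO(&rec->affinity_mask);
          CPU_OR(&rec->affinity_mask, &rec->affinity_mask,
                 &map->affinity_mask);
          sched_setaffinity(0, sizeof(rec->affinity_mask),
                            &rec->affinity_mask);
      }
  }

with perf_mmap__setup_affinity_mask() gathering the mask building
code in mmap.c.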
Thanks,
Alexey
>
> thanks,
> jirka
>
>> + }
>> if (!record__aio_enabled(rec)) {
>> if (perf_mmap__push(map, rec, record__pushfn) != 0) {
>> rc = -1;
>> diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
>> index 60e825be944a..5ca5bb5ea0db 100644
>> --- a/tools/perf/util/evlist.c
>> +++ b/tools/perf/util/evlist.c
>> @@ -1028,7 +1028,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
>> * Its value is decided by evsel's write_backward.
>> * So &mp should not be passed through const pointer.
>> */
>> - struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
>> + struct mmap_params mp = {
>> + .nr_cblocks = nr_cblocks,
>> + .affinity = affinity,
>> + .cpu_map = cpus
>> + };
>>
>> if (!evlist->mmap)
>> evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
>> diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
>> index 742fa9a8e498..a2095e4eda4b 100644
>> --- a/tools/perf/util/mmap.c
>> +++ b/tools/perf/util/mmap.c
>> @@ -361,6 +361,7 @@ void perf_mmap__munmap(struct perf_mmap *map)
>>
>> int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
>> {
>> + int c, nr_cpus, node;
>> /*
>> * The last one will be done at perf_mmap__consume(), so that we
>> * make sure we don't prevent tools from consuming every last event in
>> @@ -389,6 +390,17 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
>> map->cpu = cpu;
>>
>> CPU_ZERO(&map->affinity_mask);
>> + if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1) {
>> + nr_cpus = cpu_map__nr(mp->cpu_map);
>> + node = cpu__get_node(map->cpu);
>> + for (c = 0; c < nr_cpus; c++) {
>> + if (cpu__get_node(c) == node) {
>> + CPU_SET(c, &map->affinity_mask);
>> + }
>> + }
>> + } else if (mp->affinity == PERF_AFFINITY_CPU) {
>> + CPU_SET(map->cpu, &map->affinity_mask);
>> + }
>>
>> if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
>> &mp->auxtrace_mp, map->base, fd))
>> diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
>> index e566c19b242b..b3f724fad22e 100644
>> --- a/tools/perf/util/mmap.h
>> +++ b/tools/perf/util/mmap.h
>> @@ -72,6 +72,7 @@ enum bkw_mmap_state {
>> struct mmap_params {
>> int prot, mask, nr_cblocks, affinity;
>> struct auxtrace_mmap_params auxtrace_mp;
>> + const struct cpu_map *cpu_map;
>> };
>>
>> int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
>