* [PATCH v4 1/4] perf record: allocate affinity masks
From: Alexey Budankov @ 2019-01-16 9:21 UTC
To: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Allocate the affinity option and the affinity masks for the mmap data
buffers and the record thread, and initialize the allocated objects.
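For reference, the affinity plumbing here builds on the glibc CPU-set API;
a minimal standalone sketch of that API (illustration only, not part of the
patch):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            cpu_set_t mask;

            CPU_ZERO(&mask);        /* start from an empty mask */
            CPU_SET(0, &mask);      /* allow cpu 0 only */

            /* pid 0 migrates the calling thread */
            if (sched_setaffinity(0, sizeof(mask), &mask))
                    perror("sched_setaffinity");

            printf("allowed cpus in mask: %d\n", CPU_COUNT(&mask));
            return 0;
    }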
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
Changes in v3:
- converted PERF_AFFINITY_EOF to PERF_AFFINITY_MAX
Changes in v2:
- made debug affinity mode message user friendly
- converted affinity mode defines to enum values
---
tools/perf/builtin-record.c | 13 ++++++++++++-
tools/perf/perf.h | 8 ++++++++
tools/perf/util/evlist.c | 6 +++---
tools/perf/util/evlist.h | 2 +-
tools/perf/util/mmap.c | 2 ++
tools/perf/util/mmap.h | 3 ++-
6 files changed, 28 insertions(+), 6 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 882285fb9f64..e5a108b11d46 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -81,12 +81,17 @@ struct record {
bool timestamp_boundary;
struct switch_output switch_output;
unsigned long long samples;
+ cpu_set_t affinity_mask;
};
static volatile int auxtrace_record__snapshot_started;
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
+static const char *affinity_tags[PERF_AFFINITY_MAX] = {
+ "SYS", "NODE", "CPU"
+};
+
static bool switch_output_signal(struct record *rec)
{
return rec->switch_output.signal &&
@@ -533,7 +538,8 @@ static int record__mmap_evlist(struct record *rec,
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
- opts->auxtrace_snapshot_mode, opts->nr_cblocks) < 0) {
+ opts->auxtrace_snapshot_mode,
+ opts->nr_cblocks, opts->affinity) < 0) {
if (errno == EPERM) {
pr_err("Permission error mapping pages.\n"
"Consider increasing "
@@ -1980,6 +1986,9 @@ int cmd_record(int argc, const char **argv)
# undef REASON
#endif
+ CPU_ZERO(&rec->affinity_mask);
+ rec->opts.affinity = PERF_AFFINITY_SYS;
+
rec->evlist = perf_evlist__new();
if (rec->evlist == NULL)
return -ENOMEM;
@@ -2143,6 +2152,8 @@ int cmd_record(int argc, const char **argv)
if (verbose > 0)
pr_info("nr_cblocks: %d\n", rec->opts.nr_cblocks);
+ pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
+
err = __cmd_record(&record, argc, argv);
out:
perf_evlist__delete(rec->evlist);
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 388c6dd128b8..36d5cfe6362f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -83,6 +83,14 @@ struct record_opts {
clockid_t clockid;
u64 clockid_res_ns;
int nr_cblocks;
+ int affinity;
+};
+
+enum perf_affinity {
+ PERF_AFFINITY_SYS = 0,
+ PERF_AFFINITY_NODE,
+ PERF_AFFINITY_CPU,
+ PERF_AFFINITY_MAX
};
struct option;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8c902276d4b4..08cedb643ea6 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1022,7 +1022,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
*/
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks)
+ bool auxtrace_overwrite, int nr_cblocks, int affinity)
{
struct perf_evsel *evsel;
const struct cpu_map *cpus = evlist->cpus;
@@ -1032,7 +1032,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks };
+ struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
@@ -1064,7 +1064,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages)
{
- return perf_evlist__mmap_ex(evlist, pages, 0, false, 0);
+ return perf_evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS);
}
int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 868294491194..72728d7f4432 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -162,7 +162,7 @@ unsigned long perf_event_mlock_kb_in_pages(void);
int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
unsigned int auxtrace_pages,
- bool auxtrace_overwrite, int nr_cblocks);
+ bool auxtrace_overwrite, int nr_cblocks, int affinity);
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages);
void perf_evlist__munmap(struct perf_evlist *evlist);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 8fc39311a30d..e68ba754a8e2 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -343,6 +343,8 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->fd = fd;
map->cpu = cpu;
+ CPU_ZERO(&map->affinity_mask);
+
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
return -1;
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index aeb6942fdb00..e566c19b242b 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -38,6 +38,7 @@ struct perf_mmap {
int nr_cblocks;
} aio;
#endif
+ cpu_set_t affinity_mask;
};
/*
@@ -69,7 +70,7 @@ enum bkw_mmap_state {
};
struct mmap_params {
- int prot, mask, nr_cblocks;
+ int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
};
* [PATCH v4 2/4] perf record: bind the AIO user space buffers to nodes
From: Alexey Budankov @ 2019-01-16 9:22 UTC
To: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Allocate the AIO user space buffers and bind them to the memory nodes
that the mmap kernel buffers are bound to.
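The binding follows the usual mmap()+mbind() pattern from libnuma's
<numaif.h>; a self-contained sketch of that pattern, where alloc_on_node()
is a hypothetical helper name and nodes above 63 are ignored for brevity
(build with -lnuma):

    #include <numaif.h>             /* mbind(), MPOL_BIND */
    #include <stddef.h>
    #include <stdio.h>
    #include <sys/mman.h>

    static void *alloc_on_node(size_t len, int node)
    {
            unsigned long node_mask = 1UL << node;
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED)
                    return NULL;

            /* back the pages with memory from 'node' only */
            if (mbind(buf, len, MPOL_BIND, &node_mask,
                      sizeof(node_mask) * 8, 0)) {
                    perror("mbind");
                    munmap(buf, len);
                    return NULL;
            }
            return buf;
    }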
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
Changes in v4:
- fixed compilation issue converting pr_warn() to pr_warning()
- implemented stop if mbind() fails
Changes in v3:
- corrected code style issues
- adjusted __aio_alloc,__aio_bind,__aio_free() implementation
Changes in v2:
- implemented perf_mmap__aio_alloc, perf_mmap__aio_free, perf_mmap__aio_bind
and put HAVE_LIBNUMA_SUPPORT #ifdefs in there
---
tools/perf/util/mmap.c | 77 +++++++++++++++++++++++++++++++++++++++---
1 file changed, 73 insertions(+), 4 deletions(-)
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index e68ba754a8e2..34be9f900575 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -10,6 +10,9 @@
#include <sys/mman.h>
#include <inttypes.h>
#include <asm/bug.h>
+#ifdef HAVE_LIBNUMA_SUPPORT
+#include <numaif.h>
+#endif
#include "debug.h"
#include "event.h"
#include "mmap.h"
@@ -154,9 +157,72 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb
}
#ifdef HAVE_AIO_SUPPORT
+
+#ifdef HAVE_LIBNUMA_SUPPORT
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int index)
+{
+ map->aio.data[index] = mmap(NULL, perf_mmap__mmap_len(map), PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+ if (map->aio.data[index] == MAP_FAILED) {
+ map->aio.data[index] = NULL;
+ return -1;
+ }
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int index)
+{
+ if (map->aio.data[index]) {
+ munmap(map->aio.data[index], perf_mmap__mmap_len(map));
+ map->aio.data[index] = NULL;
+ }
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map, int index, int cpu, int affinity)
+{
+ void *data;
+ size_t mmap_len;
+ unsigned long node_mask;
+
+ if (affinity != PERF_AFFINITY_SYS && cpu__max_node() > 1) {
+ data = map->aio.data[index];
+ mmap_len = perf_mmap__mmap_len(map);
+ node_mask = 1UL << cpu__get_node(cpu);
+ if (mbind(data, mmap_len, MPOL_BIND, &node_mask, 1, 0)) {
+ pr_err("Failed to bind [%p-%p] AIO buffer to node %d: error %m\n",
+ data, data + mmap_len, cpu__get_node(cpu));
+ return -1;
+ }
+ }
+
+ return 0;
+}
+#else
+static int perf_mmap__aio_alloc(struct perf_mmap *map, int index)
+{
+ map->aio.data[index] = malloc(perf_mmap__mmap_len(map));
+ if (map->aio.data[index] == NULL)
+ return -1;
+
+ return 0;
+}
+
+static void perf_mmap__aio_free(struct perf_mmap *map, int index)
+{
+ zfree(&(map->aio.data[index]));
+}
+
+static int perf_mmap__aio_bind(struct perf_mmap *map __maybe_unused, int index __maybe_unused,
+ int cpu __maybe_unused, int affinity __maybe_unused)
+{
+ return 0;
+}
+#endif
+
static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
{
- int delta_max, i, prio;
+ int delta_max, i, prio, ret;
map->aio.nr_cblocks = mp->nr_cblocks;
if (map->aio.nr_cblocks) {
@@ -177,11 +243,14 @@ static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp)
}
delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
for (i = 0; i < map->aio.nr_cblocks; ++i) {
- map->aio.data[i] = malloc(perf_mmap__mmap_len(map));
- if (!map->aio.data[i]) {
+ ret = perf_mmap__aio_alloc(map, i);
+ if (ret == -1) {
pr_debug2("failed to allocate data buffer area, error %m");
return -1;
}
+ ret = perf_mmap__aio_bind(map, i, map->cpu, mp->affinity);
+ if (ret == -1)
+ return -1;
/*
* Use cblock.aio_fildes value different from -1
* to denote started aio write operation on the
@@ -210,7 +279,7 @@ static void perf_mmap__aio_munmap(struct perf_mmap *map)
int i;
for (i = 0; i < map->aio.nr_cblocks; ++i)
- zfree(&map->aio.data[i]);
+ perf_mmap__aio_free(map, i);
if (map->aio.data)
zfree(&map->aio.data);
zfree(&map->aio.cblocks);
* [PATCH v4 3/4] perf record: apply affinity masks when reading mmap buffers
From: Alexey Budankov @ 2019-01-16 9:23 UTC
To: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Build node cpu masks for the mmap data buffers. Apply the matching node
cpu mask to the tool thread every time it references a data buffer across
a node or cpu boundary.
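The migration itself is plain sched_setaffinity(); a minimal sketch of the
adjust-only-on-change idea behind record__adjust_affinity() below
(hypothetical standalone helper):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <string.h>

    static void adjust_affinity(cpu_set_t *cur, const cpu_set_t *target)
    {
            if (CPU_EQUAL(cur, target))
                    return;         /* already on the right cpus */

            memcpy(cur, target, sizeof(*cur));
            /* pid 0: migrate the calling (tool) thread */
            sched_setaffinity(0, sizeof(*cur), cur);
    }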
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
Changes in v4:
- corrected mmap_params->cpu_map initialization to be based on /sys/devices/system/cpu/online
- separated node cpu map generation into build_node_mask()
Changes in v3:
- separated mask manipulations into __adjust_affinity() and __setup_affinity_mask()
- implemented mapping of c index into online cpu index
Changes in v2:
- separated AIO buffers binding to patch 2/4
---
tools/perf/builtin-record.c | 14 ++++++++++++++
tools/perf/util/evlist.c | 6 +++++-
tools/perf/util/mmap.c | 23 ++++++++++++++++++++++-
tools/perf/util/mmap.h | 1 +
4 files changed, 42 insertions(+), 2 deletions(-)
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index e5a108b11d46..553c2fabf3c1 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -536,6 +536,9 @@ static int record__mmap_evlist(struct record *rec,
struct record_opts *opts = &rec->opts;
char msg[512];
+ if (opts->affinity != PERF_AFFINITY_SYS)
+ cpu__setup_cpunode_map();
+
if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
opts->auxtrace_mmap_pages,
opts->auxtrace_snapshot_mode,
@@ -728,6 +731,16 @@ static struct perf_event_header finished_round_event = {
.type = PERF_RECORD_FINISHED_ROUND,
};
+static void record__adjust_affinity(struct record *rec, struct perf_mmap *map)
+{
+ if (rec->opts.affinity != PERF_AFFINITY_SYS &&
+ !CPU_EQUAL(&rec->affinity_mask, &map->affinity_mask)) {
+ CPU_ZERO(&rec->affinity_mask);
+ CPU_OR(&rec->affinity_mask, &rec->affinity_mask, &map->affinity_mask);
+ sched_setaffinity(0, sizeof(rec->affinity_mask), &rec->affinity_mask);
+ }
+}
+
static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
bool overwrite)
{
@@ -755,6 +768,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
struct perf_mmap *map = &maps[i];
if (map->base) {
+ record__adjust_affinity(rec, map);
if (!record__aio_enabled(rec)) {
if (perf_mmap__push(map, rec, record__pushfn) != 0) {
rc = -1;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 08cedb643ea6..178d3280ba62 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1032,7 +1032,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
* Its value is decided by evsel's write_backward.
* So &mp should not be passed through const pointer.
*/
- struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
+ struct mmap_params mp = {
+ .nr_cblocks = nr_cblocks,
+ .affinity = affinity,
+ .cpu_map = cpu_map__new(NULL) /* from /sys/devices/system/cpu/online */
+ };
if (!evlist->mmap)
evlist->mmap = perf_evlist__alloc_mmap(evlist, false);
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index 34be9f900575..8ff2f1c243a2 100644
--- a/tools/perf/util/mmap.c
+++ b/tools/perf/util/mmap.c
@@ -383,6 +383,27 @@ void perf_mmap__munmap(struct perf_mmap *map)
auxtrace_mmap__munmap(&map->auxtrace_mmap);
}
+static void build_node_mask(const struct cpu_map *cpu_map, int node, cpu_set_t *mask)
+{
+ int c, cpu, nr_cpus;
+
+ nr_cpus = cpu_map__nr(cpu_map);
+ for (c = 0; c < nr_cpus; c++) {
+ cpu = cpu_map->map[c]; /* map c index to online cpu index */
+ if (cpu__get_node(cpu) == node)
+ CPU_SET(cpu, mask);
+ }
+}
+
+static void perf_mmap__setup_affinity_mask(struct perf_mmap *map, struct mmap_params *mp)
+{
+ CPU_ZERO(&map->affinity_mask);
+ if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1 && mp->cpu_map)
+ build_node_mask(mp->cpu_map, cpu__get_node(map->cpu), &map->affinity_mask);
+ else if (mp->affinity == PERF_AFFINITY_CPU)
+ CPU_SET(map->cpu, &map->affinity_mask);
+}
+
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu)
{
/*
@@ -412,7 +433,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c
map->fd = fd;
map->cpu = cpu;
- CPU_ZERO(&map->affinity_mask);
+ perf_mmap__setup_affinity_mask(map, mp);
if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
&mp->auxtrace_mp, map->base, fd))
diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h
index e566c19b242b..b3f724fad22e 100644
--- a/tools/perf/util/mmap.h
+++ b/tools/perf/util/mmap.h
@@ -72,6 +72,7 @@ enum bkw_mmap_state {
struct mmap_params {
int prot, mask, nr_cblocks, affinity;
struct auxtrace_mmap_params auxtrace_mp;
+ const struct cpu_map *cpu_map;
};
int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
* Re: [PATCH v4 3/4] perf record: apply affinity masks when reading mmap buffers
From: Jiri Olsa @ 2019-01-21 11:54 UTC
To: Alexey Budankov
Cc: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra,
Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
On Wed, Jan 16, 2019 at 12:23:05PM +0300, Alexey Budankov wrote:
SNIP
> static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
> bool overwrite)
> {
> @@ -755,6 +768,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
> struct perf_mmap *map = &maps[i];
>
> if (map->base) {
> + record__adjust_affinity(rec, map);
> if (!record__aio_enabled(rec)) {
> if (perf_mmap__push(map, rec, record__pushfn) != 0) {
> rc = -1;
> diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
> index 08cedb643ea6..178d3280ba62 100644
> --- a/tools/perf/util/evlist.c
> +++ b/tools/perf/util/evlist.c
> @@ -1032,7 +1032,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
> * Its value is decided by evsel's write_backward.
> * So &mp should not be passed through const pointer.
> */
> - struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
> + struct mmap_params mp = {
> + .nr_cblocks = nr_cblocks,
> + .affinity = affinity,
> + .cpu_map = cpu_map__new(NULL) /* from /sys/devices/system/cpu/online */
> + };
cpu_map won't get released.. if there's no better solution,
at least we could have it as static in build_node_mask..
this way it will be created only once
jirka
* Re: [PATCH v4 3/4] perf record: apply affinity masks when reading mmap buffers
From: Alexey Budankov @ 2019-01-22 12:55 UTC
To: Jiri Olsa
Cc: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra,
Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Hi,
On 21.01.2019 14:54, Jiri Olsa wrote:
> On Wed, Jan 16, 2019 at 12:23:05PM +0300, Alexey Budankov wrote:
>
> SNIP
>
>> static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
>> bool overwrite)
>> {
>> @@ -755,6 +768,7 @@ static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evli
>> struct perf_mmap *map = &maps[i];
>>
>> if (map->base) {
>> + record__adjust_affinity(rec, map);
>> if (!record__aio_enabled(rec)) {
>> if (perf_mmap__push(map, rec, record__pushfn) != 0) {
>> rc = -1;
>> diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
>> index 08cedb643ea6..178d3280ba62 100644
>> --- a/tools/perf/util/evlist.c
>> +++ b/tools/perf/util/evlist.c
>> @@ -1032,7 +1032,11 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
>> * Its value is decided by evsel's write_backward.
>> * So &mp should not be passed through const pointer.
>> */
>> - struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity };
>> + struct mmap_params mp = {
>> + .nr_cblocks = nr_cblocks,
>> + .affinity = affinity,
>> + .cpu_map = cpu_map__new(NULL) /* from /sys/devices/system/cpu/online */
>> + };
>
> cpu_map won't get released.. if there's no better solution,
> at least we could have it as static in build_node_mask..
> this way it will be created only once
Nice catch, thanks! Moved the static map into a separate cpu_map__online()
in cpumap.c, which simplified the changes a bit.
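For reference, such a helper would presumably look along these lines (a
sketch inferred from the description above, not the final committed code):

    struct cpu_map *cpu_map__online(void) /* thread unsafe */
    {
            static struct cpu_map *online;

            if (!online) /* parse /sys/devices/system/cpu/online once */
                    online = cpu_map__new(NULL);

            return online;
    }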
Alexey
>
> jirka
>
* [PATCH v4 4/4] perf record: implement --affinity=node|cpu option
From: Alexey Budankov @ 2019-01-16 9:23 UTC
To: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
Implement --affinity=node|cpu option for the record mode defaulting
to system affinity mask bouncing.
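Expected usage looks like this (hypothetical command lines; the flags other
than --affinity are existing perf record options):

    # follow the NUMA node of each processed mmap buffer
    perf record --affinity=node -a -- sleep 10

    # follow the exact cpu of each processed mmap buffer
    perf record --affinity=cpu --aio -- ./workload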
Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
---
changes in v3:
- adjusted indentation at record__parse_affinity()
---
tools/perf/Documentation/perf-record.txt | 5 +++++
tools/perf/builtin-record.c | 20 ++++++++++++++++++++
2 files changed, 25 insertions(+)
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index d232b13ea713..efb839784f32 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -440,6 +440,11 @@ Use <n> control blocks in asynchronous (Posix AIO) trace writing mode (default:
Asynchronous mode is supported only when linking Perf tool with libc library
providing implementation for Posix AIO API.
+--affinity=mode::
+Set affinity mask of trace reading thread according to the policy defined by 'mode' value:
+ node - thread affinity mask is set to NUMA node cpu mask of the processed mmap buffer
+ cpu - thread affinity mask is set to cpu of the processed mmap buffer
+
--all-kernel::
Configure all used events to run in kernel space.
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 553c2fabf3c1..94a966ba9a6f 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1659,6 +1659,23 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
return -1;
}
+static int record__parse_affinity(const struct option *opt, const char *str, int unset)
+{
+ struct record_opts *opts = (struct record_opts *)opt->value;
+
+ if (unset)
+ return 0;
+
+ if (str) {
+ if (!strcasecmp(str, "node"))
+ opts->affinity = PERF_AFFINITY_NODE;
+ else if (!strcasecmp(str, "cpu"))
+ opts->affinity = PERF_AFFINITY_CPU;
+ }
+
+ return 0;
+}
+
static int record__parse_mmap_pages(const struct option *opt,
const char *str,
int unset __maybe_unused)
@@ -1966,6 +1983,9 @@ static struct option __record_options[] = {
&nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
record__aio_parse),
#endif
+ OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
+ "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
+ record__parse_affinity),
OPT_END()
};
* Re: [PATCH v4 4/4] perf record: implement --affinity=node|cpu option
From: Jiri Olsa @ 2019-01-21 11:54 UTC
To: Alexey Budankov
Cc: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra,
Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
On Wed, Jan 16, 2019 at 12:23:56PM +0300, Alexey Budankov wrote:
>
> Implement --affinity=node|cpu option for the record mode defaulting
> to system affinity mask bouncing.
>
> Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
> ---
> changes in v3:
> - adjusted indentation at record__parse_affinity()
having problems applying this one:
Applying: perf record: allocate affinity masks
Applying: perf record: bind the AIO user space buffers to nodes
Applying: perf record: apply affinity masks when reading mmap buffers
Applying: perf record: implement --affinity=node|cpu option
error: corrupt patch at line 62
Patch failed at 0004 perf record: implement --affinity=node|cpu option
jirka
* Re: [PATCH v4 4/4] perf record: implement --affinity=node|cpu option
From: Alexey Budankov @ 2019-01-22 12:55 UTC
To: Jiri Olsa
Cc: Arnaldo Carvalho de Melo, Ingo Molnar, Peter Zijlstra,
Namhyung Kim, Alexander Shishkin, Andi Kleen, linux-kernel
On 21.01.2019 14:54, Jiri Olsa wrote:
> On Wed, Jan 16, 2019 at 12:23:56PM +0300, Alexey Budankov wrote:
>>
>> Implement --affinity=node|cpu option for the record mode defaulting
>> to system affinity mask bouncing.
>>
>> Signed-off-by: Alexey Budankov <alexey.budankov@linux.intel.com>
>> ---
>> changes in v3:
>> - adjusted indentation at record__parse_affinity()
>
> having problems applying this one:
>
> Applying: perf record: allocate affinity masks
> Applying: perf record: bind the AIO user space buffers to nodes
> Applying: perf record: apply affinity masks when reading mmap buffers
> Applying: perf record: implement --affinity=node|cpu option
> error: corrupt patch at line 62
> Patch failed at 0004 perf record: implement --affinity=node|cpu option
It looks like Arnaldo's perf/core is progressing fast.
Currently I hit this when building a clean checkout of Arnaldo's perf/core:
CC tests/hists_output.o
In file included from util/session.c:26:
util/sample-raw.h:9:6: error: redundant redeclaration of perf_evlist__s390_sample_raw [-Werror=redundant-decls]
void perf_evlist__s390_sample_raw(struct perf_evlist *evlist,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~
In file included from util/session.c:13:
util/evlist.h:321:6: note: previous declaration of perf_evlist__s390_sample_raw was here
void perf_evlist__s390_sample_raw(struct perf_evlist *evlist, union perf_event *event,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~
CC tests/hists_cumulate.o
CC tests/python-use.o
CC util/s390-sample-raw.o
Thanks,
Alexey
>
>
> jirka
>