From: Alexey Bayduraev
To: Arnaldo Carvalho de Melo
Cc: Jiri Olsa, Namhyung Kim, Alexander Shishkin, Peter Zijlstra,
    Ingo Molnar, linux-kernel, Andi Kleen, Adrian Hunter,
    Alexander Antonov, Alexei Budankov
Subject: [PATCH v6 01/20] perf record: Introduce thread affinity and mmap masks
Date: Wed, 26 May 2021 13:52:54 +0300
Message-Id: <2f7297a4eaf5acb632a71b48e85dd18e3dd765b1.1622025774.git.alexey.v.bayduraev@linux.intel.com>

Introduce affinity and mmap thread masks. The thread affinity mask
defines the cpus that a thread is allowed to run on. The thread maps
mask defines the mmap data buffers that the thread serves, i.e. the
buffers it streams profiling data from.
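[Editor's note: to make the split between the two masks concrete, here is a
minimal standalone sketch, not part of the patch. It substitutes plain
unsigned long bitmaps for perf's struct mmap_cpu_mask and the kernel-style
bitmap helpers, and every name in it is hypothetical.]

/*
 * Sketch of the per-thread mask pair the patch introduces:
 *   "affinity" = cpus the reader thread may be scheduled on,
 *   "maps"     = mmap data buffers (indexed by cpu) the thread drains.
 */
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

struct cpu_mask {
	unsigned long	*bits;	/* one bit per cpu */
	size_t		 nbits;
};

struct thread_mask {
	struct cpu_mask	maps;
	struct cpu_mask	affinity;
};

static int cpu_mask_alloc(struct cpu_mask *mask, size_t nr_bits)
{
	size_t words = (nr_bits + BITS_PER_LONG - 1) / BITS_PER_LONG;

	mask->bits = calloc(words, sizeof(*mask->bits));
	if (!mask->bits)
		return -1;
	mask->nbits = nr_bits;
	return 0;
}

static void cpu_mask_set(struct cpu_mask *mask, size_t cpu)
{
	mask->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int cpu_mask_test(const struct cpu_mask *mask, size_t cpu)
{
	return !!(mask->bits[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG)));
}

int main(void)
{
	struct thread_mask tm;
	size_t cpu, nr_cpus = 8;	/* assumed cpu count, for the demo only */

	if (cpu_mask_alloc(&tm.maps, nr_cpus) || cpu_mask_alloc(&tm.affinity, nr_cpus))
		return 1;

	/*
	 * Single default thread: mark every cpu's buffer as served, the way
	 * record__init_thread_default_masks() does in the patch. The
	 * affinity mask is left empty here, mirroring that this first patch
	 * only introduces the masks; later patches in the series use them.
	 */
	for (cpu = 0; cpu < nr_cpus; cpu++)
		cpu_mask_set(&tm.maps, cpu);

	for (cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu %zu: maps=%d affinity=%d\n", cpu,
		       cpu_mask_test(&tm.maps, cpu),
		       cpu_mask_test(&tm.affinity, cpu));

	free(tm.maps.bits);
	free(tm.affinity.bits);
	return 0;
}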
Acked-by: Andi Kleen
Signed-off-by: Alexey Bayduraev
---
 tools/perf/builtin-record.c | 127 ++++++++++++++++++++++++++++++++++++
 1 file changed, 127 insertions(+)

diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index bc3dd379eb67..bad8c3dae068 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -87,6 +87,11 @@ struct switch_output {
 	int		 cur_file;
 };
 
+struct thread_mask {
+	struct mmap_cpu_mask	maps;
+	struct mmap_cpu_mask	affinity;
+};
+
 struct record {
 	struct perf_tool	tool;
 	struct record_opts	opts;
@@ -111,6 +116,8 @@ struct record {
 	unsigned long long	samples;
 	struct mmap_cpu_mask	affinity_mask;
 	unsigned long		output_max_size;	/* = 0: unlimited */
+	struct thread_mask	*thread_masks;
+	int			nr_threads;
 };
 
 static volatile int done;
@@ -2216,6 +2223,55 @@ static int record__parse_affinity(const struct option *opt, const char *str, int
 	return 0;
 }
 
+static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
+{
+	mask->nbits = nr_bits;
+	mask->bits = bitmap_alloc(mask->nbits);
+	if (!mask->bits) {
+		pr_err("Failed to allocate mmap_cpu mask\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
+{
+	bitmap_free(mask->bits);
+	mask->nbits = 0;
+}
+
+static void record__thread_mask_clear(struct thread_mask *mask)
+{
+	bitmap_zero(mask->maps.bits, mask->maps.nbits);
+	bitmap_zero(mask->affinity.bits, mask->affinity.nbits);
+}
+
+static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
+{
+	int ret;
+
+	ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
+	if (ret) {
+		mask->affinity.bits = NULL;
+		return ret;
+	}
+
+	ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
+	if (ret) {
+		record__mmap_cpu_mask_free(&mask->maps);
+		mask->maps.bits = NULL;
+	}
+
+	return ret;
+}
+
+static void record__thread_mask_free(struct thread_mask *mask)
+{
+	record__mmap_cpu_mask_free(&mask->maps);
+	record__mmap_cpu_mask_free(&mask->affinity);
+}
+
 static int parse_output_max_size(const struct option *opt,
 				 const char *str, int unset)
 {
@@ -2664,6 +2720,70 @@ static struct option __record_options[] = {
 
 struct option *record_options = __record_options;
 
+static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
+{
+	int c;
+
+	for (c = 0; c < cpus->nr; c++)
+		set_bit(cpus->map[c], mask->bits);
+}
+
+static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
+{
+	int t, ret;
+
+	rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
+	if (!rec->thread_masks) {
+		pr_err("Failed to allocate thread masks\n");
+		return -ENOMEM;
+	}
+
+	for (t = 0; t < nr_threads; t++) {
+		ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
+		if (ret)
+			return ret;
+		record__thread_mask_clear(&rec->thread_masks[t]);
+	}
+
+	return 0;
+}
+static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+	int ret;
+
+	ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu());
+	if (ret)
+		return ret;
+
+	record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus);
+
+	rec->nr_threads = 1;
+
+	return 0;
+}
+
+static int record__init_thread_masks(struct record *rec)
+{
+	struct perf_cpu_map *cpus = rec->evlist->core.cpus;
+
+	return record__init_thread_default_masks(rec, cpus);
+}
+
+static int record__fini_thread_masks(struct record *rec)
+{
+	int t;
+
+	if (rec->thread_masks)
+		for (t = 0; t < rec->nr_threads; t++)
+			record__thread_mask_free(&rec->thread_masks[t]);
+
+	zfree(&rec->thread_masks);
+
+	rec->nr_threads = 0;
+
+	return 0;
+}
+
 int cmd_record(int argc, const char **argv)
 {
 	int err;
@@ -2906,6 +3026,12 @@ int cmd_record(int argc, const char **argv)
 		goto out;
 	}
 
+	err = record__init_thread_masks(rec);
+	if (err) {
+		pr_err("record__init_thread_masks failed, error %d\n", err);
+		goto out;
+	}
+
	if (rec->opts.nr_cblocks > nr_cblocks_max)
 		rec->opts.nr_cblocks = nr_cblocks_max;
 	pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
@@ -2924,6 +3050,7 @@ int cmd_record(int argc, const char **argv)
 	symbol__exit();
 	auxtrace_record__free(rec->itr);
 out_opts:
+	record__fini_thread_masks(rec);
 	evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
 	return err;
 }
-- 
2.19.0
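[Editor's note: one design detail worth calling out is the error handling in
record__thread_mask_alloc(): when the first bitmap allocation fails, the
sibling pointer is set to NULL, and when the second fails, the first is
rolled back and cleared, so record__thread_mask_free() can always run safely
over a half-initialized pair. A generic, hypothetical C sketch of that
rollback pattern, using plain calloc()/free() in place of the bitmap
helpers:]

#include <stdlib.h>

struct pair {
	void *a;
	void *b;
};

static int pair_alloc(struct pair *p, size_t n)
{
	p->a = calloc(1, n);
	if (!p->a) {
		p->b = NULL;	/* keep the struct consistent for pair_free() */
		return -1;
	}

	p->b = calloc(1, n);
	if (!p->b) {
		free(p->a);	/* roll back the first allocation */
		p->a = NULL;
		return -1;
	}

	return 0;
}

static void pair_free(struct pair *p)
{
	free(p->a);		/* free(NULL) is a no-op, so this is always safe */
	free(p->b);
	p->a = p->b = NULL;
}

int main(void)
{
	struct pair p;

	if (pair_alloc(&p, 64) == 0)
		pair_free(&p);
	return 0;
}

[The same discipline shows up at the call sites in the patch:
record__init_thread_masks() runs after option parsing in cmd_record(), and
record__fini_thread_masks() sits at the out_opts label, so the masks are
released on both the success and the error paths.]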