From: Riccardo Mancini <rickyman7@gmail.com>
To: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Ian Rogers <irogers@google.com>,
	Namhyung Kim <namhyung@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>,
	Mark Rutland <mark.rutland@arm.com>, Jiri Olsa <jolsa@redhat.com>,
	linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
	Riccardo Mancini <rickyman7@gmail.com>
Subject: [RFC PATCH v1 08/37] perf workqueue: add method to execute work on specific CPU
Date: Sat, 21 Aug 2021 11:19:14 +0200
Message-ID: <fc51bfc206cd5976eec14541fc5ee50ce6788818.1629490974.git.rickyman7@gmail.com>
In-Reply-To: <cover.1629490974.git.rickyman7@gmail.com>

This patch adds the ability to schedule a work item on a specific CPU.
There are two cases:
 - a thread has been pinned to the requested CPU with the new functions
   workqueue_set_affinity_cpu and workqueue_set_affinities_cpu; the work
   is queued directly on that thread.
 - no thread is pinned to the requested CPU. In this case, affinity is
   set before (and restored after) executing the work, as shown in the
   sketch below.
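
A minimal usage sketch (do_count and the CPU numbers are illustrative
only; error handling is omitted):

	static void do_count(struct work_struct *work)
	{
		/* runs on a worker thread bound to the chosen CPU */
	}

	struct workqueue_struct *wq = create_workqueue(2);
	struct work_struct work;

	init_work(&work);
	work.func = do_count;

	/* pinned case: bind worker 0 to CPU 1 up front */
	workqueue_set_affinity_cpu(wq, 0, 1);
	queue_work_on(1, wq, &work);
	flush_workqueue(wq);

	/* unpinned case: affinity is set around the work's execution */
	queue_work_on(3, wq, &work);
	flush_workqueue(wq);

	destroy_workqueue(wq);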

Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
---
 tools/perf/util/workqueue/workqueue.c | 141 ++++++++++++++++++++++++++-
 tools/perf/util/workqueue/workqueue.h |  12 +++
 2 files changed, 152 insertions(+), 1 deletion(-)

diff --git a/tools/perf/util/workqueue/workqueue.c b/tools/perf/util/workqueue/workqueue.c
index 61f1b6c41deba031..650170a6a11f56bd 100644
--- a/tools/perf/util/workqueue/workqueue.c
+++ b/tools/perf/util/workqueue/workqueue.c
@@ -10,9 +10,12 @@
 #include <linux/string.h>
 #include <linux/zalloc.h>
 #include <linux/kernel.h>
+#include <linux/bitmap.h>
 #include "debug.h"
 #include <internal/lib.h>
 #include "workqueue.h"
+#include <perf/cpumap.h>
+#include "util/affinity.h"
 
 struct workqueue_struct *global_wq;
 
@@ -43,6 +46,10 @@ struct workqueue_struct {
 	struct worker		**workers;	/* array of all workers */
 	struct worker		*next_worker;	/* next worker to choose (round robin) */
 	int			first_stopped_worker; /* next worker to start if needed */
+	struct {
+		int		*map;		/* maps cpu to thread idx */
+		int		size;		/* size of the map array */
+	} cpu_to_tidx_map;
 };
 
 static const char * const workqueue_errno_str[] = {
@@ -429,6 +436,7 @@ static void worker_thread(int tidx, struct task_struct *task)
 struct workqueue_struct *create_workqueue(int nr_threads)
 {
 	int ret, err = 0;
+	int nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
 	struct workqueue_struct *wq = zalloc(sizeof(struct workqueue_struct));
 
 	if (!wq) {
@@ -449,10 +457,19 @@ struct workqueue_struct *create_workqueue(int nr_threads)
 		goto out_delete_pool;
 	}
 
+	wq->cpu_to_tidx_map.size = nr_cpus;
+	wq->cpu_to_tidx_map.map = calloc(nr_cpus, sizeof(*wq->cpu_to_tidx_map.map));
+	if (!wq->cpu_to_tidx_map.map) {
+		err = -ENOMEM;
+		goto out_free_workers;
+	}
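+	/* -1 in the map means no worker is pinned to that CPU */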
+	memset(wq->cpu_to_tidx_map.map, -1, nr_cpus * sizeof(*wq->cpu_to_tidx_map.map));
+
 	ret = pthread_mutex_init(&wq->lock, NULL);
 	if (ret) {
 		err = -ret;
-		goto out_free_workers;
+		goto out_free_cpu_to_idx_map;
 	}
 
 	ret = pthread_cond_init(&wq->idle_cond, NULL);
@@ -494,6 +510,8 @@ struct workqueue_struct *create_workqueue(int nr_threads)
 	pthread_mutex_destroy(&wq->lock);
+out_free_cpu_to_idx_map:
+	free(wq->cpu_to_tidx_map.map);
 out_free_workers:
 	free(wq->workers);
 out_delete_pool:
 	threadpool__delete(wq->pool);
 out_free_wq:
@@ -552,6 +570,7 @@ int destroy_workqueue(struct workqueue_struct *wq)
 	wq->msg_pipe[1] = -1;
 
 	zfree(&wq->workers);
+	zfree(&wq->cpu_to_tidx_map.map);
 	free(wq);
 	return err;
 }
@@ -779,6 +798,125 @@ int workqueue_set_affinity(struct workqueue_struct *wq, int tidx,
 	return wq->pool_errno ? -WORKQUEUE_ERROR__POOLAFFINITY : 0;
 }
 
+/**
+ * workqueue_set_affinity_cpu - pin thread @tidx in @wq->pool to @cpu
+ *
+ * If @cpu is -1, affinity is set to all online processors.
+ */
+int workqueue_set_affinity_cpu(struct workqueue_struct *wq, int tidx, int cpu)
+{
+	struct mmap_cpu_mask affinity;
+	int i, err;
+
+	if (cpu >= 0)
+		affinity.nbits = cpu + 1;
+	else
+		affinity.nbits = wq->cpu_to_tidx_map.size;
+
+	affinity.bits = bitmap_alloc(affinity.nbits);
+	if (!affinity.bits) {
+		pr_debug2("Failed allocation of bitmapset\n");
+		return -ENOMEM;
+	}
+
+	if (cpu >= 0)
+		test_and_set_bit(cpu, affinity.bits);
+	else
+		bitmap_fill(affinity.bits, affinity.nbits);
+
+	err = workqueue_set_affinity(wq, tidx, &affinity);
+	if (err)
+		goto out;
+
+	/* find and unset this thread from the map */
+	for (i = 0; i < wq->cpu_to_tidx_map.size; i++) {
+		if (wq->cpu_to_tidx_map.map[i] == tidx)
+			wq->cpu_to_tidx_map.map[i] = -1;
+	}
+
+	if (cpu >= 0)
+		wq->cpu_to_tidx_map.map[cpu] = tidx;
+
+out:
+	bitmap_free(affinity.bits);
+	return err;
+}
+
+/**
+ * workqueue_set_affinities_cpu - pin one thread in @wq->pool to each CPU in @cpus
+ */
+int workqueue_set_affinities_cpu(struct workqueue_struct *wq,
+				struct perf_cpu_map *cpus)
+{
+	int cpu, idx, err;
+
+	if (perf_cpu_map__nr(cpus) > threadpool__size(wq->pool))
+		return -EINVAL;
+
+	perf_cpu_map__for_each_cpu(cpu, idx, cpus) {
+		err = workqueue_set_affinity_cpu(wq, idx, cpu);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+struct cpu_bound_work {
+	struct work_struct work;
+	int cpu;
+	struct work_struct *original_work;
+};
+
+static void set_affinity_and_execute(struct work_struct *work)
+{
+	struct cpu_bound_work *cpu_bound_work = container_of(work, struct cpu_bound_work, work);
+	struct affinity affinity;
+
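+	/* pin the executing worker to @cpu only while this work runs */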
+	if (affinity__setup(&affinity) < 0)
+		goto out;
+
+	affinity__set(&affinity, cpu_bound_work->cpu);
+	cpu_bound_work->original_work->func(cpu_bound_work->original_work);
+	affinity__cleanup(&affinity);
+
+out:
+	free(cpu_bound_work);
+}
+
+/**
+ * queue_work_on - queue @work for execution on @cpu
+ *
+ * The work is assigned to the worker pinned to @cpu, if any.
+ * Otherwise, affinity is set before running the work and restored afterwards.
+ */
+int queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
+{
+	struct cpu_bound_work *cpu_bound_work;
+	int tidx;
+
+	/* reject CPUs outside the map built in create_workqueue() */
+	if (cpu < 0 || cpu >= wq->cpu_to_tidx_map.size)
+		return -EINVAL;
+
+	tidx = wq->cpu_to_tidx_map.map[cpu];
+
+	if (tidx >= 0)
+		return queue_work_on_worker(tidx, wq, work);
+
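+	/* no worker pinned to @cpu: set affinity around the work's execution */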
+	cpu_bound_work = malloc(sizeof(*cpu_bound_work));
+	if (!cpu_bound_work)
+		return -ENOMEM;
+
+	init_work(&cpu_bound_work->work);
+	cpu_bound_work->work.func = set_affinity_and_execute;
+	cpu_bound_work->cpu = cpu;
+	cpu_bound_work->original_work = work;
+	return queue_work(wq, &cpu_bound_work->work);
+}
+
 /**
  * init_work - initialize the @work struct
  */
diff --git a/tools/perf/util/workqueue/workqueue.h b/tools/perf/util/workqueue/workqueue.h
index dc6baee138b22ab2..a91a37e367b62d02 100644
--- a/tools/perf/util/workqueue/workqueue.h
+++ b/tools/perf/util/workqueue/workqueue.h
@@ -25,6 +25,7 @@ extern int workqueue_nr_threads(struct workqueue_struct *wq);
 
 extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
 extern int queue_work_on_worker(int tidx, struct workqueue_struct *wq, struct work_struct *work);
+extern int queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work);
 
 extern int flush_workqueue(struct workqueue_struct *wq);
 
@@ -32,6 +33,9 @@ extern int workqueue_set_affinities(struct workqueue_struct *wq,
 				struct mmap_cpu_mask *affinities);
 extern int workqueue_set_affinity(struct workqueue_struct *wq, int tidx,
 				struct mmap_cpu_mask *affinity);
+extern int workqueue_set_affinity_cpu(struct workqueue_struct *wq, int tidx, int cpu);
+extern int workqueue_set_affinities_cpu(struct workqueue_struct *wq,
+					struct perf_cpu_map *cpus);
 
 extern void init_work(struct work_struct *work);
 
@@ -82,6 +86,14 @@ static inline int schedule_work_on_worker(int tidx, struct work_struct *work)
 	return queue_work_on_worker(tidx, global_wq, work);
 }
 
+/**
+ * schedule_work_on - queue @work to be executed on @cpu by global_wq
+ */
+static inline int schedule_work_on(int cpu, struct work_struct *work)
+{
+	return queue_work_on(cpu, global_wq, work);
+}
+
 /**
  * flush_scheduled_work - ensure that any scheduled work in global_wq has run to completion
  */
-- 
2.31.1

