linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
From: Riccardo Mancini <rickyman7@gmail.com>
To: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Ian Rogers <irogers@google.com>,
	Namhyung Kim <namhyung@kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Ingo Molnar <mingo@redhat.com>,
	Mark Rutland <mark.rutland@arm.com>, Jiri Olsa <jolsa@redhat.com>,
	linux-kernel@vger.kernel.org, linux-perf-users@vger.kernel.org,
	Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>,
	Riccardo Mancini <rickyman7@gmail.com>
Subject: [RFC PATCH v3 08/15] perf workqueue: add queue_work and flush_workqueue functions
Date: Fri, 20 Aug 2021 12:53:54 +0200	[thread overview]
Message-ID: <f9100c6e428eafe1b9761f947550e45ccc9e8849.1629454773.git.rickyman7@gmail.com> (raw)
In-Reply-To: <cover.1629454773.git.rickyman7@gmail.com>

This patch adds functions to queue and wait work_structs, and
related tests.

When a new work item is added, the workqueue first checks if there
are threads to wake up. If so, it wakes it up with the given work item,
otherwise it will pick the next round-robin thread and queue the work
item to its queue. A thread which completes its queue will go to sleep.

The round-robin mechanism is implemented through the next_worker
attribute which will point to the next worker to be chosen for queueing.
When work is assigned to that worker or when the worker goes to sleep,
the pointer is moved to the next worker in the busy_list, if any.
When a worker is woken up, it is added in the busy list just before the
next_worker, so that it will be chosen as last (it's just been assigned
a work item).

Signed-off-by: Riccardo Mancini <rickyman7@gmail.com>
---
 tools/perf/tests/workqueue.c          |  71 ++++++++++-
 tools/perf/util/workqueue/workqueue.c | 176 +++++++++++++++++++++++++-
 tools/perf/util/workqueue/workqueue.h |   9 ++
 3 files changed, 254 insertions(+), 2 deletions(-)

diff --git a/tools/perf/tests/workqueue.c b/tools/perf/tests/workqueue.c
index 1aa6ee788b0b1c32..194bab2f3f668ce9 100644
--- a/tools/perf/tests/workqueue.c
+++ b/tools/perf/tests/workqueue.c
@@ -147,6 +147,28 @@ static int __test__threadpool(void *_args)
 	return ret;
 }
 
+struct test_work {
+	struct work_struct work;
+	int i;
+	int *array;
+};
+
+static void test_work_fn1(struct work_struct *work)
+{
+	struct test_work *mwork = container_of(work, struct test_work, work);
+
+	dummy_work(mwork->i);
+	mwork->array[mwork->i] = mwork->i+1;
+}
+
+static void test_work_fn2(struct work_struct *work)
+{
+	struct test_work *mwork = container_of(work, struct test_work, work);
+
+	dummy_work(mwork->i);
+	mwork->array[mwork->i] = mwork->i*2;
+}
+
 static int __workqueue__prepare(struct workqueue_struct **wq,
 				int pool_size)
 {
@@ -166,21 +188,68 @@ static int __workqueue__teardown(struct workqueue_struct *wq)
 	return 0;
 }
 
+static int __workqueue__exec_wait(struct workqueue_struct *wq,
+				int *array, struct test_work *works,
+				work_func_t func, int n_work_items)
+{
+	int ret, i;
+
+	for (i = 0; i < n_work_items; i++) {
+		works[i].array = array;
+		works[i].i = i;
+
+		init_work(&works[i].work);
+		works[i].work.func = func;
+		queue_work(wq, &works[i].work);
+	}
+
+	ret = flush_workqueue(wq);
+	TEST_ASSERT_VAL("workqueue flush failure", ret == 0);
+
+	return TEST_OK;
+}
+
+
 static int __test__workqueue(void *_args)
 {
 	struct workqueue_test_args_t *args = _args;
 	struct workqueue_struct *wq;
+	struct test_work *works;
+	int *array;
 	int pool_size = args->pool_size ?: sysconf(_SC_NPROCESSORS_ONLN);
-	int ret = __workqueue__prepare(&wq, pool_size);
+	int i, ret = __workqueue__prepare(&wq, pool_size);
 
+	if (ret)
+		return ret;
+
+	array = calloc(args->n_work_items, sizeof(*array));
+	TEST_ASSERT_VAL("failed array calloc", array);
+	works = calloc(args->n_work_items, sizeof(*works));
+	TEST_ASSERT_VAL("failed works calloc", works);
+
+	ret = __workqueue__exec_wait(wq, array, works, test_work_fn1,
+					args->n_work_items);
 	if (ret)
 		goto out;
 
+	for (i = 0; i < args->n_work_items; i++)
+		TEST_ASSERT_VAL("failed array check (1)", array[i] == i+1);
+
+	ret = __workqueue__exec_wait(wq, array, works, test_work_fn2,
+					args->n_work_items);
+	if (ret)
+		goto out;
+
+	for (i = 0; i < args->n_work_items; i++)
+		TEST_ASSERT_VAL("failed array check (2)", array[i] == 2*i);
+
 	ret = __workqueue__teardown(wq);
 	if (ret)
 		goto out;
 
 out:
+	free(array);
+	free(works);
 	return ret;
 }
 
diff --git a/tools/perf/util/workqueue/workqueue.c b/tools/perf/util/workqueue/workqueue.c
index a2747fcc004ab0d1..1092ece9ad39d6d2 100644
--- a/tools/perf/util/workqueue/workqueue.c
+++ b/tools/perf/util/workqueue/workqueue.c
@@ -38,6 +38,7 @@ struct workqueue_struct {
 	struct list_head	idle_list;	/* idle workers */
 	int			msg_pipe[2];	/* main thread comm pipes */
 	struct worker		**workers;	/* array of all workers */
+	struct worker		*next_worker;	/* next worker to choose (round robin) */
 };
 
 static const char * const workqueue_errno_str[] = {
@@ -48,6 +49,8 @@ static const char * const workqueue_errno_str[] = {
 	"Error sending message to worker",
 	"Error receiving message from worker",
 	"Received unexpected message from worker",
+	"Worker is not ready",
+	"Worker is in an unrecognized status",
 };
 
 struct worker {
@@ -94,6 +97,15 @@ __releases(&worker->lock)
 	return pthread_mutex_unlock(&worker->lock);
 }
 
+static void advance_next_worker(struct workqueue_struct *wq)
+__must_hold(&wq->lock)
+{
+	if (list_is_last(&wq->next_worker->entry, &wq->busy_list))
+		wq->next_worker = list_first_entry(&wq->busy_list, struct worker, entry);
+	else
+		wq->next_worker = list_next_entry(wq->next_worker, entry);
+}
+
 /**
  * available_work - check if worker @worker has work to do
  */
@@ -159,9 +171,13 @@ static void sleep_worker(struct workqueue_struct *wq, struct worker *worker)
 __must_hold(&wq->lock)
 {
 	worker->status = WORKER_STATUS__IDLE;
+	if (wq->next_worker == worker)
+		advance_next_worker(wq);
 	list_move(&worker->entry, &wq->idle_list);
-	if (list_empty(&wq->busy_list))
+	if (list_empty(&wq->busy_list)) {
+		wq->next_worker = NULL;
 		pthread_cond_signal(&wq->idle_cond);
+	}
 }
 
 /**
@@ -196,6 +212,52 @@ __must_hold(&worker->lock)
 	}
 }
 
+/**
+ * wake_worker - prepare for waking worker @worker of workqueue @wq assigning @work to do
+ *
+ * Called from main thread.
+ * Moves worker from idle to busy list and assigns @work to it.
+ * Must call wake_worker outside critical section afterwards.
+ */
+static int prepare_wake_worker(struct workqueue_struct *wq, struct worker *worker,
+			struct work_struct *work)
+__must_hold(&wq->lock)
+__must_hold(&worker->lock)
+{
+	if (wq->next_worker)
+		list_move_tail(&worker->entry, &wq->next_worker->entry);
+	else
+		list_move(&worker->entry, &wq->busy_list);
+	wq->next_worker = worker;
+
+	list_add_tail(&work->entry, &worker->queue);
+	worker->status = WORKER_STATUS__BUSY;
+
+	return 0;
+}
+
+/**
+ * wake_worker - send wake message to worker @worker of workqueue @wq
+ *
+ * Called from main thread.
+ * Must be called after prepare_wake_worker and outside critical section to
+ * reduce time spent inside it
+ */
+static int wake_worker(struct worker *worker)
+{
+	enum worker_msg msg = WORKER_MSG__WAKE;
+	int ret;
+	char sbuf[STRERR_BUFSIZE];
+
+	ret = writen(worker->msg_pipe[1], &msg, sizeof(msg));
+	if (ret < 0) {
+		pr_debug2("wake worker %d: error sending msg: %s\n",
+			worker->tidx, str_error_r(errno, sbuf, sizeof(sbuf)));
+		return -WORKQUEUE_ERROR__WRITEPIPE;
+	}
+
+	return 0;
+}
 
 /**
  * stop_worker - stop worker @worker
@@ -418,6 +480,8 @@ struct workqueue_struct *create_workqueue(int nr_threads)
 			goto out_stop_pool;
 	}
 
+	wq->next_worker = NULL;
+
 	return wq;
 
 out_stop_pool:
@@ -532,6 +596,8 @@ int workqueue_strerror(struct workqueue_struct *wq, int err, char *buf, size_t s
 		emsg = str_error_r(errno, sbuf, sizeof(sbuf));
 		return scnprintf(buf, size, "%s: %s.\n", errno_str, emsg);
 	case -WORKQUEUE_ERROR__INVALIDMSG:
+	case -WORKQUEUE_ERROR__INVALIDWORKERSTATUS:
+	case -WORKQUEUE_ERROR__NOTREADY:
 		return scnprintf(buf, size, "%s.\n", errno_str);
 	default:
 		emsg = str_error_r(err, sbuf, sizeof(sbuf));
@@ -566,3 +632,111 @@ int workqueue_nr_threads(struct workqueue_struct *wq)
 {
 	return threadpool__size(wq->pool);
 }
+
+/**
+ * __queue_work_on_worker - add @work to the internal queue of worker @worker
+ *
+ * NB: this function releases the locks to be able to send notification to
+ * thread outside the critical section.
+ */
+static int __queue_work_on_worker(struct workqueue_struct *wq,
+				struct worker *worker, struct work_struct *work)
+__must_hold(&wq->lock)
+__must_hold(&worker->lock)
+__releases(&wq->lock)
+__releases(&worker->lock)
+{
+	int ret;
+
+	switch (worker->status) {
+	case WORKER_STATUS__BUSY:
+		list_add_tail(&work->entry, &worker->queue);
+
+		unlock_worker(worker);
+		unlock_workqueue(wq);
+		pr_debug("workqueue: queued new work item\n");
+		return 0;
+	case WORKER_STATUS__IDLE:
+		ret = prepare_wake_worker(wq, worker, work);
+		unlock_worker(worker);
+		unlock_workqueue(wq);
+		if (ret)
+			return ret;
+		ret = wake_worker(worker);
+		if (!ret)
+			pr_debug("workqueue: woke worker %d\n", worker->tidx);
+		return ret;
+	default:
+	case WORKER_STATUS__MAX:
+		unlock_worker(worker);
+		unlock_workqueue(wq);
+		pr_debug2("workqueue: worker is in unrecognized status %d\n",
+			worker->status);
+		return -WORKQUEUE_ERROR__INVALIDWORKERSTATUS;
+	}
+
+	return 0;
+}
+
+/**
+ * queue_work - add @work to @wq internal queue
+ *
+ * If there are idle threads, one of these will be woken up.
+ * Otherwise, the work is added to the pending list.
+ */
+int queue_work(struct workqueue_struct *wq, struct work_struct *work)
+{
+	struct worker *worker;
+
+	lock_workqueue(wq);
+	if (list_empty(&wq->idle_list)) {
+		worker = wq->next_worker;
+		advance_next_worker(wq);
+	} else {
+		worker = list_first_entry(&wq->idle_list, struct worker, entry);
+	}
+	lock_worker(worker);
+
+	return __queue_work_on_worker(wq, worker, work);
+}
+
+/**
+ * queue_work_on_worker - add @work to worker @tidx internal queue
+ */
+int queue_work_on_worker(int tidx, struct workqueue_struct *wq, struct work_struct *work)
+{
+	lock_workqueue(wq);
+	lock_worker(wq->workers[tidx]);
+	return __queue_work_on_worker(wq, wq->workers[tidx], work);
+}
+
+/**
+ * flush_workqueue - wait for all currently executed and pending work to finish
+ *
+ * This function blocks until all threads become idle.
+ */
+int flush_workqueue(struct workqueue_struct *wq)
+{
+	int err = 0, ret;
+
+	lock_workqueue(wq);
+	while (!list_empty(&wq->busy_list)) {
+		ret = pthread_cond_wait(&wq->idle_cond, &wq->lock);
+		if (ret) {
+			pr_debug2("%s: error in pthread_cond_wait\n", __func__);
+			err = -ret;
+			break;
+		}
+	}
+	unlock_workqueue(wq);
+
+	return err;
+}
+
+/**
+ * init_work - initialize the @work struct
+ */
+void init_work(struct work_struct *work)
+{
+	INIT_LIST_HEAD(&work->entry);
+}
diff --git a/tools/perf/util/workqueue/workqueue.h b/tools/perf/util/workqueue/workqueue.h
index 100841cc035fde1d..37ef84fc9c6ed4b6 100644
--- a/tools/perf/util/workqueue/workqueue.h
+++ b/tools/perf/util/workqueue/workqueue.h
@@ -22,6 +22,13 @@ extern int destroy_workqueue(struct workqueue_struct *wq);
 
 extern int workqueue_nr_threads(struct workqueue_struct *wq);
 
+extern int queue_work(struct workqueue_struct *wq, struct work_struct *work);
+extern int queue_work_on_worker(int tidx, struct workqueue_struct *wq, struct work_struct *work);
+
+extern int flush_workqueue(struct workqueue_struct *wq);
+
+extern void init_work(struct work_struct *work);
+
 #define WORKQUEUE_STRERR_BUFSIZE (128+THREADPOOL_STRERR_BUFSIZE)
 #define WORKQUEUE_ERROR__OFFSET 512
 enum {
@@ -32,6 +39,8 @@ enum {
 	WORKQUEUE_ERROR__WRITEPIPE,
 	WORKQUEUE_ERROR__READPIPE,
 	WORKQUEUE_ERROR__INVALIDMSG,
+	WORKQUEUE_ERROR__NOTREADY,
+	WORKQUEUE_ERROR__INVALIDWORKERSTATUS,
 };
 extern int workqueue_strerror(struct workqueue_struct *wq, int err, char *buf, size_t size);
 extern int create_workqueue_strerror(struct workqueue_struct *err_ptr, char *buf, size_t size);
-- 
2.31.1


  parent reply	other threads:[~2021-08-20 10:54 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2021-08-20 10:53 [RFC PATCH v3 00/15] perf: add workqueue library and use it in synthetic-events Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 01/15] perf workqueue: threadpool creation and destruction Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 02/15] perf tests: add test for workqueue Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 03/15] perf workqueue: add threadpool start and stop functions Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 04/15] perf workqueue: add threadpool execute and wait functions Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 05/15] tools: add sparse context/locking annotations in compiler-types.h Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 06/15] perf workqueue: introduce workqueue struct Riccardo Mancini
2021-08-24 19:27   ` Namhyung Kim
2021-08-31 16:13     ` Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 07/15] perf workqueue: implement worker thread and management Riccardo Mancini
2021-08-30  7:22   ` Jiri Olsa
2021-08-20 10:53 ` Riccardo Mancini [this message]
2021-08-24 19:40   ` [RFC PATCH v3 08/15] perf workqueue: add queue_work and flush_workqueue functions Namhyung Kim
2021-08-31 16:23     ` Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 09/15] perf workqueue: spinup threads when needed Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 10/15] perf workqueue: create global workqueue Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 11/15] perf workqueue: add utility to execute a for loop in parallel Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 12/15] perf record: setup global workqueue Riccardo Mancini
2021-08-20 10:53 ` [RFC PATCH v3 13/15] perf top: " Riccardo Mancini
2021-08-20 10:54 ` [RFC PATCH v3 14/15] perf test/synthesis: " Riccardo Mancini
2021-08-20 10:54 ` [RFC PATCH v3 15/15] perf synthetic-events: use workqueue parallel_for Riccardo Mancini
2021-08-29 21:59 ` [RFC PATCH v3 00/15] perf: add workqueue library and use it in synthetic-events Jiri Olsa
2021-08-31 15:46   ` Jiri Olsa
2021-08-31 16:57     ` Riccardo Mancini

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=f9100c6e428eafe1b9761f947550e45ccc9e8849.1629454773.git.rickyman7@gmail.com \
    --to=rickyman7@gmail.com \
    --cc=acme@kernel.org \
    --cc=alexey.v.bayduraev@linux.intel.com \
    --cc=irogers@google.com \
    --cc=jolsa@redhat.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-perf-users@vger.kernel.org \
    --cc=mark.rutland@arm.com \
    --cc=mingo@redhat.com \
    --cc=namhyung@kernel.org \
    --cc=peterz@infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).