* [PATCH v2] io-wq: handle hashed writes in chains
@ 2020-03-19 18:56 Jens Axboe
  2020-03-22 16:09 ` Pavel Begunkov
  2020-03-22 20:56 ` Pavel Begunkov
  0 siblings, 2 replies; 20+ messages in thread
From: Jens Axboe @ 2020-03-19 18:56 UTC (permalink / raw)
  To: io-uring

We always punt async buffered writes to an io-wq helper, as the core
kernel does not have IOCB_NOWAIT support for that. Most buffered async
writes complete very quickly, as it's just a copy operation. This means
that doing multiple locking roundtrips on the shared wqe lock for each
buffered write is wasteful. Additionally, buffered writes are hashed
work items, which means that any buffered write to a given file is
serialized.

When looking for a new work item, build a chain of identically hashed
work items, and then hand back that batch. Until the batch is done, the
caller doesn't have to synchronize with the wqe or worker locks again.
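
For illustration only, a rough sketch of the caller side (process_one()
is a hypothetical stand-in for actually running the work item; the
worker busy/free bookkeeping and hash-bit clearing done by
io_worker_handle_work() are omitted):

	struct io_wq_work_list batch = { .first = NULL, .last = NULL };
	struct io_wq_work *work;

	spin_lock_irq(&wqe->lock);
	/* first item is returned, identically hashed follow-ups go to 'batch' */
	work = io_get_next_work(wqe, &batch);
	spin_unlock_irq(&wqe->lock);

	while (work) {
		process_one(work);	/* e.g. the buffered write */

		/* drain the on-stack batch without touching wqe->lock again */
		work = wq_first_entry(&batch, struct io_wq_work, list);
		if (work)
			wq_node_del(&batch, &work->list);
	}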

Signed-off-by: Jens Axboe <axboe@kernel.dk>

---

Changes:
- Don't overwrite passed back work


diff --git a/fs/io-wq.c b/fs/io-wq.c
index 9541df2729de..8402c6e417e1 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -380,32 +380,65 @@ static inline unsigned int io_get_work_hash(struct io_wq_work *work)
 	return work->flags >> IO_WQ_HASH_SHIFT;
 }
 
-static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
+/*
+ * Returns the next work item to process, if any. For hashed work items that
+ * hash to the same key, we can't process N+1 before N is done. To make the
+ * processing more efficient, return N+1 and later identically hashed work
+ * in the passed-in list. This avoids repeated hammering on the wqe lock,
+ * as the caller can just process items in the on-stack list.
+ */
+static struct io_wq_work *io_get_next_work(struct io_wqe *wqe,
+					   struct io_wq_work_list *list)
 	__must_hold(wqe->lock)
 {
-	struct io_wq_work_node *node, *prev;
-	struct io_wq_work *work;
-	unsigned int hash;
+	struct io_wq_work *ret = NULL;
 
-	wq_list_for_each(node, prev, &wqe->work_list) {
-		work = container_of(node, struct io_wq_work, list);
+	do {
+		unsigned int new_hash, hash;
+		struct io_wq_work *work;
+
+		work = wq_first_entry(&wqe->work_list, struct io_wq_work, list);
+		if (!work)
+			break;
 
 		/* not hashed, can run anytime */
 		if (!io_wq_is_hashed(work)) {
-			wq_node_del(&wqe->work_list, node, prev);
-			return work;
+			/* already have hashed work, let new worker get this */
+			if (ret) {
+				struct io_wqe_acct *acct;
+
+				/* get new worker for unhashed, if none now */
+				acct = io_work_get_acct(wqe, work);
+				if (!atomic_read(&acct->nr_running))
+					io_wqe_wake_worker(wqe, acct);
+				break;
+			}
+			wq_node_del(&wqe->work_list, &work->list);
+			ret = work;
+			break;
 		}
 
 		/* hashed, can run if not already running */
-		hash = io_get_work_hash(work);
-		if (!(wqe->hash_map & BIT(hash))) {
+		new_hash = io_get_work_hash(work);
+		if (wqe->hash_map & BIT(new_hash))
+			break;
+
+		if (!ret) {
+			hash = new_hash;
 			wqe->hash_map |= BIT(hash);
-			wq_node_del(&wqe->work_list, node, prev);
-			return work;
+		} else if (hash != new_hash) {
+			break;
 		}
-	}
 
-	return NULL;
+		wq_node_del(&wqe->work_list, &work->list);
+		/* return first node, add subsequent same hash to the list */
+		if (!ret)
+			ret = work;
+		else
+			wq_list_add_tail(&work->list, list);
+	} while (1);
+
+	return ret;
 }
 
 static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
@@ -481,6 +514,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
 static void io_worker_handle_work(struct io_worker *worker)
 	__releases(wqe->lock)
 {
+	struct io_wq_work_list list = { .first = NULL, .last = NULL };
 	struct io_wqe *wqe = worker->wqe;
 	struct io_wq *wq = wqe->wq;
 
@@ -495,7 +529,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 		 * can't make progress, any work completion or insertion will
 		 * clear the stalled flag.
 		 */
-		work = io_get_next_work(wqe);
+		work = io_get_next_work(wqe, &list);
 		if (work)
 			__io_worker_busy(wqe, worker, work);
 		else if (!wq_list_empty(&wqe->work_list))
@@ -504,6 +538,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 		spin_unlock_irq(&wqe->lock);
 		if (!work)
 			break;
+got_work:
 		io_assign_current_work(worker, work);
 
 		/* handle a whole dependent link */
@@ -530,6 +565,24 @@ static void io_worker_handle_work(struct io_worker *worker)
 				work = NULL;
 			}
 			if (hash != -1U) {
+				/*
+				 * If the local list is non-empty, then we
+				 * have work that hashed to the same key.
+				 * No need for a lock round-trip, or fiddling
+				 * with the free/busy state of the worker, or
+				 * clearing the hashed state. Just process the
+				 * next one.
+				 */
+				if (!work) {
+					work = wq_first_entry(&list,
+							      struct io_wq_work,
+							      list);
+					if (work) {
+						wq_node_del(&list, &work->list);
+						goto got_work;
+					}
+				}
+
 				spin_lock_irq(&wqe->lock);
 				wqe->hash_map &= ~BIT_ULL(hash);
 				wqe->flags &= ~IO_WQE_FLAG_STALLED;
@@ -910,7 +963,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
 		work = container_of(node, struct io_wq_work, list);
 
 		if (match->fn(work, match->data)) {
-			wq_node_del(&wqe->work_list, node, prev);
+			__wq_node_del(&wqe->work_list, node, prev);
 			found = true;
 			break;
 		}
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 298b21f4a4d2..9a194339bd9d 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -40,9 +40,9 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node,
 	}
 }
 
-static inline void wq_node_del(struct io_wq_work_list *list,
-			       struct io_wq_work_node *node,
-			       struct io_wq_work_node *prev)
+static inline void __wq_node_del(struct io_wq_work_list *list,
+				struct io_wq_work_node *node,
+				struct io_wq_work_node *prev)
 {
 	if (node == list->first)
 		WRITE_ONCE(list->first, node->next);
@@ -53,6 +53,21 @@ static inline void wq_node_del(struct io_wq_work_list *list,
 	node->next = NULL;
 }
 
+
+static inline void wq_node_del(struct io_wq_work_list *list,
+			       struct io_wq_work_node *node)
+{
+	__wq_node_del(list, node, NULL);
+}
+
+#define wq_first_entry(list, type, member)				\
+({									\
+	struct io_wq_work *__work = NULL;				\
+	if (!wq_list_empty((list)))					\
+		__work = container_of((list)->first, type, member);	\
+	__work;								\
+})
+
 #define wq_list_for_each(pos, prv, head)			\
 	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)
 
-- 
Jens Axboe


* [PATCH v2] io-wq: handle hashed writes in chains
@ 2020-03-23 19:57 Pavel Begunkov
  2020-03-24  2:31 ` Jens Axboe
  0 siblings, 1 reply; 20+ messages in thread
From: Pavel Begunkov @ 2020-03-23 19:57 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

We always punt async buffered writes to an io-wq helper, as the core
kernel does not have IOCB_NOWAIT support for that. Most buffered async
writes complete very quickly, as it's just a copy operation. This means
that doing multiple locking roundtrips on the shared wqe lock for each
buffered write is wasteful. Additionally, buffered writes are hashed
work items, which means that any buffered write to a given file is
serialized.

Keep identically hashed work items contiguous in @wqe->work_list, and
track a tail for each hash bucket. On dequeue of a hashed item, splice
all of the same hash in one go using the tracked tail. Until the batch
is done, the caller doesn't have to synchronize with the wqe or worker
locks again.
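
Condensed, the two sides of the patch below look roughly like this
(illustrative sketch only; locking, the hash_map check and the unhashed
fast path are left out):

	/* enqueue (io_wqe_insert_work): keep same-hash items adjacent */
	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (tail)
		wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
	else
		wq_list_add_tail(&work->list, &wqe->work_list);

	/* dequeue (io_get_next_work): splice the whole [work, tail] run at once */
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = NULL;
	wq_list_cut(&wqe->work_list, &tail->list, prev);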

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---

v2 (since diff):
- add wq_next_work() helper
- rename list helpers to match list.h naming
- reshuffle io_worker_handle_work() bits, but keep it
  functionally identical

 fs/io-wq.c | 68 ++++++++++++++++++++++++++++++++++++++----------------
 fs/io-wq.h | 45 +++++++++++++++++++++++++++++-------
 2 files changed, 85 insertions(+), 28 deletions(-)

diff --git a/fs/io-wq.c b/fs/io-wq.c
index b3fb61ec0870..cc5cf2209fb0 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -69,6 +69,8 @@ struct io_worker {
 #define IO_WQ_HASH_ORDER	5
 #endif
 
+#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
+
 struct io_wqe_acct {
 	unsigned nr_workers;
 	unsigned max_workers;
@@ -98,6 +100,7 @@ struct io_wqe {
 	struct list_head all_list;
 
 	struct io_wq *wq;
+	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
 };
 
 /*
@@ -384,7 +387,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 	__must_hold(wqe->lock)
 {
 	struct io_wq_work_node *node, *prev;
-	struct io_wq_work *work;
+	struct io_wq_work *work, *tail;
 	unsigned int hash;
 
 	wq_list_for_each(node, prev, &wqe->work_list) {
@@ -392,7 +395,7 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 
 		/* not hashed, can run anytime */
 		if (!io_wq_is_hashed(work)) {
-			wq_node_del(&wqe->work_list, node, prev);
+			wq_list_del(&wqe->work_list, node, prev);
 			return work;
 		}
 
@@ -400,7 +403,10 @@ static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
 		hash = io_get_work_hash(work);
 		if (!(wqe->hash_map & BIT(hash))) {
 			wqe->hash_map |= BIT(hash);
-			wq_node_del(&wqe->work_list, node, prev);
+			/* all items with this hash lie in [work, tail] */
+			tail = wqe->hash_tail[hash];
+			wqe->hash_tail[hash] = NULL;
+			wq_list_cut(&wqe->work_list, &tail->list, prev);
 			return work;
 		}
 	}
@@ -485,7 +491,7 @@ static void io_worker_handle_work(struct io_worker *worker)
 	struct io_wq *wq = wqe->wq;
 
 	do {
-		struct io_wq_work *work, *assign_work;
+		struct io_wq_work *work;
 		unsigned int hash;
 get_next:
 		/*
@@ -508,8 +514,9 @@ static void io_worker_handle_work(struct io_worker *worker)
 
 		/* handle a whole dependent link */
 		do {
-			struct io_wq_work *old_work;
+			struct io_wq_work *old_work, *next_hashed, *linked;
 
+			next_hashed = wq_next_work(work);
 			io_impersonate_work(worker, work);
 			/*
 			 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
@@ -518,22 +525,23 @@ static void io_worker_handle_work(struct io_worker *worker)
 			if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
 				work->flags |= IO_WQ_WORK_CANCEL;
 
-			old_work = work;
 			hash = io_get_work_hash(work);
-			work->func(&work);
-			work = (old_work == work) ? NULL : work;
-
-			assign_work = work;
-			if (work && io_wq_is_hashed(work))
-				assign_work = NULL;
-			io_assign_current_work(worker, assign_work);
+			linked = old_work = work;
+			linked->func(&linked);
+			linked = (old_work == linked) ? NULL : linked;
+
+			work = next_hashed;
+			if (!work && linked && !io_wq_is_hashed(linked)) {
+				work = linked;
+				linked = NULL;
+			}
+			io_assign_current_work(worker, work);
 			wq->free_work(old_work);
 
-			if (work && !assign_work) {
-				io_wqe_enqueue(wqe, work);
-				work = NULL;
-			}
-			if (hash != -1U) {
+			if (linked)
+				io_wqe_enqueue(wqe, linked);
+
+			if (hash != -1U && !next_hashed) {
 				spin_lock_irq(&wqe->lock);
 				wqe->hash_map &= ~BIT_ULL(hash);
 				wqe->flags &= ~IO_WQE_FLAG_STALLED;
@@ -776,6 +784,26 @@ static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
 	} while (work);
 }
 
+static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
+{
+	unsigned int hash;
+	struct io_wq_work *tail;
+
+	if (!io_wq_is_hashed(work)) {
+append:
+		wq_list_add_tail(&work->list, &wqe->work_list);
+		return;
+	}
+
+	hash = io_get_work_hash(work);
+	tail = wqe->hash_tail[hash];
+	wqe->hash_tail[hash] = work;
+	if (!tail)
+		goto append;
+
+	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
+}
+
 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 {
 	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
@@ -795,7 +823,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
 
 	work_flags = work->flags;
 	spin_lock_irqsave(&wqe->lock, flags);
-	wq_list_add_tail(&work->list, &wqe->work_list);
+	io_wqe_insert_work(wqe, work);
 	wqe->flags &= ~IO_WQE_FLAG_STALLED;
 	spin_unlock_irqrestore(&wqe->lock, flags);
 
@@ -914,7 +942,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
 		work = container_of(node, struct io_wq_work, list);
 
 		if (match->fn(work, match->data)) {
-			wq_node_del(&wqe->work_list, node, prev);
+			wq_list_del(&wqe->work_list, node, prev);
 			found = true;
 			break;
 		}
diff --git a/fs/io-wq.h b/fs/io-wq.h
index d2a5684bf673..3ee7356d6be5 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -28,6 +28,18 @@ struct io_wq_work_list {
 	struct io_wq_work_node *last;
 };
 
+static inline void wq_list_add_after(struct io_wq_work_node *node,
+				     struct io_wq_work_node *pos,
+				     struct io_wq_work_list *list)
+{
+	struct io_wq_work_node *next = pos->next;
+
+	pos->next = node;
+	node->next = next;
+	if (!next)
+		list->last = node;
+}
+
 static inline void wq_list_add_tail(struct io_wq_work_node *node,
 				    struct io_wq_work_list *list)
 {
@@ -40,17 +52,26 @@ static inline void wq_list_add_tail(struct io_wq_work_node *node,
 	}
 }
 
-static inline void wq_node_del(struct io_wq_work_list *list,
-			       struct io_wq_work_node *node,
+static inline void wq_list_cut(struct io_wq_work_list *list,
+			       struct io_wq_work_node *last,
 			       struct io_wq_work_node *prev)
 {
-	if (node == list->first)
-		WRITE_ONCE(list->first, node->next);
-	if (node == list->last)
+	/* first in the list, if prev==NULL */
+	if (!prev)
+		WRITE_ONCE(list->first, last->next);
+	else
+		prev->next = last->next;
+
+	if (last == list->last)
 		list->last = prev;
-	if (prev)
-		prev->next = node->next;
-	node->next = NULL;
+	last->next = NULL;
+}
+
+static inline void wq_list_del(struct io_wq_work_list *list,
+			       struct io_wq_work_node *node,
+			       struct io_wq_work_node *prev)
+{
+	wq_list_cut(list, node, prev);
 }
 
 #define wq_list_for_each(pos, prv, head)			\
@@ -78,6 +99,14 @@ struct io_wq_work {
 		*(work) = (struct io_wq_work){ .func = _func };	\
 	} while (0)						\
 
+static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
+{
+	if (!work->list.next)
+		return NULL;
+
+	return container_of(work->list.next, struct io_wq_work, list);
+}
+
 typedef void (free_work_fn)(struct io_wq_work *);
 
 struct io_wq_data {
-- 
2.24.0

