From: Pavel Begunkov <asml.silence@gmail.com>
To: Jens Axboe <axboe@kernel.dk>, io-uring@vger.kernel.org
Subject: [PATCH 05/23] io_uring: remove allocation cache array
Date: Fri, 24 Sep 2021 17:31:43 +0100
Message-ID: <21ee9095e4b7fbb3fa42de8c4879a4a4cfa798a9.1632500264.git.asml.silence@gmail.com>
In-Reply-To: <cover.1632500264.git.asml.silence@gmail.com>

We have several layers of request allocation caches. Remove the last
one, the submit->reqs array, and always use submit->free_list instead.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 60 +++++++++++++++------------------------------------
 1 file changed, 17 insertions(+), 43 deletions(-)
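
A minimal sketch of the resulting allocation path, for illustration
only and not part of the patch; it condenses the hunks below, with
identifiers as in fs/io_uring.c at this point in the series:

	/*
	 * Illustration only: the allocation path as it looks after this
	 * change, condensed from the hunks below.  The flush of the
	 * IRQ-side locked_free_list (io_flush_cached_reqs) is elided.
	 */
	static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
	{
		struct io_submit_state *state = &ctx->submit_state;
		gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
		void *reqs[IO_REQ_ALLOC_BATCH];	/* on-stack, refill only */
		struct io_kiocb *req;
		int ret, i;

		/* Fast path: reuse a request cached on the free list. */
		if (likely(!list_empty(&state->free_list)))
			goto got_req;

		/* Bulk alloc is all-or-nothing; fall back to single alloc. */
		ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
		if (unlikely(ret <= 0)) {
			reqs[0] = kmem_cache_alloc(req_cachep, gfp);
			if (!reqs[0])
				return NULL;
			ret = 1;
		}

		/* Pre-init fresh requests and park them on the list cache. */
		for (i = 0; i < ret; i++) {
			req = reqs[i];
			io_preinit_req(req, ctx);
			list_add(&req->inflight_entry, &state->free_list);
		}
	got_req:
		req = list_first_entry(&state->free_list, struct io_kiocb,
				       inflight_entry);
		list_del(&req->inflight_entry);
		return req;
	}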

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 027e6595321e..bf59ca19aef2 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -310,12 +310,6 @@ struct io_submit_state {
 	struct blk_plug		plug;
 	struct io_submit_link	link;
 
-	/*
-	 * io_kiocb alloc cache
-	 */
-	void			*reqs[IO_REQ_CACHE_SIZE];
-	unsigned int		free_reqs;
-
 	bool			plug_started;
 	bool			need_plug;
 
@@ -1900,7 +1894,6 @@ static void io_flush_cached_locked_reqs(struct io_ring_ctx *ctx,
 static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
-	int nr;
 
 	/*
 	 * If we have more than a batch's worth of requests in our IRQ side
@@ -1909,20 +1902,7 @@ static bool io_flush_cached_reqs(struct io_ring_ctx *ctx)
 	 */
 	if (READ_ONCE(ctx->locked_free_nr) > IO_COMPL_BATCH)
 		io_flush_cached_locked_reqs(ctx, state);
-
-	nr = state->free_reqs;
-	while (!list_empty(&state->free_list)) {
-		struct io_kiocb *req = list_first_entry(&state->free_list,
-					struct io_kiocb, inflight_entry);
-
-		list_del(&req->inflight_entry);
-		state->reqs[nr++] = req;
-		if (nr == ARRAY_SIZE(state->reqs))
-			break;
-	}
-
-	state->free_reqs = nr;
-	return nr != 0;
+	return !list_empty(&state->free_list);
 }
 
 /*
@@ -1936,33 +1916,36 @@ static struct io_kiocb *io_alloc_req(struct io_ring_ctx *ctx)
 {
 	struct io_submit_state *state = &ctx->submit_state;
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
+	void *reqs[IO_REQ_ALLOC_BATCH];
+	struct io_kiocb *req;
 	int ret, i;
 
-	BUILD_BUG_ON(ARRAY_SIZE(state->reqs) < IO_REQ_ALLOC_BATCH);
-
-	if (likely(state->free_reqs || io_flush_cached_reqs(ctx)))
+	if (likely(!list_empty(&state->free_list) || io_flush_cached_reqs(ctx)))
 		goto got_req;
 
-	ret = kmem_cache_alloc_bulk(req_cachep, gfp, IO_REQ_ALLOC_BATCH,
-				    state->reqs);
+	ret = kmem_cache_alloc_bulk(req_cachep, gfp, ARRAY_SIZE(reqs), reqs);
 
 	/*
 	 * Bulk alloc is all-or-nothing. If we fail to get a batch,
 	 * retry single alloc to be on the safe side.
 	 */
 	if (unlikely(ret <= 0)) {
-		state->reqs[0] = kmem_cache_alloc(req_cachep, gfp);
-		if (!state->reqs[0])
+		reqs[0] = kmem_cache_alloc(req_cachep, gfp);
+		if (!reqs[0])
 			return NULL;
 		ret = 1;
 	}
 
-	for (i = 0; i < ret; i++)
-		io_preinit_req(state->reqs[i], ctx);
-	state->free_reqs = ret;
+	for (i = 0; i < ret; i++) {
+		req = reqs[i];
+
+		io_preinit_req(req, ctx);
+		list_add(&req->inflight_entry, &state->free_list);
+	}
 got_req:
-	state->free_reqs--;
-	return state->reqs[state->free_reqs];
+	req = list_first_entry(&state->free_list, struct io_kiocb, inflight_entry);
+	list_del(&req->inflight_entry);
+	return req;
 }
 
 static inline void io_put_file(struct file *file)
@@ -2320,10 +2303,7 @@ static void io_req_free_batch(struct req_batch *rb, struct io_kiocb *req,
 	rb->task_refs++;
 	rb->ctx_refs++;
 
-	if (state->free_reqs != ARRAY_SIZE(state->reqs))
-		state->reqs[state->free_reqs++] = req;
-	else
-		list_add(&req->inflight_entry, &state->free_list);
+	list_add(&req->inflight_entry, &state->free_list);
 }
 
 static void __io_submit_flush_completions(struct io_ring_ctx *ctx)
@@ -9179,12 +9159,6 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 
 	mutex_lock(&ctx->uring_lock);
-
-	if (state->free_reqs) {
-		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
-		state->free_reqs = 0;
-	}
-
 	io_flush_cached_locked_reqs(ctx, state);
 	io_req_cache_free(&state->free_list);
 	mutex_unlock(&ctx->uring_lock);
-- 
2.33.0


Thread overview: 24+ messages
2021-09-24 16:31 [RFC][PATCHSET 00/23] rework/optimise submission+completion paths Pavel Begunkov
2021-09-24 16:31 ` [PATCH 01/23] io_uring: mark having different creds unlikely Pavel Begunkov
2021-09-24 16:31 ` [PATCH 02/23] io_uring: force_nonspin Pavel Begunkov
2021-09-24 16:31 ` [PATCH 03/23] io_uring: make io_do_iopoll return number of reqs Pavel Begunkov
2021-09-24 16:31 ` [PATCH 04/23] io_uring: use slist for completion batching Pavel Begunkov
2021-09-24 16:31 ` [PATCH 05/23] io_uring: remove allocation cache array Pavel Begunkov [this message]
2021-09-24 16:31 ` [PATCH 06/23] io-wq: add io_wq_work_node based stack Pavel Begunkov
2021-09-24 16:31 ` [PATCH 07/23] io_uring: replace list with stack for req caches Pavel Begunkov
2021-09-24 16:31 ` [PATCH 08/23] io_uring: split iopoll loop Pavel Begunkov
2021-09-24 16:31 ` [PATCH 09/23] io_uring: use single linked list for iopoll Pavel Begunkov
2021-09-24 16:31 ` [PATCH 10/23] io_uring: add a helper for batch free Pavel Begunkov
2021-09-24 16:31 ` [PATCH 11/23] io_uring: convert iopoll_completed to store_release Pavel Begunkov
2021-09-24 16:31 ` [PATCH 12/23] io_uring: optimise batch completion Pavel Begunkov
2021-09-24 16:31 ` [PATCH 13/23] io_uring: inline completion batching helpers Pavel Begunkov
2021-09-24 16:31 ` [PATCH 14/23] io_uring: don't pass tail into io_free_batch_list Pavel Begunkov
2021-09-24 16:31 ` [PATCH 15/23] io_uring: don't pass state to io_submit_state_end Pavel Begunkov
2021-09-24 16:31 ` [PATCH 16/23] io_uring: deduplicate io_queue_sqe() call sites Pavel Begunkov
2021-09-24 16:31 ` [PATCH 17/23] io_uring: remove drain_active check from hot path Pavel Begunkov
2021-09-24 16:31 ` [PATCH 18/23] io_uring: split slow path from io_queue_sqe Pavel Begunkov
2021-09-24 16:31 ` [PATCH 19/23] io_uring: inline hot path of __io_queue_sqe() Pavel Begunkov
2021-09-24 16:31 ` [PATCH 20/23] io_uring: reshuffle queue_sqe completion handling Pavel Begunkov
2021-09-24 16:31 ` [PATCH 21/23] io_uring: restructure submit sqes to_submit checks Pavel Begunkov
2021-09-24 16:32 ` [PATCH 22/23] io_uring: kill off ->inflight_entry field Pavel Begunkov
2021-09-24 16:32 ` [PATCH 23/23] io_uring: comment why inline complete calls io_clean_op() Pavel Begunkov
