From: Jens Axboe <axboe@kernel.dk>
Subject: [PATCH 10/18] io_uring: batch io_kiocb allocation
Date: Mon, 28 Jan 2019 14:35:30 -0700
Message-ID: <20190128213538.13486-11-axboe@kernel.dk>
References: <20190128213538.13486-1-axboe@kernel.dk>
In-Reply-To: <20190128213538.13486-1-axboe@kernel.dk>
To: linux-aio@kvack.org, linux-block@vger.kernel.org, linux-man@vger.kernel.org, linux-api@vger.kernel.org
Cc: hch@lst.de, jmoyer@redhat.com, avi@scylladb.com, Jens Axboe <axboe@kernel.dk>

Similarly to how we use state->ios_left to know how many references
to get to a file, we can use it to allocate the io_kiocb's we need in
bulk.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 39 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 25bdd719690d..a33f1b1709d0 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -134,6 +134,13 @@ struct io_kiocb {
 struct io_submit_state {
 	struct blk_plug plug;
 
+	/*
+	 * io_kiocb alloc cache
+	 */
+	void *reqs[IO_IOPOLL_BATCH];
+	unsigned int free_reqs;
+	unsigned int cur_req;
+
 	/*
 	 * File reference cache
 	 */
@@ -257,20 +264,42 @@ static void io_ring_drop_ctx_refs(struct io_ring_ctx *ctx, unsigned refs)
 		wake_up(&ctx->wait);
 }
 
-static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
+static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
+				   struct io_submit_state *state)
 {
+	gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
 	struct io_kiocb *req;
 
 	/* safe to use the non tryget, as we're inside ring ref already */
 	percpu_ref_get(&ctx->refs);
 
-	req = kmem_cache_alloc(req_cachep, GFP_ATOMIC | __GFP_NOWARN);
+	if (!state)
+		req = kmem_cache_alloc(req_cachep, gfp);
+	else if (!state->free_reqs) {
+		size_t sz;
+		int ret;
+
+		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
+		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz,
+						state->reqs);
+		if (ret <= 0)
+			goto out;
+		state->free_reqs = ret - 1;
+		state->cur_req = 1;
+		req = state->reqs[0];
+	} else {
+		req = state->reqs[state->cur_req];
+		state->free_reqs--;
+		state->cur_req++;
+	}
+
 	if (req) {
 		req->ctx = ctx;
 		req->flags = 0;
 		return req;
 	}
 
+out:
 	io_ring_drop_ctx_refs(ctx, 1);
 	return NULL;
 }
@@ -883,7 +912,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
 	if (unlikely(s->sqe->flags))
 		return -EINVAL;
 
-	req = io_get_req(ctx);
+	req = io_get_req(ctx, state);
 	if (unlikely(!req))
 		return -EAGAIN;
 
@@ -907,6 +936,9 @@ static void io_submit_state_end(struct io_submit_state *state)
 {
 	blk_finish_plug(&state->plug);
 	io_file_put(state, NULL);
+	if (state->free_reqs)
+		kmem_cache_free_bulk(req_cachep, state->free_reqs,
+					&state->reqs[state->cur_req]);
 }
 
 /*
@@ -916,6 +948,7 @@ static void io_submit_state_start(struct io_submit_state *state,
 		      struct io_ring_ctx *ctx, unsigned max_ios)
 {
 	blk_start_plug(&state->plug);
+	state->free_reqs = 0;
 	state->file = NULL;
 	state->ios_left = max_ios;
 }
-- 
2.17.1
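
For readers following the series outside the kernel tree, here is a
minimal userspace sketch of the alloc-cache pattern io_get_req() uses
above. All names are illustrative, plain malloc()/free() stand in for
kmem_cache_alloc_bulk()/kmem_cache_free_bulk(), and the batch size and
request size are arbitrary:

/* Minimal sketch only; not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define BATCH	8
#define REQ_SZ	64

struct submit_state {
	void *reqs[BATCH];	/* request alloc cache */
	unsigned int free_reqs;	/* cached entries not yet handed out */
	unsigned int cur_req;	/* next cache slot to hand out */
	unsigned int ios_left;	/* submissions left in this batch */
};

static void *get_req(struct submit_state *state)
{
	if (!state->free_reqs) {
		unsigned int i, sz;

		/* don't allocate more than this batch can still use */
		sz = state->ios_left < BATCH ? state->ios_left : BATCH;
		for (i = 0; i < sz; i++) {
			state->reqs[i] = malloc(REQ_SZ);
			if (!state->reqs[i])
				break;
		}
		if (!i)
			return NULL;
		state->free_reqs = i - 1;
		state->cur_req = 1;
		return state->reqs[0];
	}
	state->free_reqs--;
	return state->reqs[state->cur_req++];
}

static void state_end(struct submit_state *state)
{
	/* hand back anything the submit loop didn't consume */
	while (state->free_reqs) {
		free(state->reqs[state->cur_req++]);
		state->free_reqs--;
	}
}

int main(void)
{
	struct submit_state state = { .ios_left = 3 };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		void *req = get_req(&state);

		printf("req %u: %p\n", i, req);
		state.ios_left--;
		free(req);	/* the kernel frees on I/O completion */
	}
	state_end(&state);
	return 0;
}

The point of the pattern is that the first request in a submission
batch pays for one bulk allocation covering up to ios_left requests,
later requests are handed out from the cache for free, and anything
left unconsumed is returned in a single call when the batch ends.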