From: Jens Axboe <axboe@kernel.dk>
To: linux-fsdevel@vger.kernel.org, linux-aio@kvack.org,
linux-block@vger.kernel.org, linux-arch@vger.kernel.org
Cc: hch@lst.de, jmoyer@redhat.com, avi@scylladb.com,
Jens Axboe <axboe@kernel.dk>
Subject: [PATCH 07/15] io_uring: add submission side request cache
Date: Wed, 9 Jan 2019 19:43:56 -0700
Message-ID: <20190110024404.25372-8-axboe@kernel.dk>
In-Reply-To: <20190110024404.25372-1-axboe@kernel.dk>
We have to add each submitted polled request to the io_ring_ctx
poll_submitted list, which means we have to grab the poll_lock. We
already use the block plug when doing a batch of IO submissions; extend
that to cover the poll requests internally as well.
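In userspace terms, the pattern is roughly the sketch below (hypothetical
names; a pthread mutex stands in for poll_lock, and the batch is pushed
onto the head of the shared list where the real code splices it to the
tail):

#include <pthread.h>
#include <stddef.h>

#define IO_IOPOLL_BATCH 8

struct req {
        struct req *next;
};

/* shared submitted list, guarded by poll_lock */
static struct req *poll_submitted;
static pthread_mutex_t poll_lock = PTHREAD_MUTEX_INITIALIZER;

/* per-submitter cache of requests not yet visible to pollers */
struct submit_state {
        struct req *head, **tail;
        unsigned int count;
};

static void state_init(struct submit_state *s)
{
        s->head = NULL;
        s->tail = &s->head;
        s->count = 0;
}

/* one poll_lock round trip per batch instead of one per request */
static void state_flush(struct submit_state *s)
{
        pthread_mutex_lock(&poll_lock);
        *s->tail = poll_submitted;
        poll_submitted = s->head;
        pthread_mutex_unlock(&poll_lock);
        state_init(s);
}

static void state_add(struct submit_state *s, struct req *r)
{
        r->next = NULL;
        *s->tail = r;
        s->tail = &r->next;
        if (++s->count >= IO_IOPOLL_BATCH)
                state_flush(s);
}

At the end of a submission run (or from the unplug callback, see the
patch below) the submitter calls state_flush() for whatever is left over.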
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
fs/io_uring.c | 122 +++++++++++++++++++++++++++++++++++++++++++-------
1 file changed, 106 insertions(+), 16 deletions(-)
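One note on the plug hookup in io_submit_state_start() below: it relies
on the block layer's blk_plug_cb mechanism, whose callbacks run when the
plug is flushed, either at blk_finish_plug() time or when the task is
about to schedule in the middle of a batch. That way already-submitted
polled requests still become visible for reaping even if the submitter
blocks. Other in-kernel users usually register through the
blk_check_plugged() helper, roughly like this (a sketch for reference,
not part of the patch):

#include <linux/blkdev.h>

static void my_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
        /*
         * cb->data was stashed by blk_check_plugged(); move any
         * privately batched work to the shared list here.
         */
}

static void my_submit_one(void *data)
{
        struct blk_plug_cb *cb;

        /* registers my_unplug once per plug, allocating the cb */
        cb = blk_check_plugged(my_unplug, data, sizeof(*cb));
        if (!cb) {
                /* no plug in progress: publish the work right away */
        }
}

The patch avoids that lookup/allocation by embedding the blk_plug_cb in
io_submit_state and adding it to plug.cb_list by hand; the state and the
plug live in the same stack frame, so the callback lifetime works out.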
diff --git a/fs/io_uring.c b/fs/io_uring.c
index c872bfb32a03..f7938156552f 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -113,6 +113,21 @@ struct sqe_submit {
unsigned index;
};
+struct io_submit_state {
+ struct io_ring_ctx *ctx;
+
+ struct blk_plug plug;
+#ifdef CONFIG_BLOCK
+ struct blk_plug_cb plug_cb;
+#endif
+
+ /*
+ * Polled iocbs that have been submitted, but not added to the ctx yet
+ */
+ struct list_head req_list;
+ unsigned int req_count;
+};
+
static struct kmem_cache *kiocb_cachep;
static const struct file_operations io_scqring_fops;
@@ -480,21 +495,29 @@ static inline void io_rw_done(struct kiocb *req, ssize_t ret)
}
/*
- * After the iocb has been issued, it's safe to be found on the poll list.
- * Adding the kiocb to the list AFTER submission ensures that we don't
- * find it from a io_getevents() thread before the issuer is done accessing
- * the kiocb cookie.
+ * Called either at the end of IO submission, or through a plug callback
+ * because we're going to schedule. Moves our local batch of requests to
+ * the ctx poll list, so they can be found for polling + reaping.
*/
-static void io_iopoll_kiocb_issued(struct io_kiocb *kiocb)
+static void io_flush_state_reqs(struct io_ring_ctx *ctx,
+ struct io_submit_state *state)
{
+ spin_lock(&ctx->poll_lock);
+ list_splice_tail_init(&state->req_list, &ctx->poll_submitted);
+ spin_unlock(&ctx->poll_lock);
+ state->req_count = 0;
+}
+
+static void io_iopoll_iocb_add_list(struct io_kiocb *kiocb)
+{
+ const int front = test_bit(KIOCB_F_IOPOLL_COMPLETED, &kiocb->ki_flags);
+ struct io_ring_ctx *ctx = kiocb->ki_ctx;
+
/*
* For fast devices, IO may have already completed. If it has, add
* it to the front so we find it first. We can't add to the poll_done
* list as that's unlocked from the completion side.
*/
- const int front = test_bit(KIOCB_F_IOPOLL_COMPLETED, &kiocb->ki_flags);
- struct io_ring_ctx *ctx = kiocb->ki_ctx;
-
spin_lock(&ctx->poll_lock);
if (front)
list_add(&kiocb->ki_list, &ctx->poll_submitted);
@@ -503,6 +526,33 @@ static void io_iopoll_kiocb_issued(struct io_kiocb *kiocb)
spin_unlock(&ctx->poll_lock);
}
+static void io_iopoll_iocb_add_state(struct io_submit_state *state,
+ struct io_kiocb *kiocb)
+{
+ if (test_bit(KIOCB_F_IOPOLL_COMPLETED, &kiocb->ki_flags))
+ list_add(&kiocb->ki_list, &state->req_list);
+ else
+ list_add_tail(&kiocb->ki_list, &state->req_list);
+
+ if (++state->req_count >= IO_IOPOLL_BATCH)
+ io_flush_state_reqs(state->ctx, state);
+}
+
+/*
+ * After the iocb has been issued, it's safe to be found on the poll list.
+ * Adding the kiocb to the list AFTER submission ensures that we don't
+ * find it from an io_getevents() thread before the issuer is done accessing
+ * the kiocb cookie.
+ */
+static void io_iopoll_kiocb_issued(struct io_submit_state *state,
+ struct io_kiocb *kiocb)
+{
+ if (!state || !IS_ENABLED(CONFIG_BLOCK))
+ io_iopoll_iocb_add_list(kiocb);
+ else
+ io_iopoll_iocb_add_state(state, kiocb);
+}
+
static ssize_t io_read(struct io_kiocb *kiocb, const struct io_uring_sqe *sqe)
{
struct iovec inline_vecs[UIO_FASTIOV], *iovec = inline_vecs;
@@ -624,7 +674,8 @@ static int io_fsync(struct io_kiocb *kiocb, const struct io_uring_sqe *sqe,
return 0;
}
-static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s)
+static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s,
+ struct io_submit_state *state)
{
const struct io_uring_sqe *sqe = s->sqe;
struct io_kiocb *req;
@@ -673,7 +724,7 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s)
ret = -EAGAIN;
goto out_put_req;
}
- io_iopoll_kiocb_issued(req);
+ io_iopoll_kiocb_issued(state, req);
}
return 0;
out_put_req:
@@ -681,6 +732,43 @@ static int io_submit_sqe(struct io_ring_ctx *ctx, struct sqe_submit *s)
return ret;
}
+#ifdef CONFIG_BLOCK
+static void io_state_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct io_submit_state *state;
+
+ state = container_of(cb, struct io_submit_state, plug_cb);
+ if (!list_empty(&state->req_list))
+ io_flush_state_reqs(state->ctx, state);
+}
+#endif
+
+/*
+ * Batched submission is done, ensure local IO is flushed out.
+ */
+static void io_submit_state_end(struct io_submit_state *state)
+{
+ blk_finish_plug(&state->plug);
+ if (!list_empty(&state->req_list))
+ io_flush_state_reqs(state->ctx, state);
+}
+
+/*
+ * Start submission side cache.
+ */
+static void io_submit_state_start(struct io_submit_state *state,
+ struct io_ring_ctx *ctx)
+{
+ state->ctx = ctx;
+ INIT_LIST_HEAD(&state->req_list);
+ state->req_count = 0;
+#ifdef CONFIG_BLOCK
+ state->plug_cb.callback = io_state_unplug;
+ blk_start_plug(&state->plug);
+ list_add(&state->plug_cb.list, &state->plug.cb_list);
+#endif
+}
+
static void io_inc_sqring(struct io_ring_ctx *ctx)
{
struct io_sq_ring *ring = ctx->sq_ring;
@@ -715,11 +803,13 @@ static bool io_peek_sqring(struct io_ring_ctx *ctx, struct sqe_submit *s)
static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
{
+ struct io_submit_state state, *statep = NULL;
int i, ret = 0, submit = 0;
- struct blk_plug plug;
- if (to_submit > IO_PLUG_THRESHOLD)
- blk_start_plug(&plug);
+ if (to_submit > IO_PLUG_THRESHOLD) {
+ io_submit_state_start(&state, ctx);
+ statep = &state;
+ }
for (i = 0; i < to_submit; i++) {
struct sqe_submit s;
@@ -727,7 +817,7 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
if (!io_peek_sqring(ctx, &s))
break;
- ret = io_submit_sqe(ctx, &s);
+ ret = io_submit_sqe(ctx, &s, statep);
if (ret)
break;
@@ -735,8 +825,8 @@ static int io_ring_submit(struct io_ring_ctx *ctx, unsigned int to_submit)
io_inc_sqring(ctx);
}
- if (to_submit > IO_PLUG_THRESHOLD)
- blk_finish_plug(&plug);
+ if (statep)
+ io_submit_state_end(statep);
return submit ? submit : ret;
}
--
2.17.1