From: Jens Axboe <axboe@kernel.dk>
To: linux-block@vger.kernel.org, linux-fsdevel@vger.kernel.org,
linux-aio@kvack.org
Cc: hch@lst.de, Jens Axboe <axboe@kernel.dk>
Subject: [PATCH 18/27] aio: add submission side request cache
Date: Fri, 30 Nov 2018 09:56:37 -0700 [thread overview]
Message-ID: <20181130165646.27341-19-axboe@kernel.dk> (raw)
In-Reply-To: <20181130165646.27341-1-axboe@kernel.dk>
We have to add each submitted polled request to the io_context
poll_submitted list, which means we have to grab the poll_lock. We
already use the block plug to batch submissions if we're doing a batch
of IO submissions; extend that to cover the poll requests internally as
well.
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
fs/aio.c | 136 +++++++++++++++++++++++++++++++++++++++++++++----------
1 file changed, 113 insertions(+), 23 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index f7a49abc7694..182e2fc6ec82 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -230,6 +230,21 @@ struct aio_kiocb {
};
};
+struct aio_submit_state {
+ struct kioctx *ctx;
+
+ struct blk_plug plug;
+#ifdef CONFIG_BLOCK
+ struct blk_plug_cb plug_cb;
+#endif
+
+ /*
+ * Polled iocbs that have been submitted, but not added to the ctx yet
+ */
+ struct list_head req_list;
+ unsigned int req_count;
+};
+
/*------ sysctl variables----*/
static DEFINE_SPINLOCK(aio_nr_lock);
unsigned long aio_nr; /* current system wide number of aio requests */
@@ -247,6 +262,15 @@ static const struct address_space_operations aio_ctx_aops;
static const unsigned int iocb_page_shift =
ilog2(PAGE_SIZE / sizeof(struct iocb));
+/*
+ * We rely on block level unplugs to flush pending requests, if we schedule
+ */
+#ifdef CONFIG_BLOCK
+static const bool aio_use_state_req_list = true;
+#else
+static const bool aio_use_state_req_list = false;
+#endif
+
static void aio_useriocb_free(struct kioctx *);
static void aio_iopoll_reap_events(struct kioctx *);
@@ -1851,13 +1875,28 @@ static inline void aio_rw_done(struct kiocb *req, ssize_t ret)
}
}
+/*
+ * Called either at the end of IO submission, or through a plug callback
+ * because we're going to schedule. Moves out local batch of requests to
+ * the ctx poll list, so they can be found for polling + reaping.
+ */
+static void aio_flush_state_reqs(struct kioctx *ctx,
+ struct aio_submit_state *state)
+{
+ spin_lock(&ctx->poll_lock);
+ list_splice_tail_init(&state->req_list, &ctx->poll_submitted);
+ spin_unlock(&ctx->poll_lock);
+ state->req_count = 0;
+}
+
/*
* After the iocb has been issued, it's safe to be found on the poll list.
* Adding the kiocb to the list AFTER submission ensures that we don't
* find it from a io_getevents() thread before the issuer is done accessing
* the kiocb cookie.
*/
-static void aio_iopoll_iocb_issued(struct aio_kiocb *kiocb)
+static void aio_iopoll_iocb_issued(struct aio_submit_state *state,
+ struct aio_kiocb *kiocb)
{
/*
* For fast devices, IO may have already completed. If it has, add
@@ -1867,12 +1906,21 @@ static void aio_iopoll_iocb_issued(struct aio_kiocb *kiocb)
const int front_add = test_bit(IOCB_POLL_COMPLETED, &kiocb->ki_flags);
struct kioctx *ctx = kiocb->ki_ctx;
- spin_lock(&ctx->poll_lock);
- if (front_add)
- list_add(&kiocb->ki_list, &ctx->poll_submitted);
- else
- list_add_tail(&kiocb->ki_list, &ctx->poll_submitted);
- spin_unlock(&ctx->poll_lock);
+ if (!state || !aio_use_state_req_list) {
+ spin_lock(&ctx->poll_lock);
+ if (front_add)
+ list_add(&kiocb->ki_list, &ctx->poll_submitted);
+ else
+ list_add_tail(&kiocb->ki_list, &ctx->poll_submitted);
+ spin_unlock(&ctx->poll_lock);
+ } else {
+ if (front_add)
+ list_add(&kiocb->ki_list, &state->req_list);
+ else
+ list_add_tail(&kiocb->ki_list, &state->req_list);
+ if (++state->req_count >= AIO_IOPOLL_BATCH)
+ aio_flush_state_reqs(ctx, state);
+ }
}
static ssize_t aio_read(struct aio_kiocb *kiocb, const struct iocb *iocb,
@@ -2168,7 +2216,8 @@ static ssize_t aio_poll(struct aio_kiocb *aiocb, const struct iocb *iocb)
}
static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
- struct iocb __user *user_iocb, bool compat)
+ struct iocb __user *user_iocb,
+ struct aio_submit_state *state, bool compat)
{
struct aio_kiocb *req;
ssize_t ret;
@@ -2272,7 +2321,7 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
ret = -EAGAIN;
goto out_put_req;
}
- aio_iopoll_iocb_issued(req);
+ aio_iopoll_iocb_issued(state, req);
}
return 0;
out_put_req:
@@ -2286,7 +2335,7 @@ static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb,
}
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
- bool compat)
+ struct aio_submit_state *state, bool compat)
{
struct iocb iocb, *iocbp;
@@ -2303,7 +2352,44 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
iocbp = &iocb;
}
- return __io_submit_one(ctx, iocbp, user_iocb, compat);
+ return __io_submit_one(ctx, iocbp, user_iocb, state, compat);
+}
+
+#ifdef CONFIG_BLOCK
+static void aio_state_unplug(struct blk_plug_cb *cb, bool from_schedule)
+{
+ struct aio_submit_state *state;
+
+ state = container_of(cb, struct aio_submit_state, plug_cb);
+ if (!list_empty(&state->req_list))
+ aio_flush_state_reqs(state->ctx, state);
+}
+#endif
+
+/*
+ * Batched submission is done, ensure local IO is flushed out.
+ */
+static void aio_submit_state_end(struct aio_submit_state *state)
+{
+ blk_finish_plug(&state->plug);
+ if (!list_empty(&state->req_list))
+ aio_flush_state_reqs(state->ctx, state);
+}
+
+/*
+ * Start submission side cache.
+ */
+static void aio_submit_state_start(struct aio_submit_state *state,
+ struct kioctx *ctx)
+{
+ state->ctx = ctx;
+ INIT_LIST_HEAD(&state->req_list);
+ state->req_count = 0;
+#ifdef CONFIG_BLOCK
+ state->plug_cb.callback = aio_state_unplug;
+ blk_start_plug(&state->plug);
+ list_add(&state->plug_cb.list, &state->plug.cb_list);
+#endif
}
/*
@@ -2327,10 +2413,10 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
struct iocb __user * __user *, iocbpp)
{
+ struct aio_submit_state state, *statep = NULL;
struct kioctx *ctx;
long ret = 0;
int i = 0;
- struct blk_plug plug;
if (unlikely(nr < 0))
return -EINVAL;
@@ -2344,8 +2430,10 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
if (nr > ctx->nr_events)
nr = ctx->nr_events;
- if (nr > AIO_PLUG_THRESHOLD)
- blk_start_plug(&plug);
+ if (nr > AIO_PLUG_THRESHOLD) {
+ aio_submit_state_start(&state, ctx);
+ statep = &state;
+ }
for (i = 0; i < nr; i++) {
struct iocb __user *user_iocb;
@@ -2354,12 +2442,12 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
break;
}
- ret = io_submit_one(ctx, user_iocb, false);
+ ret = io_submit_one(ctx, user_iocb, statep, false);
if (ret)
break;
}
- if (nr > AIO_PLUG_THRESHOLD)
- blk_finish_plug(&plug);
+ if (statep)
+ aio_submit_state_end(statep);
percpu_ref_put(&ctx->users);
return i ? i : ret;
@@ -2369,10 +2457,10 @@ SYSCALL_DEFINE3(io_submit, aio_context_t, ctx_id, long, nr,
COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
int, nr, compat_uptr_t __user *, iocbpp)
{
+ struct aio_submit_state state, *statep = NULL;
struct kioctx *ctx;
long ret = 0;
int i = 0;
- struct blk_plug plug;
if (unlikely(nr < 0))
return -EINVAL;
@@ -2386,8 +2474,10 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
if (nr > ctx->nr_events)
nr = ctx->nr_events;
- if (nr > AIO_PLUG_THRESHOLD)
- blk_start_plug(&plug);
+ if (nr > AIO_PLUG_THRESHOLD) {
+ aio_submit_state_start(&state, ctx);
+ statep = &state;
+ }
for (i = 0; i < nr; i++) {
compat_uptr_t user_iocb;
@@ -2396,12 +2486,12 @@ COMPAT_SYSCALL_DEFINE3(io_submit, compat_aio_context_t, ctx_id,
break;
}
- ret = io_submit_one(ctx, compat_ptr(user_iocb), true);
+ ret = io_submit_one(ctx, compat_ptr(user_iocb), statep, true);
if (ret)
break;
}
- if (nr > AIO_PLUG_THRESHOLD)
- blk_finish_plug(&plug);
+ if (statep)
+ aio_submit_state_end(statep);
percpu_ref_put(&ctx->users);
return i ? i : ret;
--
2.17.1
next prev parent reply other threads:[~2018-11-30 16:57 UTC|newest]
Thread overview: 59+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-11-30 16:56 [PATCHSET v4] Support for polled aio Jens Axboe
2018-11-30 16:56 ` [PATCH 01/27] aio: fix failure to put the file pointer Jens Axboe
2018-11-30 17:07 ` Bart Van Assche
2018-11-30 17:08 ` Jens Axboe
2018-11-30 17:24 ` Bart Van Assche
2018-11-30 16:56 ` [PATCH 02/27] aio: clear IOCB_HIPRI Jens Axboe
2018-11-30 17:13 ` Christoph Hellwig
2018-11-30 17:14 ` Jens Axboe
2018-12-04 14:46 ` Christoph Hellwig
2018-12-04 16:40 ` Jens Axboe
2018-11-30 16:56 ` [PATCH 03/27] fs: add an iopoll method to struct file_operations Jens Axboe
2018-11-30 16:56 ` [PATCH 04/27] block: wire up block device iopoll method Jens Axboe
2018-11-30 16:56 ` [PATCH 05/27] block: ensure that async polled IO is marked REQ_NOWAIT Jens Axboe
2018-11-30 17:12 ` Bart Van Assche
2018-11-30 17:17 ` Jens Axboe
2018-12-04 14:48 ` Christoph Hellwig
2018-12-04 18:13 ` Jens Axboe
2018-11-30 16:56 ` [PATCH 06/27] iomap: wire up the iopoll method Jens Axboe
2018-11-30 16:56 ` [PATCH 07/27] iomap: ensure that async polled IO is marked REQ_NOWAIT Jens Axboe
2018-11-30 16:56 ` [PATCH 08/27] aio: use assigned completion handler Jens Axboe
2018-11-30 16:56 ` [PATCH 09/27] aio: separate out ring reservation from req allocation Jens Axboe
2018-11-30 16:56 ` [PATCH 10/27] aio: don't zero entire aio_kiocb aio_get_req() Jens Axboe
2018-12-04 14:49 ` Christoph Hellwig
2018-12-04 15:27 ` Jens Axboe
2018-11-30 16:56 ` [PATCH 11/27] aio: only use blk plugs for > 2 depth submissions Jens Axboe
2018-12-04 14:50 ` Christoph Hellwig
2018-11-30 16:56 ` [PATCH 12/27] aio: use iocb_put() instead of open coding it Jens Axboe
2018-12-04 14:50 ` Christoph Hellwig
2018-11-30 16:56 ` [PATCH 13/27] aio: split out iocb copy from io_submit_one() Jens Axboe
2018-11-30 16:56 ` [PATCH 14/27] aio: abstract out io_event filler helper Jens Axboe
2018-11-30 16:56 ` [PATCH 15/27] aio: add io_setup2() system call Jens Axboe
2018-11-30 16:56 ` [PATCH 16/27] aio: add support for having user mapped iocbs Jens Axboe
2018-11-30 16:56 ` [PATCH 17/27] aio: support for IO polling Jens Axboe
2018-11-30 16:56 ` Jens Axboe [this message]
2018-11-30 16:56 ` [PATCH 19/27] fs: add fget_many() and fput_many() Jens Axboe
2018-11-30 16:56 ` [PATCH 20/27] aio: use fget/fput_many() for file references Jens Axboe
2018-11-30 16:56 ` [PATCH 21/27] aio: split iocb init from allocation Jens Axboe
2018-11-30 16:56 ` [PATCH 22/27] aio: batch aio_kiocb allocation Jens Axboe
2018-11-30 16:56 ` [PATCH 23/27] block: add BIO_HOLD_PAGES flag Jens Axboe
2018-11-30 16:56 ` [PATCH 24/27] block: implement bio helper to add iter kvec pages to bio Jens Axboe
2018-11-30 19:21 ` Al Viro
2018-11-30 20:15 ` Jens Axboe
2018-11-30 20:32 ` Jens Axboe
2018-11-30 21:11 ` Al Viro
2018-11-30 21:16 ` Jens Axboe
2018-11-30 21:25 ` Al Viro
2018-11-30 21:34 ` Jens Axboe
2018-11-30 22:06 ` Jens Axboe
2018-12-04 14:55 ` Christoph Hellwig
2018-12-04 15:25 ` Jens Axboe
2018-11-30 16:56 ` [PATCH 25/27] fs: add support for mapping an ITER_KVEC for O_DIRECT Jens Axboe
2018-11-30 16:56 ` [PATCH 26/27] iov_iter: add import_kvec() Jens Axboe
2018-11-30 19:17 ` Al Viro
2018-11-30 20:15 ` Jens Axboe
2018-11-30 16:56 ` [PATCH 27/27] aio: add support for pre-mapped user IO buffers Jens Axboe
2018-11-30 21:44 ` Jeff Moyer
2018-11-30 21:57 ` Jens Axboe
2018-11-30 22:04 ` Jeff Moyer
2018-11-30 22:11 ` Jens Axboe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20181130165646.27341-19-axboe@kernel.dk \
--to=axboe@kernel.dk \
--cc=hch@lst.de \
--cc=linux-aio@kvack.org \
--cc=linux-block@vger.kernel.org \
--cc=linux-fsdevel@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).