* [PATCH v3 0/6] add persistent submission state
@ 2020-01-31 22:15 Pavel Begunkov
  2020-01-31 22:15 ` [PATCH v3 1/6] io_uring: always pass non-null io_submit_state Pavel Begunkov
                   ` (6 more replies)
  0 siblings, 7 replies; 11+ messages in thread
From: Pavel Begunkov @ 2020-01-31 22:15 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

Apart from the unrelated first patch, this series pursues two goals:

1. start preparing io_uring to move resource handling into
opcode-specific functions

2. take the first step towards long-standing optimisation ideas

Basically, it embeds struct io_submit_state into ctx, making it
easily accessible and persistent, and then builds on that.
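
For orientation, the end state after the whole series looks roughly
like this (a condensed sketch assembled from the patches below; the
field set is abridged):

    struct io_submit_state {
        /* io_kiocb alloc cache, consumed LIFO after 6/6 */
        void            *reqs[IO_IOPOLL_BATCH];
        unsigned int    free_reqs;

        /* file reference cache */
        struct file     *file;
        unsigned int    fd, has_refs, used_refs, ios_left;

        /* per-submission values, moved here by 3/6 and 4/6 */
        struct file     *ring_file;
        int             ring_fd;
        struct io_kiocb *link;
    };

    struct io_ring_ctx {
        ...
        /* protected by uring_lock */
        struct io_submit_state  submit_state;
    };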

v2: rebase
v3: drop the mm-related patch and rebase

Pavel Begunkov (6):
  io_uring: always pass non-null io_submit_state
  io_uring: place io_submit_state into ctx
  io_uring: move ring_fd into io_submit_state
  io_uring: move *link into io_submit_state
  io_uring: persistent req bulk allocation cache
  io_uring: optimise req bulk allocation cache

 fs/io_uring.c | 166 +++++++++++++++++++++++++++-----------------------
 1 file changed, 89 insertions(+), 77 deletions(-)

-- 
2.24.0



* [PATCH v3 1/6] io_uring: always pass non-null io_submit_state
  2020-01-31 22:15 [PATCH v3 0/6] add persistent submission state Pavel Begunkov
@ 2020-01-31 22:15 ` Pavel Begunkov
  2020-01-31 22:15 ` [PATCH v3 2/6] io_uring: place io_submit_state into ctx Pavel Begunkov
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 11+ messages in thread
From: Pavel Begunkov @ 2020-01-31 22:15 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

There is more harm than merit in conditionally passing
io_submit_state, so always pass a non-null pointer. This shouldn't
affect performance, and even if it does, the gap will be closed by the
following commits. Also, in preparation for them, move plugging out of
io_submit_state.
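
The resulting io_submit_sqes() flow, condensed from the hunks below:

    struct blk_plug plug;
    struct io_submit_state state;

    io_submit_state_start(&state, nr);
    if (nr > IO_PLUG_THRESHOLD)
        blk_start_plug(&plug);

    /* ... submit SQEs, passing &state unconditionally ... */

    io_submit_state_end(&state);
    if (nr > IO_PLUG_THRESHOLD)
        blk_finish_plug(&plug);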

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 33 ++++++++++++---------------------
 1 file changed, 12 insertions(+), 21 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 05b9fb0764e1..6f3998e6475a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -579,8 +579,6 @@ struct io_kiocb {
 #define IO_IOPOLL_BATCH			8
 
 struct io_submit_state {
-	struct blk_plug		plug;
-
 	/*
 	 * io_kiocb alloc cache
 	 */
@@ -1166,11 +1164,7 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct io_kiocb *req;
 
-	if (!state) {
-		req = kmem_cache_alloc(req_cachep, gfp);
-		if (unlikely(!req))
-			goto fallback;
-	} else if (!state->free_reqs) {
+	if (!state->free_reqs) {
 		size_t sz;
 		int ret;
 
@@ -1813,9 +1807,6 @@ static void io_file_put(struct io_submit_state *state)
  */
 static struct file *io_file_get(struct io_submit_state *state, int fd)
 {
-	if (!state)
-		return fget(fd);
-
 	if (state->file) {
 		if (state->fd == fd) {
 			state->used_refs++;
@@ -4834,7 +4825,6 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
  */
 static void io_submit_state_end(struct io_submit_state *state)
 {
-	blk_finish_plug(&state->plug);
 	io_file_put(state);
 	if (state->free_reqs)
 		kmem_cache_free_bulk(req_cachep, state->free_reqs,
@@ -4847,7 +4837,6 @@ static void io_submit_state_end(struct io_submit_state *state)
 static void io_submit_state_start(struct io_submit_state *state,
 				  unsigned int max_ios)
 {
-	blk_start_plug(&state->plug);
 	state->free_reqs = 0;
 	state->file = NULL;
 	state->ios_left = max_ios;
@@ -4913,7 +4902,8 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			  struct file *ring_file, int ring_fd,
 			  struct mm_struct **mm, bool async)
 {
-	struct io_submit_state state, *statep = NULL;
+	struct blk_plug plug;
+	struct io_submit_state state;
 	struct io_kiocb *link = NULL;
 	int i, submitted = 0;
 	bool mm_fault = false;
@@ -4931,10 +4921,9 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
 
-	if (nr > IO_PLUG_THRESHOLD) {
-		io_submit_state_start(&state, nr);
-		statep = &state;
-	}
+	io_submit_state_start(&state, nr);
+	if (nr > IO_PLUG_THRESHOLD)
+		blk_start_plug(&plug);
 
 	ctx->ring_fd = ring_fd;
 	ctx->ring_file = ring_file;
@@ -4943,7 +4932,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
 
-		req = io_get_req(ctx, statep);
+		req = io_get_req(ctx, &state);
 		if (unlikely(!req)) {
 			if (!submitted)
 				submitted = -EAGAIN;
@@ -4976,7 +4965,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		req->needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
 						true, async);
-		if (!io_submit_sqe(req, sqe, statep, &link))
+		if (!io_submit_sqe(req, sqe, &state, &link))
 			break;
 	}
 
@@ -4987,8 +4976,10 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	}
 	if (link)
 		io_queue_link_head(link);
-	if (statep)
-		io_submit_state_end(&state);
+
+	io_submit_state_end(&state);
+	if (nr > IO_PLUG_THRESHOLD)
+		blk_finish_plug(&plug);
 
 	 /* Commit SQ ring head once we've consumed and submitted all SQEs */
 	io_commit_sqring(ctx);
-- 
2.24.0



* [PATCH v3 2/6] io_uring: place io_submit_state into ctx
  2020-01-31 22:15 [PATCH v3 0/6] add persistent submission state Pavel Begunkov
  2020-01-31 22:15 ` [PATCH v3 1/6] io_uring: always pass non-null io_submit_state Pavel Begunkov
@ 2020-01-31 22:15 ` Pavel Begunkov
  2020-01-31 22:15 ` [PATCH v3 3/6] io_uring: move ring_fd into io_submit_state Pavel Begunkov
                   ` (4 subsequent siblings)
  6 siblings, 0 replies; 11+ messages in thread
From: Pavel Begunkov @ 2020-01-31 22:15 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

io_submit_state is used only during submission and only while holding
ctx->uring_lock, so a single instance is in use at any given time. Move
it into struct io_ring_ctx, so it:
- doesn't consume on-stack memory
- persists across io_uring_enter()
- is available without being passed through the call stack

The last point is very useful for letting opcode handlers manage their
resources themselves, as splice would. It's also a base for other
hackish optimisations in the future.
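
With the state embedded, any function that has a ctx (or a req, via
req->ctx) can reach it directly, as io_get_req() does after this patch:

    static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
    {
        struct io_submit_state *state = &ctx->submit_state;
        ...
    }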

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 75 +++++++++++++++++++++++++++------------------------
 1 file changed, 40 insertions(+), 35 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6f3998e6475a..6109969709ff 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -197,6 +197,27 @@ struct fixed_file_data {
 	struct completion		done;
 };
 
+#define IO_PLUG_THRESHOLD		2
+#define IO_IOPOLL_BATCH			8
+
+struct io_submit_state {
+	/*
+	 * io_kiocb alloc cache
+	 */
+	void			*reqs[IO_IOPOLL_BATCH];
+	unsigned int		free_reqs;
+	unsigned int		cur_req;
+
+	/*
+	 * File reference cache
+	 */
+	struct file		*file;
+	unsigned int		fd;
+	unsigned int		has_refs;
+	unsigned int		used_refs;
+	unsigned int		ios_left;
+};
+
 struct io_ring_ctx {
 	struct {
 		struct percpu_ref	refs;
@@ -310,6 +331,9 @@ struct io_ring_ctx {
 		spinlock_t		inflight_lock;
 		struct list_head	inflight_list;
 	} ____cacheline_aligned_in_smp;
+
+	/* protected by uring_lock */
+	struct io_submit_state		submit_state;
 };
 
 /*
@@ -575,27 +599,6 @@ struct io_kiocb {
 	struct io_wq_work	work;
 };
 
-#define IO_PLUG_THRESHOLD		2
-#define IO_IOPOLL_BATCH			8
-
-struct io_submit_state {
-	/*
-	 * io_kiocb alloc cache
-	 */
-	void			*reqs[IO_IOPOLL_BATCH];
-	unsigned		int free_reqs;
-	unsigned		int cur_req;
-
-	/*
-	 * File reference cache
-	 */
-	struct file		*file;
-	unsigned int		fd;
-	unsigned int		has_refs;
-	unsigned int		used_refs;
-	unsigned int		ios_left;
-};
-
 struct io_op_def {
 	/* needs req->io allocated for deferral/async */
 	unsigned		async_ctx : 1;
@@ -1158,11 +1161,11 @@ static struct io_kiocb *io_get_fallback_req(struct io_ring_ctx *ctx)
 	return NULL;
 }
 
-static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
-				   struct io_submit_state *state)
+static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
 {
 	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
 	struct io_kiocb *req;
+	struct io_submit_state *state = &ctx->submit_state;
 
 	if (!state->free_reqs) {
 		size_t sz;
@@ -4475,10 +4478,10 @@ static inline struct file *io_file_from_index(struct io_ring_ctx *ctx,
 	return table->files[index & IORING_FILE_TABLE_MASK];;
 }
 
-static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
-			   const struct io_uring_sqe *sqe)
+static int io_req_set_file(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_submit_state *state = &ctx->submit_state;
 	unsigned flags;
 	int fd;
 
@@ -4717,7 +4720,7 @@ static inline void io_queue_link_head(struct io_kiocb *req)
 				IOSQE_IO_HARDLINK | IOSQE_ASYNC)
 
 static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-			  struct io_submit_state *state, struct io_kiocb **link)
+			  struct io_kiocb **link)
 {
 	const struct cred *old_creds = NULL;
 	struct io_ring_ctx *ctx = req->ctx;
@@ -4748,7 +4751,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	req->flags |= sqe_flags & (IOSQE_IO_DRAIN|IOSQE_IO_HARDLINK|
 					IOSQE_ASYNC);
 
-	ret = io_req_set_file(state, req, sqe);
+	ret = io_req_set_file(req, sqe);
 	if (unlikely(ret)) {
 err_req:
 		io_cqring_add_event(req, ret);
@@ -4823,8 +4826,10 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 /*
  * Batched submission is done, ensure local IO is flushed out.
  */
-static void io_submit_state_end(struct io_submit_state *state)
+static void io_submit_end(struct io_ring_ctx *ctx)
 {
+	struct io_submit_state *state = &ctx->submit_state;
+
 	io_file_put(state);
 	if (state->free_reqs)
 		kmem_cache_free_bulk(req_cachep, state->free_reqs,
@@ -4834,9 +4839,10 @@ static void io_submit_state_end(struct io_submit_state *state)
 /*
  * Start submission side cache.
  */
-static void io_submit_state_start(struct io_submit_state *state,
-				  unsigned int max_ios)
+static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios)
 {
+	struct io_submit_state *state = &ctx->submit_state;
+
 	state->free_reqs = 0;
 	state->file = NULL;
 	state->ios_left = max_ios;
@@ -4903,7 +4909,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			  struct mm_struct **mm, bool async)
 {
 	struct blk_plug plug;
-	struct io_submit_state state;
 	struct io_kiocb *link = NULL;
 	int i, submitted = 0;
 	bool mm_fault = false;
@@ -4921,7 +4926,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
 
-	io_submit_state_start(&state, nr);
+	io_submit_start(ctx, nr);
 	if (nr > IO_PLUG_THRESHOLD)
 		blk_start_plug(&plug);
 
@@ -4932,7 +4937,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
 
-		req = io_get_req(ctx, &state);
+		req = io_get_req(ctx);
 		if (unlikely(!req)) {
 			if (!submitted)
 				submitted = -EAGAIN;
@@ -4965,7 +4970,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		req->needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
 						true, async);
-		if (!io_submit_sqe(req, sqe, &state, &link))
+		if (!io_submit_sqe(req, sqe, &link))
 			break;
 	}
 
@@ -4977,7 +4982,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	if (link)
 		io_queue_link_head(link);
 
-	io_submit_state_end(&state);
+	io_submit_end(ctx);
 	if (nr > IO_PLUG_THRESHOLD)
 		blk_finish_plug(&plug);
 
-- 
2.24.0



* [PATCH v3 3/6] io_uring: move ring_fd into io_submit_state
  2020-01-31 22:15 [PATCH v3 0/6] add persistent submission state Pavel Begunkov
  2020-01-31 22:15 ` [PATCH v3 1/6] io_uring: always pass non-null io_submit_state Pavel Begunkov
  2020-01-31 22:15 ` [PATCH v3 2/6] io_uring: place io_submit_state into ctx Pavel Begunkov
@ 2020-01-31 22:15 ` Pavel Begunkov
  2020-01-31 22:15 ` [PATCH v3 4/6] io_uring: move *link " Pavel Begunkov
                   ` (3 subsequent siblings)
  6 siblings, 0 replies; 11+ messages in thread
From: Pavel Begunkov @ 2020-01-31 22:15 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

ring_fd and ring_file are set per submission, so move them into
the submission state.
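
After this patch, the per-submission values are handed to
io_submit_start() rather than stored in ctx directly:

    static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios,
                                struct file *ring_file, int ring_fd)
    {
        struct io_submit_state *state = &ctx->submit_state;
        ...
        state->ring_file = ring_file;
        state->ring_fd = ring_fd;
    }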

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6109969709ff..725e852e22c5 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -216,6 +216,9 @@ struct io_submit_state {
 	unsigned int		has_refs;
 	unsigned int		used_refs;
 	unsigned int		ios_left;
+
+	struct file		*ring_file;
+	int			ring_fd;
 };
 
 struct io_ring_ctx {
@@ -274,8 +277,6 @@ struct io_ring_ctx {
 	 */
 	struct fixed_file_data	*file_data;
 	unsigned		nr_user_files;
-	int 			ring_fd;
-	struct file 		*ring_file;
 
 	/* if used, fixed mapped user buffers */
 	unsigned		nr_user_bufs;
@@ -2822,7 +2823,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 
 	req->close.fd = READ_ONCE(sqe->fd);
 	if (req->file->f_op == &io_uring_fops ||
-	    req->close.fd == req->ctx->ring_fd)
+	    req->close.fd == req->ctx->submit_state.ring_fd)
 		return -EBADF;
 
 	return 0;
@@ -4517,10 +4518,11 @@ static int io_grab_files(struct io_kiocb *req)
 {
 	int ret = -EBADF;
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_submit_state *state = &ctx->submit_state;
 
 	if (req->work.files)
 		return 0;
-	if (!ctx->ring_file)
+	if (!state->ring_file)
 		return -EBADF;
 
 	rcu_read_lock();
@@ -4531,7 +4533,7 @@ static int io_grab_files(struct io_kiocb *req)
 	 * the fd has changed since we started down this path, and disallow
 	 * this operation if it has.
 	 */
-	if (fcheck(ctx->ring_fd) == ctx->ring_file) {
+	if (fcheck(state->ring_fd) == state->ring_file) {
 		list_add(&req->inflight_entry, &ctx->inflight_list);
 		req->flags |= REQ_F_INFLIGHT;
 		req->work.files = current->files;
@@ -4839,13 +4841,17 @@ static void io_submit_end(struct io_ring_ctx *ctx)
 /*
  * Start submission side cache.
  */
-static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios)
+static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios,
+			    struct file *ring_file, int ring_fd)
 {
 	struct io_submit_state *state = &ctx->submit_state;
 
 	state->free_reqs = 0;
 	state->file = NULL;
 	state->ios_left = max_ios;
+
+	state->ring_file = ring_file;
+	state->ring_fd = ring_fd;
 }
 
 static void io_commit_sqring(struct io_ring_ctx *ctx)
@@ -4926,13 +4932,10 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 	if (!percpu_ref_tryget_many(&ctx->refs, nr))
 		return -EAGAIN;
 
-	io_submit_start(ctx, nr);
+	io_submit_start(ctx, nr, ring_file, ring_fd);
 	if (nr > IO_PLUG_THRESHOLD)
 		blk_start_plug(&plug);
 
-	ctx->ring_fd = ring_fd;
-	ctx->ring_file = ring_file;
-
 	for (i = 0; i < nr; i++) {
 		const struct io_uring_sqe *sqe;
 		struct io_kiocb *req;
-- 
2.24.0



* [PATCH v3 4/6] io_uring: move *link into io_submit_state
  2020-01-31 22:15 [PATCH v3 0/6] add persistent submission state Pavel Begunkov
                   ` (2 preceding siblings ...)
  2020-01-31 22:15 ` [PATCH v3 3/6] io_uring: move ring_fd into io_submit_state Pavel Begunkov
@ 2020-01-31 22:15 ` Pavel Begunkov
  2020-01-31 22:15 ` [PATCH v3 5/6] io_uring: persistent req bulk allocation cache Pavel Begunkov
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 11+ messages in thread
From: Pavel Begunkov @ 2020-01-31 22:15 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

It's more convenient to keep the link head in the submission state
than to pass it around as a pointer, so move it there.
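
The link head now lives in ctx->submit_state, and any unsubmitted link
is flushed when the batch ends (condensed from the hunks below):

    /* in io_submit_end() */
    if (state->link)
        io_queue_link_head(state->link);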

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 725e852e22c5..cbe639caa096 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -219,6 +219,8 @@ struct io_submit_state {
 
 	struct file		*ring_file;
 	int			ring_fd;
+
+	struct io_kiocb		*link;
 };
 
 struct io_ring_ctx {
@@ -4721,11 +4723,11 @@ static inline void io_queue_link_head(struct io_kiocb *req)
 #define SQE_VALID_FLAGS	(IOSQE_FIXED_FILE|IOSQE_IO_DRAIN|IOSQE_IO_LINK|	\
 				IOSQE_IO_HARDLINK | IOSQE_ASYNC)
 
-static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
-			  struct io_kiocb **link)
+static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	const struct cred *old_creds = NULL;
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_submit_state *state = &ctx->submit_state;
 	unsigned int sqe_flags;
 	int ret, id;
 
@@ -4770,8 +4772,8 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 	 * submitted sync once the chain is complete. If none of those
 	 * conditions are true (normal request), then just queue it.
 	 */
-	if (*link) {
-		struct io_kiocb *head = *link;
+	if (state->link) {
+		struct io_kiocb *head = state->link;
 
 		/*
 		 * Taking sequential execution of a link, draining both sides
@@ -4801,7 +4803,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		/* last request of a link, enqueue the link */
 		if (!(sqe_flags & (IOSQE_IO_LINK|IOSQE_IO_HARDLINK))) {
 			io_queue_link_head(head);
-			*link = NULL;
+			state->link = NULL;
 		}
 	} else {
 		if (unlikely(ctx->drain_next)) {
@@ -4814,7 +4816,7 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 			ret = io_req_defer_prep(req, sqe);
 			if (ret)
 				req->flags |= REQ_F_FAIL_LINK;
-			*link = req;
+			state->link = req;
 		} else {
 			io_queue_sqe(req, sqe);
 		}
@@ -4836,6 +4838,8 @@ static void io_submit_end(struct io_ring_ctx *ctx)
 	if (state->free_reqs)
 		kmem_cache_free_bulk(req_cachep, state->free_reqs,
 					&state->reqs[state->cur_req]);
+	if (state->link)
+		io_queue_link_head(state->link);
 }
 
 /*
@@ -4852,6 +4856,7 @@ static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios,
 
 	state->ring_file = ring_file;
 	state->ring_fd = ring_fd;
+	state->link = NULL;
 }
 
 static void io_commit_sqring(struct io_ring_ctx *ctx)
@@ -4915,7 +4920,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 			  struct mm_struct **mm, bool async)
 {
 	struct blk_plug plug;
-	struct io_kiocb *link = NULL;
 	int i, submitted = 0;
 	bool mm_fault = false;
 
@@ -4973,7 +4977,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 		req->needs_fixed_file = async;
 		trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
 						true, async);
-		if (!io_submit_sqe(req, sqe, &link))
+		if (!io_submit_sqe(req, sqe))
 			break;
 	}
 
@@ -4982,8 +4986,6 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
 
 		percpu_ref_put_many(&ctx->refs, nr - ref_used);
 	}
-	if (link)
-		io_queue_link_head(link);
 
 	io_submit_end(ctx);
 	if (nr > IO_PLUG_THRESHOLD)
-- 
2.24.0



* [PATCH v3 5/6] io_uring: persistent req bulk allocation cache
  2020-01-31 22:15 [PATCH v3 0/6] add persistent submission state Pavel Begunkov
                   ` (3 preceding siblings ...)
  2020-01-31 22:15 ` [PATCH v3 4/6] io_uring: move *link " Pavel Begunkov
@ 2020-01-31 22:15 ` Pavel Begunkov
  2020-01-31 22:15 ` [PATCH v3 6/6] io_uring: optimise " Pavel Begunkov
  2020-01-31 22:22 ` [PATCH v3 0/6] add persistent submission state Jens Axboe
  6 siblings, 0 replies; 11+ messages in thread
From: Pavel Begunkov @ 2020-01-31 22:15 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

Save bulk-allocated requests across io_uring_enter() calls, so that
lower queue depths can also benefit from bulk allocation. This is not
much of an optimisation by itself, and with the current cache sizes it
would probably only affect offloaded ~QD=1 workloads.
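
The cache is now torn down with the ring instead of at the end of each
submission, so leftover requests survive io_uring_enter():

    static void io_clear_submit_state(struct io_ring_ctx *ctx)
    {
        struct io_submit_state *state = &ctx->submit_state;

        if (state->free_reqs)
            kmem_cache_free_bulk(req_cachep, state->free_reqs,
                                 &state->reqs[state->cur_req]);
    }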

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 28 ++++++++++++++++++++++------
 1 file changed, 22 insertions(+), 6 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index cbe639caa096..66742d5772fa 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -845,6 +845,23 @@ static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p)
 	return NULL;
 }
 
+static void io_init_submit_state(struct io_ring_ctx *ctx)
+{
+	struct io_submit_state *state = &ctx->submit_state;
+
+	state->free_reqs = 0;
+	state->cur_req = 0;
+}
+
+static void io_clear_submit_state(struct io_ring_ctx *ctx)
+{
+	struct io_submit_state *state = &ctx->submit_state;
+
+	if (state->free_reqs)
+		kmem_cache_free_bulk(req_cachep, state->free_reqs,
+					&state->reqs[state->cur_req]);
+}
+
 static inline bool __req_need_defer(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1171,10 +1188,9 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 
 	if (!state->free_reqs) {
-		size_t sz;
+		size_t sz = ARRAY_SIZE(state->reqs);
 		int ret;
 
-		sz = min_t(size_t, state->ios_left, ARRAY_SIZE(state->reqs));
 		ret = kmem_cache_alloc_bulk(req_cachep, gfp, sz, state->reqs);
 
 		/*
@@ -4835,9 +4851,6 @@ static void io_submit_end(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 
 	io_file_put(state);
-	if (state->free_reqs)
-		kmem_cache_free_bulk(req_cachep, state->free_reqs,
-					&state->reqs[state->cur_req]);
 	if (state->link)
 		io_queue_link_head(state->link);
 }
@@ -4850,7 +4863,6 @@ static void io_submit_start(struct io_ring_ctx *ctx, unsigned int max_ios,
 {
 	struct io_submit_state *state = &ctx->submit_state;
 
-	state->free_reqs = 0;
 	state->file = NULL;
 	state->ios_left = max_ios;
 
@@ -6247,6 +6259,8 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 	if (ctx->sqo_mm)
 		mmdrop(ctx->sqo_mm);
 
+	io_clear_submit_state(ctx);
+
 	io_iopoll_reap_events(ctx);
 	io_sqe_buffer_unregister(ctx);
 	io_sqe_files_unregister(ctx);
@@ -6767,6 +6781,8 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p)
 	if (ret)
 		goto err;
 
+	io_init_submit_state(ctx);
+
 	ret = io_sq_offload_start(ctx, p);
 	if (ret)
 		goto err;
-- 
2.24.0



* [PATCH v3 6/6] io_uring: optimise req bulk allocation cache
  2020-01-31 22:15 [PATCH v3 0/6] add persistent submission state Pavel Begunkov
                   ` (4 preceding siblings ...)
  2020-01-31 22:15 ` [PATCH v3 5/6] io_uring: persistent req bulk allocation cache Pavel Begunkov
@ 2020-01-31 22:15 ` Pavel Begunkov
  2020-01-31 22:22 ` [PATCH v3 0/6] add persistent submission state Jens Axboe
  6 siblings, 0 replies; 11+ messages in thread
From: Pavel Begunkov @ 2020-01-31 22:15 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel

Traverse @reqs in struct io_submit_state backwards. This makes it
possible to remove cur_req from the struct and is easier to handle in
general.
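
free_reqs then doubles as the index of the next free request, turning
@reqs into a simple LIFO stack (condensed from the hunk below):

    /* refill */
    state->free_reqs = ret - 1;
    req = state->reqs[ret - 1];

    /* steady state */
    state->free_reqs--;
    req = state->reqs[state->free_reqs];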

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
---
 fs/io_uring.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 66742d5772fa..799e80e85027 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -206,7 +206,6 @@ struct io_submit_state {
 	 */
 	void			*reqs[IO_IOPOLL_BATCH];
 	unsigned int		free_reqs;
-	unsigned int		cur_req;
 
 	/*
 	 * File reference cache
@@ -850,7 +849,6 @@ static void io_init_submit_state(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 
 	state->free_reqs = 0;
-	state->cur_req = 0;
 }
 
 static void io_clear_submit_state(struct io_ring_ctx *ctx)
@@ -858,8 +856,7 @@ static void io_clear_submit_state(struct io_ring_ctx *ctx)
 	struct io_submit_state *state = &ctx->submit_state;
 
 	if (state->free_reqs)
-		kmem_cache_free_bulk(req_cachep, state->free_reqs,
-					&state->reqs[state->cur_req]);
+		kmem_cache_free_bulk(req_cachep, state->free_reqs, state->reqs);
 }
 
 static inline bool __req_need_defer(struct io_kiocb *req)
@@ -1204,12 +1201,10 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx)
 			ret = 1;
 		}
 		state->free_reqs = ret - 1;
-		state->cur_req = 1;
-		req = state->reqs[0];
+		req = state->reqs[ret - 1];
 	} else {
-		req = state->reqs[state->cur_req];
 		state->free_reqs--;
-		state->cur_req++;
+		req = state->reqs[state->free_reqs];
 	}
 
 got_it:
-- 
2.24.0



* Re: [PATCH v3 0/6] add persistent submission state
  2020-01-31 22:15 [PATCH v3 0/6] add persistent submission state Pavel Begunkov
                   ` (5 preceding siblings ...)
  2020-01-31 22:15 ` [PATCH v3 6/6] io_uring: optimise " Pavel Begunkov
@ 2020-01-31 22:22 ` Jens Axboe
  2020-01-31 22:32   ` Pavel Begunkov
  6 siblings, 1 reply; 11+ messages in thread
From: Jens Axboe @ 2020-01-31 22:22 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring, linux-kernel

On 1/31/20 3:15 PM, Pavel Begunkov wrote:
> Apart from the unrelated first patch, this series pursues two goals:
> 
> 1. start preparing io_uring to move resource handling into
> opcode-specific functions
> 
> 2. take the first step towards long-standing optimisation ideas
> 
> Basically, it embeds struct io_submit_state into ctx, making it
> easily accessible and persistent, and then builds on that.

Do you have any perf/latency numbers for this? Just curious if we
see any improvements on that front; cross-submit persistence of the
alloc caches should be a nice sync win, for example, or even help
peak IOPS by not having to replenish the pool for each batch.

I can try and run some here too.

-- 
Jens Axboe



* Re: [PATCH v3 0/6] add persistent submission state
  2020-01-31 22:22 ` [PATCH v3 0/6] add persistent submission state Jens Axboe
@ 2020-01-31 22:32   ` Pavel Begunkov
  2020-02-01  0:31     ` Pavel Begunkov
  0 siblings, 1 reply; 11+ messages in thread
From: Pavel Begunkov @ 2020-01-31 22:32 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel


On 01/02/2020 01:22, Jens Axboe wrote:
> On 1/31/20 3:15 PM, Pavel Begunkov wrote:
>> Apart from the unrelated first patch, this series pursues two goals:
>>
>> 1. start preparing io_uring to move resource handling into
>> opcode-specific functions
>>
>> 2. take the first step towards long-standing optimisation ideas
>>
>> Basically, it embeds struct io_submit_state into ctx, making it
>> easily accessible and persistent, and then builds on that.
> 
> Do you have any perf/latency numbers for this? Just curious if we
> see any improvements on that front; cross-submit persistence of the
> alloc caches should be a nice sync win, for example, or even help
> peak IOPS by not having to replenish the pool for each batch.
> 
> I can try and run some here too.
> 

I tested the first version, but my drive is too slow, so it was nops
only and hence no offloading. Honestly, there weren't any statistically
significant results. I'll rerun anyway.

I have a plan to reuse this for a tricky optimisation, but on second
thought I can just stash it until everything is done. That's not the
first thing on the TODO list and will take a while.


-- 
Pavel Begunkov



* Re: [PATCH v3 0/6] add persistent submission state
  2020-01-31 22:32   ` Pavel Begunkov
@ 2020-02-01  0:31     ` Pavel Begunkov
  2020-02-01  2:10       ` Jens Axboe
  0 siblings, 1 reply; 11+ messages in thread
From: Pavel Begunkov @ 2020-02-01  0:31 UTC (permalink / raw)
  To: Jens Axboe, io-uring, linux-kernel


On 01/02/2020 01:32, Pavel Begunkov wrote:
> On 01/02/2020 01:22, Jens Axboe wrote:
>> On 1/31/20 3:15 PM, Pavel Begunkov wrote:
>>> Apart from the unrelated first patch, this series pursues two goals:
>>>
>>> 1. start preparing io_uring to move resource handling into
>>> opcode-specific functions
>>>
>>> 2. take the first step towards long-standing optimisation ideas
>>>
>>> Basically, it embeds struct io_submit_state into ctx, making it
>>> easily accessible and persistent, and then builds on that.
>>
>> Do you have any perf/latency numbers for this? Just curious if we
>> see any improvements on that front; cross-submit persistence of the
>> alloc caches should be a nice sync win, for example, or even help
>> peak IOPS by not having to replenish the pool for each batch.
>>
>> I can try and run some here too.
>>
> 
> I tested the first version, but my drive is too slow, so it was nops
> only and hence no offloading. Honestly, there weren't any statistically
> significant results. I'll rerun anyway.
> 
> I have a plan to reuse this for a tricky optimisation, but on second
> thought I can just stash it until everything is done. That's not the
> first thing on the TODO list and will take a while.
> 

I've got numbers, but there is nothing really interesting. Throughput
is insignificantly better with the patches, but I'd need many more
experiments across reboots to confirm that.

Let's postpone the patchset for later.

-- 
Pavel Begunkov



* Re: [PATCH v3 0/6] add persistent submission state
  2020-02-01  0:31     ` Pavel Begunkov
@ 2020-02-01  2:10       ` Jens Axboe
  0 siblings, 0 replies; 11+ messages in thread
From: Jens Axboe @ 2020-02-01  2:10 UTC (permalink / raw)
  To: Pavel Begunkov, io-uring, linux-kernel

On 1/31/20 5:31 PM, Pavel Begunkov wrote:
> On 01/02/2020 01:32, Pavel Begunkov wrote:
>> On 01/02/2020 01:22, Jens Axboe wrote:
>>> On 1/31/20 3:15 PM, Pavel Begunkov wrote:
>>>> Apart from the unrelated first patch, this series pursues two goals:
>>>>
>>>> 1. start preparing io_uring to move resource handling into
>>>> opcode-specific functions
>>>>
>>>> 2. take the first step towards long-standing optimisation ideas
>>>>
>>>> Basically, it embeds struct io_submit_state into ctx, making it
>>>> easily accessible and persistent, and then builds on that.
>>>
>>> Do you have any perf/latency numbers for this? Just curious if we
>>> see any improvements on that front; cross-submit persistence of the
>>> alloc caches should be a nice sync win, for example, or even help
>>> peak IOPS by not having to replenish the pool for each batch.
>>>
>>> I can try and run some here too.
>>>
>>
>> I tested the first version, but my drive is too slow, so it was nops
>> only and hence no offloading. Honestly, there weren't any statistically
>> significant results. I'll rerun anyway.
>>
>> I have a plan to reuse this for a tricky optimisation, but on second
>> thought I can just stash it until everything is done. That's not the
>> first thing on the TODO list and will take a while.
>>
> 
> I've got numbers, but there is nothing really interesting. Throughput
> is insignificantly better with the patches, but I'd need many more
> experiments across reboots to confirm that.
> 
> Let's postpone the patchset for later.

Sounds fine to me; no need to do it unless it's a nice cleanup and/or
provides some nice improvements.

It would be great to see the splice stuff revamped, though :-)

-- 
Jens Axboe

