* [PATCHSET 0/2] Extend io_uring batch freeing
@ 2019-12-28 19:18 Jens Axboe
  2019-12-28 19:18 ` [PATCH 1/2] io_uring: wrap multi-req freeing in struct req_batch Jens Axboe
  2019-12-28 19:18 ` [PATCH 2/2] io_uring: extend batch freeing to cover more cases Jens Axboe
  0 siblings, 2 replies; 3+ messages in thread
From: Jens Axboe @ 2019-12-28 19:18 UTC (permalink / raw)
  To: io-uring

This extends the free batching, allowing it to cover more cases. It
was pretty limited before, since it required fixed files.

We could go further with this and batch the file puts as well, assuming
the requests in a batch often share the same file. That's probably not
the case on the poll side, though, so I left it as-is.
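
For reference, the net effect of the series is that a completion loop
only has to try the batch first and fall back to the regular free path
for requests that can't be batched. A rough sketch, based on
io_iopoll_complete() as it looks after patch 1 (event posting, locking
and the need_iter init added in patch 2 are elided):

	struct req_batch rb;
	struct io_kiocb *req;

	rb.to_free = 0;
	while (!list_empty(done)) {
		req = list_first_entry(done, struct io_kiocb, list);
		list_del(&req->list);

		/* queue for bulk freeing if possible, else free it right away */
		if (refcount_dec_and_test(&req->refs) &&
		    !io_req_multi_free(&rb, req))
			io_free_req(req);
	}
	/* release whatever is still queued in the batch */
	io_free_req_many(ctx, &rb);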

-- 
Jens Axboe




* [PATCH 1/2] io_uring: wrap multi-req freeing in struct req_batch
  2019-12-28 19:18 [PATCHSET 0/2] Extend io_uring batch freeing Jens Axboe
@ 2019-12-28 19:18 ` Jens Axboe
  2019-12-28 19:18 ` [PATCH 2/2] io_uring: extend batch freeing to cover more cases Jens Axboe
  1 sibling, 0 replies; 3+ messages in thread
From: Jens Axboe @ 2019-12-28 19:18 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe

This cleans up the code a bit, and allows us to build on top of the
multi-req freeing.
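
For reference, the calling convention the two helpers expect after this
patch (a sketch, surrounding details elided): callers keep a struct
req_batch on the stack, clear to_free before use, and always flush at
the end, since io_req_multi_free() only flushes on its own when the
array fills up.

	struct req_batch rb;

	rb.to_free = 0;				/* must be cleared before first use */
	...
	if (refcount_dec_and_test(&req->refs) &&
	    !io_req_multi_free(&rb, req))	/* queues req if it qualifies */
		io_free_req(req);
	...
	io_free_req_many(ctx, &rb);		/* final flush of anything still queued */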

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 65 ++++++++++++++++++++++++---------------------------
 1 file changed, 31 insertions(+), 34 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 0e6ed31cdb12..2de1e8409ab6 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1129,14 +1129,19 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	return NULL;
 }
 
-static void io_free_req_many(struct io_ring_ctx *ctx, void **reqs, int *nr)
+struct req_batch {
+	void *reqs[IO_IOPOLL_BATCH];
+	int to_free;
+};
+
+static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
 {
-	if (*nr) {
-		kmem_cache_free_bulk(req_cachep, *nr, reqs);
-		percpu_ref_put_many(&ctx->refs, *nr);
-		percpu_ref_put_many(&ctx->file_data->refs, *nr);
-		*nr = 0;
-	}
+	if (!rb->to_free)
+		return;
+	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
+	percpu_ref_put_many(&ctx->refs, rb->to_free);
+	percpu_ref_put_many(&ctx->file_data->refs, rb->to_free);
+	rb->to_free = 0;
 }
 
 static void __io_req_do_free(struct io_kiocb *req)
@@ -1368,7 +1373,7 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 	return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
 }
 
-static inline bool io_req_multi_free(struct io_kiocb *req)
+static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
 {
 	/*
 	 * If we're not using fixed files, we have to pair the completion part
@@ -1376,8 +1381,12 @@ static inline bool io_req_multi_free(struct io_kiocb *req)
 	 * free for fixed file and non-linked commands.
 	 */
 	if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) == REQ_F_FIXED_FILE)
-	    && !io_is_fallback_req(req) && !req->io)
+	    && !io_is_fallback_req(req) && !req->io) {
+		rb->reqs[rb->to_free++] = req;
+		if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
+			io_free_req_many(req->ctx, rb);
 		return true;
+	}
 
 	return false;
 }
@@ -1388,11 +1397,10 @@ static inline bool io_req_multi_free(struct io_kiocb *req)
 static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 			       struct list_head *done)
 {
-	void *reqs[IO_IOPOLL_BATCH];
+	struct req_batch rb;
 	struct io_kiocb *req;
-	int to_free;
 
-	to_free = 0;
+	rb.to_free = 0;
 	while (!list_empty(done)) {
 		req = list_first_entry(done, struct io_kiocb, list);
 		list_del(&req->list);
@@ -1400,19 +1408,13 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		io_cqring_fill_event(req, req->result);
 		(*nr_events)++;
 
-		if (refcount_dec_and_test(&req->refs)) {
-			if (io_req_multi_free(req)) {
-				reqs[to_free++] = req;
-				if (to_free == ARRAY_SIZE(reqs))
-					io_free_req_many(ctx, reqs, &to_free);
-			} else {
-				io_free_req(req);
-			}
-		}
+		if (refcount_dec_and_test(&req->refs) &&
+		    !io_req_multi_free(&rb, req))
+			io_free_req(req);
 	}
 
 	io_commit_cqring(ctx);
-	io_free_req_many(ctx, reqs, &to_free);
+	io_free_req_many(ctx, &rb);
 }
 
 static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
@@ -3225,30 +3227,25 @@ static void io_poll_complete_work(struct io_wq_work **workptr)
 
 static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
 {
-	void *reqs[IO_IOPOLL_BATCH];
 	struct io_kiocb *req, *tmp;
-	int to_free = 0;
+	struct req_batch rb;
 
+	rb.to_free = 0;
 	spin_lock_irq(&ctx->completion_lock);
 	llist_for_each_entry_safe(req, tmp, nodes, llist_node) {
 		hash_del(&req->hash_node);
 		io_poll_complete(req, req->result, 0);
 
-		if (refcount_dec_and_test(&req->refs)) {
-			if (io_req_multi_free(req)) {
-				reqs[to_free++] = req;
-				if (to_free == ARRAY_SIZE(reqs))
-					io_free_req_many(ctx, reqs, &to_free);
-			} else {
-				req->flags |= REQ_F_COMP_LOCKED;
-				io_free_req(req);
-			}
+		if (refcount_dec_and_test(&req->refs) &&
+		    !io_req_multi_free(&rb, req)) {
+			req->flags |= REQ_F_COMP_LOCKED;
+			io_free_req(req);
 		}
 	}
 	spin_unlock_irq(&ctx->completion_lock);
 
 	io_cqring_ev_posted(ctx);
-	io_free_req_many(ctx, reqs, &to_free);
+	io_free_req_many(ctx, &rb);
 }
 
 static void io_poll_flush(struct io_wq_work **workptr)
-- 
2.24.1



* [PATCH 2/2] io_uring: extend batch freeing to cover more cases
  2019-12-28 19:18 [PATCHSET 0/2] Extend io_uring batch freeing Jens Axboe
  2019-12-28 19:18 ` [PATCH 1/2] io_uring: wrap multi-req freeing in struct req_batch Jens Axboe
@ 2019-12-28 19:18 ` Jens Axboe
  1 sibling, 0 replies; 3+ messages in thread
From: Jens Axboe @ 2019-12-28 19:18 UTC (permalink / raw)
  To: io-uring; +Cc: Jens Axboe

Currently we only batch free if fixed files are used, there are no
links, no aux data, etc. This extends the batch freeing to exclude only
the linked and fallback cases, and makes io_free_req_many() handle the
other cases just fine.
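
In short, the batching rule after this patch (taken from the new
io_req_multi_free() below) becomes:

	/* only linked requests and the fallback request stay on the slow path */
	if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
		return false;

	/*
	 * Non-fixed files and requests with ->io need per-request cleanup,
	 * which io_free_req_many() now performs when need_iter is set.
	 */
	if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
		rb->need_iter++;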

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 100 ++++++++++++++++++++++++++++++++++----------------
 1 file changed, 69 insertions(+), 31 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 2de1e8409ab6..104128e429a6 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1129,21 +1129,6 @@ static struct io_kiocb *io_get_req(struct io_ring_ctx *ctx,
 	return NULL;
 }
 
-struct req_batch {
-	void *reqs[IO_IOPOLL_BATCH];
-	int to_free;
-};
-
-static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
-{
-	if (!rb->to_free)
-		return;
-	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
-	percpu_ref_put_many(&ctx->refs, rb->to_free);
-	percpu_ref_put_many(&ctx->file_data->refs, rb->to_free);
-	rb->to_free = 0;
-}
-
 static void __io_req_do_free(struct io_kiocb *req)
 {
 	if (likely(!io_is_fallback_req(req)))
@@ -1152,7 +1137,7 @@ static void __io_req_do_free(struct io_kiocb *req)
 		clear_bit_unlock(0, (unsigned long *) req->ctx->fallback_req);
 }
 
-static void __io_free_req(struct io_kiocb *req)
+static void __io_req_aux_free(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
 
@@ -1164,7 +1149,14 @@ static void __io_free_req(struct io_kiocb *req)
 		else
 			fput(req->file);
 	}
+}
+
+static void __io_free_req(struct io_kiocb *req)
+{
+	__io_req_aux_free(req);
+
 	if (req->flags & REQ_F_INFLIGHT) {
+		struct io_ring_ctx *ctx = req->ctx;
 		unsigned long flags;
 
 		spin_lock_irqsave(&ctx->inflight_lock, flags);
@@ -1178,6 +1170,56 @@ static void __io_free_req(struct io_kiocb *req)
 	__io_req_do_free(req);
 }
 
+struct req_batch {
+	void *reqs[IO_IOPOLL_BATCH];
+	int to_free;
+	int need_iter;
+};
+
+static void io_free_req_many(struct io_ring_ctx *ctx, struct req_batch *rb)
+{
+	if (!rb->to_free)
+		return;
+	if (rb->need_iter) {
+		int i, inflight = 0;
+		unsigned long flags;
+
+		for (i = 0; i < rb->to_free; i++) {
+			struct io_kiocb *req = rb->reqs[i];
+
+			if (req->flags & REQ_F_FIXED_FILE)
+				req->file = NULL;
+			if (req->flags & REQ_F_INFLIGHT)
+				inflight++;
+			else
+				rb->reqs[i] = NULL;
+			__io_req_aux_free(req);
+		}
+		if (!inflight)
+			goto do_free;
+
+		spin_lock_irqsave(&ctx->inflight_lock, flags);
+		for (i = 0; i < rb->to_free; i++) {
+			struct io_kiocb *req = rb->reqs[i];
+
+			if (req) {
+				list_del(&req->inflight_entry);
+				if (!--inflight)
+					break;
+			}
+		}
+		spin_unlock_irqrestore(&ctx->inflight_lock, flags);
+
+		if (waitqueue_active(&ctx->inflight_wait))
+			wake_up(&ctx->inflight_wait);
+	}
+do_free:
+	kmem_cache_free_bulk(req_cachep, rb->to_free, rb->reqs);
+	percpu_ref_put_many(&ctx->refs, rb->to_free);
+	percpu_ref_put_many(&ctx->file_data->refs, rb->to_free);
+	rb->to_free = rb->need_iter = 0;
+}
+
 static bool io_link_cancel_timeout(struct io_kiocb *req)
 {
 	struct io_ring_ctx *ctx = req->ctx;
@@ -1375,20 +1417,16 @@ static inline unsigned int io_sqring_entries(struct io_ring_ctx *ctx)
 
 static inline bool io_req_multi_free(struct req_batch *rb, struct io_kiocb *req)
 {
-	/*
-	 * If we're not using fixed files, we have to pair the completion part
-	 * with the file put. Use regular completions for those, only batch
-	 * free for fixed file and non-linked commands.
-	 */
-	if (((req->flags & (REQ_F_FIXED_FILE|REQ_F_LINK)) == REQ_F_FIXED_FILE)
-	    && !io_is_fallback_req(req) && !req->io) {
-		rb->reqs[rb->to_free++] = req;
-		if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
-			io_free_req_many(req->ctx, rb);
-		return true;
-	}
+	if ((req->flags & REQ_F_LINK) || io_is_fallback_req(req))
+		return false;
 
-	return false;
+	if (!(req->flags & REQ_F_FIXED_FILE) || req->io)
+		rb->need_iter++;
+
+	rb->reqs[rb->to_free++] = req;
+	if (unlikely(rb->to_free == ARRAY_SIZE(rb->reqs)))
+		io_free_req_many(req->ctx, rb);
+	return true;
 }
 
 /*
@@ -1400,7 +1438,7 @@ static void io_iopoll_complete(struct io_ring_ctx *ctx, unsigned int *nr_events,
 	struct req_batch rb;
 	struct io_kiocb *req;
 
-	rb.to_free = 0;
+	rb.to_free = rb.need_iter = 0;
 	while (!list_empty(done)) {
 		req = list_first_entry(done, struct io_kiocb, list);
 		list_del(&req->list);
@@ -3230,7 +3268,7 @@ static void __io_poll_flush(struct io_ring_ctx *ctx, struct llist_node *nodes)
 	struct io_kiocb *req, *tmp;
 	struct req_batch rb;
 
-	rb.to_free = 0;
+	rb.to_free = rb.need_iter = 0;
 	spin_lock_irq(&ctx->completion_lock);
 	llist_for_each_entry_safe(req, tmp, nodes, llist_node) {
 		hash_del(&req->hash_node);
-- 
2.24.1


