* [PATCHSET 0/2] io_uring -EAGAIN IOPOLL retries
From: Jens Axboe @ 2020-08-27 22:49 UTC
  To: io-uring

The first patch is a fix; the second is a cleanup that gets rid of a
needless task_work bounce for retries.

-- 
Jens Axboe




* [PATCH 1/2] io_uring: fix IOPOLL -EAGAIN retries
From: Jens Axboe @ 2020-08-27 22:49 UTC
  To: io-uring; +Cc: Jens Axboe, Andres Freund

This retry path normally isn't hit, as polling is mostly done on NVMe
with deep queue depths. But if we do run into request starvation, we
need to ensure that retries are properly serialized: hash io-wq work
by inode for IOPOLL requests, and set up an async retry when an io-wq
thread sees -EAGAIN on an IOPOLL ring instead of completing the
request with that error.
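
For reference (illustrative only, not part of the patch), a minimal
liburing sketch of the kind of IOPOLL setup that exercises this path.
The device path and sizes are assumptions; the program only shows the
shape of a polled read, not the starvation case itself:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <liburing.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	void *buf;
	int fd, ret;

	/* IOPOLL needs O_DIRECT on a device/file that supports polled I/O */
	fd = open("/dev/nvme0n1", O_RDONLY | O_DIRECT);
	if (fd < 0)
		return 1;

	/* O_DIRECT wants an aligned buffer */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	ret = io_uring_queue_init(8, &ring, IORING_SETUP_IOPOLL);
	if (ret < 0)
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_read(sqe, fd, buf, 4096, 0);
	io_uring_submit(&ring);

	/* on an IOPOLL ring this actively polls for the completion */
	ret = io_uring_wait_cqe(&ring, &cqe);
	if (!ret) {
		printf("read: %d\n", cqe->res);
		io_uring_cqe_seen(&ring, cqe);
	}

	io_uring_queue_exit(&ring);
	return 0;
}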

Reported-by: Andres Freund <andres@anarazel.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index 6df08287c59e..d27fe2b742d8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -1150,7 +1150,7 @@ static void io_prep_async_work(struct io_kiocb *req)
 	io_req_init_async(req);
 
 	if (req->flags & REQ_F_ISREG) {
-		if (def->hash_reg_file)
+		if (def->hash_reg_file || (req->ctx->flags & IORING_SETUP_IOPOLL))
 			io_wq_hash_work(&req->work, file_inode(req->file));
 	} else {
 		if (def->unbound_nonreg_file)
@@ -3157,7 +3157,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock,
 		ret = 0;
 		goto out_free;
 	} else if (ret == -EAGAIN) {
-		if (!force_nonblock)
+		/* IOPOLL retry should happen for io-wq threads */
+		if (!force_nonblock && !(req->ctx->flags & IORING_SETUP_IOPOLL))
 			goto done;
 		/* some cases will consume bytes even on error returns */
 		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
@@ -3301,11 +3302,14 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
 	if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
 		ret2 = -EAGAIN;
 	if (!force_nonblock || ret2 != -EAGAIN) {
+		/* IOPOLL retry should happen for io-wq threads */
+		if ((req->ctx->flags & IORING_SETUP_IOPOLL) && ret2 == -EAGAIN)
+			goto copy_iov;
 		kiocb_done(kiocb, ret2, cs);
 	} else {
+copy_iov:
 		/* some cases will consume bytes even on error returns */
 		iov_iter_revert(iter, iov_count - iov_iter_count(iter));
-copy_iov:
 		ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false);
 		if (!ret)
 			return -EAGAIN;
-- 
2.28.0



* [PATCH 2/2] io_uring: don't bounce block based -EAGAIN retry off task_work
From: Jens Axboe @ 2020-08-27 22:49 UTC
  To: io-uring; +Cc: Jens Axboe

These events happen inline from submission, so there's no need to
bounce them through the original task. Just set them up for retry
and issue the retry directly, instead of going over task_work.
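
A small userspace model of the control-flow change (illustrative, not
kernel code; all names are made up for the example): the old flow
queued a callback through the submitting task before resubmitting,
while the new flow preps and requeues the request right where the
failure is seen:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct request {
	int res;	/* result of the attempted I/O */
	int retries;
};

/* model of io_resubmit_prep(): is a retry viable? */
static bool resubmit_prep(struct request *req)
{
	return req->retries++ < 1;
}

/* model of io_queue_async_work(): hand the request to a worker */
static void queue_async_work(struct request *req)
{
	printf("requeued for retry, attempt %d\n", req->retries);
}

/*
 * Model of the new io_rw_reissue(): no task_work bounce, the
 * -EAGAIN is handled inline where it happens.
 */
static bool rw_reissue(struct request *req)
{
	if (req->res != -EAGAIN)
		return false;
	if (resubmit_prep(req)) {
		queue_async_work(req);
		return true;
	}
	return false;
}

int main(void)
{
	struct request req = { .res = -EAGAIN };

	if (!rw_reissue(&req))
		printf("completed with %d\n", req.res);
	return 0;
}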

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 fs/io_uring.c | 26 ++++++--------------------
 1 file changed, 6 insertions(+), 20 deletions(-)

diff --git a/fs/io_uring.c b/fs/io_uring.c
index d27fe2b742d8..81143458f3b6 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2295,22 +2295,6 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error)
 	io_req_complete(req, ret);
 	return false;
 }
-
-static void io_rw_resubmit(struct callback_head *cb)
-{
-	struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work);
-	struct io_ring_ctx *ctx = req->ctx;
-	int err;
-
-	err = io_sq_thread_acquire_mm(ctx, req);
-
-	if (io_resubmit_prep(req, err)) {
-		refcount_inc(&req->refs);
-		io_queue_async_work(req);
-	}
-
-	percpu_ref_put(&ctx->refs);
-}
 #endif
 
 static bool io_rw_reissue(struct io_kiocb *req, long res)
@@ -2321,12 +2305,14 @@ static bool io_rw_reissue(struct io_kiocb *req, long res)
 	if ((res != -EAGAIN && res != -EOPNOTSUPP) || io_wq_current_is_worker())
 		return false;
 
-	init_task_work(&req->task_work, io_rw_resubmit);
-	percpu_ref_get(&req->ctx->refs);
+	ret = io_sq_thread_acquire_mm(req->ctx, req);
 
-	ret = io_req_task_work_add(req, &req->task_work, true);
-	if (!ret)
+	if (io_resubmit_prep(req, ret)) {
+		refcount_inc(&req->refs);
+		io_queue_async_work(req);
 		return true;
+	}
+
 #endif
 	return false;
 }
-- 
2.28.0


