From: Jens Axboe <axboe@kernel.dk>
To: io-uring@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>
Subject: [PATCH 11/17] io_uring: get rid of struct io_rw_state
Date: Wed, 20 Mar 2024 16:55:26 -0600
Message-ID: <20240320225750.1769647-12-axboe@kernel.dk>
In-Reply-To: <20240320225750.1769647-1-axboe@kernel.dk>

A separate state struct is no longer needed; fold it into
io_async_rw.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
 io_uring/rw.c | 45 +++++++++++++++++++++++----------------------
 io_uring/rw.h | 10 +++-------
 2 files changed, 26 insertions(+), 29 deletions(-)
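
For context, the iter/iter_state pair that this patch moves into
io_async_rw implements a save-and-restore pattern around retries: the
iterator state is snapshotted before issuing I/O and rewound when the
request has to be reissued (e.g. on -EAGAIN). Below is a minimal
userspace sketch of that idea; the struct and helper names are
hypothetical stand-ins for illustration, not the kernel's iov_iter API.

    /* Hypothetical stand-ins for struct iov_iter / iov_iter_state. */
    #include <stddef.h>
    #include <stdio.h>

    struct iter {
            const char *buf;
            size_t len;
            size_t off;
    };

    struct iter_state {
            size_t len;
            size_t off;
    };

    static void iter_save(const struct iter *it, struct iter_state *st)
    {
            st->len = it->len;
            st->off = it->off;
    }

    static void iter_restore(struct iter *it, const struct iter_state *st)
    {
            it->len = st->len;
            it->off = st->off;
    }

    int main(void)
    {
            struct iter it = { "payload", 7, 0 };
            struct iter_state st;

            iter_save(&it, &st);    /* snapshot before issuing I/O */
            it.off += 3;            /* a partial attempt consumes the iter */
            iter_restore(&it, &st); /* reissue path rewinds to the snapshot */
            printf("off=%zu len=%zu\n", it.off, it.len);
            return 0;
    }

The kernel equivalents are iov_iter_save_state() and iov_iter_restore(),
used throughout the hunks below; after this patch they operate on
io_async_rw's embedded iter/iter_state rather than the nested
io_rw_state.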

diff --git a/io_uring/rw.c b/io_uring/rw.c
index 583fe61a0acb..19e866929cd3 100644
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -96,12 +96,12 @@ static int __io_import_iovec(int ddir, struct io_kiocb *req,
 			rw->len = sqe_len;
 		}
 
-		return import_ubuf(ddir, buf, sqe_len, &io->s.iter);
+		return import_ubuf(ddir, buf, sqe_len, &io->iter);
 	}
 
-	io->free_iovec = io->s.fast_iov;
+	io->free_iovec = io->fast_iov;
 	return __import_iovec(ddir, buf, sqe_len, UIO_FASTIOV, &io->free_iovec,
-				&io->s.iter, req->ctx->compat);
+				&io->iter, req->ctx->compat);
 }
 
 static inline int io_import_iovec(int rw, struct io_kiocb *req,
@@ -114,7 +114,7 @@ static inline int io_import_iovec(int rw, struct io_kiocb *req,
 	if (unlikely(ret < 0))
 		return ret;
 
-	iov_iter_save_state(&io->s.iter, &io->s.iter_state);
+	iov_iter_save_state(&io->iter, &io->iter_state);
 	return 0;
 }
 
@@ -216,7 +216,7 @@ static int io_prep_rw_setup(struct io_kiocb *req, int ddir, bool do_import)
 	if (unlikely(ret < 0))
 		return ret;
 
-	iov_iter_save_state(&rw->s.iter, &rw->s.iter_state);
+	iov_iter_save_state(&rw->iter, &rw->iter_state);
 	return 0;
 }
 
@@ -308,8 +308,8 @@ static int io_prep_rw_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe
 	io_req_set_rsrc_node(req, ctx, 0);
 
 	io = req->async_data;
-	ret = io_import_fixed(ddir, &io->s.iter, req->imu, rw->addr, rw->len);
-	iov_iter_save_state(&io->s.iter, &io->s.iter_state);
+	ret = io_import_fixed(ddir, &io->iter, req->imu, rw->addr, rw->len);
+	iov_iter_save_state(&io->iter, &io->iter_state);
 	return ret;
 }
 
@@ -374,7 +374,7 @@ static void io_resubmit_prep(struct io_kiocb *req)
 {
 	struct io_async_rw *io = req->async_data;
 
-	iov_iter_restore(&io->s.iter, &io->s.iter_state);
+	iov_iter_restore(&io->iter, &io->iter_state);
 }
 
 static bool io_rw_should_reissue(struct io_kiocb *req)
@@ -808,7 +808,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_rw_init_file(req, FMODE_READ);
 	if (unlikely(ret))
 		return ret;
-	req->cqe.res = iov_iter_count(&io->s.iter);
+	req->cqe.res = iov_iter_count(&io->iter);
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
@@ -826,7 +826,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(ret))
 		return ret;
 
-	ret = io_iter_do_read(rw, &io->s.iter);
+	ret = io_iter_do_read(rw, &io->iter);
 
 	if (ret == -EAGAIN || (req->flags & REQ_F_REISSUE)) {
 		req->flags &= ~REQ_F_REISSUE;
@@ -853,7 +853,7 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	 * untouched in case of error. Restore it and we'll advance it
 	 * manually if we need to.
 	 */
-	iov_iter_restore(&io->s.iter, &io->s.iter_state);
+	iov_iter_restore(&io->iter, &io->iter_state);
 
 	do {
 		/*
@@ -861,11 +861,11 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 		 * above or inside this loop. Advance the iter by the bytes
 		 * that were consumed.
 		 */
-		iov_iter_advance(&io->s.iter, ret);
-		if (!iov_iter_count(&io->s.iter))
+		iov_iter_advance(&io->iter, ret);
+		if (!iov_iter_count(&io->iter))
 			break;
 		io->bytes_done += ret;
-		iov_iter_save_state(&io->s.iter, &io->s.iter_state);
+		iov_iter_save_state(&io->iter, &io->iter_state);
 
 		/* if we can retry, do so with the callbacks armed */
 		if (!io_rw_should_retry(req)) {
@@ -873,19 +873,19 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 			return -EAGAIN;
 		}
 
-		req->cqe.res = iov_iter_count(&io->s.iter);
+		req->cqe.res = iov_iter_count(&io->iter);
 		/*
 		 * Now retry read with the IOCB_WAITQ parts set in the iocb. If
 		 * we get -EIOCBQUEUED, then we'll get a notification when the
 		 * desired page gets unlocked. We can also get a partial read
 		 * here, and if we do, then just retry at the new offset.
 		 */
-		ret = io_iter_do_read(rw, &io->s.iter);
+		ret = io_iter_do_read(rw, &io->iter);
 		if (ret == -EIOCBQUEUED)
 			return IOU_ISSUE_SKIP_COMPLETE;
 		/* we got some bytes, but not all. retry. */
 		kiocb->ki_flags &= ~IOCB_WAITQ;
-		iov_iter_restore(&io->s.iter, &io->s.iter_state);
+		iov_iter_restore(&io->iter, &io->iter_state);
 	} while (ret > 0);
 done:
 	/* it's faster to check here then delegate to kfree */
@@ -982,7 +982,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	ret = io_rw_init_file(req, FMODE_WRITE);
 	if (unlikely(ret))
 		return ret;
-	req->cqe.res = iov_iter_count(&io->s.iter);
+	req->cqe.res = iov_iter_count(&io->iter);
 
 	if (force_nonblock) {
 		/* If the file doesn't support async, just async punt */
@@ -1012,9 +1012,9 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	kiocb->ki_flags |= IOCB_WRITE;
 
 	if (likely(req->file->f_op->write_iter))
-		ret2 = call_write_iter(req->file, kiocb, &io->s.iter);
+		ret2 = call_write_iter(req->file, kiocb, &io->iter);
 	else if (req->file->f_op->write)
-		ret2 = loop_rw_iter(WRITE, rw, &io->s.iter);
+		ret2 = loop_rw_iter(WRITE, rw, &io->iter);
 	else
 		ret2 = -EINVAL;
 
@@ -1046,7 +1046,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 			 * in the worker. Also update bytes_done to account for
 			 * the bytes already written.
 			 */
-			iov_iter_save_state(&io->s.iter, &io->s.iter_state);
+			iov_iter_save_state(&io->iter, &io->iter_state);
 			io->bytes_done += ret2;
 
 			if (kiocb->ki_flags & IOCB_WRITE)
@@ -1057,7 +1057,7 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 		ret = kiocb_done(req, ret2, issue_flags);
 	} else {
 ret_eagain:
-		iov_iter_restore(&io->s.iter, &io->s.iter_state);
+		iov_iter_restore(&io->iter, &io->iter_state);
 		if (kiocb->ki_flags & IOCB_WRITE)
 			io_req_end_write(req);
 		return -EAGAIN;
@@ -1157,5 +1157,6 @@ void io_rw_cache_free(struct io_cache_entry *entry)
 	struct io_async_rw *rw;
 
 	rw = container_of(entry, struct io_async_rw, cache);
+	kfree(rw->free_iovec);
 	kfree(rw);
 }
diff --git a/io_uring/rw.h b/io_uring/rw.h
index f7905070d10b..7824896dc52d 100644
--- a/io_uring/rw.h
+++ b/io_uring/rw.h
@@ -2,18 +2,14 @@
 
 #include <linux/pagemap.h>
 
-struct io_rw_state {
-	struct iov_iter			iter;
-	struct iov_iter_state		iter_state;
-	struct iovec			fast_iov[UIO_FASTIOV];
-};
-
 struct io_async_rw {
 	union {
 		size_t			bytes_done;
 		struct io_cache_entry	cache;
 	};
-	struct io_rw_state		s;
+	struct iov_iter			iter;
+	struct iov_iter_state		iter_state;
+	struct iovec			fast_iov[UIO_FASTIOV];
 	struct iovec			*free_iovec;
 	struct wait_page_queue		wpq;
 };
-- 
2.43.0



Thread overview: 30+ messages
2024-03-20 22:55 [PATCHSET v2 0/17] Improve async state handling Jens Axboe
2024-03-20 22:55 ` [PATCH 01/17] io_uring/net: switch io_send() and io_send_zc() to using io_async_msghdr Jens Axboe
2024-04-06 20:58   ` Pavel Begunkov
2024-04-07 21:47     ` Jens Axboe
2024-03-20 22:55 ` [PATCH 02/17] io_uring/net: switch io_recv() to using io_async_msghdr Jens Axboe
2024-03-20 22:55 ` [PATCH 03/17] io_uring/net: unify cleanup handling Jens Axboe
2024-03-20 22:55 ` [PATCH 04/17] io_uring/net: always setup an io_async_msghdr Jens Axboe
2024-03-20 22:55 ` [PATCH 05/17] io_uring/net: get rid of ->prep_async() for receive side Jens Axboe
2024-03-20 22:55 ` [PATCH 06/17] io_uring/net: get rid of ->prep_async() for send side Jens Axboe
2024-03-20 22:55 ` [PATCH 07/17] io_uring: kill io_msg_alloc_async_prep() Jens Axboe
2024-03-20 22:55 ` [PATCH 08/17] io_uring/net: add iovec recycling Jens Axboe
2024-03-20 22:55 ` [PATCH 09/17] io_uring/net: drop 'kmsg' parameter from io_req_msg_cleanup() Jens Axboe
2024-03-20 22:55 ` [PATCH 10/17] io_uring/rw: always setup io_async_rw for read/write requests Jens Axboe
2024-03-25 12:03   ` Anuj gupta
2024-03-25 14:54     ` Jens Axboe
2024-03-20 22:55 ` Jens Axboe [this message]
2024-03-20 22:55 ` [PATCH 12/17] io_uring/rw: add iovec recycling Jens Axboe
2024-03-20 22:55 ` [PATCH 13/17] io_uring/net: move connect to always using async data Jens Axboe
2024-03-20 22:55 ` [PATCH 14/17] io_uring/uring_cmd: switch to always allocating async data Jens Axboe
2024-03-20 22:55 ` [PATCH 15/17] io_uring/uring_cmd: defer SQE copying until we need it Jens Axboe
2024-03-25 12:41   ` Anuj gupta
2024-03-25 14:55     ` Jens Axboe
2024-03-20 22:55 ` [PATCH 16/17] io_uring: drop ->prep_async() Jens Axboe
2024-04-06 20:54   ` Pavel Begunkov
2024-04-07 21:46     ` Jens Axboe
2024-03-20 22:55 ` [PATCH 17/17] io_uring/alloc_cache: switch to array based caching Jens Axboe
2024-03-21 15:59   ` Gabriel Krisman Bertazi
2024-03-21 16:38     ` Jens Axboe
2024-03-21 17:20       ` Gabriel Krisman Bertazi
2024-03-21 17:22         ` Jens Axboe
