From: Jens Axboe <axboe@kernel.dk>
To: io-uring@vger.kernel.org
Cc: Jens Axboe <axboe@kernel.dk>
Subject: [PATCH 06/17] io_uring/net: get rid of ->prep_async() for send side
Date: Wed, 20 Mar 2024 16:55:21 -0600
Message-ID: <20240320225750.1769647-7-axboe@kernel.dk>
In-Reply-To: <20240320225750.1769647-1-axboe@kernel.dk>

Move the io_async_msghdr out of the issue path and into prep handling,
since it's now done unconditionally and hence does not need to be part
of the issue path. This means we can drop any usage of
io_sendrecv_prep_async() and io_sendmsg_prep_async(), unifying the
forced async setup path with the normal prep setup.

Signed-off-by: Jens Axboe <axboe@kernel.dk>
---
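
For illustration, a minimal userspace model of the flow change
(hypothetical names, not the kernel code itself): the header/address
setup that the issue side used to do lazily now runs once at prep time,
and the issue handlers simply consume req->async_data.

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-ins for io_kiocb / io_async_msghdr. */
struct async_msghdr { char addr[32]; int addr_len; };
struct req { struct async_msghdr *async_data; };

/* Prep-time setup, done once and unconditionally; analogous to
 * io_sendmsg_prep_setup() -> io_send_setup() in this patch. */
static int prep(struct req *req, const char *addr, int addr_len)
{
	struct async_msghdr *kmsg = calloc(1, sizeof(*kmsg));

	if (!kmsg)
		return -1;
	memcpy(kmsg->addr, addr, addr_len);	/* move_addr_to_kernel() analogue */
	kmsg->addr_len = addr_len;
	req->async_data = kmsg;
	return 0;
}

/* Issue path: no req_has_async_data() branch and no allocation left;
 * a forced-async retry re-enters here and reuses the same state. */
static int issue(struct req *req)
{
	struct async_msghdr *kmsg = req->async_data;

	return kmsg->addr_len;	/* "send" using the prep-time state */
}

int main(void)
{
	struct req req = { 0 };

	if (prep(&req, "dest", 4))
		return 1;
	printf("issue() -> %d\n", issue(&req));
	free(req.async_data);
	return 0;
}
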
 io_uring/net.c   | 162 +++++++++++++++--------------------------------
 io_uring/net.h   |   2 -
 io_uring/opdef.c |   4 --
 3 files changed, 50 insertions(+), 118 deletions(-)

diff --git a/io_uring/net.c b/io_uring/net.c
index e438b1ac2420..dc6cda076a93 100644
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -290,50 +290,59 @@ static int io_sendmsg_copy_hdr(struct io_kiocb *req,
 	return ret;
 }
 
-int io_sendrecv_prep_async(struct io_kiocb *req)
+void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
+{
+	struct io_async_msghdr *io = req->async_data;
+
+	kfree(io->free_iov);
+}
+
+static int io_send_setup(struct io_kiocb *req)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *io;
+	struct io_async_msghdr *kmsg = req->async_data;
 	int ret;
 
-	if (req_has_async_data(req))
-		return 0;
-	sr->done_io = 0;
-	if (!sr->addr)
-		return 0;
-	io = io_msg_alloc_async_prep(req);
-	if (!io)
-		return -ENOMEM;
-	memset(&io->msg, 0, sizeof(io->msg));
-	ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &io->msg.msg_iter);
-	if (unlikely(ret))
-		return ret;
-	io->msg.msg_name = &io->addr;
-	io->msg.msg_namelen = sr->addr_len;
-	return move_addr_to_kernel(sr->addr, sr->addr_len, &io->addr);
+	kmsg->msg.msg_name = NULL;
+	kmsg->msg.msg_namelen = 0;
+	kmsg->msg.msg_control = NULL;
+	kmsg->msg.msg_controllen = 0;
+	kmsg->msg.msg_ubuf = NULL;
+
+	if (sr->addr) {
+		ret = move_addr_to_kernel(sr->addr, sr->addr_len, &kmsg->addr);
+		if (unlikely(ret < 0))
+			return ret;
+		kmsg->msg.msg_name = &kmsg->addr;
+		kmsg->msg.msg_namelen = sr->addr_len;
+	}
+	if (!io_do_buffer_select(req)) {
+		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
+				  &kmsg->msg.msg_iter);
+		if (unlikely(ret < 0))
+			return ret;
+	}
+
+	return 0;
 }
 
-int io_sendmsg_prep_async(struct io_kiocb *req)
+static int io_sendmsg_prep_setup(struct io_kiocb *req, int is_msg)
 {
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
+	struct io_async_msghdr *kmsg;
 	int ret;
 
-	sr->done_io = 0;
-	if (!io_msg_alloc_async_prep(req))
+	/* always locked for prep */
+	kmsg = io_msg_alloc_async(req, 0);
+	if (unlikely(!kmsg))
 		return -ENOMEM;
-	ret = io_sendmsg_copy_hdr(req, req->async_data);
+	if (!is_msg)
+		return io_send_setup(req);
+	ret = io_sendmsg_copy_hdr(req, kmsg);
 	if (!ret)
 		req->flags |= REQ_F_NEED_CLEANUP;
 	return ret;
 }
 
-void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req)
-{
-	struct io_async_msghdr *io = req->async_data;
-
-	kfree(io->free_iov);
-}
-
 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
@@ -362,7 +371,7 @@ int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->ctx->compat)
 		sr->msg_flags |= MSG_CMSG_COMPAT;
 #endif
-	return 0;
+	return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG);
 }
 
 static void io_req_msg_cleanup(struct io_kiocb *req,
@@ -379,7 +388,7 @@ static void io_req_msg_cleanup(struct io_kiocb *req,
 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *kmsg;
+	struct io_async_msghdr *kmsg = req->async_data;
 	struct socket *sock;
 	unsigned flags;
 	int min_ret = 0;
@@ -389,18 +398,6 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(!sock))
 		return -ENOTSOCK;
 
-	if (req_has_async_data(req)) {
-		kmsg = req->async_data;
-		kmsg->msg.msg_control_user = sr->msg_control;
-	} else {
-		kmsg = io_msg_alloc_async(req, issue_flags);
-		if (unlikely(!kmsg))
-			return -ENOMEM;
-		ret = io_sendmsg_copy_hdr(req, kmsg);
-		if (ret)
-			return ret;
-	}
-
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 		return -EAGAIN;
@@ -436,54 +433,10 @@ int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags)
 	return IOU_OK;
 }
 
-static struct io_async_msghdr *io_send_setup(struct io_kiocb *req,
-					     unsigned int issue_flags)
-{
-	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *kmsg;
-	int ret;
-
-	if (req_has_async_data(req)) {
-		kmsg = req->async_data;
-	} else {
-		kmsg = io_msg_alloc_async(req, issue_flags);
-		if (unlikely(!kmsg))
-			return ERR_PTR(-ENOMEM);
-		kmsg->msg.msg_name = NULL;
-		kmsg->msg.msg_namelen = 0;
-		kmsg->msg.msg_control = NULL;
-		kmsg->msg.msg_controllen = 0;
-		kmsg->msg.msg_ubuf = NULL;
-
-		if (sr->addr) {
-			ret = move_addr_to_kernel(sr->addr, sr->addr_len,
-						  &kmsg->addr);
-			if (unlikely(ret < 0))
-				return ERR_PTR(ret);
-			kmsg->msg.msg_name = &kmsg->addr;
-			kmsg->msg.msg_namelen = sr->addr_len;
-		}
-
-		if (!io_do_buffer_select(req)) {
-			ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len,
-					  &kmsg->msg.msg_iter);
-			if (unlikely(ret))
-				return ERR_PTR(ret);
-		}
-	}
-
-	if (!(req->flags & REQ_F_POLLED) &&
-	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
-		return ERR_PTR(-EAGAIN);
-
-	return kmsg;
-}
-
 int io_send(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *kmsg;
-	size_t len = sr->len;
+	struct io_async_msghdr *kmsg = req->async_data;
 	struct socket *sock;
 	unsigned flags;
 	int min_ret = 0;
@@ -493,13 +446,9 @@ int io_send(struct io_kiocb *req, unsigned int issue_flags)
 	if (unlikely(!sock))
 		return -ENOTSOCK;
 
-	kmsg = io_send_setup(req, issue_flags);
-	if (IS_ERR(kmsg))
-		return PTR_ERR(kmsg);
-
-	ret = import_ubuf(ITER_SOURCE, sr->buf, len, &kmsg->msg.msg_iter);
-	if (unlikely(ret))
-		return ret;
+	if (!(req->flags & REQ_F_POLLED) &&
+	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
+		return -EAGAIN;
 
 	flags = sr->msg_flags;
 	if (issue_flags & IO_URING_F_NONBLOCK)
@@ -1085,7 +1034,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (req->ctx->compat)
 		zc->msg_flags |= MSG_CMSG_COMPAT;
 #endif
-	return 0;
+	return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG_ZC);
 }
 
 static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb,
@@ -1174,7 +1123,7 @@ static int io_send_zc_import(struct io_kiocb *req, struct io_async_msghdr *kmsg)
 int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *kmsg;
+	struct io_async_msghdr *kmsg = req->async_data;
 	struct socket *sock;
 	unsigned msg_flags;
 	int ret, min_ret = 0;
@@ -1185,9 +1134,9 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
 		return -EOPNOTSUPP;
 
-	kmsg = io_send_setup(req, issue_flags);
-	if (IS_ERR(kmsg))
-		return PTR_ERR(kmsg);
+	if (!(req->flags & REQ_F_POLLED) &&
+	    (zc->flags & IORING_RECVSEND_POLL_FIRST))
+		return -EAGAIN;
 
 	if (!zc->done_io) {
 		ret = io_send_zc_import(req, kmsg);
@@ -1243,7 +1192,7 @@ int io_send_zc(struct io_kiocb *req, unsigned int issue_flags)
 int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
-	struct io_async_msghdr *kmsg;
+	struct io_async_msghdr *kmsg = req->async_data;
 	struct socket *sock;
 	unsigned flags;
 	int ret, min_ret = 0;
@@ -1256,17 +1205,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 	if (!test_bit(SOCK_SUPPORT_ZC, &sock->flags))
 		return -EOPNOTSUPP;
 
-	if (req_has_async_data(req)) {
-		kmsg = req->async_data;
-	} else {
-		kmsg = io_msg_alloc_async(req, issue_flags);
-		if (unlikely(!kmsg))
-			return -ENOMEM;
-		ret = io_sendmsg_copy_hdr(req, kmsg);
-		if (ret)
-			return ret;
-	}
-
 	if (!(req->flags & REQ_F_POLLED) &&
 	    (sr->flags & IORING_RECVSEND_POLL_FIRST))
 		return -EAGAIN;
diff --git a/io_uring/net.h b/io_uring/net.h
index 4b4fd9b1b7b4..f99ebb9dc0bb 100644
--- a/io_uring/net.h
+++ b/io_uring/net.h
@@ -34,13 +34,11 @@ struct io_async_connect {
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_shutdown(struct io_kiocb *req, unsigned int issue_flags);
 
-int io_sendmsg_prep_async(struct io_kiocb *req);
 void io_sendmsg_recvmsg_cleanup(struct io_kiocb *req);
 int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_sendmsg(struct io_kiocb *req, unsigned int issue_flags);
 
 int io_send(struct io_kiocb *req, unsigned int issue_flags);
-int io_sendrecv_prep_async(struct io_kiocb *req);
 
 int io_recvmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);
 int io_recvmsg(struct io_kiocb *req, unsigned int issue_flags);
diff --git a/io_uring/opdef.c b/io_uring/opdef.c
index 1368193edc57..dd4a1e1425e1 100644
--- a/io_uring/opdef.c
+++ b/io_uring/opdef.c
@@ -527,7 +527,6 @@ const struct io_cold_def io_cold_defs[] = {
 		.name			= "SENDMSG",
 #if defined(CONFIG_NET)
 		.async_size		= sizeof(struct io_async_msghdr),
-		.prep_async		= io_sendmsg_prep_async,
 		.cleanup		= io_sendmsg_recvmsg_cleanup,
 		.fail			= io_sendrecv_fail,
 #endif
@@ -603,7 +602,6 @@ const struct io_cold_def io_cold_defs[] = {
 		.async_size		= sizeof(struct io_async_msghdr),
 		.cleanup		= io_sendmsg_recvmsg_cleanup,
 		.fail			= io_sendrecv_fail,
-		.prep_async		= io_sendrecv_prep_async,
 #endif
 	},
 	[IORING_OP_RECV] = {
@@ -688,7 +686,6 @@ const struct io_cold_def io_cold_defs[] = {
 		.name			= "SEND_ZC",
 #if defined(CONFIG_NET)
 		.async_size		= sizeof(struct io_async_msghdr),
-		.prep_async		= io_sendrecv_prep_async,
 		.cleanup		= io_send_zc_cleanup,
 		.fail			= io_sendrecv_fail,
 #endif
@@ -697,7 +694,6 @@ const struct io_cold_def io_cold_defs[] = {
 		.name			= "SENDMSG_ZC",
 #if defined(CONFIG_NET)
 		.async_size		= sizeof(struct io_async_msghdr),
-		.prep_async		= io_sendmsg_prep_async,
 		.cleanup		= io_send_zc_cleanup,
 		.fail			= io_sendrecv_fail,
 #endif
-- 
2.43.0
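
The POLL_FIRST check is now open-coded in each send handler, returning
-EAGAIN before the prep-time state is touched. From userspace nothing
changes; here is a minimal liburing sketch that exercises this path
(assumes liburing is installed; all calls are from the public liburing
API):

#include <liburing.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	int fds[2];
	const char msg[] = "hello";

	if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds))
		return 1;
	if (io_uring_queue_init(4, &ring, 0))
		return 1;

	sqe = io_uring_get_sqe(&ring);
	io_uring_prep_send(sqe, fds[0], msg, sizeof(msg), 0);
	/* Poll for space first; the retry after the internal -EAGAIN
	 * reuses the msghdr that was set up at prep time. */
	sqe->ioprio |= IORING_RECVSEND_POLL_FIRST;

	io_uring_submit(&ring);
	if (io_uring_wait_cqe(&ring, &cqe))
		return 1;
	printf("send res %d\n", cqe->res);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	return 0;
}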


Thread overview: 30+ messages
2024-03-20 22:55 [PATCHSET v2 0/17] Improve async state handling Jens Axboe
2024-03-20 22:55 ` [PATCH 01/17] io_uring/net: switch io_send() and io_send_zc() to using io_async_msghdr Jens Axboe
2024-04-06 20:58   ` Pavel Begunkov
2024-04-07 21:47     ` Jens Axboe
2024-03-20 22:55 ` [PATCH 02/17] io_uring/net: switch io_recv() to using io_async_msghdr Jens Axboe
2024-03-20 22:55 ` [PATCH 03/17] io_uring/net: unify cleanup handling Jens Axboe
2024-03-20 22:55 ` [PATCH 04/17] io_uring/net: always setup an io_async_msghdr Jens Axboe
2024-03-20 22:55 ` [PATCH 05/17] io_uring/net: get rid of ->prep_async() for receive side Jens Axboe
2024-03-20 22:55 ` Jens Axboe [this message]
2024-03-20 22:55 ` [PATCH 07/17] io_uring: kill io_msg_alloc_async_prep() Jens Axboe
2024-03-20 22:55 ` [PATCH 08/17] io_uring/net: add iovec recycling Jens Axboe
2024-03-20 22:55 ` [PATCH 09/17] io_uring/net: drop 'kmsg' parameter from io_req_msg_cleanup() Jens Axboe
2024-03-20 22:55 ` [PATCH 10/17] io_uring/rw: always setup io_async_rw for read/write requests Jens Axboe
2024-03-25 12:03   ` Anuj gupta
2024-03-25 14:54     ` Jens Axboe
2024-03-20 22:55 ` [PATCH 11/17] io_uring: get rid of struct io_rw_state Jens Axboe
2024-03-20 22:55 ` [PATCH 12/17] io_uring/rw: add iovec recycling Jens Axboe
2024-03-20 22:55 ` [PATCH 13/17] io_uring/net: move connect to always using async data Jens Axboe
2024-03-20 22:55 ` [PATCH 14/17] io_uring/uring_cmd: switch to always allocating async data Jens Axboe
2024-03-20 22:55 ` [PATCH 15/17] io_uring/uring_cmd: defer SQE copying until we need it Jens Axboe
2024-03-25 12:41   ` Anuj gupta
2024-03-25 14:55     ` Jens Axboe
2024-03-20 22:55 ` [PATCH 16/17] io_uring: drop ->prep_async() Jens Axboe
2024-04-06 20:54   ` Pavel Begunkov
2024-04-07 21:46     ` Jens Axboe
2024-03-20 22:55 ` [PATCH 17/17] io_uring/alloc_cache: switch to array based caching Jens Axboe
2024-03-21 15:59   ` Gabriel Krisman Bertazi
2024-03-21 16:38     ` Jens Axboe
2024-03-21 17:20       ` Gabriel Krisman Bertazi
2024-03-21 17:22         ` Jens Axboe
