From: David Howells <dhowells@redhat.com>
To: Matthew Wilcox <willy@infradead.org>,
	"David S. Miller" <davem@davemloft.net>,
	Eric Dumazet <edumazet@google.com>,
	Jakub Kicinski <kuba@kernel.org>, Paolo Abeni <pabeni@redhat.com>
Cc: David Howells <dhowells@redhat.com>,
	Al Viro <viro@zeniv.linux.org.uk>,
	Christoph Hellwig <hch@infradead.org>,
	Jens Axboe <axboe@kernel.dk>, Jeff Layton <jlayton@kernel.org>,
	Christian Brauner <brauner@kernel.org>,
	Chuck Lever III <chuck.lever@oracle.com>,
	Linus Torvalds <torvalds@linux-foundation.org>,
	netdev@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	Santosh Shilimkar <santosh.shilimkar@oracle.com>,
	linux-rdma@vger.kernel.org, rds-devel@oss.oracle.com
Subject: [RFC PATCH v2 38/48] rds: Use sendmsg(MSG_SPLICE_PAGES) rather than sendpage
Date: Wed, 29 Mar 2023 15:13:44 +0100
Message-ID: <20230329141354.516864-39-dhowells@redhat.com>
In-Reply-To: <20230329141354.516864-1-dhowells@redhat.com>

When transmitting data, call down into TCP using a single sendmsg with
MSG_SPLICE_PAGES to indicate that content should be spliced, rather than
performing several sendmsg and sendpage calls to transmit the header and
the data pages separately.
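
For illustration, a minimal sketch of that single-call shape follows.  It
assumes a connected socket and pages whose lifetimes cover the
transmission; the helper name splice_send_pages() is invented for this
example and is not part of the patch:

#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/*
 * Hypothetical helper: send an already-filled bio_vec array in one
 * sendmsg() call, asking the transport to splice the pages rather than
 * copy them.
 */
static int splice_send_pages(struct socket *sock, struct bio_vec *bvec,
			     unsigned int nr_segs, size_t size)
{
	struct msghdr msg = {
		.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL,
	};

	/* Point the message iterator at the bio_vec array... */
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, nr_segs, size);

	/* ...and hand the whole lot to the socket in one go. */
	return sock_sendmsg(sock, &msg);
}

The patch below builds such a bio_vec array from the RDS header and the
message's scatterlist and then makes one sock_sendmsg() call per
rds_tcp_xmit() invocation.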

To make this work, the data is assembled in a bio_vec array and attached to
a BVEC-type iterator.  The header is copied into memory acquired with
page_frag_memdup(), which duplicates it into a piece of a page that can
later be freed with put_page().
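
As a rough sketch of that header handling (again not part of the patch:
the helper names are invented, error handling is pared down, and the
declaration of page_frag_memdup() added earlier in this series is assumed
to be visible):

#include <linux/bvec.h>
#include <linux/gfp.h>
#include <linux/limits.h>
#include <linux/mm.h>

/*
 * Hypothetical helper: duplicate a protocol header into page-fragment
 * memory so it can sit in the same bio_vec array as the payload pages.
 */
static int splice_attach_header(struct bio_vec *bv, const void *hdr,
				size_t len)
{
	void *p;

	/* page_frag_memdup() copies @hdr into a piece of a page... */
	p = page_frag_memdup(NULL, hdr, len, GFP_KERNEL, ULONG_MAX);
	if (!p)
		return -ENOMEM;

	/* ...which can be described by a bio_vec like any other page. */
	bvec_set_virt(bv, p, len);
	return 0;
}

/* Once the sendmsg() has consumed the data, release the header copy. */
static void splice_release_header(struct bio_vec *bv)
{
	put_page(bv->bv_page);
}

In the patch itself this is done inline in rds_tcp_xmit(), with free_hdr
tracking whether bvec[0] holds such a copy.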

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Santosh Shilimkar <santosh.shilimkar@oracle.com>
cc: "David S. Miller" <davem@davemloft.net>
cc: Eric Dumazet <edumazet@google.com>
cc: Jakub Kicinski <kuba@kernel.org>
cc: Paolo Abeni <pabeni@redhat.com>
cc: Jens Axboe <axboe@kernel.dk>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-rdma@vger.kernel.org
cc: rds-devel@oss.oracle.com
cc: netdev@vger.kernel.org
---
 net/rds/tcp_send.c | 86 +++++++++++++++++++++-------------------------
 1 file changed, 40 insertions(+), 46 deletions(-)

diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
index 8c4d1d6e9249..660d9f203d99 100644
--- a/net/rds/tcp_send.c
+++ b/net/rds/tcp_send.c
@@ -52,29 +52,24 @@ void rds_tcp_xmit_path_complete(struct rds_conn_path *cp)
 	tcp_sock_set_cork(tc->t_sock->sk, false);
 }
 
-/* the core send_sem serializes this with other xmit and shutdown */
-static int rds_tcp_sendmsg(struct socket *sock, void *data, unsigned int len)
-{
-	struct kvec vec = {
-		.iov_base = data,
-		.iov_len = len,
-	};
-	struct msghdr msg = {
-		.msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL,
-	};
-
-	return kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);
-}
-
 /* the core send_sem serializes this with other xmit and shutdown */
 int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 		 unsigned int hdr_off, unsigned int sg, unsigned int off)
 {
 	struct rds_conn_path *cp = rm->m_inc.i_conn_path;
 	struct rds_tcp_connection *tc = cp->cp_transport_data;
+	struct msghdr msg = {
+		.msg_flags = MSG_SPLICE_PAGES | MSG_DONTWAIT | MSG_NOSIGNAL,
+	};
+	struct bio_vec *bvec;
+	unsigned int i, size = 0, ix = 0;
+	bool free_hdr = false;
 	int done = 0;
-	int ret = 0;
-	int more;
+	int ret = -ENOMEM;
+
+	bvec = kmalloc_array(1 + sg, sizeof(struct bio_vec), GFP_KERNEL);
+	if (!bvec)
+		goto out;
 
 	if (hdr_off == 0) {
 		/*
@@ -99,43 +94,37 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 
 	if (hdr_off < sizeof(struct rds_header)) {
 		/* see rds_tcp_write_space() */
+		void *p;
+
 		set_bit(SOCK_NOSPACE, &tc->t_sock->sk->sk_socket->flags);
 
-		ret = rds_tcp_sendmsg(tc->t_sock,
-				      (void *)&rm->m_inc.i_hdr + hdr_off,
-				      sizeof(rm->m_inc.i_hdr) - hdr_off);
-		if (ret < 0)
-			goto out;
-		done += ret;
-		if (hdr_off + done != sizeof(struct rds_header))
+		ret = -ENOMEM;
+		p = page_frag_memdup(NULL,
+				     (void *)&rm->m_inc.i_hdr + hdr_off,
+				     sizeof(rm->m_inc.i_hdr) - hdr_off,
+				     GFP_KERNEL, ULONG_MAX);
+		if (!p)
 			goto out;
+		bvec_set_virt(&bvec[ix], p, sizeof(rm->m_inc.i_hdr) - hdr_off);
+		free_hdr = true;
+		size += bvec[ix].bv_len;
+		ix++;
 	}
 
-	more = rm->data.op_nents > 1 ? (MSG_MORE | MSG_SENDPAGE_NOTLAST) : 0;
-	while (sg < rm->data.op_nents) {
-		int flags = MSG_DONTWAIT | MSG_NOSIGNAL | more;
-
-		ret = tc->t_sock->ops->sendpage(tc->t_sock,
-						sg_page(&rm->data.op_sg[sg]),
-						rm->data.op_sg[sg].offset + off,
-						rm->data.op_sg[sg].length - off,
-						flags);
-		rdsdebug("tcp sendpage %p:%u:%u ret %d\n", (void *)sg_page(&rm->data.op_sg[sg]),
-			 rm->data.op_sg[sg].offset + off, rm->data.op_sg[sg].length - off,
-			 ret);
-		if (ret <= 0)
-			break;
-
-		off += ret;
-		done += ret;
-		if (off == rm->data.op_sg[sg].length) {
-			off = 0;
-			sg++;
-		}
-		if (sg == rm->data.op_nents - 1)
-			more = 0;
+	for (i = sg; i < rm->data.op_nents; i++) {
+		bvec_set_page(&bvec[ix],
+			      sg_page(&rm->data.op_sg[i]),
+			      rm->data.op_sg[i].length - off,
+			      rm->data.op_sg[i].offset + off);
+		off = 0;
+		size += bvec[ix].bv_len;
+		ix++;
 	}
 
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, ix, size);
+	ret = sock_sendmsg(tc->t_sock, &msg);
+	rdsdebug("tcp sendmsg-splice %u,%u ret %d\n", ix, size, ret);
+
 out:
 	if (ret <= 0) {
 		/* write_space will hit after EAGAIN, all else fatal */
@@ -158,6 +147,11 @@ int rds_tcp_xmit(struct rds_connection *conn, struct rds_message *rm,
 	}
 	if (done == 0)
 		done = ret;
+	if (bvec) {
+		if (free_hdr)
+			put_page(bvec[0].bv_page);
+		kfree(bvec);
+	}
 	return done;
 }
 

