From: Paolo Abeni <pabeni@redhat.com>
To: Mina Almasry <almasrymina@google.com>,
	netdev@vger.kernel.org,  linux-kernel@vger.kernel.org,
	linux-arch@vger.kernel.org,  linux-kselftest@vger.kernel.org,
	linux-media@vger.kernel.org,  dri-devel@lists.freedesktop.org,
	linaro-mm-sig@lists.linaro.org
Cc: "Willem de Bruijn" <willemdebruijn.kernel@gmail.com>,
	"Kaiyuan Zhang" <kaiyuanz@google.com>,
	"Jeroen de Borst" <jeroendb@google.com>,
	"Jesper Dangaard Brouer" <hawk@kernel.org>,
	"Arnd Bergmann" <arnd@arndb.de>,
	"Christian König" <christian.koenig@amd.com>,
	"David Ahern" <dsahern@kernel.org>,
	"Ilias Apalodimas" <ilias.apalodimas@linaro.org>,
	"Willem de Bruijn" <willemb@google.com>,
	"Sumit Semwal" <sumit.semwal@linaro.org>,
	"Eric Dumazet" <edumazet@google.com>,
	"Shakeel Butt" <shakeelb@google.com>,
	"Praveen Kaligineedi" <pkaligineedi@google.com>,
	"Jakub Kicinski" <kuba@kernel.org>,
	"Shuah Khan" <shuah@kernel.org>,
	"David S. Miller" <davem@davemloft.net>
Subject: Re: [RFC PATCH v3 10/12] tcp: RX path for devmem TCP
Date: Thu, 09 Nov 2023 11:52:01 +0100	[thread overview]
Message-ID: <e584ca804a2e98bcf6e8e5ea2d4206f9f579e0ce.camel@redhat.com> (raw)
In-Reply-To: <20231106024413.2801438-11-almasrymina@google.com>

On Sun, 2023-11-05 at 18:44 -0800, Mina Almasry wrote:
[...]
> +/* On error, returns the -errno. On success, returns number of bytes sent to the
> + * user. May not consume all of @remaining_len.
> + */
> +static int tcp_recvmsg_devmem(const struct sock *sk, const struct sk_buff *skb,
> +			      unsigned int offset, struct msghdr *msg,
> +			      int remaining_len)
> +{
> +	struct cmsg_devmem cmsg_devmem = { 0 };
> +	unsigned int start;
> +	int i, copy, n;
> +	int sent = 0;
> +	int err = 0;
> +
> +	do {
> +		start = skb_headlen(skb);
> +
> +		if (!skb_frags_not_readable(skb)) {

As 'skb_frags_not_readable()' is intended to be a possibly wider-scoped
test than skb->devmem, should the above explicitly test skb->devmem?
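
For illustration, a rough and uncompiled variant of that entry check
(just a sketch, not the posted code), gating on skb->devmem explicitly
and keeping the helper only as a sanity check:

	/* sketch: be explicit about devmem here, since
	 * skb_frags_not_readable() may later cover other kinds of
	 * unreadable frags as well
	 */
	if (!skb->devmem || !skb_frags_not_readable(skb)) {
		err = -ENODEV;
		goto out;
	}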

> +			err = -ENODEV;
> +			goto out;
> +		}
> +
> +		/* Copy header. */
> +		copy = start - offset;
> +		if (copy > 0) {
> +			copy = min(copy, remaining_len);
> +
> +			n = copy_to_iter(skb->data + offset, copy,
> +					 &msg->msg_iter);
> +			if (n != copy) {
> +				err = -EFAULT;
> +				goto out;
> +			}
> +
> +			offset += copy;
> +			remaining_len -= copy;
> +
> +			/* First a cmsg_devmem for # bytes copied to user
> +			 * buffer.
> +			 */
> +			memset(&cmsg_devmem, 0, sizeof(cmsg_devmem));
> +			cmsg_devmem.frag_size = copy;
> +			err = put_cmsg(msg, SOL_SOCKET, SO_DEVMEM_HEADER,
> +				       sizeof(cmsg_devmem), &cmsg_devmem);
> +			if (err || msg->msg_flags & MSG_CTRUNC) {
> +				msg->msg_flags &= ~MSG_CTRUNC;
> +				if (!err)
> +					err = -ETOOSMALL;
> +				goto out;
> +			}
> +
> +			sent += copy;
> +
> +			if (remaining_len == 0)
> +				goto out;
> +		}
> +
> +		/* after that, send information of devmem pages through a
> +		 * sequence of cmsg
> +		 */
> +		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
> +			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
> +			struct page_pool_iov *ppiov;
> +			u64 frag_offset;
> +			u32 user_token;
> +			int end;
> +
> +			/* skb_frags_not_readable() should indicate that ALL the
> +			 * frags in this skb are unreadable page_pool_iovs.
> +			 * We're checking for that flag above, but also check
> +			 * individual pages here. If the tcp stack is not
> +			 * setting skb->devmem correctly, we still don't want to
> +			 * crash here when accessing pgmap or priv below.
> +			 */
> +			if (!skb_frag_page_pool_iov(frag)) {
> +				net_err_ratelimited("Found non-devmem skb with page_pool_iov");
> +				err = -ENODEV;
> +				goto out;
> +			}
> +
> +			ppiov = skb_frag_page_pool_iov(frag);
> +			end = start + skb_frag_size(frag);
> +			copy = end - offset;
> +
> +			if (copy > 0) {
> +				copy = min(copy, remaining_len);
> +
> +				frag_offset = page_pool_iov_virtual_addr(ppiov) +
> +					      skb_frag_off(frag) + offset -
> +					      start;
> +				cmsg_devmem.frag_offset = frag_offset;
> +				cmsg_devmem.frag_size = copy;
> +				err = xa_alloc((struct xarray *)&sk->sk_user_pages,
> +					       &user_token, frag->bv_page,
> +					       xa_limit_31b, GFP_KERNEL);
> +				if (err)
> +					goto out;
> +
> +				cmsg_devmem.frag_token = user_token;
> +
> +				offset += copy;
> +				remaining_len -= copy;
> +
> +				err = put_cmsg(msg, SOL_SOCKET,
> +					       SO_DEVMEM_OFFSET,
> +					       sizeof(cmsg_devmem),
> +					       &cmsg_devmem);
> +				if (err || msg->msg_flags & MSG_CTRUNC) {
> +					msg->msg_flags &= ~MSG_CTRUNC;
> +					xa_erase((struct xarray *)&sk->sk_user_pages,
> +						 user_token);
> +					if (!err)
> +						err = -ETOOSMALL;
> +					goto out;
> +				}
> +
> +				page_pool_iov_get_many(ppiov, 1);
> +
> +				sent += copy;
> +
> +				if (remaining_len == 0)
> +					goto out;
> +			}
> +			start = end;
> +		}
> +
> +		if (!remaining_len)
> +			goto out;
> +
> +		/* if remaining_len is not satisfied yet, we need to go to the
> +		 * next frag in the frag_list to satisfy remaining_len.
> +		 */
> +		skb = skb_shinfo(skb)->frag_list ?: skb->next;

I think at this point the 'skb' is still on the sk receive queue, so the
above will possibly walk the queue.

Later on, only the current queue tail could possibly be consumed by
tcp_recvmsg_locked(). This feels confusing to me. Why not limit the
loop to the 'current' skb and its frags?
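
Untested sketch of what I mean; the tail of the loop would then only
descend into the current skb:

	/* sketch: follow only this skb's own frag_list; do not walk
	 * the receive queue via skb->next from inside this helper
	 */
	skb = skb_shinfo(skb)->frag_list;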

> +
> +		offset = offset - start;
> +	} while (skb);
> +
> +	if (remaining_len) {
> +		err = -EFAULT;
> +		goto out;
> +	}
> +
> +out:
> +	if (!sent)
> +		sent = err;
> +
> +	return sent;
> +}
> +
>  /*
>   *	This routine copies from a sock struct into the user buffer.
>   *
> @@ -2314,6 +2463,7 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
>  			      int *cmsg_flags)
>  {
>  	struct tcp_sock *tp = tcp_sk(sk);
> +	int last_copied_devmem = -1; /* uninitialized */
>  	int copied = 0;
>  	u32 peek_seq;
>  	u32 *seq;
> @@ -2491,15 +2641,44 @@ static int tcp_recvmsg_locked(struct sock *sk, struct msghdr *msg, size_t len,
>  		}
>  
>  		if (!(flags & MSG_TRUNC)) {
> -			err = skb_copy_datagram_msg(skb, offset, msg, used);
> -			if (err) {
> -				/* Exception. Bailout! */
> -				if (!copied)
> -					copied = -EFAULT;
> +			if (last_copied_devmem != -1 &&
> +			    last_copied_devmem != skb->devmem)
>  				break;
> +
> +			if (!skb->devmem) {
> +				err = skb_copy_datagram_msg(skb, offset, msg,
> +							    used);
> +				if (err) {
> +					/* Exception. Bailout! */
> +					if (!copied)
> +						copied = -EFAULT;
> +					break;
> +				}
> +			} else {
> +				if (!(flags & MSG_SOCK_DEVMEM)) {
> +					/* skb->devmem skbs can only be received
> +					 * with the MSG_SOCK_DEVMEM flag.
> +					 */
> +					if (!copied)
> +						copied = -EFAULT;
> +
> +					break;
> +				}
> +
> +				err = tcp_recvmsg_devmem(sk, skb, offset, msg,
> +							 used);
> +				if (err <= 0) {
> +					if (!copied)
> +						copied = -EFAULT;
> +
> +					break;
> +				}
> +				used = err;

Minor nit: I personally would find the above more readable if this
whole chunk were placed in a single helper (e.g. the current
tcp_recvmsg_devmem(), renamed to something more appropriate).
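
Rough, uncompiled sketch of what I have in mind (the helper name is just
a placeholder):

	/* sketch: fold the MSG_SOCK_DEVMEM check and the devmem copy
	 * into one helper so tcp_recvmsg_locked() keeps a single call
	 * site for the devmem case
	 */
	static int tcp_recvmsg_dmabuf(const struct sock *sk,
				      const struct sk_buff *skb,
				      unsigned int offset,
				      struct msghdr *msg, int len,
				      int flags)
	{
		if (!(flags & MSG_SOCK_DEVMEM))
			return -EFAULT;

		return tcp_recvmsg_devmem(sk, skb, offset, msg, len);
	}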

Cheers,

Paolo

