* Re: [MPTCP] [PATCH 3/3] mptcp: allow collapsing consecutive sendpages on the same substream
@ 2019-04-19 16:19 Paolo Abeni
From: Paolo Abeni @ 2019-04-19 16:19 UTC
  To: mptcp

Hi,

Thank you for the feedback.

On Thu, 2019-04-18 at 16:56 -0700, Mat Martineau wrote:
> @@ -78,25 +91,48 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
> > 	if (!psize)
> > 		return -EINVAL;
> > 
> > -	/* Mark the end of the previous write so the beginning of the
> > -	 * next write (with its own mptcp skb extension data) is not
> > -	 * collapsed.
> > -	 */
> > +	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
> > +	*pmss_now = mss_now;
> > +	*ps_goal = size_goal;
> > +	avail_size = size_goal;
> > 	skb = tcp_write_queue_tail(ssk);
> > -	if (skb)
> > -		TCP_SKB_CB(skb)->eor = 1;
> > +	if (skb) {
> > +		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
> > +		BUG_ON(!mpext);
> > +
> > +		/* Limit the write to the size available in the
> > +		 * current skb, if any, so that we create at most one new skb.
> > +		 * If we run out of space in the current skb (e.g. the window
> > +		 * size shrunk since the last send) a new skb will be allocated even
> > +		 * if collapsing was allowed: collapsing is effectively
> > +		 * disabled.
> > +		 */
> > +		can_collapse = mptcp_skb_can_collapse_to(msk, skb, mpext);
> > +		if (!can_collapse)
> > +			TCP_SKB_CB(skb)->eor = 1;
> > +		else if (size_goal - skb->len > 0)
> > +			avail_size = size_goal - skb->len;
> > +		else
> > +			can_collapse = false;
> 
> In this final clause, should it set eor as well? If we're not expecting a 
> collapse, it might be better to make sure it does not happen.

Yes, we could set eor here, but if there is no space available in the
current skb, collapsing can't happen anyway; if it did, that would be a
bug. I don't think setting it would add any additional safety.
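
To spell out the reasoning: when the tail skb is already at (or above)
size_goal, do_tcp_sendpages() starts a new segment on its own, so nothing
can be appended to the tail regardless of eor. Paraphrasing the relevant
check in the TCP code:

	if (!skb || (copy = size_goal - skb->len) <= 0 ||
	    !tcp_skb_can_collapse_to(skb)) {
		/* allocate a fresh skb: no data is merged into the tail */
		...
	}

If that ever stops holding, the BUG_ON(collapsed && !can_collapse) after
the sendpages call will catch it.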

> > -	if (skb == tcp_write_queue_tail(ssk))
> > -		pr_err("no new skb %p/%p", sk, ssk);
> > +	collapsed = skb == tcp_write_queue_tail(ssk);
> > +	BUG_ON(collapsed && !can_collapse);
> > 
> > 	skb = tcp_write_queue_tail(ssk);
> 
> Minor: this skb assignment seems to be coupled with the mpext line below
> rather than with the conditional that updates mpext and exits, so it could
> be moved after the collapse check.

Agreed, I will do that in the next iteration.
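Something along these lines, looking up the tail again only when a new skb
has to be tagged:

	collapsed = skb == tcp_write_queue_tail(ssk);
	BUG_ON(collapsed && !can_collapse);
	if (collapsed) {
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);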

Final note: it looks like this patch is not the root cause of the stream
corruption I was observing; it just uncovers a bug already present in the
current code. I'll try to send a fix soon.

Thanks,

Paolo


* [MPTCP] [PATCH 3/3] mptcp: allow collapsing consecutive sendpages on the same substream
@ 2019-05-07 13:15 Paolo Abeni
From: Paolo Abeni @ 2019-05-07 13:15 UTC
  To: mptcp

If the current sendmsg() lands on the same subflow we used last, we
can try to collapse the data.

 v1 -> v2:
 - reordered the code according to Mat's suggestion
 - rebased on top of previous patch's changes

 RFC -> v1:
 - clarified collapsing schema
 - update data_len for collapsed skb in mptcp_sendmsg_frag()
 - fix collapsing decision on mptcp-level OoO

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/mptcp/protocol.c | 79 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 60 insertions(+), 19 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 5e721fe85216..7bc4133aea8f 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -48,12 +48,25 @@ static struct sock *mptcp_subflow_get_ref(const struct mptcp_sock *msk)
 	return NULL;
 }
 
+static inline bool mptcp_skb_can_collapse_to(const struct mptcp_sock *msk,
+					     const struct sk_buff *skb,
+					     const struct mptcp_ext *mpext)
+{
+	if (!tcp_skb_can_collapse_to(skb))
+		return false;
+
+	/* can't collapse if there is a MPTCP level out-of-order */
+	return mpext->data_seq + mpext->data_len == msk->write_seq;
+}
+
 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
-			      struct msghdr *msg, long *timeo)
+			      struct msghdr *msg, long *timeo, int *pmss_now,
+			      int *ps_goal)
 {
+	int mss_now, avail_size, size_goal, ret;
 	struct mptcp_sock *msk = mptcp_sk(sk);
+	bool collapsed, can_collapse = false;
 	struct mptcp_ext *mpext = NULL;
-	int mss_now, size_goal, ret;
 	struct page_frag *pfrag;
 	struct sk_buff *skb;
 	size_t psize;
@@ -70,8 +83,32 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 
 	/* compute copy limit */
 	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
-	psize = min_t(int, pfrag->size - pfrag->offset, size_goal);
+	*pmss_now = mss_now;
+	*ps_goal = size_goal;
+	avail_size = size_goal;
+	skb = tcp_write_queue_tail(ssk);
+	if (skb) {
+		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
+		BUG_ON(!mpext);
+
+		/* Limit the write to the size available in the
+		 * current skb, if any, so that we create at most one new skb.
+		 * If we run out of space in the current skb (e.g. the window
+		 * size shrunk since the last send) a new skb will be allocated even
+		 * if collapsing was allowed: collapsing is effectively
+		 * disabled.
+		 */
+		can_collapse = mptcp_skb_can_collapse_to(msk, skb, mpext);
+		if (!can_collapse)
+			TCP_SKB_CB(skb)->eor = 1;
+		else if (size_goal - skb->len > 0)
+			avail_size = size_goal - skb->len;
+		else
+			can_collapse = false;
+	}
+	psize = min_t(size_t, pfrag->size - pfrag->offset, avail_size);
 
+	/* Copy to page */
 	pr_debug("left=%zu", msg_data_left(msg));
 	psize = copy_page_from_iter(pfrag->page, pfrag->offset,
 				    min_t(size_t, msg_data_left(msg), psize),
@@ -80,14 +117,9 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 	if (!psize)
 		return -EINVAL;
 
-	/* Mark the end of the previous write so the beginning of the
-	 * next write (with its own mptcp skb extension data) is not
-	 * collapsed.
+	/* tell the TCP stack to delay the push so that we can safely
+	 * access the skb after the sendpages call
 	 */
-	skb = tcp_write_queue_tail(ssk);
-	if (skb)
-		TCP_SKB_CB(skb)->eor = 1;
-
 	ret = do_tcp_sendpages(ssk, pfrag->page, pfrag->offset, psize,
 			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
 	if (ret <= 0)
@@ -95,13 +127,15 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 	if (unlikely(ret < psize))
 		iov_iter_revert(&msg->msg_iter, psize - ret);
 
-	if (skb == tcp_write_queue_tail(ssk))
-		pr_err("no new skb %p/%p", sk, ssk);
+	collapsed = skb == tcp_write_queue_tail(ssk);
+	BUG_ON(collapsed && !can_collapse);
+	if (collapsed) {
+		mpext->data_len += ret;
+		goto out;
+	}
 
 	skb = tcp_write_queue_tail(ssk);
-
 	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
-
 	if (mpext) {
 		memset(mpext, 0, sizeof(*mpext));
 		mpext->data_seq = msk->write_seq;
@@ -114,22 +148,25 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 		pr_debug("data_seq=%llu subflow_seq=%u data_len=%u checksum=%u, dsn64=%d",
 			 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
 			 mpext->checksum, mpext->dsn64);
-	} /* TODO: else fallback */
+	}
+	/* TODO: else fallback; allocation can fail, but we can't easily retire
+	 * skbs from the write_queue, as we need to roll-back TCP status
+	 */
 
+out:
 	pfrag->offset += ret;
 	msk->write_seq += ret;
 	subflow_ctx(ssk)->rel_write_seq += ret;
 
-	tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle, size_goal);
 	return ret;
 }
 
 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
+	int mss_now = 0, size_goal = 0, ret = 0;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	size_t copied = 0;
 	struct sock *ssk;
-	int ret = 0;
 	long timeo;
 
 	pr_debug("msk=%p", msk);
@@ -159,14 +196,18 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	lock_sock(ssk);
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 	while (msg_data_left(msg)) {
-		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo);
+		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo, &mss_now,
+					 &size_goal);
 		if (ret < 0)
 			break;
 
 		copied += ret;
 	}
-	if (copied > 0)
+	if (copied) {
 		ret = copied;
+		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
+			 size_goal);
+	}
 
 	release_sock(ssk);
 	release_sock(sk);
-- 
2.20.1


* Re: [MPTCP] [PATCH 3/3] mptcp: allow collapsing consecutive sendpages on the same substream
@ 2019-04-18 23:56 Mat Martineau
From: Mat Martineau @ 2019-04-18 23:56 UTC
  To: mptcp

On Mon, 15 Apr 2019, Paolo Abeni wrote:

> If the current sendmsg() lands on the same subflow we used last, we
> can try to collapse the data.
>
> RFC -> v1
> - clarified collapsing schema
> - update data_len for collapsed skb in mptcp_sendmsg_frag()
> - fix collapsing decision on mptcp-level OoO
>
> Signed-off-by: Paolo Abeni <pabeni@redhat.com>
> ---
> net/mptcp/protocol.c | 77 ++++++++++++++++++++++++++++++++++----------
> 1 file changed, 60 insertions(+), 17 deletions(-)
>
> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index 085d658af772..0a947f44f712 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -47,11 +47,24 @@ static struct sock *mptcp_subflow_get_ref(const struct mptcp_sock *msk)
> 	return NULL;
> }
>
> +static inline bool mptcp_skb_can_collapse_to(const struct mptcp_sock *msk,
> +					     const struct sk_buff *skb,
> +					     const struct mptcp_ext *mpext)
> +{
> +	if (!tcp_skb_can_collapse_to(skb))
> +		return false;
> +
> +	/* can't collapse if there is a MPTCP level out-of-order */
> +	return mpext->data_seq + mpext->data_len == msk->write_seq;

Ok, looks like a good in-order check.
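For instance, if the tail skb carries data_seq 1000 with data_len 500,
collapsing is allowed only when msk->write_seq is exactly 1500; any other
value means the new data would not be contiguous at the MPTCP level.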

> +}
> +
> static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
> -			      struct msghdr *msg, long *timeo)
> +			      struct msghdr *msg, long *timeo, int *pmss_now,
> +			      int *ps_goal)
> {
> +	int mss_now, avail_size, size_goal, poffset, ret;
> 	struct mptcp_sock *msk = mptcp_sk(sk);
> -	int mss_now, size_goal, poffset, ret;
> +	bool collapsed, can_collapse = false;
> 	struct mptcp_ext *mpext = NULL;
> 	struct page_frag *pfrag;
> 	struct sk_buff *skb;
> @@ -78,25 +91,48 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
> 	if (!psize)
> 		return -EINVAL;
>
> -	/* Mark the end of the previous write so the beginning of the
> -	 * next write (with its own mptcp skb extension data) is not
> -	 * collapsed.
> -	 */
> +	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
> +	*pmss_now = mss_now;
> +	*ps_goal = size_goal;
> +	avail_size = size_goal;
> 	skb = tcp_write_queue_tail(ssk);
> -	if (skb)
> -		TCP_SKB_CB(skb)->eor = 1;
> +	if (skb) {
> +		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
> +		BUG_ON(!mpext);
> +
> +		/* Limit the write to the size available in the
> +		 * current skb, if any, so that we create at most one new skb.
> +		 * If we run out of space in the current skb (e.g. the window
> +		 * size shrunk since the last send) a new skb will be allocated even
> +		 * if collapsing was allowed: collapsing is effectively
> +		 * disabled.
> +		 */
> +		can_collapse = mptcp_skb_can_collapse_to(msk, skb, mpext);
> +		if (!can_collapse)
> +			TCP_SKB_CB(skb)->eor = 1;
> +		else if (size_goal - skb->len > 0)
> +			avail_size = size_goal - skb->len;
> +		else
> +			can_collapse = false;

In this final clause, should it set eor as well? If we're not expecting a 
collapse, it might be better to make sure it does not happen.

> +	}
>
> -	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
> -	psize = min_t(int, size_goal, psize);
> +	/* tell the TCP stack to delay the push so that we can safely
> +	 * access the skb after the sendpages call
> +	 */
> +	psize = min_t(int, avail_size, psize);
> 	ret = do_tcp_sendpages(ssk, pfrag->page, poffset, psize,
> 			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
> 	if (ret <= 0)
> 		return ret;
>
> -	if (skb == tcp_write_queue_tail(ssk))
> -		pr_err("no new skb %p/%p", sk, ssk);
> +	collapsed = skb == tcp_write_queue_tail(ssk);
> +	BUG_ON(collapsed && !can_collapse);
>
> 	skb = tcp_write_queue_tail(ssk);

Minor: this skb assignment seems to be coupled with the mpext line below
rather than with the conditional that updates mpext and exits, so it could
be moved after the collapse check.

Thanks,

Mat


> +	if (collapsed) {
> +		mpext->data_len += ret;
> +		goto out;
> +	}
>
> 	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
>
> @@ -112,22 +148,25 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
> 		pr_debug("data_seq=%llu subflow_seq=%u data_len=%u checksum=%u, dsn64=%d",
> 			 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
> 			 mpext->checksum, mpext->dsn64);
> -	} /* TODO: else fallback */
> +	}
> +	/* TODO: else fallback; allocation can fail, but we can't easily retire
> +	 * skbs from the write_queue, as we need to roll-back TCP status
> +	 */
>
> +out:
> 	pfrag->offset += ret;
> 	msk->write_seq += ret;
> 	subflow_ctx(ssk)->rel_write_seq += ret;
>
> -	tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle, size_goal);
> 	return ret;
> }
>
> static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
> {
> +	int mss_now = 0, size_goal = 0, ret = 0;
> 	struct mptcp_sock *msk = mptcp_sk(sk);
> 	size_t copied = 0;
> 	struct sock *ssk;
> -	int ret = 0;
> 	long timeo;
>
> 	pr_debug("msk=%p", msk);
> @@ -157,14 +196,18 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
> 	lock_sock(ssk);
> 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
> 	while (msg_data_left(msg)) {
> -		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo);
> +		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo, &mss_now,
> +					 &size_goal);
> 		if (ret < 0)
> 			break;
>
> 		copied += ret;
> 	}
> -	if (copied > 0)
> +	if (copied) {
> 		ret = copied;
> +		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
> +			 size_goal);
> +	}
>
> 	release_sock(ssk);
> 	release_sock(sk);
> -- 
> 2.20.1

--
Mat Martineau
Intel

* [MPTCP] [PATCH 3/3] mptcp: allow collapsing consecutive sendpages on the same substream
@ 2019-04-15 15:30 Paolo Abeni
From: Paolo Abeni @ 2019-04-15 15:30 UTC
  To: mptcp

If the current sendmsg() lands on the same subflow we used last, we
can try to collapse the data.

 RFC -> v1
 - clarified collapsing schema
 - update data_len for collapsed skb in mptcp_sendmsg_frag()
 - fix collapsing decision on mptcp-level OoO

Signed-off-by: Paolo Abeni <pabeni@redhat.com>
---
 net/mptcp/protocol.c | 77 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 60 insertions(+), 17 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 085d658af772..0a947f44f712 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -47,11 +47,24 @@ static struct sock *mptcp_subflow_get_ref(const struct mptcp_sock *msk)
 	return NULL;
 }
 
+static inline bool mptcp_skb_can_collapse_to(const struct mptcp_sock *msk,
+					     const struct sk_buff *skb,
+					     const struct mptcp_ext *mpext)
+{
+	if (!tcp_skb_can_collapse_to(skb))
+		return false;
+
+	/* can't collapse if there is a MPTCP level out-of-order */
+	return mpext->data_seq + mpext->data_len == msk->write_seq;
+}
+
 static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
-			      struct msghdr *msg, long *timeo)
+			      struct msghdr *msg, long *timeo, int *pmss_now,
+			      int *ps_goal)
 {
+	int mss_now, avail_size, size_goal, poffset, ret;
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	int mss_now, size_goal, poffset, ret;
+	bool collapsed, can_collapse = false;
 	struct mptcp_ext *mpext = NULL;
 	struct page_frag *pfrag;
 	struct sk_buff *skb;
@@ -78,25 +91,48 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 	if (!psize)
 		return -EINVAL;
 
-	/* Mark the end of the previous write so the beginning of the
-	 * next write (with its own mptcp skb extension data) is not
-	 * collapsed.
-	 */
+	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
+	*pmss_now = mss_now;
+	*ps_goal = size_goal;
+	avail_size = size_goal;
 	skb = tcp_write_queue_tail(ssk);
-	if (skb)
-		TCP_SKB_CB(skb)->eor = 1;
+	if (skb) {
+		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);
+		BUG_ON(!mpext);
+
+		/* Limit the write to the size available in the
+		 * current skb, if any, so that we create at most one new skb.
+		 * If we run out of space in the current skb (e.g. the window
+		 * size shrunk since the last send) a new skb will be allocated even
+		 * if collapsing was allowed: collapsing is effectively
+		 * disabled.
+		 */
+		can_collapse = mptcp_skb_can_collapse_to(msk, skb, mpext);
+		if (!can_collapse)
+			TCP_SKB_CB(skb)->eor = 1;
+		else if (size_goal - skb->len > 0)
+			avail_size = size_goal - skb->len;
+		else
+			can_collapse = false;
+	}
 
-	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
-	psize = min_t(int, size_goal, psize);
+	/* tell the TCP stack to delay the push so that we can safely
+	 * access the skb after the sendpages call
+	 */
+	psize = min_t(int, avail_size, psize);
 	ret = do_tcp_sendpages(ssk, pfrag->page, poffset, psize,
 			       msg->msg_flags | MSG_SENDPAGE_NOTLAST);
 	if (ret <= 0)
 		return ret;
 
-	if (skb == tcp_write_queue_tail(ssk))
-		pr_err("no new skb %p/%p", sk, ssk);
+	collapsed = skb == tcp_write_queue_tail(ssk);
+	BUG_ON(collapsed && !can_collapse);
 
 	skb = tcp_write_queue_tail(ssk);
+	if (collapsed) {
+		mpext->data_len += ret;
+		goto out;
+	}
 
 	mpext = skb_ext_add(skb, SKB_EXT_MPTCP);
 
@@ -112,22 +148,25 @@ static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
 		pr_debug("data_seq=%llu subflow_seq=%u data_len=%u checksum=%u, dsn64=%d",
 			 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
 			 mpext->checksum, mpext->dsn64);
-	} /* TODO: else fallback */
+	}
+	/* TODO: else fallback; allocation can fail, but we can't easily retire
+	 * skbs from the write_queue, as we need to roll-back TCP status
+	 */
 
+out:
 	pfrag->offset += ret;
 	msk->write_seq += ret;
 	subflow_ctx(ssk)->rel_write_seq += ret;
 
-	tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle, size_goal);
 	return ret;
 }
 
 static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 {
+	int mss_now = 0, size_goal = 0, ret = 0;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	size_t copied = 0;
 	struct sock *ssk;
-	int ret = 0;
 	long timeo;
 
 	pr_debug("msk=%p", msk);
@@ -157,14 +196,18 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	lock_sock(ssk);
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 	while (msg_data_left(msg)) {
-		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo);
+		ret = mptcp_sendmsg_frag(sk, ssk, msg, &timeo, &mss_now,
+					 &size_goal);
 		if (ret < 0)
 			break;
 
 		copied += ret;
 	}
-	if (copied > 0)
+	if (copied) {
 		ret = copied;
+		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
+			 size_goal);
+	}
 
 	release_sock(ssk);
 	release_sock(sk);
-- 
2.20.1

