* [PATCH mptcp-next 0/4] refactor push pending
@ 2022-09-28  9:48 Geliang Tang
  2022-09-28  9:48 ` [PATCH mptcp-next 1/4] mptcp: update __mptcp_push_pending Geliang Tang
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: Geliang Tang @ 2022-09-28  9:48 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

Refactor __mptcp_push_pending() and __mptcp_subflow_push_pending() to
remove duplicate code and to make it easier to support redundant
schedulers in __mptcp_subflow_push_pending().

Geliang Tang (4):
  mptcp: update __mptcp_push_pending
  mptcp: add do_push_pending helper
  mptcp: update __mptcp_subflow_push_pending
  mptcp: simplify __mptcp_subflow_push_pending

 net/mptcp/protocol.c | 137 ++++++++++++++++---------------------------
 1 file changed, 50 insertions(+), 87 deletions(-)

-- 
2.35.3



* [PATCH mptcp-next 1/4] mptcp: update __mptcp_push_pending
  2022-09-28  9:48 [PATCH mptcp-next 0/4] refactor push pending Geliang Tang
@ 2022-09-28  9:48 ` Geliang Tang
  2022-09-28 11:02   ` Paolo Abeni
  2022-09-28  9:48 ` [PATCH mptcp-next 2/4] mptcp: add do_push_pending helper Geliang Tang
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 11+ messages in thread
From: Geliang Tang @ 2022-09-28  9:48 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

To support redundant packet schedulers more easily, this patch moves the
packet scheduler out of the dfrags loop in __mptcp_push_pending(),
invoking mptcp_sched_get_send() only once, and updates the socket
locking accordingly.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 33 +++++++++++----------------------
 1 file changed, 11 insertions(+), 22 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7ca21915b3d1..fc972d56926e 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1530,15 +1530,24 @@ void mptcp_check_and_set_pending(struct sock *sk)
 
 void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 {
-	struct sock *prev_ssk = NULL, *ssk = NULL;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_sendmsg_info info = {
 				.flags = flags,
 	};
 	bool do_check_data_fin = false;
 	struct mptcp_data_frag *dfrag;
+	struct sock *ssk;
 	int len;
 
+	ssk = mptcp_sched_get_send(msk);
+	if (!ssk)
+		goto out;
+
+	if (!mptcp_send_head(sk))
+		goto out;
+
+	lock_sock(ssk);
+
 	while ((dfrag = mptcp_send_head(sk))) {
 		info.sent = dfrag->already_sent;
 		info.limit = dfrag->data_len;
@@ -1546,24 +1555,6 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 		while (len > 0) {
 			int ret = 0;
 
-			prev_ssk = ssk;
-			ssk = mptcp_sched_get_send(msk);
-
-			/* First check. If the ssk has changed since
-			 * the last round, release prev_ssk
-			 */
-			if (ssk != prev_ssk && prev_ssk)
-				mptcp_push_release(prev_ssk, &info);
-			if (!ssk)
-				goto out;
-
-			/* Need to lock the new subflow only if different
-			 * from the previous one, otherwise we are still
-			 * helding the relevant lock
-			 */
-			if (ssk != prev_ssk)
-				lock_sock(ssk);
-
 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
 			if (ret <= 0) {
 				if (ret == -EAGAIN)
@@ -1581,9 +1572,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
 	}
 
-	/* at this point we held the socket lock for the last subflow we used */
-	if (ssk)
-		mptcp_push_release(ssk, &info);
+	mptcp_push_release(ssk, &info);
 
 out:
 	/* ensure the rtx timer is running */
-- 
2.35.3



* [PATCH mptcp-next 2/4] mptcp: add do_push_pending helper
  2022-09-28  9:48 [PATCH mptcp-next 0/4] refactor push pending Geliang Tang
  2022-09-28  9:48 ` [PATCH mptcp-next 1/4] mptcp: update __mptcp_push_pending Geliang Tang
@ 2022-09-28  9:48 ` Geliang Tang
  2022-09-28  9:48 ` [PATCH mptcp-next 3/4] mptcp: update __mptcp_subflow_push_pending Geliang Tang
  2022-09-28  9:48 ` [PATCH mptcp-next 4/4] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
  3 siblings, 0 replies; 11+ messages in thread
From: Geliang Tang @ 2022-09-28  9:48 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

This patch moves the duplicate code from __mptcp_push_pending() and
__mptcp_subflow_push_pending() into a new helper function named
__do_push_pending(), and simplifies __mptcp_push_pending() by invoking
this helper.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 65 ++++++++++++++++++++++++--------------------
 1 file changed, 36 insertions(+), 29 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index fc972d56926e..6a20a000622f 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1491,12 +1491,6 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 	return ssk;
 }
 
-static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
-{
-	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
-	release_sock(ssk);
-}
-
 static void mptcp_update_post_push(struct mptcp_sock *msk,
 				   struct mptcp_data_frag *dfrag,
 				   u32 sent)
@@ -1528,43 +1522,29 @@ void mptcp_check_and_set_pending(struct sock *sk)
 		mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING);
 }
 
-void __mptcp_push_pending(struct sock *sk, unsigned int flags)
+static int __do_push_pending(struct sock *sk, struct sock *ssk,
+			     struct mptcp_sendmsg_info *info)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
-	struct mptcp_sendmsg_info info = {
-				.flags = flags,
-	};
-	bool do_check_data_fin = false;
 	struct mptcp_data_frag *dfrag;
-	struct sock *ssk;
-	int len;
-
-	ssk = mptcp_sched_get_send(msk);
-	if (!ssk)
-		goto out;
-
-	if (!mptcp_send_head(sk))
-		goto out;
-
-	lock_sock(ssk);
+	int len, copied = 0;
 
 	while ((dfrag = mptcp_send_head(sk))) {
-		info.sent = dfrag->already_sent;
-		info.limit = dfrag->data_len;
+		info->sent = dfrag->already_sent;
+		info->limit = dfrag->data_len;
 		len = dfrag->data_len - dfrag->already_sent;
 		while (len > 0) {
 			int ret = 0;
 
-			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
+			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info);
 			if (ret <= 0) {
 				if (ret == -EAGAIN)
 					continue;
-				mptcp_push_release(ssk, &info);
 				goto out;
 			}
 
-			do_check_data_fin = true;
-			info.sent += ret;
+			info->sent += ret;
+			copied += ret;
 			len -= ret;
 
 			mptcp_update_post_push(msk, dfrag, ret);
@@ -1572,7 +1552,34 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
 	}
 
-	mptcp_push_release(ssk, &info);
+out:
+	if (copied) {
+		tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle,
+			 info->size_goal);
+	}
+
+	return copied;
+}
+
+void __mptcp_push_pending(struct sock *sk, unsigned int flags)
+{
+	struct mptcp_sock *msk = mptcp_sk(sk);
+	struct mptcp_sendmsg_info info = {
+		.flags = flags,
+	};
+	int do_check_data_fin = 0;
+	struct sock *ssk;
+
+	ssk = mptcp_sched_get_send(msk);
+	if (!ssk)
+		goto out;
+
+	if (!mptcp_send_head(sk))
+		goto out;
+
+	lock_sock(ssk);
+	do_check_data_fin = __do_push_pending(sk, ssk, &info);
+	release_sock(ssk);
 
 out:
 	/* ensure the rtx timer is running */
-- 
2.35.3



* [PATCH mptcp-next 3/4] mptcp: update __mptcp_subflow_push_pending
  2022-09-28  9:48 [PATCH mptcp-next 0/4] refactor push pending Geliang Tang
  2022-09-28  9:48 ` [PATCH mptcp-next 1/4] mptcp: update __mptcp_push_pending Geliang Tang
  2022-09-28  9:48 ` [PATCH mptcp-next 2/4] mptcp: add do_push_pending helper Geliang Tang
@ 2022-09-28  9:48 ` Geliang Tang
  2022-09-28  9:48 ` [PATCH mptcp-next 4/4] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
  3 siblings, 0 replies; 11+ messages in thread
From: Geliang Tang @ 2022-09-28  9:48 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

Move the packet scheduler out of the dfrags loop in
__mptcp_subflow_push_pending(), invoking it only once.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 36 ++++++++++++------------------------
 1 file changed, 12 insertions(+), 24 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 6a20a000622f..550fc375a7a6 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1598,7 +1598,15 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 	struct mptcp_data_frag *dfrag;
 	struct sock *xmit_ssk;
 	int len, copied = 0;
-	bool first = true;
+
+	xmit_ssk = mptcp_sched_get_send(mptcp_sk(sk));
+	if (!xmit_ssk)
+		goto out;
+	if (xmit_ssk != ssk) {
+		mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
+				       MPTCP_DELEGATE_SEND);
+		goto out;
+	}
 
 	info.flags = 0;
 	while ((dfrag = mptcp_send_head(sk))) {
@@ -1608,19 +1616,6 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 		while (len > 0) {
 			int ret = 0;
 
-			/* the caller already invoked the packet scheduler,
-			 * check for a different subflow usage only after
-			 * spooling the first chunk of data
-			 */
-			xmit_ssk = first ? ssk : mptcp_sched_get_send(mptcp_sk(sk));
-			if (!xmit_ssk)
-				goto out;
-			if (xmit_ssk != ssk) {
-				mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
-						       MPTCP_DELEGATE_SEND);
-				goto out;
-			}
-
 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
 			if (ret <= 0)
 				goto out;
@@ -1628,7 +1623,6 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 			info.sent += ret;
 			copied += ret;
 			len -= ret;
-			first = false;
 
 			mptcp_update_post_push(msk, dfrag, ret);
 		}
@@ -3191,16 +3185,10 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
 	if (!mptcp_send_head(sk))
 		return;
 
-	if (!sock_owned_by_user(sk)) {
-		struct sock *xmit_ssk = mptcp_sched_get_send(mptcp_sk(sk));
-
-		if (xmit_ssk == ssk)
-			__mptcp_subflow_push_pending(sk, ssk);
-		else if (xmit_ssk)
-			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
-	} else {
+	if (!sock_owned_by_user(sk))
+		__mptcp_subflow_push_pending(sk, ssk);
+	else
 		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
-	}
 }
 
 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
-- 
2.35.3



* [PATCH mptcp-next 4/4] mptcp: simplify __mptcp_subflow_push_pending
  2022-09-28  9:48 [PATCH mptcp-next 0/4] refactor push pending Geliang Tang
                   ` (2 preceding siblings ...)
  2022-09-28  9:48 ` [PATCH mptcp-next 3/4] mptcp: update __mptcp_subflow_push_pending Geliang Tang
@ 2022-09-28  9:48 ` Geliang Tang
  2022-09-28 11:56   ` mptcp: simplify __mptcp_subflow_push_pending: Tests Results MPTCP CI
  3 siblings, 1 reply; 11+ messages in thread
From: Geliang Tang @ 2022-09-28  9:48 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

This patch simplifies __mptcp_subflow_push_pending() by invoking the
new __do_push_pending() helper.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 27 +++------------------------
 1 file changed, 3 insertions(+), 24 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 550fc375a7a6..779794f42137 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1593,11 +1593,11 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_sendmsg_info info = {
+		.flags		= 0,
 		.data_lock_held = true,
 	};
-	struct mptcp_data_frag *dfrag;
 	struct sock *xmit_ssk;
-	int len, copied = 0;
+	int copied = 0;
 
 	xmit_ssk = mptcp_sched_get_send(mptcp_sk(sk));
 	if (!xmit_ssk)
@@ -1608,34 +1608,13 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 		goto out;
 	}
 
-	info.flags = 0;
-	while ((dfrag = mptcp_send_head(sk))) {
-		info.sent = dfrag->already_sent;
-		info.limit = dfrag->data_len;
-		len = dfrag->data_len - dfrag->already_sent;
-		while (len > 0) {
-			int ret = 0;
-
-			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
-			if (ret <= 0)
-				goto out;
-
-			info.sent += ret;
-			copied += ret;
-			len -= ret;
-
-			mptcp_update_post_push(msk, dfrag, ret);
-		}
-		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
-	}
+	copied = __do_push_pending(sk, ssk, &info);
 
 out:
 	/* __mptcp_alloc_tx_skb could have released some wmem and we are
 	 * not going to flush it via release_sock()
 	 */
 	if (copied) {
-		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
-			 info.size_goal);
 		if (!mptcp_timer_pending(sk))
 			mptcp_reset_timer(sk);
 
-- 
2.35.3



* Re: [PATCH mptcp-next 1/4] mptcp: update __mptcp_push_pending
  2022-09-28  9:48 ` [PATCH mptcp-next 1/4] mptcp: update __mptcp_push_pending Geliang Tang
@ 2022-09-28 11:02   ` Paolo Abeni
  2022-09-28 12:52     ` Geliang Tang
  0 siblings, 1 reply; 11+ messages in thread
From: Paolo Abeni @ 2022-09-28 11:02 UTC (permalink / raw)
  To: Geliang Tang, mptcp

On Wed, 2022-09-28 at 17:48 +0800, Geliang Tang wrote:
> To support redundant packet schedulers more easily, this patch moves the
> packet scheduler out of the dfrags loop in __mptcp_push_pending(),
> invoking mptcp_sched_get_send() only once.

I fear the above will cause hitting HoL blocking [more]
frequently/easily. I'm not sure if the simult_flows.sh self test will
catch that, but you should see a measurably worse running time for that
test - in a non-debug build, as the average of multiple runs.

I'm sorry, I can't see a feasible way out here :(

/P



* Re: mptcp: simplify __mptcp_subflow_push_pending: Tests Results
  2022-09-28  9:48 ` [PATCH mptcp-next 4/4] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
@ 2022-09-28 11:56   ` MPTCP CI
  0 siblings, 0 replies; 11+ messages in thread
From: MPTCP CI @ 2022-09-28 11:56 UTC (permalink / raw)
  To: Geliang Tang; +Cc: mptcp

Hi Geliang,

Thank you for your modifications, that's great!

Our CI did some validations and here is its report:

- KVM Validation: normal:
  - Unstable: 1 failed test(s): selftest_mptcp_join 🔴:
  - Task: https://cirrus-ci.com/task/5267113110667264
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5267113110667264/summary/summary.txt

- KVM Validation: debug:
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/6156566679257088
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/6156566679257088/summary/summary.txt

Initiator: Patchew Applier
Commits: https://github.com/multipath-tcp/mptcp_net-next/commits/9801ca9d6329


If there are some issues, you can reproduce them using the same environment as
the one used by the CI thanks to a docker image, e.g.:

    $ cd [kernel source code]
    $ docker run -v "${PWD}:${PWD}:rw" -w "${PWD}" --privileged --rm -it \
        --pull always mptcp/mptcp-upstream-virtme-docker:latest \
        auto-debug

For more details:

    https://github.com/multipath-tcp/mptcp-upstream-virtme-docker


Please note that despite all the efforts already made to have a stable
test suite when executed on a public CI like this one, it is possible
that some reported issues are not due to your modifications. Still, do
not hesitate to help us improve that ;-)

Cheers,
MPTCP GH Action bot
Bot operated by Matthieu Baerts (Tessares)


* Re: [PATCH mptcp-next 1/4] mptcp: update __mptcp_push_pending
  2022-09-28 11:02   ` Paolo Abeni
@ 2022-09-28 12:52     ` Geliang Tang
  2022-09-29  0:02       ` Mat Martineau
  0 siblings, 1 reply; 11+ messages in thread
From: Geliang Tang @ 2022-09-28 12:52 UTC (permalink / raw)
  To: Paolo Abeni; +Cc: mptcp

On Wed, Sep 28, 2022 at 01:02:07PM +0200, Paolo Abeni wrote:
> On Wed, 2022-09-28 at 17:48 +0800, Geliang Tang wrote:
> > To support redundant packet schedulers more easily, this patch moves the
> > packet scheduler out of the dfrags loop in __mptcp_push_pending(),
> > invoking mptcp_sched_get_send() only once.
> 
> I fear the above will cause hitting HoL blocking [more]
> frequently/easily. I'm not sure if the simult_flows.sh self test will
> catch that, but you should see a measurably worse running time for that
> test - in a non-debug build, as the average of multiple runs.

Yes, the simult_flows.sh self test failed in my tests. I guess this
problem also exists in "BPF redundant scheduler" v12, because it uses
the same logic in __mptcp_push_pending(), moving the packet scheduler
out of the dfrags loop.

> 
> I'm sorry, I can't see a feasible way out here :(
> 
> /P
> 


* Re: [PATCH mptcp-next 1/4] mptcp: update __mptcp_push_pending
  2022-09-28 12:52     ` Geliang Tang
@ 2022-09-29  0:02       ` Mat Martineau
  0 siblings, 0 replies; 11+ messages in thread
From: Mat Martineau @ 2022-09-29  0:02 UTC (permalink / raw)
  To: Geliang Tang; +Cc: Paolo Abeni, mptcp

On Wed, 28 Sep 2022, Geliang Tang wrote:

> On Wed, Sep 28, 2022 at 01:02:07PM +0200, Paolo Abeni wrote:
>> On Wed, 2022-09-28 at 17:48 +0800, Geliang Tang wrote:
>>> To support redundant packet schedulers more easily, this patch moves the
>>> packet scheduler out of the dfrags loop in __mptcp_push_pending(),
>>> invoking mptcp_sched_get_send() only once.
>>
>> I fear the above will cause hitting HoL blocking [more]
>> frequently/easily. I'm not sure if the simult_flows.sh self test will
>> catch that, but you should see a measurably worse running time for that
>> test - in a non-debug build, as the average of multiple runs.
>
> Yes, the simult_flows.sh self test failed in my tests. I guess this
> problem also exists in "BPF redundant scheduler" v12, because it uses
> the same logic in __mptcp_push_pending(), moving the packet scheduler
> out of the dfrags loop.
>

Hi Geliang -


Thanks for posting the __mptcp_push_pending() changes as a separate 
series; that's the main area of complexity in the BPF redundant scheduler 
series. I was trying to come up with some more detailed proposals for this 
part of the code, but it will be easier to discuss it in this 4-patch 
series.


We had talked about the need to change the __mptcp_push_pending() loops to 
support the redundant scheduler, since the existing structure and locking 
optimizations don't fit well with trying to send the same dfrags across 
multiple subflows. A simplified view of the existing code is this:

/* [1] Existing send loops */
For each dfrag:
 	While sends succeed:
 		Call the scheduler (selects subflow and msk->snd_burst)
 		Update subflow locks (push/release/acquire as needed)
 		Send the dfrag data with mptcp_sendmsg_frag()
 		Update dfrag->already_sent, msk->snd_nxt, msk->snd_burst
 	Update msk->first_pending
Push/release on final subflow


With the redundant scheduler, it's necessary to send each dfrag on 
multiple subflows. If the top-level loop is "For each dfrag", then 
redundant sends have to lock/unlock every subflow on every iteration of 
the inner loop.

Because of that, I proposed changing the outer loop to be based on the 
scheduler:


/* [2] Proposed send loops */
While the scheduler selects one or more subflows:
 	For each subflow:
 		Lock the subflow
 		For each pending dfrag:
 			Send the dfrag data with mptcp_sendmsg_frag()
 			Break if required by msk->snd_burst / etc
 		Push and release the subflow
 	Update dfrag metadata and msk->first_pending
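
Very roughly, in C that outer structure might look something like the 
sketch below. To be clear, this is only a sketch of the shape of the 
code: mptcp_sched_get_subflows(), the subflow->scheduled flag and 
mptcp_send_next_frag() are hypothetical placeholders for whatever the 
redundant scheduler API ends up being, not existing functions:

 	/* hypothetical: scheduler marks one or more subflows as selected */
 	while (mptcp_sched_get_subflows(msk) > 0) {
 		mptcp_for_each_subflow(msk, subflow) {
 			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

 			if (!subflow->scheduled)	/* hypothetical flag */
 				continue;

 			lock_sock(ssk);
 			/* walk the pending dfrags without consuming them, so
 			 * the next scheduled subflow sees the same data
 			 */
 			for (dfrag = mptcp_send_head(sk); dfrag;
 			     dfrag = mptcp_send_next_frag(sk, dfrag)) {
 				info.sent = dfrag->already_sent;
 				info.limit = dfrag->data_len;
 				ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
 				if (ret <= 0 || msk->snd_burst <= 0)
 					break;
 			}
 			tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
 				 info.size_goal);
 			release_sock(ssk);
 		}
 		/* only now update dfrag->already_sent, msk->snd_nxt and
 		 * msk->first_pending, once per scheduler round
 		 */
 	}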

>>
>> I'm sorry, I can't see a feasible way out here :(
>>

I'm still optimistic here :)

Whatever the outer loop is, I think we can still make basically the same 
sequence of calls to the subflow locks, mptcp_sendmsg_frag(), and 
tcp_push(). And fewer scheduler calls too! That would maintain the 
BLEST-like behavior Paolo implemented.

It seems to me that this v1 iteration of the patches is missing one 
important detail compared to the proposed pseudocode [2]: There's no outer 
loop to call the scheduler again after msk->snd_burst is exhausted. If 
this block of code is moved out of mptcp_subflow_get_send():

 	/* re-use last subflow, if the burst allow that */
 	if (msk->last_snd && msk->snd_burst > 0 &&
 	    sk_stream_memory_free(msk->last_snd) &&
 	    mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
 		mptcp_set_timeout(sk);
 		return msk->last_snd;
 	}

and those conditions are instead checked in the inner "For each pending 
dfrag" loop of my proposed pseudocode above, then I think the single 
subflow case is feasible. Once that is working, redundant subflow 
support can be added in later patches.
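
Inlined in the dfrag loop, that check might look something like this 
(sketch only, reusing the same conditions):

 	/* after a successful mptcp_sendmsg_frag() on this subflow */
 	if (msk->snd_burst <= 0 ||
 	    !sk_stream_memory_free(ssk) ||
 	    !mptcp_subflow_active(mptcp_subflow_ctx(ssk)))
 		break;	/* let the outer loop re-run the scheduler */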


--
Mat Martineau
Intel


* Re: mptcp: simplify __mptcp_subflow_push_pending: Tests Results
  2022-09-29 15:03 [PATCH mptcp-next v2 4/4] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
@ 2022-10-06 17:46 ` MPTCP CI
  0 siblings, 0 replies; 11+ messages in thread
From: MPTCP CI @ 2022-10-06 17:46 UTC (permalink / raw)
  To: Geliang Tang; +Cc: mptcp

Hi Geliang,

Thank you for your modifications, that's great!

Our CI did some validations and here is its report:

- KVM Validation: normal:
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/5897416976105472
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5897416976105472/summary/summary.txt

- KVM Validation: debug:
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/5334467022684160
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5334467022684160/summary/summary.txt

Initiator: Patchew Applier
Commits: https://github.com/multipath-tcp/mptcp_net-next/commits/95535e05c593


If there are some issues, you can reproduce them using the same environment as
the one used by the CI thanks to a docker image, e.g.:

    $ cd [kernel source code]
    $ docker run -v "${PWD}:${PWD}:rw" -w "${PWD}" --privileged --rm -it \
        --pull always mptcp/mptcp-upstream-virtme-docker:latest \
        auto-debug

For more details:

    https://github.com/multipath-tcp/mptcp-upstream-virtme-docker


Please note that despite all the efforts already made to have a stable
test suite when executed on a public CI like this one, it is possible
that some reported issues are not due to your modifications. Still, do
not hesitate to help us improve that ;-)

Cheers,
MPTCP GH Action bot
Bot operated by Matthieu Baerts (Tessares)


* Re: mptcp: simplify __mptcp_subflow_push_pending: Tests Results
  2022-09-30 14:17 [PATCH mptcp-next v3 5/5] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
@ 2022-10-06 17:31 ` MPTCP CI
  0 siblings, 0 replies; 11+ messages in thread
From: MPTCP CI @ 2022-10-06 17:31 UTC (permalink / raw)
  To: Geliang Tang; +Cc: mptcp

Hi Geliang,

Thank you for your modifications, that's great!

Our CI did some validations and here is its report:

- KVM Validation: normal:
  - Unstable: 1 failed test(s): selftest_mptcp_join 🔴:
  - Task: https://cirrus-ci.com/task/5589049061670912
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5589049061670912/summary/summary.txt

- KVM Validation: debug:
  - Critical: 108 Call Trace(s) - Critical: Global Timeout ❌:
  - Task: https://cirrus-ci.com/task/6714948968513536
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/6714948968513536/summary/summary.txt

Initiator: Patchew Applier
Commits: https://github.com/multipath-tcp/mptcp_net-next/commits/1e73e81ed3b5


If there are some issues, you can reproduce them using the same environment as
the one used by the CI thanks to a docker image, e.g.:

    $ cd [kernel source code]
    $ docker run -v "${PWD}:${PWD}:rw" -w "${PWD}" --privileged --rm -it \
        --pull always mptcp/mptcp-upstream-virtme-docker:latest \
        auto-debug

For more details:

    https://github.com/multipath-tcp/mptcp-upstream-virtme-docker


Please note that despite all the efforts already made to have a stable
test suite when executed on a public CI like this one, it is possible
that some reported issues are not due to your modifications. Still, do
not hesitate to help us improve that ;-)

Cheers,
MPTCP GH Action bot
Bot operated by Matthieu Baerts (Tessares)


end of thread

Thread overview: 11+ messages
2022-09-28  9:48 [PATCH mptcp-next 0/4] refactor push pending Geliang Tang
2022-09-28  9:48 ` [PATCH mptcp-next 1/4] mptcp: update __mptcp_push_pending Geliang Tang
2022-09-28 11:02   ` Paolo Abeni
2022-09-28 12:52     ` Geliang Tang
2022-09-29  0:02       ` Mat Martineau
2022-09-28  9:48 ` [PATCH mptcp-next 2/4] mptcp: add do_push_pending helper Geliang Tang
2022-09-28  9:48 ` [PATCH mptcp-next 3/4] mptcp: update __mptcp_subflow_push_pending Geliang Tang
2022-09-28  9:48 ` [PATCH mptcp-next 4/4] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
2022-09-28 11:56   ` mptcp: simplify __mptcp_subflow_push_pending: Tests Results MPTCP CI
2022-09-29 15:03 [PATCH mptcp-next v2 4/4] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
2022-10-06 17:46 ` mptcp: simplify __mptcp_subflow_push_pending: Tests Results MPTCP CI
2022-09-30 14:17 [PATCH mptcp-next v3 5/5] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
2022-10-06 17:31 ` mptcp: simplify __mptcp_subflow_push_pending: Tests Results MPTCP CI
