All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH mptcp-next v3 0/5] refactor push pending
@ 2022-09-30 14:17 Geliang Tang
  2022-09-30 14:17 ` [PATCH mptcp-next v3 1/5] mptcp: use msk instead of mptcp_sk(sk) Geliang Tang
                   ` (4 more replies)
  0 siblings, 5 replies; 8+ messages in thread
From: Geliang Tang @ 2022-09-30 14:17 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

v3:
- add a cleanup patch.
- remove msk->last_snd in mptcp_subflow_get_send().
- add the loop that calls the scheduler again in __mptcp_push_pending().

v2:
- add snd_burst check in dfrags loop as Mat suggested.

Refactor __mptcp_push_pending() and __mptcp_subflow_push_pending() to
remove duplicate code and support redundant schedulers more easily in
__mptcp_subflow_push_pending().

Geliang Tang (5):
  mptcp: use msk instead of mptcp_sk(sk)
  mptcp: update __mptcp_push_pending
  mptcp: add do_push_pending helper
  mptcp: update __mptcp_subflow_push_pending
  mptcp: simplify __mptcp_subflow_push_pending

 net/mptcp/protocol.c | 162 ++++++++++++++++---------------------------
 1 file changed, 58 insertions(+), 104 deletions(-)

-- 
2.35.3


^ permalink raw reply	[flat|nested] 8+ messages in thread

* [PATCH mptcp-next v3 1/5] mptcp: use msk instead of mptcp_sk(sk)
  2022-09-30 14:17 [PATCH mptcp-next v3 0/5] refactor push pending Geliang Tang
@ 2022-09-30 14:17 ` Geliang Tang
  2022-09-30 14:17 ` [PATCH mptcp-next v3 2/5] mptcp: update __mptcp_push_pending Geliang Tang
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Geliang Tang @ 2022-09-30 14:17 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

Use msk instead of mptcp_sk(sk) in the functions where
"msk = mptcp_sk(sk)" has been defined.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 7ca21915b3d1..3641fb73bc1f 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1616,7 +1616,7 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 			 * check for a different subflow usage only after
 			 * spooling the first chunk of data
 			 */
-			xmit_ssk = first ? ssk : mptcp_sched_get_send(mptcp_sk(sk));
+			xmit_ssk = first ? ssk : mptcp_sched_get_send(msk);
 			if (!xmit_ssk)
 				goto out;
 			if (xmit_ssk != ssk) {
@@ -2239,7 +2239,7 @@ bool __mptcp_retransmit_pending_data(struct sock *sk)
 	struct mptcp_data_frag *cur, *rtx_head;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 
-	if (__mptcp_check_fallback(mptcp_sk(sk)))
+	if (__mptcp_check_fallback(msk))
 		return false;
 
 	if (tcp_rtx_and_write_queues_empty(sk))
@@ -2921,7 +2921,7 @@ bool __mptcp_close(struct sock *sk, long timeout)
 
 	sock_hold(sk);
 	pr_debug("msk=%p state=%d", sk, sk->sk_state);
-	if (mptcp_sk(sk)->token)
+	if (msk->token)
 		mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
 
 	if (sk->sk_state == TCP_CLOSE) {
@@ -2980,8 +2980,8 @@ static int mptcp_disconnect(struct sock *sk, int flags)
 	mptcp_stop_timer(sk);
 	sk_stop_timer(sk, &sk->sk_timer);
 
-	if (mptcp_sk(sk)->token)
-		mptcp_event(MPTCP_EVENT_CLOSED, mptcp_sk(sk), NULL, GFP_KERNEL);
+	if (msk->token)
+		mptcp_event(MPTCP_EVENT_CLOSED, msk, NULL, GFP_KERNEL);
 
 	/* msk->subflow is still intact, the following will not free the first
 	 * subflow
-- 
2.35.3


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH mptcp-next v3 2/5] mptcp: update __mptcp_push_pending
  2022-09-30 14:17 [PATCH mptcp-next v3 0/5] refactor push pending Geliang Tang
  2022-09-30 14:17 ` [PATCH mptcp-next v3 1/5] mptcp: use msk instead of mptcp_sk(sk) Geliang Tang
@ 2022-09-30 14:17 ` Geliang Tang
  2022-09-30 14:17 ` [PATCH mptcp-next v3 3/5] mptcp: add do_push_pending helper Geliang Tang
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 8+ messages in thread
From: Geliang Tang @ 2022-09-30 14:17 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

To support redundant packet schedulers more easily, this patch moves the
packet scheduler out of the dfrags loop in __mptcp_push_pending(),
invoking mptcp_sched_get_send() only once. And update the socket locks
correspondingly.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 85 +++++++++++++++++---------------------------
 1 file changed, 33 insertions(+), 52 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 3641fb73bc1f..99a9ec70c7e9 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1417,14 +1417,6 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 	u64 linger_time;
 	long tout = 0;
 
-	/* re-use last subflow, if the burst allow that */
-	if (msk->last_snd && msk->snd_burst > 0 &&
-	    sk_stream_memory_free(msk->last_snd) &&
-	    mptcp_subflow_active(mptcp_subflow_ctx(msk->last_snd))) {
-		mptcp_set_timeout(sk);
-		return msk->last_snd;
-	}
-
 	/* pick the subflow with the lower wmem/wspace ratio */
 	for (i = 0; i < SSK_MODE_MAX; ++i) {
 		send_info[i].ssk = NULL;
@@ -1477,16 +1469,13 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 
 	burst = min_t(int, MPTCP_SEND_BURST_SIZE, mptcp_wnd_end(msk) - msk->snd_nxt);
 	wmem = READ_ONCE(ssk->sk_wmem_queued);
-	if (!burst) {
-		msk->last_snd = NULL;
+	if (!burst)
 		return ssk;
-	}
 
 	subflow = mptcp_subflow_ctx(ssk);
 	subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
 					   READ_ONCE(ssk->sk_pacing_rate) * burst,
 					   burst + wmem);
-	msk->last_snd = ssk;
 	msk->snd_burst = burst;
 	return ssk;
 }
@@ -1530,60 +1519,52 @@ void mptcp_check_and_set_pending(struct sock *sk)
 
 void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 {
-	struct sock *prev_ssk = NULL, *ssk = NULL;
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_sendmsg_info info = {
 				.flags = flags,
 	};
 	bool do_check_data_fin = false;
 	struct mptcp_data_frag *dfrag;
+	struct sock *ssk;
 	int len;
 
-	while ((dfrag = mptcp_send_head(sk))) {
-		info.sent = dfrag->already_sent;
-		info.limit = dfrag->data_len;
-		len = dfrag->data_len - dfrag->already_sent;
-		while (len > 0) {
-			int ret = 0;
-
-			prev_ssk = ssk;
-			ssk = mptcp_sched_get_send(msk);
-
-			/* First check. If the ssk has changed since
-			 * the last round, release prev_ssk
-			 */
-			if (ssk != prev_ssk && prev_ssk)
-				mptcp_push_release(prev_ssk, &info);
-			if (!ssk)
-				goto out;
-
-			/* Need to lock the new subflow only if different
-			 * from the previous one, otherwise we are still
-			 * helding the relevant lock
-			 */
-			if (ssk != prev_ssk)
-				lock_sock(ssk);
+	while (mptcp_send_head(sk) && (ssk = mptcp_sched_get_send(msk))) {
+		lock_sock(ssk);
 
-			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
-			if (ret <= 0) {
-				if (ret == -EAGAIN)
-					continue;
-				mptcp_push_release(ssk, &info);
-				goto out;
+		while ((dfrag = mptcp_send_head(sk))) {
+			info.sent = dfrag->already_sent;
+			info.limit = dfrag->data_len;
+			len = dfrag->data_len - dfrag->already_sent;
+			while (len > 0) {
+				int ret = 0;
+
+				ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
+				if (ret <= 0) {
+					if (ret == -EAGAIN)
+						continue;
+					mptcp_push_release(ssk, &info);
+					goto out;
+				}
+
+				do_check_data_fin = true;
+				info.sent += ret;
+				len -= ret;
+
+				mptcp_update_post_push(msk, dfrag, ret);
 			}
+			WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
 
-			do_check_data_fin = true;
-			info.sent += ret;
-			len -= ret;
-
-			mptcp_update_post_push(msk, dfrag, ret);
+			if (msk->snd_burst > 0 &&
+			    sk_stream_memory_free(ssk) &&
+			    mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
+				mptcp_set_timeout(sk);
+			} else {
+				break;
+			}
 		}
-		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
-	}
 
-	/* at this point we held the socket lock for the last subflow we used */
-	if (ssk)
 		mptcp_push_release(ssk, &info);
+	}
 
 out:
 	/* ensure the rtx timer is running */
-- 
2.35.3


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH mptcp-next v3 3/5] mptcp: add do_push_pending helper
  2022-09-30 14:17 [PATCH mptcp-next v3 0/5] refactor push pending Geliang Tang
  2022-09-30 14:17 ` [PATCH mptcp-next v3 1/5] mptcp: use msk instead of mptcp_sk(sk) Geliang Tang
  2022-09-30 14:17 ` [PATCH mptcp-next v3 2/5] mptcp: update __mptcp_push_pending Geliang Tang
@ 2022-09-30 14:17 ` Geliang Tang
  2022-09-30 14:17 ` [PATCH mptcp-next v3 4/5] mptcp: update __mptcp_subflow_push_pending Geliang Tang
  2022-09-30 14:17 ` [PATCH mptcp-next v3 5/5] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
  4 siblings, 0 replies; 8+ messages in thread
From: Geliang Tang @ 2022-09-30 14:17 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

This patch moves the duplicate code from __mptcp_push_pending() and
__mptcp_subflow_push_pending() into a new helper function, named
__do_push_pending(). And simplify __mptcp_push_pending() by invoking
this helper.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 92 +++++++++++++++++++++++---------------------
 1 file changed, 49 insertions(+), 43 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 99a9ec70c7e9..dc5e03a616b3 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1480,12 +1480,6 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 	return ssk;
 }
 
-static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
-{
-	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
-	release_sock(ssk);
-}
-
 static void mptcp_update_post_push(struct mptcp_sock *msk,
 				   struct mptcp_data_frag *dfrag,
 				   u32 sent)
@@ -1517,6 +1511,53 @@ void mptcp_check_and_set_pending(struct sock *sk)
 		mptcp_sk(sk)->push_pending |= BIT(MPTCP_PUSH_PENDING);
 }
 
+static int __do_push_pending(struct sock *sk, struct sock *ssk,
+			     struct mptcp_sendmsg_info *info)
+{
+	struct mptcp_sock *msk = mptcp_sk(sk);
+	struct mptcp_data_frag *dfrag;
+	int len, copied = 0;
+
+	while ((dfrag = mptcp_send_head(sk))) {
+		info->sent = dfrag->already_sent;
+		info->limit = dfrag->data_len;
+		len = dfrag->data_len - dfrag->already_sent;
+		while (len > 0) {
+			int ret = 0;
+
+			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, info);
+			if (ret <= 0) {
+				if (ret == -EAGAIN)
+					continue;
+				goto out;
+			}
+
+			info->sent += ret;
+			copied += ret;
+			len -= ret;
+
+			mptcp_update_post_push(msk, dfrag, ret);
+		}
+		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+
+		if (msk->snd_burst > 0 &&
+		    sk_stream_memory_free(ssk) &&
+		    mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
+			mptcp_set_timeout(sk);
+		} else {
+			break;
+		}
+	}
+
+out:
+	if (copied) {
+		tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle,
+			 info->size_goal);
+	}
+
+	return copied;
+}
+
 void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
@@ -1524,49 +1565,14 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 				.flags = flags,
 	};
 	bool do_check_data_fin = false;
-	struct mptcp_data_frag *dfrag;
 	struct sock *ssk;
-	int len;
 
 	while (mptcp_send_head(sk) && (ssk = mptcp_sched_get_send(msk))) {
 		lock_sock(ssk);
-
-		while ((dfrag = mptcp_send_head(sk))) {
-			info.sent = dfrag->already_sent;
-			info.limit = dfrag->data_len;
-			len = dfrag->data_len - dfrag->already_sent;
-			while (len > 0) {
-				int ret = 0;
-
-				ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
-				if (ret <= 0) {
-					if (ret == -EAGAIN)
-						continue;
-					mptcp_push_release(ssk, &info);
-					goto out;
-				}
-
-				do_check_data_fin = true;
-				info.sent += ret;
-				len -= ret;
-
-				mptcp_update_post_push(msk, dfrag, ret);
-			}
-			WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
-
-			if (msk->snd_burst > 0 &&
-			    sk_stream_memory_free(ssk) &&
-			    mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
-				mptcp_set_timeout(sk);
-			} else {
-				break;
-			}
-		}
-
-		mptcp_push_release(ssk, &info);
+		do_check_data_fin = __do_push_pending(sk, ssk, &info);
+		release_sock(ssk);
 	}
 
-out:
 	/* ensure the rtx timer is running */
 	if (!mptcp_timer_pending(sk))
 		mptcp_reset_timer(sk);
-- 
2.35.3


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH mptcp-next v3 4/5] mptcp: update __mptcp_subflow_push_pending
  2022-09-30 14:17 [PATCH mptcp-next v3 0/5] refactor push pending Geliang Tang
                   ` (2 preceding siblings ...)
  2022-09-30 14:17 ` [PATCH mptcp-next v3 3/5] mptcp: add do_push_pending helper Geliang Tang
@ 2022-09-30 14:17 ` Geliang Tang
  2022-10-01  0:16   ` Mat Martineau
  2022-09-30 14:17 ` [PATCH mptcp-next v3 5/5] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
  4 siblings, 1 reply; 8+ messages in thread
From: Geliang Tang @ 2022-09-30 14:17 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

Move the packet scheduler out of the dfrags loop, invoking it only once
in __mptcp_subflow_push_pending().

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 44 ++++++++++++++++++++------------------------
 1 file changed, 20 insertions(+), 24 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index dc5e03a616b3..8e67c149bbab 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1589,7 +1589,15 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 	struct mptcp_data_frag *dfrag;
 	struct sock *xmit_ssk;
 	int len, copied = 0;
-	bool first = true;
+
+	xmit_ssk = mptcp_sched_get_send(msk);
+	if (!xmit_ssk)
+		goto out;
+	if (xmit_ssk != ssk) {
+		mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
+				       MPTCP_DELEGATE_SEND);
+		goto out;
+	}
 
 	info.flags = 0;
 	while ((dfrag = mptcp_send_head(sk))) {
@@ -1599,19 +1607,6 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 		while (len > 0) {
 			int ret = 0;
 
-			/* the caller already invoked the packet scheduler,
-			 * check for a different subflow usage only after
-			 * spooling the first chunk of data
-			 */
-			xmit_ssk = first ? ssk : mptcp_sched_get_send(msk);
-			if (!xmit_ssk)
-				goto out;
-			if (xmit_ssk != ssk) {
-				mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
-						       MPTCP_DELEGATE_SEND);
-				goto out;
-			}
-
 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
 			if (ret <= 0)
 				goto out;
@@ -1619,11 +1614,18 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 			info.sent += ret;
 			copied += ret;
 			len -= ret;
-			first = false;
 
 			mptcp_update_post_push(msk, dfrag, ret);
 		}
 		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+
+		if (msk->snd_burst > 0 &&
+		    sk_stream_memory_free(ssk) &&
+		    mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
+			mptcp_set_timeout(sk);
+		} else {
+			break;
+		}
 	}
 
 out:
@@ -3182,16 +3184,10 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
 	if (!mptcp_send_head(sk))
 		return;
 
-	if (!sock_owned_by_user(sk)) {
-		struct sock *xmit_ssk = mptcp_sched_get_send(mptcp_sk(sk));
-
-		if (xmit_ssk == ssk)
-			__mptcp_subflow_push_pending(sk, ssk);
-		else if (xmit_ssk)
-			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
-	} else {
+	if (!sock_owned_by_user(sk))
+		__mptcp_subflow_push_pending(sk, ssk);
+	else
 		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
-	}
 }
 
 #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
-- 
2.35.3


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* [PATCH mptcp-next v3 5/5] mptcp: simplify __mptcp_subflow_push_pending
  2022-09-30 14:17 [PATCH mptcp-next v3 0/5] refactor push pending Geliang Tang
                   ` (3 preceding siblings ...)
  2022-09-30 14:17 ` [PATCH mptcp-next v3 4/5] mptcp: update __mptcp_subflow_push_pending Geliang Tang
@ 2022-09-30 14:17 ` Geliang Tang
  2022-10-06 17:31   ` mptcp: simplify __mptcp_subflow_push_pending: Tests Results MPTCP CI
  4 siblings, 1 reply; 8+ messages in thread
From: Geliang Tang @ 2022-09-30 14:17 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

This patch simplifies __mptcp_subflow_push_pending() by invoking
__do_push_pending() helper.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 33 ++-------------------------------
 1 file changed, 2 insertions(+), 31 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 8e67c149bbab..940384952015 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1586,9 +1586,8 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 	struct mptcp_sendmsg_info info = {
 		.data_lock_held = true,
 	};
-	struct mptcp_data_frag *dfrag;
 	struct sock *xmit_ssk;
-	int len, copied = 0;
+	int copied = 0;
 
 	xmit_ssk = mptcp_sched_get_send(msk);
 	if (!xmit_ssk)
@@ -1600,41 +1599,13 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
 	}
 
 	info.flags = 0;
-	while ((dfrag = mptcp_send_head(sk))) {
-		info.sent = dfrag->already_sent;
-		info.limit = dfrag->data_len;
-		len = dfrag->data_len - dfrag->already_sent;
-		while (len > 0) {
-			int ret = 0;
-
-			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
-			if (ret <= 0)
-				goto out;
-
-			info.sent += ret;
-			copied += ret;
-			len -= ret;
-
-			mptcp_update_post_push(msk, dfrag, ret);
-		}
-		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
-
-		if (msk->snd_burst > 0 &&
-		    sk_stream_memory_free(ssk) &&
-		    mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
-			mptcp_set_timeout(sk);
-		} else {
-			break;
-		}
-	}
+	copied = __do_push_pending(sk, ssk, &info);
 
 out:
 	/* __mptcp_alloc_tx_skb could have released some wmem and we are
 	 * not going to flush it via release_sock()
 	 */
 	if (copied) {
-		tcp_push(ssk, 0, info.mss_now, tcp_sk(ssk)->nonagle,
-			 info.size_goal);
 		if (!mptcp_timer_pending(sk))
 			mptcp_reset_timer(sk);
 
-- 
2.35.3


^ permalink raw reply related	[flat|nested] 8+ messages in thread

* Re: [PATCH mptcp-next v3 4/5] mptcp: update __mptcp_subflow_push_pending
  2022-09-30 14:17 ` [PATCH mptcp-next v3 4/5] mptcp: update __mptcp_subflow_push_pending Geliang Tang
@ 2022-10-01  0:16   ` Mat Martineau
  0 siblings, 0 replies; 8+ messages in thread
From: Mat Martineau @ 2022-10-01  0:16 UTC (permalink / raw)
  To: Geliang Tang; +Cc: mptcp

On Fri, 30 Sep 2022, Geliang Tang wrote:

> Move the packet scheduler out of the dfrags loop, invoke it only once in
> __mptcp_subflow_push_pending().
>
> Signed-off-by: Geliang Tang <geliang.tang@suse.com>

Hi Geliang -

I think the __mptcp_push_pending() changes are looking good. Some changes 
are needed here for __mptcp_subflow_push_pending().

> ---
> net/mptcp/protocol.c | 44 ++++++++++++++++++++------------------------
> 1 file changed, 20 insertions(+), 24 deletions(-)
>
> diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
> index dc5e03a616b3..8e67c149bbab 100644
> --- a/net/mptcp/protocol.c
> +++ b/net/mptcp/protocol.c
> @@ -1589,7 +1589,15 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
> 	struct mptcp_data_frag *dfrag;
> 	struct sock *xmit_ssk;
> 	int len, copied = 0;
> -	bool first = true;
> +
> +	xmit_ssk = mptcp_sched_get_send(msk);
> +	if (!xmit_ssk)
> +		goto out;
> +	if (xmit_ssk != ssk) {
> +		mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
> +				       MPTCP_DELEGATE_SEND);
> +		goto out;
> +	}

There's no need to delegate before the first __do_push_pending(). In the 
existing code, the first send skips the scheduler call and tries to send 
on the selected ssk (the one that's the last arg to the function).

The existing code then calls the scheduler after each 
mptcp_sendmsg_frag(), and if the scheduler chooses a different xmit_ssk 
then mptcp_subflow_delegate() is called and it does the 'goto out'.

In other words, this function also needs a loop like 
__mptcp_push_pending() to call the scheduler multiple times.


- Mat

>
> 	info.flags = 0;
> 	while ((dfrag = mptcp_send_head(sk))) {
> @@ -1599,19 +1607,6 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
> 		while (len > 0) {
> 			int ret = 0;
>
> -			/* the caller already invoked the packet scheduler,
> -			 * check for a different subflow usage only after
> -			 * spooling the first chunk of data
> -			 */
> -			xmit_ssk = first ? ssk : mptcp_sched_get_send(msk);
> -			if (!xmit_ssk)
> -				goto out;
> -			if (xmit_ssk != ssk) {
> -				mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk),
> -						       MPTCP_DELEGATE_SEND);
> -				goto out;
> -			}
> -
> 			ret = mptcp_sendmsg_frag(sk, ssk, dfrag, &info);
> 			if (ret <= 0)
> 				goto out;
> @@ -1619,11 +1614,18 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
> 			info.sent += ret;
> 			copied += ret;
> 			len -= ret;
> -			first = false;
>
> 			mptcp_update_post_push(msk, dfrag, ret);
> 		}
> 		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
> +
> +		if (msk->snd_burst > 0 &&
> +		    sk_stream_memory_free(ssk) &&
> +		    mptcp_subflow_active(mptcp_subflow_ctx(ssk))) {
> +			mptcp_set_timeout(sk);
> +		} else {
> +			break;
> +		}
> 	}
>
> out:
> @@ -3182,16 +3184,10 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
> 	if (!mptcp_send_head(sk))
> 		return;
>
> -	if (!sock_owned_by_user(sk)) {
> -		struct sock *xmit_ssk = mptcp_sched_get_send(mptcp_sk(sk));
> -
> -		if (xmit_ssk == ssk)
> -			__mptcp_subflow_push_pending(sk, ssk);
> -		else if (xmit_ssk)
> -			mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
> -	} else {
> +	if (!sock_owned_by_user(sk))
> +		__mptcp_subflow_push_pending(sk, ssk);
> +	else
> 		__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
> -	}
> }
>
> #define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
> -- 
> 2.35.3
>
>
>

--
Mat Martineau
Intel

^ permalink raw reply	[flat|nested] 8+ messages in thread

* Re: mptcp: simplify __mptcp_subflow_push_pending: Tests Results
  2022-09-30 14:17 ` [PATCH mptcp-next v3 5/5] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
@ 2022-10-06 17:31   ` MPTCP CI
  0 siblings, 0 replies; 8+ messages in thread
From: MPTCP CI @ 2022-10-06 17:31 UTC (permalink / raw)
  To: Geliang Tang; +Cc: mptcp

Hi Geliang,

Thank you for your modifications, that's great!

Our CI did some validations and here is its report:

- KVM Validation: normal:
  - Unstable: 1 failed test(s): selftest_mptcp_join 🔴:
  - Task: https://cirrus-ci.com/task/5589049061670912
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5589049061670912/summary/summary.txt

- KVM Validation: debug:
  - Critical: 108 Call Trace(s) - Critical: Global Timeout ❌:
  - Task: https://cirrus-ci.com/task/6714948968513536
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/6714948968513536/summary/summary.txt

Initiator: Patchew Applier
Commits: https://github.com/multipath-tcp/mptcp_net-next/commits/1e73e81ed3b5


If there are some issues, you can reproduce them using the same environment as
the one used by the CI thanks to a docker image, e.g.:

    $ cd [kernel source code]
    $ docker run -v "${PWD}:${PWD}:rw" -w "${PWD}" --privileged --rm -it \
        --pull always mptcp/mptcp-upstream-virtme-docker:latest \
        auto-debug

For more details:

    https://github.com/multipath-tcp/mptcp-upstream-virtme-docker


Please note that despite all the efforts that have been already done to have a
stable tests suite when executed on a public CI like here, it is possible some
reported issues are not due to your modifications. Still, do not hesitate to
help us improve that ;-)

Cheers,
MPTCP GH Action bot
Bot operated by Matthieu Baerts (Tessares)

^ permalink raw reply	[flat|nested] 8+ messages in thread

end of thread, other threads:[~2022-10-06 17:31 UTC | newest]

Thread overview: 8+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-09-30 14:17 [PATCH mptcp-next v3 0/5] refactor push pending Geliang Tang
2022-09-30 14:17 ` [PATCH mptcp-next v3 1/5] mptcp: use msk instead of mptcp_sk(sk) Geliang Tang
2022-09-30 14:17 ` [PATCH mptcp-next v3 2/5] mptcp: update __mptcp_push_pending Geliang Tang
2022-09-30 14:17 ` [PATCH mptcp-next v3 3/5] mptcp: add do_push_pending helper Geliang Tang
2022-09-30 14:17 ` [PATCH mptcp-next v3 4/5] mptcp: update __mptcp_subflow_push_pending Geliang Tang
2022-10-01  0:16   ` Mat Martineau
2022-09-30 14:17 ` [PATCH mptcp-next v3 5/5] mptcp: simplify __mptcp_subflow_push_pending Geliang Tang
2022-10-06 17:31   ` mptcp: simplify __mptcp_subflow_push_pending: Tests Results MPTCP CI

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.