All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH mptcp-next v2 0/2] BPF redundant scheduler, part 3
@ 2022-12-06 13:30 Geliang Tang
  2022-12-06 13:30 ` [PATCH mptcp-next v2 1/2] mptcp: add use_lock for mptcp_retrans Geliang Tang
  2022-12-06 13:30 ` [PATCH mptcp-next v2 2/2] mptcp: retrans for redundant sends Geliang Tang
  0 siblings, 2 replies; 3+ messages in thread
From: Geliang Tang @ 2022-12-06 13:30 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

v2:
- drop retrans_redundant flag.
- call __mptcp_retrans() directly.
- depends on "BPF redundant scheduler, part 2" v23.

v1:
- The DSS issue has been fixed in this version, and all tests
(mptcp_connect.sh, mptcp_join.sh, simult_flows.sh and BPF test_progs)
passed.
- No need to set already_sent to 0, drop this.
- Add retrans_redundant flag.
- depends on "BPF redundant scheduler, part 2" v22.

Geliang Tang (2):
  mptcp: add use_lock for mptcp_retrans
  mptcp: retrans for redundant sends

 net/mptcp/protocol.c | 51 +++++++++++++++++++++++++++++++++++---------
 1 file changed, 41 insertions(+), 10 deletions(-)

-- 
2.35.3


^ permalink raw reply	[flat|nested] 3+ messages in thread

* [PATCH mptcp-next v2 1/2] mptcp: add use_lock for mptcp_retrans
  2022-12-06 13:30 [PATCH mptcp-next v2 0/2] BPF redundant scheduler, part 3 Geliang Tang
@ 2022-12-06 13:30 ` Geliang Tang
  2022-12-06 13:30 ` [PATCH mptcp-next v2 2/2] mptcp: retrans for redundant sends Geliang Tang
  1 sibling, 0 replies; 3+ messages in thread
From: Geliang Tang @ 2022-12-06 13:30 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

This patch adds a new parameter named 'use_lock' for __mptcp_retrans()
to control whether socket locks should be taken in this function.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 5505c1439b3e..9f0237b5a4b4 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2523,7 +2523,7 @@ static void mptcp_check_fastclose(struct mptcp_sock *msk)
 	sk_error_report(sk);
 }
 
-static void __mptcp_retrans(struct sock *sk)
+static void __mptcp_retrans(struct sock *sk, bool use_lock)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_subflow_context *subflow;
@@ -2566,7 +2566,8 @@ static void __mptcp_retrans(struct sock *sk)
 
 			ssk = mptcp_subflow_tcp_sock(subflow);
 
-			lock_sock(ssk);
+			if (use_lock)
+				lock_sock(ssk);
 
 			/* limit retransmission to the bytes already sent on some subflows */
 			info.sent = 0;
@@ -2588,7 +2589,8 @@ static void __mptcp_retrans(struct sock *sk)
 				WRITE_ONCE(msk->allow_infinite_fallback, false);
 			}
 
-			release_sock(ssk);
+			if (use_lock)
+				release_sock(ssk);
 
 			msk->last_snd = ssk;
 		}
@@ -2694,7 +2696,7 @@ static void mptcp_worker(struct work_struct *work)
 		__mptcp_close_subflow(msk);
 
 	if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
-		__mptcp_retrans(sk);
+		__mptcp_retrans(sk, true);
 
 	fail_tout = msk->first ? READ_ONCE(mptcp_subflow_ctx(msk->first)->fail_tout) : 0;
 	if (fail_tout && time_after(jiffies, fail_tout))
@@ -3308,7 +3310,7 @@ static void mptcp_release_cb(struct sock *sk)
 		if (flags & BIT(MPTCP_PUSH_PENDING))
 			__mptcp_push_pending(sk, 0);
 		if (flags & BIT(MPTCP_RETRANSMIT))
-			__mptcp_retrans(sk);
+			__mptcp_retrans(sk, true);
 
 		cond_resched();
 		spin_lock_bh(&sk->sk_lock.slock);
-- 
2.35.3


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* [PATCH mptcp-next v2 2/2] mptcp: retrans for redundant sends
  2022-12-06 13:30 [PATCH mptcp-next v2 0/2] BPF redundant scheduler, part 3 Geliang Tang
  2022-12-06 13:30 ` [PATCH mptcp-next v2 1/2] mptcp: add use_lock for mptcp_retrans Geliang Tang
@ 2022-12-06 13:30 ` Geliang Tang
  1 sibling, 0 replies; 3+ messages in thread
From: Geliang Tang @ 2022-12-06 13:30 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

Redundant sends need to work more like the MPTCP retransmit code path.
When the scheduler selects multiple subflows, the first subflow to send
is a "normal" transmit, and any other subflows would act like a retransmit
when accessing the dfrags.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/protocol.c | 39 ++++++++++++++++++++++++++++++++++-----
 1 file changed, 34 insertions(+), 5 deletions(-)

diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 9f0237b5a4b4..a8cb12af1d51 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -45,6 +45,7 @@ static struct percpu_counter mptcp_sockets_allocated ____cacheline_aligned_in_sm
 
 static void __mptcp_destroy_sock(struct sock *sk);
 static void __mptcp_check_send_data_fin(struct sock *sk);
+static void __mptcp_retrans(struct sock *sk, bool use_lock);
 
 DEFINE_PER_CPU(struct mptcp_delegated_action, mptcp_delegated_actions);
 static struct net_device mptcp_napi_dev;
@@ -998,7 +999,7 @@ static void __mptcp_clean_una(struct sock *sk)
 
 		if (unlikely(dfrag == msk->first_pending)) {
 			/* in recovery mode can see ack after the current snd head */
-			if (WARN_ON_ONCE(!msk->recovery))
+			if (!msk->recovery)
 				break;
 
 			WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
@@ -1013,7 +1014,7 @@ static void __mptcp_clean_una(struct sock *sk)
 
 		/* prevent wrap around in recovery mode */
 		if (unlikely(delta > dfrag->already_sent)) {
-			if (WARN_ON_ONCE(!msk->recovery))
+			if (!msk->recovery)
 				goto out;
 			if (WARN_ON_ONCE(delta > dfrag->data_len))
 				goto out;
@@ -1473,7 +1474,8 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
 
 static void mptcp_push_release(struct sock *ssk, struct mptcp_sendmsg_info *info)
 {
-	tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
+	if (info->mss_now)
+		tcp_push(ssk, 0, info->mss_now, tcp_sk(ssk)->nonagle, info->size_goal);
 	release_sock(ssk);
 }
 
@@ -1559,10 +1561,15 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 				.flags = flags,
 	};
 	bool do_check_data_fin = false;
+	struct mptcp_data_frag *head;
 	int push_count = 1;
 
+	head = mptcp_send_head(sk);
+	if (!head)
+		goto out;
+
 	while (mptcp_send_head(sk) && (push_count > 0)) {
-		int ret = 0;
+		int ret = 0, i = 0;
 
 		if (mptcp_sched_get_send(msk))
 			break;
@@ -1571,6 +1578,13 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 
 		mptcp_for_each_subflow(msk, subflow) {
 			if (READ_ONCE(subflow->scheduled)) {
+				if (i > 0) {
+					WRITE_ONCE(msk->first_pending, head);
+					mptcp_push_release(ssk, &info);
+					__mptcp_retrans(sk, true);
+					goto out;
+				}
+
 				mptcp_subflow_set_scheduled(subflow, false);
 
 				prev_ssk = ssk;
@@ -1599,6 +1613,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 						push_count--;
 					continue;
 				}
+				i++;
 				do_check_data_fin = true;
 				msk->last_snd = ssk;
 			}
@@ -1609,6 +1624,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
 	if (ssk)
 		mptcp_push_release(ssk, &info);
 
+out:
 	/* ensure the rtx timer is running */
 	if (!mptcp_timer_pending(sk))
 		mptcp_reset_timer(sk);
@@ -1623,14 +1639,19 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool
 	struct mptcp_sendmsg_info info = {
 		.data_lock_held = true,
 	};
+	struct mptcp_data_frag *head;
 	struct sock *xmit_ssk;
 	bool push = true;
 	int copied = 0;
 
+	head = mptcp_send_head(sk);
+	if (!head)
+		goto out;
+
 	info.flags = 0;
 	while (mptcp_send_head(sk) && push) {
 		bool delegate = false;
-		int ret = 0;
+		int ret = 0, i = 0;
 
 		/* check for a different subflow usage only after
 		 * spooling the first chunk of data
@@ -1650,6 +1671,12 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool
 
 		mptcp_for_each_subflow(msk, subflow) {
 			if (READ_ONCE(subflow->scheduled)) {
+				if (i > 0) {
+					WRITE_ONCE(msk->first_pending, head);
+					__mptcp_retrans(sk, false);
+					goto out;
+				}
+
 				mptcp_subflow_set_scheduled(subflow, false);
 
 				xmit_ssk = mptcp_subflow_tcp_sock(subflow);
@@ -1660,6 +1687,7 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool
 						goto out;
 					mptcp_subflow_delegate(subflow,
 							       MPTCP_DELEGATE_SEND);
+					i++;
 					msk->last_snd = ssk;
 					delegate = true;
 					push = false;
@@ -1671,6 +1699,7 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool
 					push = false;
 					continue;
 				}
+				i++;
 				copied += ret;
 				msk->last_snd = ssk;
 			}
-- 
2.35.3


^ permalink raw reply related	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2022-12-06 13:30 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-12-06 13:30 [PATCH mptcp-next v2 0/2] BPF redundant scheduler, part 3 Geliang Tang
2022-12-06 13:30 ` [PATCH mptcp-next v2 1/2] mptcp: add use_lock for mptcp_retrans Geliang Tang
2022-12-06 13:30 ` [PATCH mptcp-next v2 2/2] mptcp: retrans for redundant sends Geliang Tang

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.