From: Geliang Tang <geliang.tang@suse.com>
To: mptcp@lists.linux.dev
Cc: Geliang Tang <geliang.tang@suse.com>
Subject: [PATCH mptcp-next v7 04/13] Squash to "mptcp: add get_subflow wrappers"
Date: Thu,  2 Jun 2022 12:53:41 +0800
Message-ID: <79d1cf14bb4e5619f5dd148fbdc7d89b09c0ac7f.1654143895.git.geliang.tang@suse.com>
In-Reply-To: <cover.1654143895.git.geliang.tang@suse.com>

Please update the commit log:

'''
This patch defines two new wrappers, mptcp_sched_get_send() and
mptcp_sched_get_retrans(), which invoke the get_subflow() hook of
msk->sched. Use them instead of calling mptcp_subflow_get_send() or
mptcp_subflow_get_retrans() directly.

Populate the subflow pointer array in struct mptcp_sched_data before
invoking get_subflow(), so that the array can be accessed by
get_subflow() in BPF context.

Check each subflow's scheduled flag to determine which subflow or
subflows were picked by the scheduler.
'''
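
For reference, below is a minimal sketch of how a BPF scheduler's
get_subflow() could consume the contexts[] array that
mptcp_sched_data_init() fills in. It is illustrative only: the
scheduler name, the SEC() section names and the "bpf_tcp_helpers.h"
header (assumed to provide the struct mptcp_sock,
mptcp_subflow_context and mptcp_sched_data definitions for the
selftests) are assumptions, and mptcp_subflow_set_scheduled() is
assumed to be exposed to BPF as a kfunc, as done later in this series.

/* Illustrative "pick the first subflow" scheduler. */
#include <linux/bpf.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"

char _license[] SEC("license") = "GPL";

/* kfunc assumed to be registered for BPF schedulers. */
extern void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
					bool scheduled) __ksym;

SEC("struct_ops/mptcp_sched_first_init")
void BPF_PROG(mptcp_sched_first_init, struct mptcp_sock *msk)
{
}

SEC("struct_ops/mptcp_sched_first_release")
void BPF_PROG(mptcp_sched_first_release, struct mptcp_sock *msk)
{
}

SEC("struct_ops/bpf_first_get_subflow")
void BPF_PROG(bpf_first_get_subflow, struct mptcp_sock *msk,
	      struct mptcp_sched_data *data)
{
	/* Mark the first subflow as scheduled; the wrappers below then
	 * find it by checking the scheduled flag.
	 */
	if (data->contexts[0])
		mptcp_subflow_set_scheduled(data->contexts[0], true);
}

SEC(".struct_ops")
struct mptcp_sched_ops first = {
	.init		= (void *)mptcp_sched_first_init,
	.release	= (void *)mptcp_sched_first_release,
	.get_subflow	= (void *)bpf_first_get_subflow,
	.name		= "bpf_first",
};

mptcp_sched_get_send() and mptcp_sched_get_retrans() then walk
data.contexts[] and pick the first entry whose scheduled flag is set,
as in the hunks below.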

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 net/mptcp/sched.c | 54 +++++++++++++++++++++++++++++++++++++----------
 1 file changed, 43 insertions(+), 11 deletions(-)

diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index 5a6f289ad985..8858e1fc8b74 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -94,11 +94,25 @@ void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
 	WRITE_ONCE(subflow->scheduled, scheduled);
 }
 
-static int mptcp_sched_data_init(struct mptcp_sock *msk,
+static int mptcp_sched_data_init(struct mptcp_sock *msk, bool reinject,
 				 struct mptcp_sched_data *data)
 {
-	data->sock = NULL;
-	data->call_again = 0;
+	struct mptcp_subflow_context *subflow;
+	int i = 0;
+
+	data->reinject = reinject;
+
+	mptcp_for_each_subflow(msk, subflow) {
+		if (i == MPTCP_SUBFLOWS_MAX) {
+			pr_warn_once("too many subflows");
+			break;
+		}
+		mptcp_subflow_set_scheduled(subflow, false);
+		data->contexts[i++] = subflow;
+	}
+
+	for (; i < MPTCP_SUBFLOWS_MAX; i++)
+		data->contexts[i] = NULL;
 
 	return 0;
 }
@@ -106,6 +120,8 @@ static int mptcp_sched_data_init(struct mptcp_sock *msk,
 struct sock *mptcp_sched_get_send(struct mptcp_sock *msk)
 {
 	struct mptcp_sched_data data;
+	struct sock *ssk = NULL;
+	int i;
 
 	sock_owned_by_me((struct sock *)msk);
 
@@ -119,16 +135,25 @@ struct sock *mptcp_sched_get_send(struct mptcp_sock *msk)
 	if (!msk->sched)
 		return mptcp_subflow_get_send(msk);
 
-	mptcp_sched_data_init(msk, &data);
-	msk->sched->get_subflow(msk, false, &data);
+	mptcp_sched_data_init(msk, false, &data);
+	msk->sched->get_subflow(msk, &data);
+
+	for (i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
+		if (data.contexts[i] && READ_ONCE(data.contexts[i]->scheduled)) {
+			ssk = data.contexts[i]->tcp_sock;
+			msk->last_snd = ssk;
+			break;
+		}
+	}
 
-	msk->last_snd = data.sock;
-	return data.sock;
+	return ssk;
 }
 
 struct sock *mptcp_sched_get_retrans(struct mptcp_sock *msk)
 {
 	struct mptcp_sched_data data;
+	struct sock *ssk = NULL;
+	int i;
 
 	sock_owned_by_me((const struct sock *)msk);
 
@@ -139,9 +164,16 @@ struct sock *mptcp_sched_get_retrans(struct mptcp_sock *msk)
 	if (!msk->sched)
 		return mptcp_subflow_get_retrans(msk);
 
-	mptcp_sched_data_init(msk, &data);
-	msk->sched->get_subflow(msk, true, &data);
+	mptcp_sched_data_init(msk, true, &data);
+	msk->sched->get_subflow(msk, &data);
+
+	for (i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
+		if (data.contexts[i] && READ_ONCE(data.contexts[i]->scheduled)) {
+			ssk = data.contexts[i]->tcp_sock;
+			msk->last_snd = ssk;
+			break;
+		}
+	}
 
-	msk->last_snd = data.sock;
-	return data.sock;
+	return ssk;
 }
-- 
2.34.1


Thread overview: 19+ messages
2022-06-02  4:53 [PATCH mptcp-next v7 00/13] BPF packet scheduler Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 01/13] Squash to "mptcp: add struct mptcp_sched_ops" Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 02/13] Squash to "mptcp: add sched in mptcp_sock" Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 03/13] mptcp: add scheduled in mptcp_subflow_context Geliang Tang
2022-06-02  4:53 ` Geliang Tang [this message]
2022-06-02  4:53 ` [PATCH mptcp-next v7 05/13] Squash to "mptcp: add bpf_mptcp_sched_ops" Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 06/13] bpf: Add bpf_mptcp_sched_kfunc_set Geliang Tang
2022-07-25  7:56   ` Matthieu Baerts
2022-07-26  3:33     ` Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 07/13] selftests/bpf: Add mptcp sched structs Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 08/13] Squash to "selftests/bpf: add bpf_first scheduler" Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 09/13] Squash to "selftests/bpf: add bpf_first test" Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 10/13] selftests/bpf: Add bpf_bkup scheduler Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 11/13] selftests/bpf: Add bpf_bkup test Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 12/13] selftests/bpf: Add bpf_rr scheduler Geliang Tang
2022-06-02  4:53 ` [PATCH mptcp-next v7 13/13] selftests/bpf: Add bpf_rr test Geliang Tang
2022-06-02  5:20   ` selftests/bpf: Add bpf_rr test: Build Failure MPTCP CI
2022-06-02  6:36   ` selftests/bpf: Add bpf_rr test: Tests Results MPTCP CI
2022-06-04 10:39 ` [PATCH mptcp-next v7 00/13] BPF packet scheduler Matthieu Baerts
