From: Geliang Tang <geliang.tang@suse.com>
To: mptcp@lists.linux.dev
Cc: Geliang Tang <geliang.tang@suse.com>
Subject: [PATCH mptcp-next v7 1/5] Squash to "mptcp: add get_subflow wrappers"
Date: Tue, 21 Jun 2022 17:54:15 +0800 [thread overview]
Message-ID: <f9ad80d798602ba5ac3b21dd6dc6ff213c59c82d.1655804700.git.geliang.tang@suse.com> (raw)
In-Reply-To: <cover.1655804700.git.geliang.tang@suse.com>
Please update the commit log:
'''
This patch defines two new wrappers, mptcp_sched_get_send() and
mptcp_sched_get_retrans(), which invoke get_subflow() of msk->sched.
Use them instead of using mptcp_subflow_get_send() or
mptcp_subflow_get_retrans() directly.
Set the subflow pointers array in struct mptcp_sched_data before invoking
get_subflow(), so that it can be used in get_subflow() in the BPF contexts.
Check the subflow scheduled flags to test which subflow or subflows are
picked by the scheduler.
Move sock_owned_by_me() and the fallback check code from
mptcp_subflow_get_send/retrans() into the wrappers.
Redundant subflows are not supported in __mptcp_subflow_push_pending()
yet. This patch adds a placeholder in mptcp_sched_get_send() to pick the
first subflow for the redundant subflows case.
'''
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
net/mptcp/protocol.c | 12 +++++----
net/mptcp/protocol.h | 4 +--
net/mptcp/sched.c | 61 ++++++++++++++++++++++++++------------------
3 files changed, 45 insertions(+), 32 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index a0f9f3831509..043ac3f222ed 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1567,7 +1567,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
int ret = 0;
prev_ssk = ssk;
- ssk = mptcp_sched_get_send(msk);
+ ssk = mptcp_subflow_get_send(msk);
/* First check. If the ssk has changed since
* the last round, release prev_ssk
@@ -1628,13 +1628,13 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
info.limit = dfrag->data_len;
len = dfrag->data_len - dfrag->already_sent;
while (len > 0) {
- int ret = 0;
+ int ret = 0, err = 0;
/* the caller already invoked the packet scheduler,
* check for a different subflow usage only after
* spooling the first chunk of data
*/
- xmit_ssk = first ? ssk : mptcp_sched_get_send(mptcp_sk(sk));
+ xmit_ssk = first ? ssk : mptcp_sched_get_send(mptcp_sk(sk), &err);
if (!xmit_ssk)
goto out;
if (xmit_ssk != ssk) {
@@ -2438,7 +2438,7 @@ static void __mptcp_retrans(struct sock *sk)
mptcp_clean_una_wakeup(sk);
/* first check ssk: need to kick "stale" logic */
- ssk = mptcp_sched_get_retrans(msk);
+ ssk = mptcp_subflow_get_retrans(msk);
dfrag = mptcp_rtx_head(sk);
if (!dfrag) {
if (mptcp_data_fin_enabled(msk)) {
@@ -3088,11 +3088,13 @@ void __mptcp_data_acked(struct sock *sk)
void __mptcp_check_push(struct sock *sk, struct sock *ssk)
{
+ int err = 0;
+
if (!mptcp_send_head(sk))
return;
if (!sock_owned_by_user(sk)) {
- struct sock *xmit_ssk = mptcp_sched_get_send(mptcp_sk(sk));
+ struct sock *xmit_ssk = mptcp_sched_get_send(mptcp_sk(sk), &err);
if (xmit_ssk == ssk)
__mptcp_subflow_push_pending(sk, ssk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index bef7dea9f358..c4ce576458a2 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -634,8 +634,8 @@ void mptcp_subflow_set_scheduled(struct mptcp_subflow_context *subflow,
bool scheduled);
struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk);
struct sock *mptcp_subflow_get_retrans(struct mptcp_sock *msk);
-struct sock *mptcp_sched_get_send(struct mptcp_sock *msk);
-struct sock *mptcp_sched_get_retrans(struct mptcp_sock *msk);
+struct sock *mptcp_sched_get_send(struct mptcp_sock *msk, int *err);
+int mptcp_sched_get_retrans(struct mptcp_sock *msk);
static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index 8858e1fc8b74..5bd96ec5da5a 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -117,31 +117,46 @@ static int mptcp_sched_data_init(struct mptcp_sock *msk, bool reinject,
return 0;
}
-struct sock *mptcp_sched_get_send(struct mptcp_sock *msk)
+struct sock *mptcp_sched_get_send(struct mptcp_sock *msk, int *err)
{
+ struct mptcp_subflow_context *subflow;
struct mptcp_sched_data data;
struct sock *ssk = NULL;
- int i;
+ *err = -EINVAL;
sock_owned_by_me((struct sock *)msk);
/* the following check is moved out of mptcp_subflow_get_send */
if (__mptcp_check_fallback(msk)) {
- if (!msk->first)
- return NULL;
- return sk_stream_memory_free(msk->first) ? msk->first : NULL;
+ if (msk->first && sk_stream_memory_free(msk->first)) {
+ mptcp_subflow_set_scheduled(mptcp_subflow_ctx(msk->first), true);
+ *err = 0;
+ return msk->first;
+ }
+ return NULL;
}
- if (!msk->sched)
- return mptcp_subflow_get_send(msk);
+ if (!msk->sched) {
+ ssk = mptcp_subflow_get_send(msk);
+ if (!ssk)
+ return NULL;
+ mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
+ *err = 0;
+ return ssk;
+ }
mptcp_sched_data_init(msk, false, &data);
msk->sched->get_subflow(msk, &data);
- for (i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
- if (data.contexts[i] && READ_ONCE(data.contexts[i]->scheduled)) {
- ssk = data.contexts[i]->tcp_sock;
- msk->last_snd = ssk;
+ mptcp_for_each_subflow(msk, subflow) {
+ if (READ_ONCE(subflow->scheduled)) {
+ /* TODO: Redundant subflows are not supported in
+ * __mptcp_subflow_push_pending() yet. Here's a
+ * placeholder to pick the first subflow for the
+ * redundant subflows case.
+ */
+ ssk = subflow->tcp_sock;
+ *err = 0;
break;
}
}
@@ -149,31 +164,27 @@ struct sock *mptcp_sched_get_send(struct mptcp_sock *msk)
return ssk;
}
-struct sock *mptcp_sched_get_retrans(struct mptcp_sock *msk)
+int mptcp_sched_get_retrans(struct mptcp_sock *msk)
{
struct mptcp_sched_data data;
struct sock *ssk = NULL;
- int i;
sock_owned_by_me((const struct sock *)msk);
/* the following check is moved out of mptcp_subflow_get_retrans */
if (__mptcp_check_fallback(msk))
- return NULL;
+ return -EINVAL;
- if (!msk->sched)
- return mptcp_subflow_get_retrans(msk);
+ if (!msk->sched) {
+ ssk = mptcp_subflow_get_retrans(msk);
+ if (!ssk)
+ return -EINVAL;
+ mptcp_subflow_set_scheduled(mptcp_subflow_ctx(ssk), true);
+ return 0;
+ }
mptcp_sched_data_init(msk, true, &data);
msk->sched->get_subflow(msk, &data);
- for (i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
- if (data.contexts[i] && READ_ONCE(data.contexts[i]->scheduled)) {
- ssk = data.contexts[i]->tcp_sock;
- msk->last_snd = ssk;
- break;
- }
- }
-
- return ssk;
+ return 0;
}
--
2.35.3
next prev parent reply other threads:[~2022-06-21 9:54 UTC|newest]
Thread overview: 8+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-06-21 9:54 [PATCH mptcp-next v7 0/5] BPF redundant scheduler Geliang Tang
2022-06-21 9:54 ` Geliang Tang [this message]
2022-06-21 9:54 ` [PATCH mptcp-next v7 2/5] mptcp: redundant subflows push pending Geliang Tang
2022-06-23 0:46 ` Mat Martineau
2022-06-21 9:54 ` [PATCH mptcp-next v7 3/5] mptcp: redundant subflows retrans support Geliang Tang
2022-06-21 9:54 ` [PATCH mptcp-next v7 4/5] selftests/bpf: Add bpf_red scheduler Geliang Tang
2022-06-21 9:54 ` [PATCH mptcp-next v7 5/5] selftests/bpf: Add bpf_red test Geliang Tang
2022-06-21 11:07 ` selftests/bpf: Add bpf_red test: Tests Results MPTCP CI
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=f9ad80d798602ba5ac3b21dd6dc6ff213c59c82d.1655804700.git.geliang.tang@suse.com \
--to=geliang.tang@suse.com \
--cc=mptcp@lists.linux.dev \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).