* [PATCH mptcp-next v2 1/3] mptcp: add get_subflows helper
2022-04-06 13:26 [PATCH mptcp-next v2 0/3] BPF round-robin scheduler Geliang Tang
@ 2022-04-06 13:26 ` Geliang Tang
2022-04-06 13:26 ` [PATCH mptcp-next v2 2/3] selftests: bpf: add bpf_rr scheduler Geliang Tang
2022-04-06 13:26 ` [PATCH mptcp-next v2 3/3] selftests: bpf: add bpf_rr test Geliang Tang
2 siblings, 0 replies; 4+ messages in thread
From: Geliang Tang @ 2022-04-06 13:26 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch implements a new helper bpf_mptcp_get_subflows() to get all the
subflows of msk.
Register this helper in bpf_mptcp_sched_kfunc_init() to make sure it can
be invoked from the BPF context.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
net/mptcp/bpf.c | 28 ++++++++++++++++++++++++++++
net/mptcp/protocol.h | 3 +++
2 files changed, 31 insertions(+)
diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
index a3c41c079fe4..660579f1589a 100644
--- a/net/mptcp/bpf.c
+++ b/net/mptcp/bpf.c
@@ -114,6 +114,22 @@ struct bpf_struct_ops bpf_mptcp_sched_ops = {
.name = "mptcp_sched_ops",
};
+BTF_SET_START(bpf_mptcp_sched_kfunc_ids)
+BTF_ID(func, bpf_mptcp_get_subflows)
+BTF_SET_END(bpf_mptcp_sched_kfunc_ids)
+
+static const struct btf_kfunc_id_set bpf_mptcp_sched_kfunc_set = {
+ .owner = THIS_MODULE,
+ .check_set = &bpf_mptcp_sched_kfunc_ids,
+};
+
+static int __init bpf_mptcp_sched_kfunc_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &bpf_mptcp_sched_kfunc_set);
+}
+late_initcall(bpf_mptcp_sched_kfunc_init);
+
struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
{
if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
@@ -122,3 +138,15 @@ struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
return NULL;
}
EXPORT_SYMBOL(bpf_mptcp_sock_from_subflow);
+
+/* Populate msk->subflows[] with the connection's current subflow
+ * contexts and return how many entries were stored.
+ *
+ * The walk is clamped to MPTCP_SUBFLOWS_MAX: msk->subflows[] is a
+ * fixed 8-entry array, so without the bound a connection with more
+ * subflows would overflow it and corrupt the rest of struct mptcp_sock.
+ *
+ * NOTE(review): mptcp_for_each_subflow() walks msk->conn_list; confirm
+ * the msk socket lock is held by all callers of this kfunc.
+ */
+u8 bpf_mptcp_get_subflows(struct mptcp_sock *msk)
+{
+	struct mptcp_subflow_context *subflow;
+	u8 nr = 0;
+
+	mptcp_for_each_subflow(msk, subflow) {
+		if (nr >= MPTCP_SUBFLOWS_MAX)
+			break;
+		msk->subflows[nr++] = subflow;
+	}
+
+	return nr;
+}
+EXPORT_SYMBOL(bpf_mptcp_get_subflows);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 3caa1a08b7e8..1990ae5c1397 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -238,6 +238,8 @@ struct mptcp_data_frag {
struct page *page;
};
+#define MPTCP_SUBFLOWS_MAX 8
+
/* MPTCP connection sock */
struct mptcp_sock {
/* inet_connection_sock must be the first member */
@@ -298,6 +300,7 @@ struct mptcp_sock {
u32 setsockopt_seq;
char ca_name[TCP_CA_NAME_MAX];
+ struct mptcp_subflow_context *subflows[MPTCP_SUBFLOWS_MAX];
};
#define mptcp_data_lock(sk) spin_lock_bh(&(sk)->sk_lock.slock)
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH mptcp-next v2 2/3] selftests: bpf: add bpf_rr scheduler
2022-04-06 13:26 [PATCH mptcp-next v2 0/3] BPF round-robin scheduler Geliang Tang
2022-04-06 13:26 ` [PATCH mptcp-next v2 1/3] mptcp: add get_subflows helper Geliang Tang
@ 2022-04-06 13:26 ` Geliang Tang
2022-04-06 13:26 ` [PATCH mptcp-next v2 3/3] selftests: bpf: add bpf_rr test Geliang Tang
2 siblings, 0 replies; 4+ messages in thread
From: Geliang Tang @ 2022-04-06 13:26 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch implements the round-robin BPF MPTCP scheduler, named bpf_rr,
which always picks the next available subflow to send data on. If no such
subflow is available, it picks the first one.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
.../testing/selftests/bpf/bpf_mptcp_helpers.h | 9 ++++
.../selftests/bpf/progs/mptcp_bpf_rr.c | 49 +++++++++++++++++++
2 files changed, 58 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
diff --git a/tools/testing/selftests/bpf/bpf_mptcp_helpers.h b/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
index 5135eb6710e8..6acd62fbdf24 100644
--- a/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
@@ -6,12 +6,16 @@
#include "bpf_tcp_helpers.h"
+#define MPTCP_SUBFLOWS_MAX 8
+
struct mptcp_sock {
struct inet_connection_sock sk;
+ struct sock *last_snd;
__u32 token;
struct sock *first;
char ca_name[TCP_CA_NAME_MAX];
+ struct mptcp_subflow_context *subflows[MPTCP_SUBFLOWS_MAX];
} __attribute__((preserve_access_index));
#define MPTCP_SCHED_NAME_MAX 16
@@ -26,4 +30,9 @@ struct mptcp_sched_ops {
void *owner;
};
+struct mptcp_subflow_context {
+ __u32 token;
+ struct sock *tcp_sock; /* tcp sk backpointer */
+} __attribute__((preserve_access_index));
+
#endif
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
new file mode 100644
index 000000000000..3e83c94971c3
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
@@ -0,0 +1,49 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2022, SUSE. */
+
+#include <linux/bpf.h>
+#include <linux/stddef.h>
+#include <linux/tcp.h>
+#include "bpf_mptcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+extern __u8 bpf_mptcp_get_subflows(struct mptcp_sock *msk) __ksym;
+
+/* Scheduler attach hook: bpf_rr keeps no per-connection state, so
+ * initialization is a no-op.
+ */
+SEC("struct_ops/mptcp_sched_rr_init")
+void BPF_PROG(mptcp_sched_rr_init, struct mptcp_sock *msk)
+{
+}
+
+/* Scheduler detach hook: nothing was allocated in init, so there is
+ * nothing to release.
+ */
+SEC("struct_ops/mptcp_sched_rr_release")
+void BPF_PROG(mptcp_sched_rr_release, struct mptcp_sock *msk)
+{
+}
+
+/* Round-robin pick: return the first subflow whose tcp_sock differs
+ * from the one used for the previous transmit (msk->last_snd).  When
+ * nothing has been sent yet (last_snd is NULL) or no other subflow
+ * exists, fall back to msk->first.
+ *
+ * The loop bound is the compile-time constant MPTCP_SUBFLOWS_MAX (with
+ * an early break at the runtime count nr) so the BPF verifier can prove
+ * both termination and in-bounds access to msk->subflows[].
+ */
+struct sock *BPF_STRUCT_OPS(bpf_rr_get_subflow, struct mptcp_sock *msk)
+{
+	__u8 nr = bpf_mptcp_get_subflows(msk);	/* kfunc fills msk->subflows[] */
+	struct mptcp_subflow_context *subflow;
+	struct sock *ssk = msk->first;	/* fallback when no alternative found */
+
+	for (int i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
+		if (i == nr)
+			break;
+
+		subflow = msk->subflows[i];
+		if (msk->last_snd && subflow->tcp_sock != msk->last_snd) {
+			ssk = subflow->tcp_sock;
+			break;
+		}
+	}
+
+	return ssk;
+}
+
+/* struct_ops map registering the scheduler with the kernel; it becomes
+ * selectable at runtime via sysctl net.mptcp.scheduler=bpf_rr.
+ */
+SEC(".struct_ops")
+struct mptcp_sched_ops rr = {
+	.init = (void *)mptcp_sched_rr_init,
+	.release = (void *)mptcp_sched_rr_release,
+	.get_subflow = (void *)bpf_rr_get_subflow,
+	.name = "bpf_rr",
+};
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread
* [PATCH mptcp-next v2 3/3] selftests: bpf: add bpf_rr test
2022-04-06 13:26 [PATCH mptcp-next v2 0/3] BPF round-robin scheduler Geliang Tang
2022-04-06 13:26 ` [PATCH mptcp-next v2 1/3] mptcp: add get_subflows helper Geliang Tang
2022-04-06 13:26 ` [PATCH mptcp-next v2 2/3] selftests: bpf: add bpf_rr scheduler Geliang Tang
@ 2022-04-06 13:26 ` Geliang Tang
2 siblings, 0 replies; 4+ messages in thread
From: Geliang Tang @ 2022-04-06 13:26 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch adds the round-robin BPF MPTCP scheduler test. Use sysctl to
set net.mptcp.scheduler to select this scheduler. Add a veth address to
test the multiple-subflows case.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
.../testing/selftests/bpf/prog_tests/mptcp.c | 37 +++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index 467769e229f5..4b191a7fd12c 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -5,6 +5,7 @@
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "mptcp_bpf_first.skel.h"
+#include "mptcp_bpf_rr.skel.h"
#define min(a, b) ((a) < (b) ? (a) : (b))
@@ -363,10 +364,46 @@ static void test_first(void)
mptcp_bpf_first__destroy(first_skel);
}
+/* Attach the bpf_rr struct_ops scheduler, create a second local address
+ * so the connection can open an extra subflow, transfer data, then tear
+ * everything down and restore global state.
+ */
+static void test_rr(void)
+{
+	struct mptcp_bpf_rr *rr_skel;
+	int server_fd, client_fd;
+	struct bpf_link *link;
+
+	rr_skel = mptcp_bpf_rr__open_and_load();
+	if (!ASSERT_OK_PTR(rr_skel, "bpf_rr__open_and_load"))
+		return;
+
+	link = bpf_map__attach_struct_ops(rr_skel->maps.rr);
+	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
+		mptcp_bpf_rr__destroy(rr_skel);
+		return;
+	}
+
+	/* Extra address + MPTCP endpoint so a second subflow can be
+	 * established, then select bpf_rr for new MPTCP sockets.
+	 */
+	system("ip link add veth1 type veth; \
+		ip addr add 10.0.1.1/24 dev veth1; \
+		ip link set veth1 up");
+	system("ip mptcp endpoint add 10.0.1.1 subflow");
+	system("sysctl -q net.mptcp.scheduler=bpf_rr");
+	server_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+	client_fd = connect_to_mptcp_fd(server_fd, 0);
+
+	send_data(server_fd, client_fd);
+
+	close(client_fd);
+	close(server_fd);
+	/* Restore the default scheduler: bpf_rr disappears when the link
+	 * is destroyed below, so the sysctl must not keep pointing at it.
+	 */
+	system("sysctl -q net.mptcp.scheduler=default");
+	system("ip mptcp endpoint flush");
+	system("ip link del veth1");
+	bpf_link__destroy(link);
+	mptcp_bpf_rr__destroy(rr_skel);
+}
+
void test_mptcp(void)
{
if (test__start_subtest("base"))
test_base();
if (test__start_subtest("first"))
test_first();
+ if (test__start_subtest("rr"))
+ test_rr();
}
--
2.34.1
^ permalink raw reply related [flat|nested] 4+ messages in thread