* [PATCH mptcp-next v5 1/4] mptcp: add last_snd write access
2022-04-13 14:30 [PATCH mptcp-next v5 0/4] BPF round-robin scheduler Geliang Tang
@ 2022-04-13 14:30 ` Geliang Tang
2022-04-13 14:30 ` [PATCH mptcp-next v5 2/4] mptcp: add bpf get_subflows helper Geliang Tang
` (2 subsequent siblings)
3 siblings, 0 replies; 5+ messages in thread
From: Geliang Tang @ 2022-04-13 14:30 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch exports the member last_snd of struct mptcp_sock in
bpf_mptcp_helpers.h, and adds BPF write access to it.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
net/mptcp/bpf.c | 16 ++++++++++++++++
tools/testing/selftests/bpf/bpf_mptcp_helpers.h | 1 +
2 files changed, 17 insertions(+)
diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
index e849fc3fb6c5..bd3c50b07ab2 100644
--- a/net/mptcp/bpf.c
+++ b/net/mptcp/bpf.c
@@ -40,6 +40,7 @@ static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
{
const struct btf_type *state;
u32 type_id;
+ size_t end;
if (atype == BPF_READ)
return btf_struct_access(log, btf, t, off, size, atype,
@@ -55,6 +56,21 @@ static int bpf_mptcp_sched_btf_struct_access(struct bpf_verifier_log *log,
return -EACCES;
}
+ switch (off) {
+ case offsetof(struct mptcp_sock, last_snd):
+ end = offsetofend(struct mptcp_sock, last_snd);
+ break;
+ default:
+ bpf_log(log, "no write support to mptcp_sock at off %d\n", off);
+ return -EACCES;
+ }
+
+ if (off + size > end) {
+ bpf_log(log, "access beyond mptcp_sock at off %u size %u ended at %lu",
+ off, size, end);
+ return -EACCES;
+ }
+
return NOT_INIT;
}
diff --git a/tools/testing/selftests/bpf/bpf_mptcp_helpers.h b/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
index 9a0e3e7766b0..4f82baee998c 100644
--- a/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
@@ -9,6 +9,7 @@
struct mptcp_sock {
struct inet_connection_sock sk;
+ struct sock *last_snd;
__u32 token;
struct sock *first;
char ca_name[TCP_CA_NAME_MAX];
--
2.34.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH mptcp-next v5 2/4] mptcp: add bpf get_subflows helper
2022-04-13 14:30 [PATCH mptcp-next v5 0/4] BPF round-robin scheduler Geliang Tang
2022-04-13 14:30 ` [PATCH mptcp-next v5 1/4] mptcp: add last_snd write access Geliang Tang
@ 2022-04-13 14:30 ` Geliang Tang
2022-04-13 14:30 ` [PATCH mptcp-next v5 3/4] selftests: bpf: add bpf_rr scheduler Geliang Tang
2022-04-13 14:30 ` [PATCH mptcp-next v5 4/4] selftests: bpf: add bpf_rr test Geliang Tang
3 siblings, 0 replies; 5+ messages in thread
From: Geliang Tang @ 2022-04-13 14:30 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch implements two new helpers, bpf_mptcp_get_subflows_array() and
bpf_mptcp_put_subflows_array(), to get and release an array containing all
the subflows of the given mptcp_sock. Add a new struct
mptcp_subflows_array in protocol.h, holding the number of subflows and a
pointer array of all the subflow contexts.
Register these helpers in bpf_mptcp_sched_kfunc_init() to make sure they
can be accessed from the BPF context.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
net/mptcp/bpf.c | 44 ++++++++++++++++++++++++++++++++++++++++++++
net/mptcp/protocol.h | 7 +++++++
2 files changed, 51 insertions(+)
diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
index bd3c50b07ab2..823e5af1a2f4 100644
--- a/net/mptcp/bpf.c
+++ b/net/mptcp/bpf.c
@@ -157,6 +157,23 @@ struct bpf_struct_ops bpf_mptcp_sched_ops = {
.name = "mptcp_sched_ops",
};
+BTF_SET_START(bpf_mptcp_sched_kfunc_ids)
+BTF_ID(func, bpf_mptcp_get_subflows_array)
+BTF_ID(func, bpf_mptcp_put_subflows_array)
+BTF_SET_END(bpf_mptcp_sched_kfunc_ids)
+
+static const struct btf_kfunc_id_set bpf_mptcp_sched_kfunc_set = {
+ .owner = THIS_MODULE,
+ .check_set = &bpf_mptcp_sched_kfunc_ids,
+};
+
+static int __init bpf_mptcp_sched_kfunc_init(void)
+{
+ return register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
+ &bpf_mptcp_sched_kfunc_set);
+}
+late_initcall(bpf_mptcp_sched_kfunc_init);
+
struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
{
if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
@@ -165,3 +182,30 @@ struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
return NULL;
}
EXPORT_SYMBOL(bpf_mptcp_sock_from_subflow);
+
+struct mptcp_subflows_array *bpf_mptcp_get_subflows_array(struct mptcp_sock *msk)
+{
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_subflows_array *array;
+
+ array = kzalloc(sizeof(*array), GFP_KERNEL);
+ if (!array)
+ return array;
+
+ mptcp_for_each_subflow(msk, subflow)
+ array->subflows[array->nr++] = subflow;
+
+ return array;
+}
+EXPORT_SYMBOL(bpf_mptcp_get_subflows_array);
+
+void bpf_mptcp_put_subflows_array(struct mptcp_subflows_array *array)
+{
+ int i;
+
+ for (i = 0; i < MPTCP_SUBFLOWS_MAX; i++)
+ array->subflows[i] = NULL;
+ array->nr = 0;
+ kfree(array);
+}
+EXPORT_SYMBOL(bpf_mptcp_put_subflows_array);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index a58c5c84e72b..5581b37028ee 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -490,6 +490,13 @@ struct mptcp_subflow_context {
struct rcu_head rcu;
};
+#define MPTCP_SUBFLOWS_MAX 8
+
+struct mptcp_subflows_array {
+ u8 nr;
+ struct mptcp_subflow_context *subflows[MPTCP_SUBFLOWS_MAX];
+};
+
static inline struct mptcp_subflow_context *
mptcp_subflow_ctx(const struct sock *sk)
{
--
2.34.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH mptcp-next v5 3/4] selftests: bpf: add bpf_rr scheduler
2022-04-13 14:30 [PATCH mptcp-next v5 0/4] BPF round-robin scheduler Geliang Tang
2022-04-13 14:30 ` [PATCH mptcp-next v5 1/4] mptcp: add last_snd write access Geliang Tang
2022-04-13 14:30 ` [PATCH mptcp-next v5 2/4] mptcp: add bpf get_subflows helper Geliang Tang
@ 2022-04-13 14:30 ` Geliang Tang
2022-04-13 14:30 ` [PATCH mptcp-next v5 4/4] selftests: bpf: add bpf_rr test Geliang Tang
3 siblings, 0 replies; 5+ messages in thread
From: Geliang Tang @ 2022-04-13 14:30 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch implements the round-robin BPF MPTCP scheduler, named bpf_rr,
which always picks the next available subflow to send data. If no such
subflow is available, it picks the first one.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
.../testing/selftests/bpf/bpf_mptcp_helpers.h | 12 +++++
.../selftests/bpf/progs/mptcp_bpf_rr.c | 54 +++++++++++++++++++
2 files changed, 66 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
diff --git a/tools/testing/selftests/bpf/bpf_mptcp_helpers.h b/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
index 4f82baee998c..140ead91e2c6 100644
--- a/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
@@ -27,4 +27,16 @@ struct mptcp_sched_ops {
void *owner;
};
+struct mptcp_subflow_context {
+ __u32 token;
+ struct sock *tcp_sock; /* tcp sk backpointer */
+} __attribute__((preserve_access_index));
+
+#define MPTCP_SUBFLOWS_MAX 8
+
+struct mptcp_subflows_array {
+ __u8 nr;
+ struct mptcp_subflow_context *subflows[MPTCP_SUBFLOWS_MAX];
+};
+
#endif
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
new file mode 100644
index 000000000000..531ab12a93b1
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_rr.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2022, SUSE. */
+
+#include <linux/bpf.h>
+#include <linux/stddef.h>
+#include <linux/tcp.h>
+#include "bpf_mptcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+extern struct mptcp_subflows_array *
+bpf_mptcp_get_subflows_array(struct mptcp_sock *msk) __ksym;
+extern void bpf_mptcp_put_subflows_array(struct mptcp_subflows_array *array) __ksym;
+
+SEC("struct_ops/mptcp_sched_rr_init")
+void BPF_PROG(mptcp_sched_rr_init, struct mptcp_sock *msk)
+{
+}
+
+SEC("struct_ops/mptcp_sched_rr_release")
+void BPF_PROG(mptcp_sched_rr_release, struct mptcp_sock *msk)
+{
+}
+
+struct sock *BPF_STRUCT_OPS(bpf_rr_get_subflow, struct mptcp_sock *msk, bool retrans)
+{
+ struct mptcp_subflow_context *subflow;
+ struct mptcp_subflows_array *array;
+ struct sock *ssk = msk->first;
+
+ array = bpf_mptcp_get_subflows_array(msk);
+ for (int i = 0; i < MPTCP_SUBFLOWS_MAX; i++) {
+ if (i == array->nr)
+ break;
+
+ subflow = array->subflows[i];
+ if (subflow->tcp_sock != msk->last_snd) {
+ ssk = subflow->tcp_sock;
+ break;
+ }
+ }
+ bpf_mptcp_put_subflows_array(array);
+
+ msk->last_snd = ssk;
+ return ssk;
+}
+
+SEC(".struct_ops")
+struct mptcp_sched_ops rr = {
+ .init = (void *)mptcp_sched_rr_init,
+ .release = (void *)mptcp_sched_rr_release,
+ .get_subflow = (void *)bpf_rr_get_subflow,
+ .name = "bpf_rr",
+};
--
2.34.1
^ permalink raw reply related [flat|nested] 5+ messages in thread
* [PATCH mptcp-next v5 4/4] selftests: bpf: add bpf_rr test
2022-04-13 14:30 [PATCH mptcp-next v5 0/4] BPF round-robin scheduler Geliang Tang
` (2 preceding siblings ...)
2022-04-13 14:30 ` [PATCH mptcp-next v5 3/4] selftests: bpf: add bpf_rr scheduler Geliang Tang
@ 2022-04-13 14:30 ` Geliang Tang
3 siblings, 0 replies; 5+ messages in thread
From: Geliang Tang @ 2022-04-13 14:30 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch adds the round-robin BPF MPTCP scheduler test. Use sysctl to
set net.mptcp.scheduler to use this scheduler. Add a veth net device to
simulate the multiple-address case. Use the 'ip mptcp endpoint' command
to add this new endpoint to the PM netlink.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
.../testing/selftests/bpf/prog_tests/mptcp.c | 37 +++++++++++++++++++
1 file changed, 37 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index 01679077521d..20f382da6812 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -6,6 +6,7 @@
#include "cgroup_helpers.h"
#include "network_helpers.h"
#include "mptcp_bpf_first.skel.h"
+#include "mptcp_bpf_rr.skel.h"
#ifndef TCP_CA_NAME_MAX
#define TCP_CA_NAME_MAX 16
@@ -362,10 +363,46 @@ static void test_first(void)
mptcp_bpf_first__destroy(first_skel);
}
+static void test_rr(void)
+{
+ struct mptcp_bpf_rr *rr_skel;
+ int server_fd, client_fd;
+ struct bpf_link *link;
+
+ rr_skel = mptcp_bpf_rr__open_and_load();
+ if (CHECK(!rr_skel, "bpf_rr__open_and_load", "failed\n"))
+ return;
+
+ link = bpf_map__attach_struct_ops(rr_skel->maps.rr);
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
+ mptcp_bpf_rr__destroy(rr_skel);
+ return;
+ }
+
+ system("ip link add veth1 type veth; \
+ ip addr add 10.0.1.1/24 dev veth1; \
+ ip link set veth1 up");
+ system("ip mptcp endpoint add 10.0.1.1 subflow");
+ system("sysctl -q net.mptcp.scheduler=bpf_rr");
+ server_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ client_fd = connect_to_mptcp_fd(server_fd, 0);
+
+ send_data(server_fd, client_fd);
+
+ close(client_fd);
+ close(server_fd);
+ system("ip mptcp endpoint flush");
+ system("ip link del veth1");
+ bpf_link__destroy(link);
+ mptcp_bpf_rr__destroy(rr_skel);
+}
+
void test_mptcp(void)
{
if (test__start_subtest("base"))
test_base();
if (test__start_subtest("first"))
test_first();
+ if (test__start_subtest("rr"))
+ test_rr();
}
--
2.34.1
^ permalink raw reply related [flat|nested] 5+ messages in thread