* [PATCH mptcp-next v9 1/8] mptcp: add struct mptcp_sched_ops
2022-04-04 2:09 [PATCH mptcp-next v9 0/8] BPF packet scheduler Geliang Tang
@ 2022-04-04 2:09 ` Geliang Tang
2022-04-04 2:09 ` [PATCH mptcp-next v9 2/8] mptcp: register default scheduler Geliang Tang
` (6 subsequent siblings)
7 siblings, 0 replies; 13+ messages in thread
From: Geliang Tang @ 2022-04-04 2:09 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch defines struct mptcp_sched_ops, which has three struct members,
name, owner and list, and three function pointers, init, release and
get_subflow.
Add functions to register, unregister and find a packet scheduler on
mptcp_sched_list.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
include/net/mptcp.h | 13 ++++++++++
net/mptcp/Makefile | 2 +-
net/mptcp/protocol.h | 3 +++
net/mptcp/sched.c | 56 ++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 73 insertions(+), 1 deletion(-)
create mode 100644 net/mptcp/sched.c
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
index 877077b53200..aea7ed9a2250 100644
--- a/include/net/mptcp.h
+++ b/include/net/mptcp.h
@@ -95,6 +95,19 @@ struct mptcp_out_options {
#endif
};
+#define MPTCP_SCHED_NAME_MAX 16
+
+struct mptcp_sched_ops {
+ struct sock * (*get_subflow)(struct mptcp_sock *msk);
+
+ char name[MPTCP_SCHED_NAME_MAX];
+ struct module *owner;
+ struct list_head list;
+
+ void (*init)(struct mptcp_sock *msk);
+ void (*release)(struct mptcp_sock *msk);
+} ____cacheline_aligned_in_smp;
+
#ifdef CONFIG_MPTCP
extern struct request_sock_ops mptcp_subflow_request_sock_ops;
diff --git a/net/mptcp/Makefile b/net/mptcp/Makefile
index 168c55d1c917..a37330760b0c 100644
--- a/net/mptcp/Makefile
+++ b/net/mptcp/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_MPTCP) += mptcp.o
mptcp-y := protocol.o subflow.o options.o token.o crypto.o ctrl.o pm.o diag.o \
- mib.o pm_netlink.o sockopt.o
+ mib.o pm_netlink.o sockopt.o sched.o
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_MPTCP_DIAG) += mptcp_diag.o
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 187c932deef0..7cd2c1c3d25c 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -608,6 +608,9 @@ int mptcp_subflow_create_socket(struct sock *sk, struct socket **new_sock);
void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
struct sockaddr_storage *addr,
unsigned short family);
+struct mptcp_sched_ops *mptcp_sched_find(const char *name);
+int mptcp_register_scheduler(struct mptcp_sched_ops *sched);
+void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched);
static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
new file mode 100644
index 000000000000..c5d3bbafba71
--- /dev/null
+++ b/net/mptcp/sched.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Multipath TCP
+ *
+ * Copyright (c) 2022, SUSE.
+ */
+
+#define pr_fmt(fmt) "MPTCP: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/rculist.h>
+#include <linux/spinlock.h>
+#include "protocol.h"
+
+static DEFINE_SPINLOCK(mptcp_sched_list_lock);
+static LIST_HEAD(mptcp_sched_list);
+
+/* Must be called with rcu read lock held */
+struct mptcp_sched_ops *mptcp_sched_find(const char *name)
+{
+ struct mptcp_sched_ops *sched, *ret = NULL;
+
+ list_for_each_entry_rcu(sched, &mptcp_sched_list, list) {
+ if (!strcmp(sched->name, name)) {
+ ret = sched;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
+{
+ if (!sched->get_subflow)
+ return -EINVAL;
+
+ spin_lock(&mptcp_sched_list_lock);
+ if (mptcp_sched_find(sched->name)) {
+ spin_unlock(&mptcp_sched_list_lock);
+ return -EEXIST;
+ }
+ list_add_tail_rcu(&sched->list, &mptcp_sched_list);
+ spin_unlock(&mptcp_sched_list_lock);
+
+ pr_debug("%s registered", sched->name);
+ return 0;
+}
+
+void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
+{
+ spin_lock(&mptcp_sched_list_lock);
+ list_del_rcu(&sched->list);
+ spin_unlock(&mptcp_sched_list_lock);
+}
--
2.34.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH mptcp-next v9 2/8] mptcp: register default scheduler
2022-04-04 2:09 [PATCH mptcp-next v9 0/8] BPF packet scheduler Geliang Tang
2022-04-04 2:09 ` [PATCH mptcp-next v9 1/8] mptcp: add struct mptcp_sched_ops Geliang Tang
@ 2022-04-04 2:09 ` Geliang Tang
2022-04-04 2:09 ` [PATCH mptcp-next v9 3/8] mptcp: add a new sysctl scheduler Geliang Tang
` (5 subsequent siblings)
7 siblings, 0 replies; 13+ messages in thread
From: Geliang Tang @ 2022-04-04 2:09 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch defines the default packet scheduler mptcp_sched_default and
registers it in mptcp_sched_init(), which is invoked in mptcp_proto_init().
mptcp_unregister_scheduler() skips deleting this default scheduler.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
net/mptcp/protocol.c | 3 ++-
net/mptcp/protocol.h | 2 ++
net/mptcp/sched.c | 14 ++++++++++++++
3 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index b2c654992de0..8f0e71c38336 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1424,7 +1424,7 @@ bool mptcp_subflow_active(struct mptcp_subflow_context *subflow)
* returns the subflow that will transmit the next DSS
* additionally updates the rtx timeout
*/
-static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
+struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
struct subflow_send_info send_info[SSK_MODE_MAX];
struct mptcp_subflow_context *subflow;
@@ -3804,6 +3804,7 @@ void __init mptcp_proto_init(void)
mptcp_subflow_init();
mptcp_pm_init();
+ mptcp_sched_init();
mptcp_token_init();
if (proto_register(&mptcp_prot, MPTCP_USE_SLAB) != 0)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 7cd2c1c3d25c..a264964f8d95 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -611,6 +611,8 @@ void mptcp_info2sockaddr(const struct mptcp_addr_info *info,
struct mptcp_sched_ops *mptcp_sched_find(const char *name);
int mptcp_register_scheduler(struct mptcp_sched_ops *sched);
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched);
+struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk);
+void mptcp_sched_init(void);
static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index c5d3bbafba71..52828eb741c0 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -13,6 +13,12 @@
#include <linux/spinlock.h>
#include "protocol.h"
+static struct mptcp_sched_ops mptcp_sched_default = {
+ .get_subflow = mptcp_subflow_get_send,
+ .name = "default",
+ .owner = THIS_MODULE,
+};
+
static DEFINE_SPINLOCK(mptcp_sched_list_lock);
static LIST_HEAD(mptcp_sched_list);
@@ -50,7 +56,15 @@ int mptcp_register_scheduler(struct mptcp_sched_ops *sched)
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched)
{
+ if (sched == &mptcp_sched_default)
+ return;
+
spin_lock(&mptcp_sched_list_lock);
list_del_rcu(&sched->list);
spin_unlock(&mptcp_sched_list_lock);
}
+
+void mptcp_sched_init(void)
+{
+ mptcp_register_scheduler(&mptcp_sched_default);
+}
--
2.34.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH mptcp-next v9 3/8] mptcp: add a new sysctl scheduler
2022-04-04 2:09 [PATCH mptcp-next v9 0/8] BPF packet scheduler Geliang Tang
2022-04-04 2:09 ` [PATCH mptcp-next v9 1/8] mptcp: add struct mptcp_sched_ops Geliang Tang
2022-04-04 2:09 ` [PATCH mptcp-next v9 2/8] mptcp: register default scheduler Geliang Tang
@ 2022-04-04 2:09 ` Geliang Tang
2022-04-04 2:09 ` [PATCH mptcp-next v9 4/8] mptcp: add sched in mptcp_sock Geliang Tang
` (4 subsequent siblings)
7 siblings, 0 replies; 13+ messages in thread
From: Geliang Tang @ 2022-04-04 2:09 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch adds a new sysctl, named scheduler, to support the selection
of different schedulers. It also exports the mptcp_get_scheduler() helper
to read this sysctl.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
Documentation/networking/mptcp-sysctl.rst | 8 ++++++++
net/mptcp/ctrl.c | 14 ++++++++++++++
net/mptcp/protocol.h | 1 +
3 files changed, 23 insertions(+)
diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst
index e263dfcc4b40..d9e69fdc7ea3 100644
--- a/Documentation/networking/mptcp-sysctl.rst
+++ b/Documentation/networking/mptcp-sysctl.rst
@@ -75,3 +75,11 @@ stale_loss_cnt - INTEGER
This is a per-namespace sysctl.
Default: 4
+
+scheduler - STRING
+ Select the scheduler of your choice.
+
+ Support for selection of different schedulers. This is a per-namespace
+ sysctl.
+
+ Default: "default"
diff --git a/net/mptcp/ctrl.c b/net/mptcp/ctrl.c
index ae20b7d92e28..c46c22a84d23 100644
--- a/net/mptcp/ctrl.c
+++ b/net/mptcp/ctrl.c
@@ -32,6 +32,7 @@ struct mptcp_pernet {
u8 checksum_enabled;
u8 allow_join_initial_addr_port;
u8 pm_type;
+ char scheduler[MPTCP_SCHED_NAME_MAX];
};
static struct mptcp_pernet *mptcp_get_pernet(const struct net *net)
@@ -69,6 +70,11 @@ int mptcp_get_pm_type(const struct net *net)
return mptcp_get_pernet(net)->pm_type;
}
+const char *mptcp_get_scheduler(const struct net *net)
+{
+ return mptcp_get_pernet(net)->scheduler;
+}
+
static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
{
pernet->mptcp_enabled = 1;
@@ -77,6 +83,7 @@ static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
pernet->allow_join_initial_addr_port = 1;
pernet->stale_loss_cnt = 4;
pernet->pm_type = MPTCP_PM_TYPE_KERNEL;
+ strcpy(pernet->scheduler, "default");
}
#ifdef CONFIG_SYSCTL
@@ -128,6 +135,12 @@ static struct ctl_table mptcp_sysctl_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = &mptcp_pm_type_max
},
+ {
+ .procname = "scheduler",
+ .maxlen = MPTCP_SCHED_NAME_MAX,
+ .mode = 0644,
+ .proc_handler = proc_dostring,
+ },
{}
};
@@ -149,6 +162,7 @@ static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
table[3].data = &pernet->allow_join_initial_addr_port;
table[4].data = &pernet->stale_loss_cnt;
table[5].data = &pernet->pm_type;
+ table[6].data = &pernet->scheduler;
hdr = register_net_sysctl(net, MPTCP_SYSCTL_PATH, table);
if (!hdr)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index a264964f8d95..65a08110268d 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -586,6 +586,7 @@ int mptcp_is_checksum_enabled(const struct net *net);
int mptcp_allow_join_id0(const struct net *net);
unsigned int mptcp_stale_loss_cnt(const struct net *net);
int mptcp_get_pm_type(const struct net *net);
+const char *mptcp_get_scheduler(const struct net *net);
void mptcp_subflow_fully_established(struct mptcp_subflow_context *subflow,
struct mptcp_options_received *mp_opt);
bool __mptcp_retransmit_pending_data(struct sock *sk);
--
2.34.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH mptcp-next v9 4/8] mptcp: add sched in mptcp_sock
2022-04-04 2:09 [PATCH mptcp-next v9 0/8] BPF packet scheduler Geliang Tang
` (2 preceding siblings ...)
2022-04-04 2:09 ` [PATCH mptcp-next v9 3/8] mptcp: add a new sysctl scheduler Geliang Tang
@ 2022-04-04 2:09 ` Geliang Tang
2022-04-04 2:09 ` [PATCH mptcp-next v9 5/8] mptcp: add get_subflow wrapper Geliang Tang
` (3 subsequent siblings)
7 siblings, 0 replies; 13+ messages in thread
From: Geliang Tang @ 2022-04-04 2:09 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch adds a new struct member, sched, in struct mptcp_sock,
together with two helpers, mptcp_init_sched() and mptcp_release_sched(),
to initialize and release it.
Init it with the sysctl scheduler in mptcp_init_sock(), copy the
scheduler from the parent in mptcp_sk_clone(), and release it in
__mptcp_destroy_sock().
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
net/mptcp/protocol.c | 7 +++++++
net/mptcp/protocol.h | 4 ++++
net/mptcp/sched.c | 34 ++++++++++++++++++++++++++++++++++
3 files changed, 45 insertions(+)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 8f0e71c38336..37f4f6f3661d 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -2657,6 +2657,11 @@ static int mptcp_init_sock(struct sock *sk)
if (ret)
return ret;
+ ret = mptcp_init_sched(mptcp_sk(sk),
+ mptcp_sched_find(mptcp_get_scheduler(net)));
+ if (ret)
+ return ret;
+
/* fetch the ca name; do it outside __mptcp_init_sock(), so that clone will
* propagate the correct value
*/
@@ -2816,6 +2821,7 @@ static void __mptcp_destroy_sock(struct sock *sk)
sk_stop_timer(sk, &sk->sk_timer);
mptcp_data_unlock(sk);
msk->pm.status = 0;
+ mptcp_release_sched(msk);
/* clears msk->subflow, allowing the following loop to close
* even the initial subflow
@@ -2993,6 +2999,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk,
msk->snd_una = msk->write_seq;
msk->wnd_end = msk->snd_nxt + req->rsk_rcv_wnd;
msk->setsockopt_seq = mptcp_sk(sk)->setsockopt_seq;
+ mptcp_init_sched(msk, mptcp_sk(sk)->sched);
if (mp_opt->suboptions & OPTIONS_MPTCP_MPC) {
msk->can_ack = true;
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 65a08110268d..1016dac065c8 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -288,6 +288,7 @@ struct mptcp_sock {
struct socket *subflow; /* outgoing connect/listener/!mp_capable */
struct sock *first;
struct mptcp_pm_data pm;
+ struct mptcp_sched_ops *sched;
struct {
u32 space; /* bytes copied in last measurement window */
u32 copied; /* bytes copied in this measurement window */
@@ -614,6 +615,9 @@ int mptcp_register_scheduler(struct mptcp_sched_ops *sched);
void mptcp_unregister_scheduler(struct mptcp_sched_ops *sched);
struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk);
void mptcp_sched_init(void);
+int mptcp_init_sched(struct mptcp_sock *msk,
+ struct mptcp_sched_ops *sched);
+void mptcp_release_sched(struct mptcp_sock *msk);
static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
diff --git a/net/mptcp/sched.c b/net/mptcp/sched.c
index 52828eb741c0..5d20d252088f 100644
--- a/net/mptcp/sched.c
+++ b/net/mptcp/sched.c
@@ -68,3 +68,37 @@ void mptcp_sched_init(void)
{
mptcp_register_scheduler(&mptcp_sched_default);
}
+
+int mptcp_init_sched(struct mptcp_sock *msk,
+ struct mptcp_sched_ops *sched)
+{
+ struct mptcp_sched_ops *sched_init = &mptcp_sched_default;
+
+ if (sched)
+ sched_init = sched;
+
+ if (!bpf_try_module_get(sched_init, sched_init->owner))
+ return -EBUSY;
+
+ msk->sched = sched_init;
+ if (msk->sched->init)
+ msk->sched->init(msk);
+
+ pr_debug("sched=%s", msk->sched->name);
+
+ return 0;
+}
+
+void mptcp_release_sched(struct mptcp_sock *msk)
+{
+ struct mptcp_sched_ops *sched = msk->sched;
+
+ if (!sched)
+ return;
+
+ msk->sched = NULL;
+ if (sched->release)
+ sched->release(msk);
+
+ bpf_module_put(sched, sched->owner);
+}
--
2.34.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH mptcp-next v9 5/8] mptcp: add get_subflow wrapper
2022-04-04 2:09 [PATCH mptcp-next v9 0/8] BPF packet scheduler Geliang Tang
` (3 preceding siblings ...)
2022-04-04 2:09 ` [PATCH mptcp-next v9 4/8] mptcp: add sched in mptcp_sock Geliang Tang
@ 2022-04-04 2:09 ` Geliang Tang
2022-04-04 2:09 ` [PATCH mptcp-next v9 6/8] mptcp: add bpf_mptcp_sched_ops Geliang Tang
` (2 subsequent siblings)
7 siblings, 0 replies; 13+ messages in thread
From: Geliang Tang @ 2022-04-04 2:09 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch defines a new wrapper, mptcp_sched_get_subflow(), which invokes
the get_subflow() callback of msk->sched. Use the wrapper instead of
calling mptcp_subflow_get_send() directly.
Move the msk->last_snd assignment out of the scheduler's get_subflow()
callback and set it to the return value of get_subflow() after invoking it.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
net/mptcp/protocol.c | 7 +++----
net/mptcp/protocol.h | 9 +++++++++
2 files changed, 12 insertions(+), 4 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 37f4f6f3661d..c9e49b2561af 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1507,7 +1507,6 @@ struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem +
READ_ONCE(ssk->sk_pacing_rate) * burst,
burst + wmem);
- msk->last_snd = ssk;
msk->snd_burst = burst;
return ssk;
}
@@ -1567,7 +1566,7 @@ void __mptcp_push_pending(struct sock *sk, unsigned int flags)
int ret = 0;
prev_ssk = ssk;
- ssk = mptcp_subflow_get_send(msk);
+ ssk = mptcp_sched_get_subflow(msk);
/* First check. If the ssk has changed since
* the last round, release prev_ssk
@@ -1636,7 +1635,7 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
* check for a different subflow usage only after
* spooling the first chunk of data
*/
- xmit_ssk = first ? ssk : mptcp_subflow_get_send(mptcp_sk(sk));
+ xmit_ssk = first ? ssk : mptcp_sched_get_subflow(mptcp_sk(sk));
if (!xmit_ssk)
goto out;
if (xmit_ssk != ssk) {
@@ -3137,7 +3136,7 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
return;
if (!sock_owned_by_user(sk)) {
- struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk));
+ struct sock *xmit_ssk = mptcp_sched_get_subflow(mptcp_sk(sk));
if (xmit_ssk == ssk)
__mptcp_subflow_push_pending(sk, ssk);
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 1016dac065c8..3caa1a08b7e8 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -619,6 +619,15 @@ int mptcp_init_sched(struct mptcp_sock *msk,
struct mptcp_sched_ops *sched);
void mptcp_release_sched(struct mptcp_sock *msk);
+static inline struct sock *mptcp_sched_get_subflow(struct mptcp_sock *msk)
+{
+ struct sock *ssk = INDIRECT_CALL_INET_1(msk->sched->get_subflow,
+ mptcp_subflow_get_send, msk);
+
+ msk->last_snd = ssk;
+ return ssk;
+}
+
static inline bool __mptcp_subflow_active(struct mptcp_subflow_context *subflow)
{
struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
--
2.34.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH mptcp-next v9 6/8] mptcp: add bpf_mptcp_sched_ops
2022-04-04 2:09 [PATCH mptcp-next v9 0/8] BPF packet scheduler Geliang Tang
` (4 preceding siblings ...)
2022-04-04 2:09 ` [PATCH mptcp-next v9 5/8] mptcp: add get_subflow wrapper Geliang Tang
@ 2022-04-04 2:09 ` Geliang Tang
2022-04-05 0:29 ` Mat Martineau
2022-04-04 2:09 ` [PATCH mptcp-next v9 7/8] selftests: bpf: add bpf_first scheduler Geliang Tang
2022-04-04 2:10 ` [PATCH mptcp-next v9 8/8] selftests: bpf: add bpf_first test Geliang Tang
7 siblings, 1 reply; 13+ messages in thread
From: Geliang Tang @ 2022-04-04 2:09 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch implements a new struct bpf_struct_ops, bpf_mptcp_sched_ops.
Register and unregister the bpf scheduler in .reg and .unreg.
This implementation is similar to the BPF TCP CC one, and some code in
this patch comes from bpf_tcp_ca.c.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
kernel/bpf/bpf_struct_ops_types.h | 4 ++
net/mptcp/bpf.c | 102 ++++++++++++++++++++++++++++++
2 files changed, 106 insertions(+)
diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h
index 5678a9ddf817..5a6b0c0d8d3d 100644
--- a/kernel/bpf/bpf_struct_ops_types.h
+++ b/kernel/bpf/bpf_struct_ops_types.h
@@ -8,5 +8,9 @@ BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
#ifdef CONFIG_INET
#include <net/tcp.h>
BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
+#ifdef CONFIG_MPTCP
+#include <net/mptcp.h>
+BPF_STRUCT_OPS_TYPE(mptcp_sched_ops)
+#endif
#endif
#endif
diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
index 535602ba2582..647cb174d917 100644
--- a/net/mptcp/bpf.c
+++ b/net/mptcp/bpf.c
@@ -10,8 +10,110 @@
#define pr_fmt(fmt) "MPTCP: " fmt
#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/btf.h>
+#include <linux/btf_ids.h>
#include "protocol.h"
+extern struct bpf_struct_ops bpf_mptcp_sched_ops;
+extern struct btf *btf_vmlinux;
+
+static u32 optional_ops[] = {
+ offsetof(struct mptcp_sched_ops, init),
+ offsetof(struct mptcp_sched_ops, release),
+ offsetof(struct mptcp_sched_ops, get_subflow),
+};
+
+static const struct bpf_func_proto *
+bpf_mptcp_sched_get_func_proto(enum bpf_func_id func_id,
+ const struct bpf_prog *prog)
+{
+ return bpf_base_func_proto(func_id);
+}
+
+static const struct bpf_verifier_ops bpf_mptcp_sched_verifier_ops = {
+ .get_func_proto = bpf_mptcp_sched_get_func_proto,
+ .is_valid_access = btf_ctx_access,
+ .btf_struct_access = btf_struct_access,
+};
+
+static int bpf_mptcp_sched_reg(void *kdata)
+{
+ return mptcp_register_scheduler(kdata);
+}
+
+static void bpf_mptcp_sched_unreg(void *kdata)
+{
+ mptcp_unregister_scheduler(kdata);
+}
+
+static int bpf_mptcp_sched_check_member(const struct btf_type *t,
+ const struct btf_member *member)
+{
+ return 0;
+}
+
+static bool is_optional(u32 member_offset)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
+ if (member_offset == optional_ops[i])
+ return true;
+ }
+
+ return false;
+}
+
+static int bpf_mptcp_sched_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ const struct mptcp_sched_ops *usched;
+ struct mptcp_sched_ops *sched;
+ int prog_fd;
+ u32 moff;
+
+ usched = (const struct mptcp_sched_ops *)udata;
+ sched = (struct mptcp_sched_ops *)kdata;
+
+ moff = __btf_member_bit_offset(t, member) / 8;
+ switch (moff) {
+ case offsetof(struct mptcp_sched_ops, name):
+ if (bpf_obj_name_cpy(sched->name, usched->name,
+ sizeof(sched->name)) <= 0)
+ return -EINVAL;
+ if (mptcp_sched_find(usched->name))
+ return -EEXIST;
+ return 1;
+ }
+
+ if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
+ return 0;
+
+ /* Ensure bpf_prog is provided for compulsory func ptr */
+ prog_fd = (int)(*(unsigned long *)(udata + moff));
+ if (!prog_fd && !is_optional(moff))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int bpf_mptcp_sched_init(struct btf *btf)
+{
+ return 0;
+}
+
+struct bpf_struct_ops bpf_mptcp_sched_ops = {
+ .verifier_ops = &bpf_mptcp_sched_verifier_ops,
+ .reg = bpf_mptcp_sched_reg,
+ .unreg = bpf_mptcp_sched_unreg,
+ .check_member = bpf_mptcp_sched_check_member,
+ .init_member = bpf_mptcp_sched_init_member,
+ .init = bpf_mptcp_sched_init,
+ .name = "mptcp_sched_ops",
+};
+
struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
{
if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
--
2.34.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* Re: [PATCH mptcp-next v9 6/8] mptcp: add bpf_mptcp_sched_ops
2022-04-04 2:09 ` [PATCH mptcp-next v9 6/8] mptcp: add bpf_mptcp_sched_ops Geliang Tang
@ 2022-04-05 0:29 ` Mat Martineau
2022-04-05 11:37 ` Geliang Tang
0 siblings, 1 reply; 13+ messages in thread
From: Mat Martineau @ 2022-04-05 0:29 UTC (permalink / raw)
To: Geliang Tang; +Cc: mptcp
On Mon, 4 Apr 2022, Geliang Tang wrote:
> This patch implements a new struct bpf_struct_ops, bpf_mptcp_sched_ops.
> Register and unregister the bpf scheduler in .reg and .unreg.
>
> This implementation is similar to BPF TCP CC. And some code in this patch
> is from bpf_tcp_ca.c
>
> Signed-off-by: Geliang Tang <geliang.tang@suse.com>
> ---
> kernel/bpf/bpf_struct_ops_types.h | 4 ++
> net/mptcp/bpf.c | 102 ++++++++++++++++++++++++++++++
> 2 files changed, 106 insertions(+)
>
> diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h
> index 5678a9ddf817..5a6b0c0d8d3d 100644
> --- a/kernel/bpf/bpf_struct_ops_types.h
> +++ b/kernel/bpf/bpf_struct_ops_types.h
> @@ -8,5 +8,9 @@ BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
> #ifdef CONFIG_INET
> #include <net/tcp.h>
> BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
> +#ifdef CONFIG_MPTCP
> +#include <net/mptcp.h>
> +BPF_STRUCT_OPS_TYPE(mptcp_sched_ops)
> +#endif
> #endif
> #endif
> diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
> index 535602ba2582..647cb174d917 100644
> --- a/net/mptcp/bpf.c
> +++ b/net/mptcp/bpf.c
> @@ -10,8 +10,110 @@
> #define pr_fmt(fmt) "MPTCP: " fmt
>
> #include <linux/bpf.h>
> +#include <linux/bpf_verifier.h>
> +#include <linux/btf.h>
> +#include <linux/btf_ids.h>
> #include "protocol.h"
>
> +extern struct bpf_struct_ops bpf_mptcp_sched_ops;
> +extern struct btf *btf_vmlinux;
> +
> +static u32 optional_ops[] = {
> + offsetof(struct mptcp_sched_ops, init),
> + offsetof(struct mptcp_sched_ops, release),
> + offsetof(struct mptcp_sched_ops, get_subflow),
> +};
> +
> +static const struct bpf_func_proto *
> +bpf_mptcp_sched_get_func_proto(enum bpf_func_id func_id,
> + const struct bpf_prog *prog)
> +{
> + return bpf_base_func_proto(func_id);
> +}
> +
> +static const struct bpf_verifier_ops bpf_mptcp_sched_verifier_ops = {
> + .get_func_proto = bpf_mptcp_sched_get_func_proto,
> + .is_valid_access = btf_ctx_access,
Hi Geliang -
I'm mostly comparing this code to bpf_tcp_ca.c to try to get a frame of
reference for how BPF works.
Using 'btf_ctx_access' here seems like a less strict check than what the
CA code uses. bpf_tracing_btf_ctx_access() has more constraints like only
allowing READs. What's the reasoning behind the difference? I want to be
sure access is not more open than it needs to be.
> + .btf_struct_access = btf_struct_access,
Similar question here - bpf_tcp_ca.c has more strict checking.
- Mat
> +};
> +
> +static int bpf_mptcp_sched_reg(void *kdata)
> +{
> + return mptcp_register_scheduler(kdata);
> +}
> +
> +static void bpf_mptcp_sched_unreg(void *kdata)
> +{
> + mptcp_unregister_scheduler(kdata);
> +}
> +
> +static int bpf_mptcp_sched_check_member(const struct btf_type *t,
> + const struct btf_member *member)
> +{
> + return 0;
> +}
> +
> +static bool is_optional(u32 member_offset)
> +{
> + unsigned int i;
> +
> + for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
> + if (member_offset == optional_ops[i])
> + return true;
> + }
> +
> + return false;
> +}
> +
> +static int bpf_mptcp_sched_init_member(const struct btf_type *t,
> + const struct btf_member *member,
> + void *kdata, const void *udata)
> +{
> + const struct mptcp_sched_ops *usched;
> + struct mptcp_sched_ops *sched;
> + int prog_fd;
> + u32 moff;
> +
> + usched = (const struct mptcp_sched_ops *)udata;
> + sched = (struct mptcp_sched_ops *)kdata;
> +
> + moff = __btf_member_bit_offset(t, member) / 8;
> + switch (moff) {
> + case offsetof(struct mptcp_sched_ops, name):
> + if (bpf_obj_name_cpy(sched->name, usched->name,
> + sizeof(sched->name)) <= 0)
> + return -EINVAL;
> + if (mptcp_sched_find(usched->name))
> + return -EEXIST;
> + return 1;
> + }
> +
> + if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
> + return 0;
> +
> + /* Ensure bpf_prog is provided for compulsory func ptr */
> + prog_fd = (int)(*(unsigned long *)(udata + moff));
> + if (!prog_fd && !is_optional(moff))
> + return -EINVAL;
> +
> + return 0;
> +}
> +
> +static int bpf_mptcp_sched_init(struct btf *btf)
> +{
> + return 0;
> +}
> +
> +struct bpf_struct_ops bpf_mptcp_sched_ops = {
> + .verifier_ops = &bpf_mptcp_sched_verifier_ops,
> + .reg = bpf_mptcp_sched_reg,
> + .unreg = bpf_mptcp_sched_unreg,
> + .check_member = bpf_mptcp_sched_check_member,
> + .init_member = bpf_mptcp_sched_init_member,
> + .init = bpf_mptcp_sched_init,
> + .name = "mptcp_sched_ops",
> +};
> +
> struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
> {
> if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
> --
> 2.34.1
>
>
>
--
Mat Martineau
Intel
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH mptcp-next v9 6/8] mptcp: add bpf_mptcp_sched_ops
2022-04-05 0:29 ` Mat Martineau
@ 2022-04-05 11:37 ` Geliang Tang
2022-04-06 23:53 ` Mat Martineau
0 siblings, 1 reply; 13+ messages in thread
From: Geliang Tang @ 2022-04-05 11:37 UTC (permalink / raw)
To: Mat Martineau; +Cc: mptcp
Hi Mat,
On Mon, Apr 04, 2022 at 05:29:43PM -0700, Mat Martineau wrote:
> On Mon, 4 Apr 2022, Geliang Tang wrote:
>
> > This patch implements a new struct bpf_struct_ops, bpf_mptcp_sched_ops.
> > Register and unregister the bpf scheduler in .reg and .unreg.
> >
> > This implementation is similar to BPF TCP CC. And some code in this patch
> > is from bpf_tcp_ca.c
> >
> > Signed-off-by: Geliang Tang <geliang.tang@suse.com>
> > ---
> > kernel/bpf/bpf_struct_ops_types.h | 4 ++
> > net/mptcp/bpf.c | 102 ++++++++++++++++++++++++++++++
> > 2 files changed, 106 insertions(+)
> >
> > diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h
> > index 5678a9ddf817..5a6b0c0d8d3d 100644
> > --- a/kernel/bpf/bpf_struct_ops_types.h
> > +++ b/kernel/bpf/bpf_struct_ops_types.h
> > @@ -8,5 +8,9 @@ BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
> > #ifdef CONFIG_INET
> > #include <net/tcp.h>
> > BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
> > +#ifdef CONFIG_MPTCP
> > +#include <net/mptcp.h>
> > +BPF_STRUCT_OPS_TYPE(mptcp_sched_ops)
> > +#endif
> > #endif
> > #endif
> > diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
> > index 535602ba2582..647cb174d917 100644
> > --- a/net/mptcp/bpf.c
> > +++ b/net/mptcp/bpf.c
> > @@ -10,8 +10,110 @@
> > #define pr_fmt(fmt) "MPTCP: " fmt
> >
> > #include <linux/bpf.h>
> > +#include <linux/bpf_verifier.h>
> > +#include <linux/btf.h>
> > +#include <linux/btf_ids.h>
> > #include "protocol.h"
> >
> > +extern struct bpf_struct_ops bpf_mptcp_sched_ops;
> > +extern struct btf *btf_vmlinux;
> > +
> > +static u32 optional_ops[] = {
> > + offsetof(struct mptcp_sched_ops, init),
> > + offsetof(struct mptcp_sched_ops, release),
> > + offsetof(struct mptcp_sched_ops, get_subflow),
> > +};
> > +
> > +static const struct bpf_func_proto *
> > +bpf_mptcp_sched_get_func_proto(enum bpf_func_id func_id,
> > + const struct bpf_prog *prog)
> > +{
> > + return bpf_base_func_proto(func_id);
> > +}
> > +
> > +static const struct bpf_verifier_ops bpf_mptcp_sched_verifier_ops = {
> > + .get_func_proto = bpf_mptcp_sched_get_func_proto,
> > + .is_valid_access = btf_ctx_access,
>
> Hi Geliang -
>
> I'm mostly comparing this code to bpf_tcp_ca.c to try to get a frame of
> reference for how BPF works.
>
> Using 'btf_ctx_access' here seems like a less strict check than what the CA
> code uses. bpf_tracing_btf_ctx_access() has more constraints like only
> allowing READs. What's the reasoning behind the difference? I want to be
> sure access is not more open than it needs to be.
I agree, bpf_tracing_btf_ctx_access() is much better. I sent a squash-to
patch to fix this. I wanted to keep the accesses more generic and not do
specific, additional checks unless we need them in the future, so I used
btf_ctx_access and btf_struct_access.
Thanks,
-Geliang
>
> > + .btf_struct_access = btf_struct_access,
>
> Similar question here - bpf_tcp_ca.c has more strict checking.
>
> - Mat
>
> > +};
> > +
> > +static int bpf_mptcp_sched_reg(void *kdata)
> > +{
> > + return mptcp_register_scheduler(kdata);
> > +}
> > +
> > +static void bpf_mptcp_sched_unreg(void *kdata)
> > +{
> > + mptcp_unregister_scheduler(kdata);
> > +}
> > +
> > +static int bpf_mptcp_sched_check_member(const struct btf_type *t,
> > + const struct btf_member *member)
> > +{
> > + return 0;
> > +}
> > +
> > +static bool is_optional(u32 member_offset)
> > +{
> > + unsigned int i;
> > +
> > + for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
> > + if (member_offset == optional_ops[i])
> > + return true;
> > + }
> > +
> > + return false;
> > +}
> > +
> > +static int bpf_mptcp_sched_init_member(const struct btf_type *t,
> > + const struct btf_member *member,
> > + void *kdata, const void *udata)
> > +{
> > + const struct mptcp_sched_ops *usched;
> > + struct mptcp_sched_ops *sched;
> > + int prog_fd;
> > + u32 moff;
> > +
> > + usched = (const struct mptcp_sched_ops *)udata;
> > + sched = (struct mptcp_sched_ops *)kdata;
> > +
> > + moff = __btf_member_bit_offset(t, member) / 8;
> > + switch (moff) {
> > + case offsetof(struct mptcp_sched_ops, name):
> > + if (bpf_obj_name_cpy(sched->name, usched->name,
> > + sizeof(sched->name)) <= 0)
> > + return -EINVAL;
> > + if (mptcp_sched_find(usched->name))
> > + return -EEXIST;
> > + return 1;
> > + }
> > +
> > + if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
> > + return 0;
> > +
> > + /* Ensure bpf_prog is provided for compulsory func ptr */
> > + prog_fd = (int)(*(unsigned long *)(udata + moff));
> > + if (!prog_fd && !is_optional(moff))
> > + return -EINVAL;
> > +
> > + return 0;
> > +}
> > +
> > +static int bpf_mptcp_sched_init(struct btf *btf)
> > +{
> > + return 0;
> > +}
> > +
> > +struct bpf_struct_ops bpf_mptcp_sched_ops = {
> > + .verifier_ops = &bpf_mptcp_sched_verifier_ops,
> > + .reg = bpf_mptcp_sched_reg,
> > + .unreg = bpf_mptcp_sched_unreg,
> > + .check_member = bpf_mptcp_sched_check_member,
> > + .init_member = bpf_mptcp_sched_init_member,
> > + .init = bpf_mptcp_sched_init,
> > + .name = "mptcp_sched_ops",
> > +};
> > +
> > struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
> > {
> > if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
> > --
> > 2.34.1
> >
> >
> >
>
> --
> Mat Martineau
> Intel
>
^ permalink raw reply [flat|nested] 13+ messages in thread
* Re: [PATCH mptcp-next v9 6/8] mptcp: add bpf_mptcp_sched_ops
2022-04-05 11:37 ` Geliang Tang
@ 2022-04-06 23:53 ` Mat Martineau
0 siblings, 0 replies; 13+ messages in thread
From: Mat Martineau @ 2022-04-06 23:53 UTC (permalink / raw)
To: Geliang Tang; +Cc: mptcp
On Tue, 5 Apr 2022, Geliang Tang wrote:
> Hi Mat,
>
> On Mon, Apr 04, 2022 at 05:29:43PM -0700, Mat Martineau wrote:
>> On Mon, 4 Apr 2022, Geliang Tang wrote:
>>
>>> This patch implements a new struct bpf_struct_ops, bpf_mptcp_sched_ops.
>>> Register and unregister the bpf scheduler in .reg and .unreg.
>>>
>>> This implementation is similar to BPF TCP CC. And some code in this patch
>>> is from bpf_tcp_ca.c
>>>
>>> Signed-off-by: Geliang Tang <geliang.tang@suse.com>
>>> ---
>>> kernel/bpf/bpf_struct_ops_types.h | 4 ++
>>> net/mptcp/bpf.c | 102 ++++++++++++++++++++++++++++++
>>> 2 files changed, 106 insertions(+)
>>>
>>> diff --git a/kernel/bpf/bpf_struct_ops_types.h b/kernel/bpf/bpf_struct_ops_types.h
>>> index 5678a9ddf817..5a6b0c0d8d3d 100644
>>> --- a/kernel/bpf/bpf_struct_ops_types.h
>>> +++ b/kernel/bpf/bpf_struct_ops_types.h
>>> @@ -8,5 +8,9 @@ BPF_STRUCT_OPS_TYPE(bpf_dummy_ops)
>>> #ifdef CONFIG_INET
>>> #include <net/tcp.h>
>>> BPF_STRUCT_OPS_TYPE(tcp_congestion_ops)
>>> +#ifdef CONFIG_MPTCP
>>> +#include <net/mptcp.h>
>>> +BPF_STRUCT_OPS_TYPE(mptcp_sched_ops)
>>> +#endif
>>> #endif
>>> #endif
>>> diff --git a/net/mptcp/bpf.c b/net/mptcp/bpf.c
>>> index 535602ba2582..647cb174d917 100644
>>> --- a/net/mptcp/bpf.c
>>> +++ b/net/mptcp/bpf.c
>>> @@ -10,8 +10,110 @@
>>> #define pr_fmt(fmt) "MPTCP: " fmt
>>>
>>> #include <linux/bpf.h>
>>> +#include <linux/bpf_verifier.h>
>>> +#include <linux/btf.h>
>>> +#include <linux/btf_ids.h>
>>> #include "protocol.h"
>>>
>>> +extern struct bpf_struct_ops bpf_mptcp_sched_ops;
>>> +extern struct btf *btf_vmlinux;
>>> +
>>> +static u32 optional_ops[] = {
>>> + offsetof(struct mptcp_sched_ops, init),
>>> + offsetof(struct mptcp_sched_ops, release),
>>> + offsetof(struct mptcp_sched_ops, get_subflow),
>>> +};
>>> +
>>> +static const struct bpf_func_proto *
>>> +bpf_mptcp_sched_get_func_proto(enum bpf_func_id func_id,
>>> + const struct bpf_prog *prog)
>>> +{
>>> + return bpf_base_func_proto(func_id);
>>> +}
>>> +
>>> +static const struct bpf_verifier_ops bpf_mptcp_sched_verifier_ops = {
>>> + .get_func_proto = bpf_mptcp_sched_get_func_proto,
>>> + .is_valid_access = btf_ctx_access,
>>
>> Hi Geliang -
>>
>> I'm mostly comparing this code to bpf_tcp_ca.c to try to get a frame of
>> reference for how BPF works.
>>
>> Using 'btf_ctx_access' here seems like a less strict check than what the CA
>> code uses. bpf_tracing_btf_ctx_access() has more constraints like only
>> allowing READs. What's the reasoning behind the difference? I want to be
>> sure access is not more open than it needs to be.
>
> I agree, bpf_tracing_btf_ctx_access() is much better. I sent a squash-to
> patch to fix this. I want to keep the accesses more generic, not do specific,
> additional checks unless we need them in the future. So I used btf_ctx_access
> and btf_struct_access.
>
Ok, thanks for the squash-to patch.
>>
>>> + .btf_struct_access = btf_struct_access,
While I realize it's simpler to refer to the existing function, I think
it's safer to add the BPF_READ check similar to what the BPF CA code does.
- Mat
>>
>> Similar question here - bpf_tcp_ca.c has more strict checking.
>>
>> - Mat
>>
>>> +};
>>> +
>>> +static int bpf_mptcp_sched_reg(void *kdata)
>>> +{
>>> + return mptcp_register_scheduler(kdata);
>>> +}
>>> +
>>> +static void bpf_mptcp_sched_unreg(void *kdata)
>>> +{
>>> + mptcp_unregister_scheduler(kdata);
>>> +}
>>> +
>>> +static int bpf_mptcp_sched_check_member(const struct btf_type *t,
>>> + const struct btf_member *member)
>>> +{
>>> + return 0;
>>> +}
>>> +
>>> +static bool is_optional(u32 member_offset)
>>> +{
>>> + unsigned int i;
>>> +
>>> + for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
>>> + if (member_offset == optional_ops[i])
>>> + return true;
>>> + }
>>> +
>>> + return false;
>>> +}
>>> +
>>> +static int bpf_mptcp_sched_init_member(const struct btf_type *t,
>>> + const struct btf_member *member,
>>> + void *kdata, const void *udata)
>>> +{
>>> + const struct mptcp_sched_ops *usched;
>>> + struct mptcp_sched_ops *sched;
>>> + int prog_fd;
>>> + u32 moff;
>>> +
>>> + usched = (const struct mptcp_sched_ops *)udata;
>>> + sched = (struct mptcp_sched_ops *)kdata;
>>> +
>>> + moff = __btf_member_bit_offset(t, member) / 8;
>>> + switch (moff) {
>>> + case offsetof(struct mptcp_sched_ops, name):
>>> + if (bpf_obj_name_cpy(sched->name, usched->name,
>>> + sizeof(sched->name)) <= 0)
>>> + return -EINVAL;
>>> + if (mptcp_sched_find(usched->name))
>>> + return -EEXIST;
>>> + return 1;
>>> + }
>>> +
>>> + if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
>>> + return 0;
>>> +
>>> + /* Ensure bpf_prog is provided for compulsory func ptr */
>>> + prog_fd = (int)(*(unsigned long *)(udata + moff));
>>> + if (!prog_fd && !is_optional(moff))
>>> + return -EINVAL;
>>> +
>>> + return 0;
>>> +}
>>> +
>>> +static int bpf_mptcp_sched_init(struct btf *btf)
>>> +{
>>> + return 0;
>>> +}
>>> +
>>> +struct bpf_struct_ops bpf_mptcp_sched_ops = {
>>> + .verifier_ops = &bpf_mptcp_sched_verifier_ops,
>>> + .reg = bpf_mptcp_sched_reg,
>>> + .unreg = bpf_mptcp_sched_unreg,
>>> + .check_member = bpf_mptcp_sched_check_member,
>>> + .init_member = bpf_mptcp_sched_init_member,
>>> + .init = bpf_mptcp_sched_init,
>>> + .name = "mptcp_sched_ops",
>>> +};
>>> +
>>> struct mptcp_sock *bpf_mptcp_sock_from_subflow(struct sock *sk)
>>> {
>>> if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk_is_mptcp(sk))
>>> --
>>> 2.34.1
>>>
>>>
>>>
>>
>> --
>> Mat Martineau
>> Intel
>>
>
>
--
Mat Martineau
Intel
^ permalink raw reply [flat|nested] 13+ messages in thread
* [PATCH mptcp-next v9 7/8] selftests: bpf: add bpf_first scheduler
2022-04-04 2:09 [PATCH mptcp-next v9 0/8] BPF packet scheduler Geliang Tang
` (5 preceding siblings ...)
2022-04-04 2:09 ` [PATCH mptcp-next v9 6/8] mptcp: add bpf_mptcp_sched_ops Geliang Tang
@ 2022-04-04 2:09 ` Geliang Tang
2022-04-04 2:10 ` [PATCH mptcp-next v9 8/8] selftests: bpf: add bpf_first test Geliang Tang
7 siblings, 0 replies; 13+ messages in thread
From: Geliang Tang @ 2022-04-04 2:09 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch implements the simplest MPTCP scheduler, named bpf_first,
which always picks the first subflow to send data. It's a sample of
MPTCP BPF scheduler implementations.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
.../testing/selftests/bpf/bpf_mptcp_helpers.h | 12 ++++++++
.../selftests/bpf/progs/mptcp_bpf_first.c | 30 +++++++++++++++++++
2 files changed, 42 insertions(+)
create mode 100644 tools/testing/selftests/bpf/progs/mptcp_bpf_first.c
diff --git a/tools/testing/selftests/bpf/bpf_mptcp_helpers.h b/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
index b5a43b108982..5135eb6710e8 100644
--- a/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_mptcp_helpers.h
@@ -14,4 +14,16 @@ struct mptcp_sock {
char ca_name[TCP_CA_NAME_MAX];
} __attribute__((preserve_access_index));
+#define MPTCP_SCHED_NAME_MAX 16
+
+struct mptcp_sched_ops {
+ char name[MPTCP_SCHED_NAME_MAX];
+
+ void (*init)(struct mptcp_sock *msk);
+ void (*release)(struct mptcp_sock *msk);
+
+ struct sock * (*get_subflow)(struct mptcp_sock *msk);
+ void *owner;
+};
+
#endif
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_first.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_first.c
new file mode 100644
index 000000000000..21890a60f9c2
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_first.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2022, SUSE. */
+
+#include <linux/bpf.h>
+#include "bpf_mptcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+SEC("struct_ops/mptcp_sched_first_init")
+void BPF_PROG(mptcp_sched_first_init, struct mptcp_sock *msk)
+{
+}
+
+SEC("struct_ops/mptcp_sched_first_release")
+void BPF_PROG(mptcp_sched_first_release, struct mptcp_sock *msk)
+{
+}
+
+struct sock *BPF_STRUCT_OPS(bpf_first_get_subflow, struct mptcp_sock *msk)
+{
+ return msk->first;
+}
+
+SEC(".struct_ops")
+struct mptcp_sched_ops first = {
+ .init = (void *)mptcp_sched_first_init,
+ .release = (void *)mptcp_sched_first_release,
+ .get_subflow = (void *)bpf_first_get_subflow,
+ .name = "bpf_first",
+};
--
2.34.1
^ permalink raw reply related [flat|nested] 13+ messages in thread
* [PATCH mptcp-next v9 8/8] selftests: bpf: add bpf_first test
2022-04-04 2:09 [PATCH mptcp-next v9 0/8] BPF packet scheduler Geliang Tang
` (6 preceding siblings ...)
2022-04-04 2:09 ` [PATCH mptcp-next v9 7/8] selftests: bpf: add bpf_first scheduler Geliang Tang
@ 2022-04-04 2:10 ` Geliang Tang
2022-04-05 10:23 ` selftests: bpf: add bpf_first test: Tests Results MPTCP CI
7 siblings, 1 reply; 13+ messages in thread
From: Geliang Tang @ 2022-04-04 2:10 UTC (permalink / raw)
To: mptcp; +Cc: Geliang Tang
This patch extends the MPTCP test base to support MPTCP packet
scheduler tests. Add the bpf_first scheduler test in it. Use sysctl
to set net.mptcp.scheduler to use this sched.
Some code in send_data() is from bpf_tcp_ca.c.
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
.../testing/selftests/bpf/prog_tests/mptcp.c | 114 ++++++++++++++++++
1 file changed, 114 insertions(+)
diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index 7e704f5aab05..467769e229f5 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -4,6 +4,9 @@
#include <test_progs.h>
#include "cgroup_helpers.h"
#include "network_helpers.h"
+#include "mptcp_bpf_first.skel.h"
+
+#define min(a, b) ((a) < (b) ? (a) : (b))
#ifndef TCP_CA_NAME_MAX
#define TCP_CA_NAME_MAX 16
@@ -19,6 +22,8 @@ struct mptcp_storage {
};
static char monitor_log_path[64];
+static const unsigned int total_bytes = 10 * 1024 * 1024;
+static int stop, duration;
static int verify_tsk(int map_fd, int client_fd)
{
@@ -251,8 +256,117 @@ void test_base(void)
close(cgroup_fd);
}
+static void *server(void *arg)
+{
+ int lfd = (int)(long)arg, err = 0, fd;
+ ssize_t nr_sent = 0, bytes = 0;
+ char batch[1500];
+
+ fd = accept(lfd, NULL, NULL);
+ while (fd == -1) {
+ if (errno == EINTR)
+ continue;
+ err = -errno;
+ goto done;
+ }
+
+ if (settimeo(fd, 0)) {
+ err = -errno;
+ goto done;
+ }
+
+ while (bytes < total_bytes && !READ_ONCE(stop)) {
+ nr_sent = send(fd, &batch,
+ min(total_bytes - bytes, sizeof(batch)), 0);
+ if (nr_sent == -1 && errno == EINTR)
+ continue;
+ if (nr_sent == -1) {
+ err = -errno;
+ break;
+ }
+ bytes += nr_sent;
+ }
+
+ CHECK(bytes != total_bytes, "send", "%zd != %u nr_sent:%zd errno:%d\n",
+ bytes, total_bytes, nr_sent, errno);
+
+done:
+ if (fd >= 0)
+ close(fd);
+ if (err) {
+ WRITE_ONCE(stop, 1);
+ return ERR_PTR(err);
+ }
+ return NULL;
+}
+
+static void send_data(int lfd, int fd)
+{
+ ssize_t nr_recv = 0, bytes = 0;
+ pthread_t srv_thread;
+ void *thread_ret;
+ char batch[1500];
+ int err;
+
+ WRITE_ONCE(stop, 0);
+
+ err = pthread_create(&srv_thread, NULL, server, (void *)(long)lfd);
+ if (CHECK(err != 0, "pthread_create", "err:%d errno:%d\n", err, errno))
+ return;
+
+ /* recv total_bytes */
+ while (bytes < total_bytes && !READ_ONCE(stop)) {
+ nr_recv = recv(fd, &batch,
+ min(total_bytes - bytes, sizeof(batch)), 0);
+ if (nr_recv == -1 && errno == EINTR)
+ continue;
+ if (nr_recv == -1)
+ break;
+ bytes += nr_recv;
+ }
+
+ CHECK(bytes != total_bytes, "recv", "%zd != %u nr_recv:%zd errno:%d\n",
+ bytes, total_bytes, nr_recv, errno);
+
+ WRITE_ONCE(stop, 1);
+
+ pthread_join(srv_thread, &thread_ret);
+ CHECK(IS_ERR(thread_ret), "pthread_join", "thread_ret:%ld",
+ PTR_ERR(thread_ret));
+}
+
+static void test_first(void)
+{
+ struct mptcp_bpf_first *first_skel;
+ int server_fd, client_fd;
+ struct bpf_link *link;
+
+ first_skel = mptcp_bpf_first__open_and_load();
+ if (CHECK(!first_skel, "bpf_first__open_and_load", "failed\n"))
+ return;
+
+ link = bpf_map__attach_struct_ops(first_skel->maps.first);
+ if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
+ mptcp_bpf_first__destroy(first_skel);
+ return;
+ }
+
+ system("sysctl -q net.mptcp.scheduler=bpf_first");
+ server_fd = start_mptcp_server(AF_INET, NULL, 0, 0);
+ client_fd = connect_to_mptcp_fd(server_fd, 0);
+
+ send_data(server_fd, client_fd);
+
+ close(client_fd);
+ close(server_fd);
+ bpf_link__destroy(link);
+ mptcp_bpf_first__destroy(first_skel);
+}
+
void test_mptcp(void)
{
if (test__start_subtest("base"))
test_base();
+ if (test__start_subtest("first"))
+ test_first();
}
--
2.34.1
^ permalink raw reply related [flat|nested] 13+ messages in thread