* [PATCH mptcp-next v2 0/2] add bpf_stale scheduler
@ 2023-08-03  6:51 Geliang Tang
  2023-08-03  6:51 ` [PATCH mptcp-next v2 1/2] selftests/bpf: Add " Geliang Tang
  2023-08-03  6:51 ` [PATCH mptcp-next v2 2/2] selftests/bpf: Add bpf_stale test Geliang Tang
  0 siblings, 2 replies; 5+ messages in thread
From: Geliang Tang @ 2023-08-03  6:51 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

v2:
 - store subflow ids instead of subflow pointers in sk_storage.

v1:
- This patchset adds the new bpf_stale scheduler. It uses sk_storage to
save the stale map instead of managing it with the subflow->stale flag.

Geliang Tang (2):
  selftests/bpf: Add bpf_stale scheduler
  selftests/bpf: Add bpf_stale test

 tools/testing/selftests/bpf/bpf_tcp_helpers.h |   1 +
 .../testing/selftests/bpf/prog_tests/mptcp.c  |  38 +++++
 .../selftests/bpf/progs/mptcp_bpf_stale.c     | 161 ++++++++++++++++++
 3 files changed, 200 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c

-- 
2.35.3



* [PATCH mptcp-next v2 1/2] selftests/bpf: Add bpf_stale scheduler
  2023-08-03  6:51 [PATCH mptcp-next v2 0/2] add bpf_stale scheduler Geliang Tang
@ 2023-08-03  6:51 ` Geliang Tang
  2023-08-09  0:56   ` Mat Martineau
  2023-08-03  6:51 ` [PATCH mptcp-next v2 2/2] selftests/bpf: Add bpf_stale test Geliang Tang
  1 sibling, 1 reply; 5+ messages in thread
From: Geliang Tang @ 2023-08-03  6:51 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

This patch implements setting a subflow as stale or unstale in a BPF MPTCP
scheduler named bpf_stale. The stale subflow IDs are stored in a map in
sk_storage.

Two helpers, mptcp_subflow_set_stale() and mptcp_subflow_clear_stale(),
are added.

In this test, subflow 1 is set as stale in bpf_stale_data_init(). In
bpf_stale_get_subflow(), each subflow is checked for staleness so that an
unstale subflow is selected to send data.

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 tools/testing/selftests/bpf/bpf_tcp_helpers.h |   1 +
 .../selftests/bpf/progs/mptcp_bpf_stale.c     | 161 ++++++++++++++++++
 2 files changed, 162 insertions(+)
 create mode 100644 tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c

diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
index 776c54948a4a..0a7c9fca6a07 100644
--- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
+++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
@@ -239,6 +239,7 @@ struct mptcp_subflow_context {
 	unsigned long avg_pacing_rate;
 	__u32	backup : 1;
 	__u8	stale_count;
+	__u32	subflow_id;
 	struct	sock *tcp_sock;	    /* tcp sk backpointer */
 } __attribute__((preserve_access_index));
 
diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c
new file mode 100644
index 000000000000..995c7448c394
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2023, SUSE. */
+
+#include <linux/bpf.h>
+#include "bpf_tcp_helpers.h"
+
+char _license[] SEC("license") = "GPL";
+
+struct mptcp_stale_storage {
+	__u8 nr;
+	__u32 ids[MPTCP_SUBFLOWS_MAX];
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
+	__uint(map_flags, BPF_F_NO_PREALLOC);
+	__type(key, int);
+	__type(value, struct mptcp_stale_storage);
+} mptcp_stale_map SEC(".maps");
+
+static void mptcp_subflow_set_stale(struct mptcp_stale_storage *storage,
+				    __u32 subflow_id)
+{
+	if (!subflow_id)
+		return;
+
+	for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
+		if (storage->ids[i] == subflow_id)
+			return;
+	}
+
+	if (storage->nr < MPTCP_SUBFLOWS_MAX - 1)
+		storage->ids[storage->nr++] = subflow_id;
+}
+
+static void mptcp_subflow_clear_stale(struct mptcp_stale_storage *storage,
+				      __u32 subflow_id)
+{
+	if (!subflow_id)
+		return;
+
+	for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
+		if (storage->ids[i] == subflow_id) {
+			for (int j = i; j < MPTCP_SUBFLOWS_MAX - 1; j++) {
+				if (!storage->ids[j + 1])
+					break;
+				storage->ids[j] = storage->ids[j + 1];
+				storage->ids[j + 1] = 0;
+			}
+			storage->nr--;
+			return;
+		}
+	}
+}
+
+static bool mptcp_subflow_is_stale(struct mptcp_stale_storage *storage,
+				   __u32 subflow_id)
+{
+	for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
+		if (storage->ids[i] == subflow_id)
+			return true;
+	}
+
+	return false;
+}
+
+static bool mptcp_subflow_is_active(struct mptcp_sched_data *data,
+				    __u32 subflow_id)
+{
+	for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
+		struct mptcp_subflow_context *subflow;
+
+		subflow = mptcp_subflow_ctx_by_pos(data, i);
+		if (!subflow)
+			break;
+		if (subflow->subflow_id == subflow_id)
+			return true;
+	}
+
+	return false;
+}
+
+SEC("struct_ops/mptcp_sched_stale_init")
+void BPF_PROG(mptcp_sched_stale_init, struct mptcp_sock *msk)
+{
+}
+
+SEC("struct_ops/mptcp_sched_stale_release")
+void BPF_PROG(mptcp_sched_stale_release, struct mptcp_sock *msk)
+{
+}
+
+void BPF_STRUCT_OPS(bpf_stale_data_init, struct mptcp_sock *msk,
+		    struct mptcp_sched_data *data)
+{
+	struct mptcp_subflow_context *subflow;
+	struct mptcp_stale_storage *storage;
+
+	mptcp_sched_data_set_contexts(msk, data);
+
+	storage = bpf_sk_storage_get(&mptcp_stale_map, msk, 0,
+				     BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!storage)
+		return;
+
+	for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
+		if (!mptcp_subflow_is_active(data, storage->ids[i]))
+			mptcp_subflow_clear_stale(storage, storage->ids[i]);
+	}
+
+	subflow = mptcp_subflow_ctx_by_pos(data, 0);
+	if (subflow) {
+		mptcp_subflow_set_stale(storage, subflow->subflow_id);
+		mptcp_subflow_clear_stale(storage, subflow->subflow_id);
+	}
+
+	subflow = mptcp_subflow_ctx_by_pos(data, 1);
+	if (subflow) {
+		mptcp_subflow_set_stale(storage, subflow->subflow_id);
+		mptcp_subflow_clear_stale(storage, subflow->subflow_id);
+		mptcp_subflow_set_stale(storage, subflow->subflow_id);
+	}
+}
+
+int BPF_STRUCT_OPS(bpf_stale_get_subflow, struct mptcp_sock *msk,
+		   const struct mptcp_sched_data *data)
+{
+	struct mptcp_stale_storage *storage;
+	int nr = -1;
+
+	storage = bpf_sk_storage_get(&mptcp_stale_map, msk, 0,
+				     BPF_LOCAL_STORAGE_GET_F_CREATE);
+	if (!storage)
+		return -1;
+
+	for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
+		struct mptcp_subflow_context *subflow;
+
+		subflow = mptcp_subflow_ctx_by_pos(data, i);
+		if (!subflow)
+			break;
+
+		if (mptcp_subflow_is_stale(storage, subflow->subflow_id))
+			continue;
+
+		nr = i;
+	}
+
+	if (nr != -1)
+		mptcp_subflow_set_scheduled(mptcp_subflow_ctx_by_pos(data, nr), true);
+	return 0;
+}
+
+SEC(".struct_ops")
+struct mptcp_sched_ops stale = {
+	.init		= (void *)mptcp_sched_stale_init,
+	.release	= (void *)mptcp_sched_stale_release,
+	.data_init	= (void *)bpf_stale_data_init,
+	.get_subflow	= (void *)bpf_stale_get_subflow,
+	.name		= "bpf_stale",
+};
-- 
2.35.3



* [PATCH mptcp-next v2 2/2] selftests/bpf: Add bpf_stale test
  2023-08-03  6:51 [PATCH mptcp-next v2 0/2] add bpf_stale scheduler Geliang Tang
  2023-08-03  6:51 ` [PATCH mptcp-next v2 1/2] selftests/bpf: Add " Geliang Tang
@ 2023-08-03  6:51 ` Geliang Tang
  2023-08-03  8:02   ` selftests/bpf: Add bpf_stale test: Tests Results MPTCP CI
  1 sibling, 1 reply; 5+ messages in thread
From: Geliang Tang @ 2023-08-03  6:51 UTC (permalink / raw)
  To: mptcp; +Cc: Geliang Tang

This patch adds the bpf_stale scheduler test: test_stale(). Use sysctl to
set net.mptcp.scheduler to this scheduler. Add two veth net devices to
simulate the multiple-address case. Use the 'ip mptcp endpoint' command to
add the new endpoint ADDR_2 to the PM netlink. Send data, then check the
bytes_sent counters in the 'ss' output to make sure the data has been sent
only on ADDR_1, since ADDR_2 is set as stale.
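
The test automates roughly the equivalent of the following commands inside
the test netns (an illustrative sketch: the exact addresses and the veth /
endpoint setup are handled by the sched_init() helper in
prog_tests/mptcp.c):

    sysctl -w net.mptcp.scheduler=bpf_stale
    ip mptcp endpoint add "${ADDR_2}" subflow
    ss -it | grep bytes_sent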

Signed-off-by: Geliang Tang <geliang.tang@suse.com>
---
 .../testing/selftests/bpf/prog_tests/mptcp.c  | 38 +++++++++++++++++++
 1 file changed, 38 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c
index 09770aa31f4a..5accc34d35e5 100644
--- a/tools/testing/selftests/bpf/prog_tests/mptcp.c
+++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c
@@ -12,6 +12,7 @@
 #include "mptcp_bpf_rr.skel.h"
 #include "mptcp_bpf_red.skel.h"
 #include "mptcp_bpf_burst.skel.h"
+#include "mptcp_bpf_stale.skel.h"
 
 char NS_TEST[32];
 
@@ -524,6 +525,41 @@ static void test_burst(void)
 	mptcp_bpf_burst__destroy(burst_skel);
 }
 
+static void test_stale(void)
+{
+	struct mptcp_bpf_stale *stale_skel;
+	int server_fd, client_fd;
+	struct nstoken *nstoken;
+	struct bpf_link *link;
+
+	stale_skel = mptcp_bpf_stale__open_and_load();
+	if (!ASSERT_OK_PTR(stale_skel, "bpf_stale__open_and_load"))
+		return;
+
+	link = bpf_map__attach_struct_ops(stale_skel->maps.stale);
+	if (!ASSERT_OK_PTR(link, "bpf_map__attach_struct_ops")) {
+		mptcp_bpf_stale__destroy(stale_skel);
+		return;
+	}
+
+	nstoken = sched_init("subflow", "bpf_stale");
+	if (!ASSERT_OK_PTR(nstoken, "sched_init:bpf_stale"))
+		goto fail;
+	server_fd = start_mptcp_server(AF_INET, ADDR_1, PORT_1, 0);
+	client_fd = connect_to_fd(server_fd, 0);
+
+	send_data(server_fd, client_fd, "bpf_stale");
+	ASSERT_OK(has_bytes_sent(ADDR_1), "has_bytes_sent addr_1");
+	ASSERT_GT(has_bytes_sent(ADDR_2), 0, "has_bytes_sent addr_2");
+
+	close(client_fd);
+	close(server_fd);
+fail:
+	cleanup_netns(nstoken);
+	bpf_link__destroy(link);
+	mptcp_bpf_stale__destroy(stale_skel);
+}
+
 void test_mptcp(void)
 {
 	if (test__start_subtest("base"))
@@ -540,4 +576,6 @@ void test_mptcp(void)
 		test_red();
 	if (test__start_subtest("burst"))
 		test_burst();
+	if (test__start_subtest("stale"))
+		test_stale();
 }
-- 
2.35.3



* Re: selftests/bpf: Add bpf_stale test: Tests Results
  2023-08-03  6:51 ` [PATCH mptcp-next v2 2/2] selftests/bpf: Add bpf_stale test Geliang Tang
@ 2023-08-03  8:02   ` MPTCP CI
  0 siblings, 0 replies; 5+ messages in thread
From: MPTCP CI @ 2023-08-03  8:02 UTC (permalink / raw)
  To: Geliang Tang; +Cc: mptcp

Hi Geliang,

Thank you for your modifications, that's great!

Our CI did some validations and here is its report:

- KVM Validation: normal (except selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/5811024205447168
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5811024205447168/summary/summary.txt

- KVM Validation: normal (only selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/5248074252025856
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/5248074252025856/summary/summary.txt

- KVM Validation: debug (only selftest_mptcp_join):
  - Success! ✅:
  - Task: https://cirrus-ci.com/task/4966599275315200
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/4966599275315200/summary/summary.txt

- KVM Validation: debug (except selftest_mptcp_join):
  - Unstable: 1 failed test(s): packetdrill_mp_join 🔴:
  - Task: https://cirrus-ci.com/task/6373974158868480
  - Summary: https://api.cirrus-ci.com/v1/artifact/task/6373974158868480/summary/summary.txt

Initiator: Patchew Applier
Commits: https://github.com/multipath-tcp/mptcp_net-next/commits/2174a36d15db


If there are some issues, you can reproduce them using the same environment as
the one used by the CI thanks to a docker image, e.g.:

    $ cd [kernel source code]
    $ docker run -v "${PWD}:${PWD}:rw" -w "${PWD}" --privileged --rm -it \
        --pull always mptcp/mptcp-upstream-virtme-docker:latest \
        auto-debug

For more details:

    https://github.com/multipath-tcp/mptcp-upstream-virtme-docker


Please note that despite all the efforts already made to have a stable test
suite when executed on a public CI like this one, it is possible that some
reported issues are not due to your modifications. Still, do not hesitate
to help us improve that ;-)

Cheers,
MPTCP GH Action bot
Bot operated by Matthieu Baerts (Tessares)


* Re: [PATCH mptcp-next v2 1/2] selftests/bpf: Add bpf_stale scheduler
  2023-08-03  6:51 ` [PATCH mptcp-next v2 1/2] selftests/bpf: Add " Geliang Tang
@ 2023-08-09  0:56   ` Mat Martineau
  0 siblings, 0 replies; 5+ messages in thread
From: Mat Martineau @ 2023-08-09  0:56 UTC (permalink / raw)
  To: Geliang Tang; +Cc: mptcp

On Thu, 3 Aug 2023, Geliang Tang wrote:

> This patch implements setting a subflow as stale or unstale in a BPF MPTCP
> scheduler named bpf_stale. The stale subflow IDs are stored in a map in
> sk_storage.
>
> Two helpers, mptcp_subflow_set_stale() and mptcp_subflow_clear_stale(),
> are added.
>
> In this test, subflow 1 is set as stale in bpf_stale_data_init(). In
> bpf_stale_get_subflow(), each subflow is checked for staleness so that an
> unstale subflow is selected to send data.
>
> Signed-off-by: Geliang Tang <geliang.tang@suse.com>
> ---
> tools/testing/selftests/bpf/bpf_tcp_helpers.h |   1 +
> .../selftests/bpf/progs/mptcp_bpf_stale.c     | 161 ++++++++++++++++++
> 2 files changed, 162 insertions(+)
> create mode 100644 tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c
>
> diff --git a/tools/testing/selftests/bpf/bpf_tcp_helpers.h b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
> index 776c54948a4a..0a7c9fca6a07 100644
> --- a/tools/testing/selftests/bpf/bpf_tcp_helpers.h
> +++ b/tools/testing/selftests/bpf/bpf_tcp_helpers.h
> @@ -239,6 +239,7 @@ struct mptcp_subflow_context {
> 	unsigned long avg_pacing_rate;
> 	__u32	backup : 1;
> 	__u8	stale_count;
> +	__u32	subflow_id;
> 	struct	sock *tcp_sock;	    /* tcp sk backpointer */
> } __attribute__((preserve_access_index));
>
> diff --git a/tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c b/tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c
> new file mode 100644
> index 000000000000..995c7448c394
> --- /dev/null
> +++ b/tools/testing/selftests/bpf/progs/mptcp_bpf_stale.c
> @@ -0,0 +1,161 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/* Copyright (c) 2023, SUSE. */
> +
> +#include <linux/bpf.h>
> +#include "bpf_tcp_helpers.h"
> +
> +char _license[] SEC("license") = "GPL";
> +
> +struct mptcp_stale_storage {
> +	__u8 nr;
> +	__u32 ids[MPTCP_SUBFLOWS_MAX];
> +};
> +
> +struct {
> +	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
> +	__uint(map_flags, BPF_F_NO_PREALLOC);
> +	__type(key, int);
> +	__type(value, struct mptcp_stale_storage);
> +} mptcp_stale_map SEC(".maps");
> +
> +static void mptcp_subflow_set_stale(struct mptcp_stale_storage *storage,
> +				    __u32 subflow_id)
> +{
> +	if (!subflow_id)
> +		return;
> +
> +	for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
> +		if (storage->ids[i] == subflow_id)
> +			return;
> +	}
> +
> +	if (storage->nr < MPTCP_SUBFLOWS_MAX - 1)
> +		storage->ids[storage->nr++] = subflow_id;
> +}
> +
> +static void mptcp_subflow_clear_stale(struct mptcp_stale_storage *storage,
> +				      __u32 subflow_id)
> +{
> +	if (!subflow_id)
> +		return;
> +
> +	for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
> +		if (storage->ids[i] == subflow_id) {
> +			for (int j = i; j < MPTCP_SUBFLOWS_MAX - 1; j++) {
> +				if (!storage->ids[j + 1])
> +					break;
> +				storage->ids[j] = storage->ids[j + 1];
> +				storage->ids[j + 1] = 0;
> +			}
> +			storage->nr--;
> +			return;
> +		}
> +	}
> +}
> +
> +static bool mptcp_subflow_is_stale(struct mptcp_stale_storage *storage,
> +				   __u32 subflow_id)
> +{
> +	for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
> +		if (storage->ids[i] == subflow_id)
> +			return true;
> +	}
> +
> +	return false;
> +}
> +
> +static bool mptcp_subflow_is_active(struct mptcp_sched_data *data,
> +				    __u32 subflow_id)
> +{
> +	for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
> +		struct mptcp_subflow_context *subflow;
> +
> +		subflow = mptcp_subflow_ctx_by_pos(data, i);
> +		if (!subflow)
> +			break;
> +		if (subflow->subflow_id == subflow_id)
> +			return true;
> +	}
> +
> +	return false;
> +}
> +
> +SEC("struct_ops/mptcp_sched_stale_init")
> +void BPF_PROG(mptcp_sched_stale_init, struct mptcp_sock *msk)
> +{

It looks like the sk_storage should be initialized here so it can be set 
up once, not every time the scheduler is called.
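
Something like this (untested sketch, reusing the existing map) would
create the per-socket storage once, up front:

    SEC("struct_ops/mptcp_sched_stale_init")
    void BPF_PROG(mptcp_sched_stale_init, struct mptcp_sock *msk)
    {
            /* Create the stale storage when the scheduler is attached,
             * so data_init()/get_subflow() only need to look it up.
             */
            bpf_sk_storage_get(&mptcp_stale_map, msk, 0,
                               BPF_LOCAL_STORAGE_GET_F_CREATE);
    }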

> +}
> +
> +SEC("struct_ops/mptcp_sched_stale_release")
> +void BPF_PROG(mptcp_sched_stale_release, struct mptcp_sock *msk)
> +{

Should this release the sk_storage?
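
Maybe something like this (untested sketch):

    SEC("struct_ops/mptcp_sched_stale_release")
    void BPF_PROG(mptcp_sched_stale_release, struct mptcp_sock *msk)
    {
            /* Drop the per-socket storage so stale ids from this
             * connection are not kept around after release.
             */
            bpf_sk_storage_delete(&mptcp_stale_map, msk);
    }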

> +}
> +
> +void BPF_STRUCT_OPS(bpf_stale_data_init, struct mptcp_sock *msk,
> +		    struct mptcp_sched_data *data)
> +{
> +	struct mptcp_subflow_context *subflow;
> +	struct mptcp_stale_storage *storage;
> +
> +	mptcp_sched_data_set_contexts(msk, data);
> +
> +	storage = bpf_sk_storage_get(&mptcp_stale_map, msk, 0,
> +				     BPF_LOCAL_STORAGE_GET_F_CREATE);
> +	if (!storage)
> +		return;
> +
> +	for (int i = 0; i < storage->nr && i < MPTCP_SUBFLOWS_MAX; i++) {
> +		if (!mptcp_subflow_is_active(data, storage->ids[i]))
> +			mptcp_subflow_clear_stale(storage, storage->ids[i]);
> +	}

Why is this loop here? Is it to handle invalid subflow ids for subflows 
that have been closed?


> +
> +	subflow = mptcp_subflow_ctx_by_pos(data, 0);
> +	if (subflow) {
> +		mptcp_subflow_set_stale(storage, subflow->subflow_id);
> +		mptcp_subflow_clear_stale(storage, subflow->subflow_id);

Why set and then clear?

> +	}
> +
> +	subflow = mptcp_subflow_ctx_by_pos(data, 1);
> +	if (subflow) {
> +		mptcp_subflow_set_stale(storage, subflow->subflow_id);
> +		mptcp_subflow_clear_stale(storage, subflow->subflow_id);
> +		mptcp_subflow_set_stale(storage, subflow->subflow_id);

Same question for set/clear/set

> +	}
> +}
> +
> +int BPF_STRUCT_OPS(bpf_stale_get_subflow, struct mptcp_sock *msk,
> +		   const struct mptcp_sched_data *data)
> +{
> +	struct mptcp_stale_storage *storage;
> +	int nr = -1;
> +
> +	storage = bpf_sk_storage_get(&mptcp_stale_map, msk, 0,
> +				     BPF_LOCAL_STORAGE_GET_F_CREATE);
> +	if (!storage)
> +		return -1;
> +
> +	for (int i = 0; i < data->subflows && i < MPTCP_SUBFLOWS_MAX; i++) {
> +		struct mptcp_subflow_context *subflow;
> +
> +		subflow = mptcp_subflow_ctx_by_pos(data, i);
> +		if (!subflow)
> +			break;
> +
> +		if (mptcp_subflow_is_stale(storage, subflow->subflow_id))
> +			continue;
> +
> +		nr = i;
> +	}
> +
> +	if (nr != -1)
> +		mptcp_subflow_set_scheduled(mptcp_subflow_ctx_by_pos(data, nr), true);
> +	return 0;

I missed this detail on a previous review: if no subflow is scheduled, it 
should return '1' to indicate an error.
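
i.e. something like this (sketch):

    if (nr != -1) {
            mptcp_subflow_set_scheduled(mptcp_subflow_ctx_by_pos(data, nr),
                                        true);
            return 0;
    }
    return 1;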

> +}
> +
> +SEC(".struct_ops")
> +struct mptcp_sched_ops stale = {
> +	.init		= (void *)mptcp_sched_stale_init,
> +	.release	= (void *)mptcp_sched_stale_release,
> +	.data_init	= (void *)bpf_stale_data_init,
> +	.get_subflow	= (void *)bpf_stale_get_subflow,
> +	.name		= "bpf_stale",
> +};
> -- 
> 2.35.3
>
>
>

